blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33c4f63224fc7c43de4b5920f527180a100ed9c8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03712/s144694352.py | 7cb35c72c8fda9d98bf5dfd772936144ca8be536 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | #!/usr/bin/env python3
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools
def main():
    """Read H and W, then H grid rows from stdin, and print the grid
    surrounded by a one-character '#' frame (AtCoder ABC062/ABC069-style task)."""
    height, width = map(int, input().split())
    border = "#" * (width + 2)
    print(border)
    for _ in range(height):
        print("#{}#".format(input()))
    print(border)
def test():
    """Run any doctests embedded in this module's docstrings."""
    import doctest
    doctest.testmod()


if __name__ == '__main__':
    # test()  # swap in to run the doctests instead of the solution
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
355d5d730e52abc11dde2517b53d52a39c7f29e6 | 576b680d1d3ba0f07837e7f28935b0a4632561ad | /pyrepr/repr_json.py | 0a9bb92ce96d1f2c57c17c3e119f1144ad27ba55 | [] | no_license | patarapolw/pyrepr-toml | 4b8639050b118896ef7a8cd99067864e9968fea8 | 187bc1cd2adf2e487712d2d629564f4d2c972dcb | refs/heads/master | 2020-04-02T02:10:32.258042 | 2018-10-20T10:45:16 | 2018-10-20T10:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | import json
from json.encoder import _make_iterencode
from .util import hyper_markdownify
class ReprJSONEncoder(json.JSONEncoder):
    """JSONEncoder variant that routes every string through ``hyper_markdownify``.

    It rebuilds the pure-Python iterencode pipeline so the custom string
    encoder is always used (the stock implementation may switch to the C
    accelerator, which hard-codes its own string escaping).

    NOTE(review): this depends on ``json.encoder._make_iterencode``, a private
    CPython API whose signature may change between versions -- confirm against
    the targeted interpreter.
    """

    def iterencode(self, o, _one_shot=False):
        """Yield JSON text chunks for *o*, markdownifying string values."""
        if self.check_circular:
            markers = {}
        else:
            markers = None
        # Positional arguments mirror json.JSONEncoder.iterencode's call:
        # (markers, default, string-encoder, indent, float-encoder, ...).
        # ``hyper_markdownify`` replaces the string encoder; plain ``str`` is
        # used for floats.
        # NOTE(review): ``str`` does not special-case inf/nan the way the
        # stdlib floatstr helper does -- confirm non-finite floats never occur.
        _iterencode = _make_iterencode(
            markers, self.default, hyper_markdownify, self.indent, str,
            self.key_separator, self.item_separator, self.sort_keys,
            self.skipkeys, _one_shot)
        return _iterencode(o, 0)
| [
"patarapolw@gmail.com"
] | patarapolw@gmail.com |
1006a1042d8562501d612446f32f554d87edcacb | 80217a305516de4c1921833b222f094f0148e9f9 | /backend/task_marker_21840/wsgi.py | 9f4c0dac7059e2f594e0ec93c50fe0ea283ef0a8 | [] | no_license | crowdbotics-apps/task-marker-21840 | ea9bcd56f7450a83ac6818c24d7294795deab092 | 8bcd4803dd4b0b56242f5bb92117230f1746f7e2 | refs/heads/master | 2023-01-02T08:14:51.362346 | 2020-10-22T19:26:21 | 2020-10-22T19:26:21 | 306,438,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
WSGI config for task_marker_21840 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Select this project's settings module unless the environment already
# specifies one, then expose the module-level WSGI callable that servers
# (gunicorn, uwsgi, runserver) look for.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'task_marker_21840.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
823ac15ec7ef31233f51a529dbf71638c8f76b59 | 72b77f97876983025eb05a5aa1d6f248a1be3074 | /binarysearch/ugly_number.py | 731272b3b9501c721d2305ba2085830cacf9bc46 | [
"Apache-2.0"
] | permissive | erjan/coding_exercises | 4c6bccb2cdac65ccbc3107a482914275ecd157f7 | 68dac358a6d4dabd41d47dbd4addb2ec50e0ca11 | refs/heads/master | 2023-09-02T07:25:30.886175 | 2023-08-27T06:13:06 | 2023-08-27T06:13:06 | 236,281,070 | 5 | 0 | Apache-2.0 | 2020-05-05T15:08:49 | 2020-01-26T07:32:09 | Python | UTF-8 | Python | false | false | 342 | py |
'''
Given an integer n, return whether its prime factors only include 2, 3 or 5.
'''
class Solution:
    def solve(self, n):
        """Return True if *n* is an "ugly" number: n > 0 and its prime
        factorization contains only 2, 3 and 5 (1 counts as ugly).

        Non-positive n returns False, matching the original behavior.
        Uses integer floor division (``//=``); the original used ``/=``,
        which converts to float and silently loses precision for large n.
        """
        if n <= 0:
            return False
        for prime in (2, 3, 5):
            while n % prime == 0:
                n //= prime
        return n == 1
| [
"noreply@github.com"
] | erjan.noreply@github.com |
f4d92f33eefb8ade936fbd2367669894919aa93a | a7361705b32e868557dd033aa99d74889c70808c | /braintels_Sockets/servidor_socket.py | cfe9e8e22c5621a4b6635c7baa010d788146892c | [] | no_license | jorgepdsML/PYTHON_NETWORKING | ce6c3e11018b4592a13eaabcc57de6af0a57d39f | 829a7c92634c7ba78a84dbd9fea22cfe8452b371 | refs/heads/master | 2020-12-23T10:15:18.709084 | 2020-02-08T01:12:52 | 2020-02-08T01:12:52 | 237,121,762 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | import socket,pickle,time
# Server address: this machine's loopback interface.
HOST = '127.0.0.1'
PORT = 65432 # port
# Create a TCP socket (IPv4).
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((HOST,PORT))
# Listen for an incoming client connection.
s.listen()
# Block until one client connects.
cliente,direccion=s.accept()
with cliente:
    # Receive at most 1000 bytes from the client.
    dato=cliente.recv(1000)
    if not dato:
        # The client disconnected without sending anything.
        print("DESCONECTADO")
    else:
        print("-------EL CLIENTE NOS DICE ------")
        # Show what the client sent us.
        # NOTE(review): pickle.loads on bytes received from the network is
        # unsafe against untrusted peers (arbitrary code execution) --
        # confirm the client is fully trusted or switch to e.g. JSON.
        print(pickle.loads(dato))
        time.sleep(2)
        # Send a pickled reply back to the client.
        cliente.sendall(pickle.dumps("*** AQUI PUES MASCOTA :v :v *** "))
s.close()
| [
"noreply@github.com"
] | jorgepdsML.noreply@github.com |
2ef6777840283a29dde9cda0fbea7f05a7bc5f59 | 4ee504feeb5388ed70f4ffef2caf851eb5edd299 | /pinax/wiki/views.py | 9c75aa7f693e06ddd39e31db3c3c7a5247217b8f | [
"MIT"
] | permissive | adamfeldman/pinax-wiki | cbf2e1d9cdce853a0b959f037e35c6b923feba31 | 06c65bf00e9cf69493ca2d97cd45d167756f054d | refs/heads/master | 2021-01-12T13:51:16.242419 | 2016-03-15T12:54:33 | 2016-03-15T12:54:33 | 69,196,399 | 0 | 0 | null | 2016-09-25T23:21:40 | 2016-09-25T23:21:40 | null | UTF-8 | Python | false | false | 3,556 | py | import json
from django.http import HttpResponse, Http404, HttpResponseForbidden
from django.shortcuts import redirect, render, get_object_or_404
from django.views import static
from django.views.decorators.http import require_POST
try:
from account.decorators import login_required
except ImportError:
from django.contrib.auth.decorators import login_required
from .conf import settings
from .forms import RevisionForm
from .hooks import hookset
from .models import Page, MediaFile
def index(request, binder, *args, **kwargs):
    """Redirect to the "WikiIndex" page of whatever wiki *binder* resolves
    from the extra URL arguments."""
    target_wiki = binder.lookup(*args, **kwargs)
    destination = binder.page_url(target_wiki, "WikiIndex")
    return redirect(destination)
def page(request, slug, binder, *args, **kwargs):
    """Render the latest revision of the wiki page identified by *slug*.

    The page is looked up in the wiki resolved by *binder*; when the binder
    yields no wiki, a global page lookup is used instead. A missing page
    redirects to the edit view so it can be created; a page the user may not
    view is presented as a 404 rather than revealed.
    """
    wiki = binder.lookup(*args, **kwargs)
    try:
        if wiki:
            page = wiki.pages.get(slug=slug)
        else:
            # No wiki object: fall back to an unscoped page lookup.
            page = Page.objects.get(slug=slug)
        if not hookset.can_view_page(page, request.user):
            # Hide forbidden pages instead of exposing their existence.
            raise Http404()
        rev = page.revisions.latest()
        return render(request, "pinax/wiki/page.html", {"revision": rev, "can_edit": hookset.can_edit_page(page, request.user)})
    except Page.DoesNotExist:
        # Unknown slug: send the user to the editor to create the page.
        return redirect(binder.edit_url(wiki, slug))
@login_required
def edit(request, slug, binder, *args, **kwargs):
    """Create or edit the wiki page identified by *slug*.

    GET renders a RevisionForm pre-filled from the latest revision (empty for
    a new page). POST validates the form, saves a new revision stamped with
    the author and client IP, and redirects back to the page view. Users
    without edit permission get 403 for existing pages and 404 for
    nonexistent ones (so forbidden creation does not reveal anything).
    """
    wiki = binder.lookup(*args, **kwargs)
    try:
        if wiki:
            page = wiki.pages.get(slug=slug)
        else:
            page = Page.objects.get(slug=slug)
        rev = page.revisions.latest()
        if not hookset.can_edit_page(page, request.user):
            return HttpResponseForbidden()
    except Page.DoesNotExist:
        # Page does not exist yet: prepare an unsaved one for creation.
        page = Page(wiki=wiki, slug=slug)
        rev = None
        if not hookset.can_edit_page(page, request.user):
            raise Http404()
    if request.method == "POST":
        form = RevisionForm(request.POST, revision=rev)
        if form.is_valid():
            if page.pk is None:
                # Persist the page first so the revision can reference it.
                page.save()
            revision = form.save(commit=False)
            revision.page = page
            revision.created_by = request.user
            # PINAX_WIKI_IP_ADDRESS_META_FIELD names the request.META key that
            # carries the client IP (e.g. "HTTP_X_FORWARDED_FOR"). Fall back
            # to the value of REMOTE_ADDR when that key is absent. The
            # original passed the literal string "REMOTE_ADDR" as .get()'s
            # default, which stored the text "REMOTE_ADDR" instead of an
            # address whenever the configured header was missing.
            revision.created_ip = request.META.get(
                settings.PINAX_WIKI_IP_ADDRESS_META_FIELD,
                request.META.get("REMOTE_ADDR"),
            )
            revision.parse()
            revision.save()
            return redirect(binder.page_url(wiki, slug))
    else:
        form = RevisionForm(revision=rev)
    return render(request, "pinax/wiki/edit.html", {
        "form": form,
        "page": page,
        "revision": rev,
        "can_delete": hookset.can_delete_page(page, request.user)
    })
def file_download(request, pk, filename):
    """Serve the MediaFile matching *pk* and *filename* (404 when absent).

    With DOCUMENTS_USE_X_ACCEL_REDIRECT enabled the transfer is delegated to
    the front-end web server via an X-Accel-Redirect header; otherwise Django
    streams the file itself from MEDIA_ROOT.
    NOTE(review): the setting uses a DOCUMENTS_ prefix while this app's other
    settings use PINAX_WIKI_* -- confirm which prefix is intended.
    """
    media_file = get_object_or_404(MediaFile, pk=pk, filename=filename)
    if getattr(settings, "DOCUMENTS_USE_X_ACCEL_REDIRECT", False):
        response = HttpResponse()
        response["X-Accel-Redirect"] = media_file.file.url
        # delete content-type to allow Gondor to determine the filetype and
        # we definitely don't want Django's crappy default :-)
        del response["content-type"]
    else:
        # Fallback: let Django's static file view stream the file directly.
        response = static.serve(request, media_file.file.name, document_root=settings.MEDIA_ROOT)
    return response
@require_POST
@login_required
def file_upload(request):
    """Create a MediaFile for every uploaded "files" entry owned by the
    requesting user and return their filenames and download URLs as JSON."""
    uploads = [
        request.user.media_files.create(file=uploaded, filename=uploaded.name)
        for uploaded in request.FILES.getlist("files")
    ]
    payload = {
        "uploads": [
            {"filename": m.filename, "download_url": m.download_url()}
            for m in uploads
        ]
    }
    return HttpResponse(json.dumps(payload), content_type="application/json")
| [
"paltman@gmail.com"
] | paltman@gmail.com |
1eab08e124fb6251930f469f3fffe440a5b49406 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_internal/challenge_auth_policy.py | 3239032e9162ffbc954449b7ec15787c522cae2c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 5,654 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Policy implementing Key Vault's challenge authentication protocol.
Normally the protocol is only used for the client's first service request, upon which:
1. The challenge authentication policy sends a copy of the request, without authorization or content.
2. Key Vault responds 401 with a header (the 'challenge') detailing how the client should authenticate such a request.
3. The policy authenticates according to the challenge and sends the original request with authorization.
The policy caches the challenge and thus knows how to authenticate future requests. However, authentication
requirements can change. For example, a vault may move to a new tenant. In such a case the policy will attempt the
protocol again.
"""
import copy
import time
from azure.core.exceptions import ServiceRequestError
from azure.core.pipeline import PipelineContext, PipelineRequest
from azure.core.pipeline.policies import HTTPPolicy
from azure.core.pipeline.transport import HttpRequest
from .http_challenge import HttpChallenge
from . import http_challenge_cache as ChallengeCache
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Any, Optional
from azure.core.credentials import AccessToken, TokenCredential
from azure.core.pipeline import PipelineResponse
def _enforce_tls(request):
# type: (PipelineRequest) -> None
if not request.http_request.url.lower().startswith("https"):
raise ServiceRequestError(
"Bearer token authentication is not permitted for non-TLS protected (non-https) URLs."
)
def _get_challenge_request(request):
    # type: (PipelineRequest) -> PipelineRequest
    """Return a body-less copy of *request* used to elicit an auth challenge."""
    # The challenge request is intended to provoke an authentication challenge from Key Vault, to learn how the
    # service request should be authenticated. It should be identical to the service request but with no body.
    challenge_request = HttpRequest(
        request.http_request.method, request.http_request.url, headers=request.http_request.headers
    )
    challenge_request.headers["Content-Length"] = "0"
    # Deep-copy the options so mutations during the challenge round-trip
    # cannot leak back into the original request's pipeline context.
    options = copy.deepcopy(request.context.options)
    context = PipelineContext(request.context.transport, **options)
    return PipelineRequest(http_request=challenge_request, context=context)
def _update_challenge(request, challenger):
    # type: (PipelineRequest, PipelineResponse) -> HttpChallenge
    """Parse the WWW-Authenticate challenge from *challenger*, cache it for
    the request's URL, and return it.

    Callers rely on a ValueError escaping from HttpChallenge when the
    response carries no usable challenge header.
    """
    challenge = HttpChallenge(
        request.http_request.url,
        challenger.http_response.headers.get("WWW-Authenticate"),
        response_headers=challenger.http_response.headers,
    )
    ChallengeCache.set_challenge_for_url(request.http_request.url, challenge)
    return challenge
class ChallengeAuthPolicyBase(object):
    """Sans I/O base class holding the cached access token shared by the
    sync and async challenge authentication policies."""

    def __init__(self, **kwargs):
        # No token has been acquired yet.
        self._token = None  # type: Optional[AccessToken]
        super(ChallengeAuthPolicyBase, self).__init__(**kwargs)

    @property
    def _need_new_token(self):
        # type: () -> bool
        """True when no token is cached or it expires within five minutes."""
        if not self._token:
            return True
        seconds_remaining = self._token.expires_on - time.time()
        return seconds_remaining < 300
class ChallengeAuthPolicy(ChallengeAuthPolicyBase, HTTPPolicy):
    """Synchronous pipeline policy implementing Key Vault's challenge
    authentication protocol (see the module docstring): an unauthenticated
    probe provokes a 401 challenge, which tells the policy how to acquire
    and attach a bearer token for the real request."""
    def __init__(self, credential, **kwargs):
        # type: (TokenCredential, **Any) -> None
        self._credential = credential
        super(ChallengeAuthPolicy, self).__init__(**kwargs)
    def send(self, request):
        # type: (PipelineRequest) -> PipelineResponse
        """Authenticate and send *request*, retrying the handshake once on 401."""
        _enforce_tls(request)
        challenge = ChallengeCache.get_challenge_for_url(request.http_request.url)
        if not challenge:
            # First request for this URL: probe with a body-less copy to
            # obtain a challenge before sending any content.
            challenge_request = _get_challenge_request(request)
            challenger = self.next.send(challenge_request)
            try:
                challenge = _update_challenge(request, challenger)
            except ValueError:
                # didn't receive the expected challenge -> nothing more this policy can do
                return challenger
        self._handle_challenge(request, challenge)
        response = self.next.send(request)
        if response.http_response.status_code == 401:
            # any cached token must be invalid
            self._token = None
            # cached challenge could be outdated; maybe this response has a new one?
            try:
                challenge = _update_challenge(request, response)
            except ValueError:
                # 401 with no legible challenge -> nothing more this policy can do
                return response
            self._handle_challenge(request, challenge)
            response = self.next.send(request)
        return response
    def _handle_challenge(self, request, challenge):
        # type: (PipelineRequest, HttpChallenge) -> None
        """authenticate according to challenge, add Authorization header to request"""
        if self._need_new_token:
            # azure-identity credentials require an AADv2 scope but the challenge may specify an AADv1 resource
            scope = challenge.get_scope() or challenge.get_resource() + "/.default"
            self._token = self._credential.get_token(scope)
        # ignore mypy's warning because although self._token is Optional, get_token raises when it fails to get a token
        request.http_request.headers["Authorization"] = "Bearer {}".format(self._token.token) # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
1fd76c565aad097015fd0f3335e3d6a2cce35c2b | 8bb4a472344fda15985ac322d14e8f4ad79c7553 | /Python3-Core/src/test/prompto/translate/oeo/TestBuiltins.py | 6655a6b6fa59d13d6d11fdb80eb356d342ddc1b4 | [] | no_license | prompto/prompto-python3 | c6b356f5af30c6826730ba7f2ad869f341983a2d | 64bd3d97d4702cc912097d41d961f7ab3fd82bee | refs/heads/master | 2022-12-24T12:33:16.251468 | 2022-11-27T17:37:56 | 2022-11-27T17:37:56 | 32,623,633 | 4 | 0 | null | 2019-05-04T11:06:05 | 2015-03-21T07:17:25 | Python | UTF-8 | Python | false | false | 4,521 | py | from prompto.parser.o.BaseOParserTest import BaseOParserTest
class TestBuiltins(BaseOParserTest):
    """Round-trip translation tests for builtin methods (O -> E -> O).

    Each test feeds one ``builtins/*.poc`` resource through
    ``compareResourceOEO`` (provided by BaseOParserTest). Method names must
    stay exactly as-is: the unittest runner discovers them by prefix.
    """
    def setUp(self):
        # Zero-argument super() instead of the original
        # ``super(type(self), self).setUp()``: that form recurses infinitely
        # when a subclass inherits this setUp, because type(self) is then the
        # subclass itself rather than TestBuiltins.
        super().setUp()
    def testDateDayOfMonth(self):
        self.compareResourceOEO("builtins/dateDayOfMonth.poc")
    def testDateDayOfYear(self):
        self.compareResourceOEO("builtins/dateDayOfYear.poc")
    def testDateMonth(self):
        self.compareResourceOEO("builtins/dateMonth.poc")
    def testDateTimeDayOfMonth(self):
        self.compareResourceOEO("builtins/dateTimeDayOfMonth.poc")
    def testDateTimeDayOfYear(self):
        self.compareResourceOEO("builtins/dateTimeDayOfYear.poc")
    def testDateTimeHour(self):
        self.compareResourceOEO("builtins/dateTimeHour.poc")
    def testDateTimeMinute(self):
        self.compareResourceOEO("builtins/dateTimeMinute.poc")
    def testDateTimeMonth(self):
        self.compareResourceOEO("builtins/dateTimeMonth.poc")
    def testDateTimeSecond(self):
        self.compareResourceOEO("builtins/dateTimeSecond.poc")
    def testDateTimeTZName(self):
        self.compareResourceOEO("builtins/dateTimeTZName.poc")
    def testDateTimeTZOffset(self):
        self.compareResourceOEO("builtins/dateTimeTZOffset.poc")
    def testDateTimeYear(self):
        self.compareResourceOEO("builtins/dateTimeYear.poc")
    def testDateYear(self):
        self.compareResourceOEO("builtins/dateYear.poc")
    def testDictCount(self):
        self.compareResourceOEO("builtins/dictCount.poc")
    def testDictSwap(self):
        self.compareResourceOEO("builtins/dictSwap.poc")
    def testDocumentCount(self):
        self.compareResourceOEO("builtins/documentCount.poc")
    def testEnumName(self):
        self.compareResourceOEO("builtins/enumName.poc")
    def testEnumSymbols(self):
        self.compareResourceOEO("builtins/enumSymbols.poc")
    def testEnumValue(self):
        self.compareResourceOEO("builtins/enumValue.poc")
    def testIntegerFormat(self):
        self.compareResourceOEO("builtins/integerFormat.poc")
    def testListCount(self):
        self.compareResourceOEO("builtins/listCount.poc")
    def testListIndexOf(self):
        self.compareResourceOEO("builtins/listIndexOf.poc")
    def testListJoin(self):
        self.compareResourceOEO("builtins/listJoin.poc")
    def testPeriodDays(self):
        self.compareResourceOEO("builtins/periodDays.poc")
    def testPeriodHours(self):
        self.compareResourceOEO("builtins/periodHours.poc")
    def testPeriodMillis(self):
        self.compareResourceOEO("builtins/periodMillis.poc")
    def testPeriodMinutes(self):
        self.compareResourceOEO("builtins/periodMinutes.poc")
    def testPeriodMonths(self):
        self.compareResourceOEO("builtins/periodMonths.poc")
    def testPeriodSeconds(self):
        self.compareResourceOEO("builtins/periodSeconds.poc")
    def testPeriodWeeks(self):
        self.compareResourceOEO("builtins/periodWeeks.poc")
    def testPeriodYears(self):
        self.compareResourceOEO("builtins/periodYears.poc")
    def testSetCount(self):
        self.compareResourceOEO("builtins/setCount.poc")
    def testSetJoin(self):
        self.compareResourceOEO("builtins/setJoin.poc")
    def testTextCapitalize(self):
        self.compareResourceOEO("builtins/textCapitalize.poc")
    def testTextCount(self):
        self.compareResourceOEO("builtins/textCount.poc")
    def testTextIndexOf(self):
        self.compareResourceOEO("builtins/textIndexOf.poc")
    def testTextLowercase(self):
        self.compareResourceOEO("builtins/textLowercase.poc")
    def testTextReplace(self):
        self.compareResourceOEO("builtins/textReplace.poc")
    def testTextReplaceAll(self):
        self.compareResourceOEO("builtins/textReplaceAll.poc")
    def testTextSplit(self):
        self.compareResourceOEO("builtins/textSplit.poc")
    def testTextTrim(self):
        self.compareResourceOEO("builtins/textTrim.poc")
    def testTextUppercase(self):
        self.compareResourceOEO("builtins/textUppercase.poc")
    def testTimeHour(self):
        self.compareResourceOEO("builtins/timeHour.poc")
    def testTimeMinute(self):
        self.compareResourceOEO("builtins/timeMinute.poc")
    def testTimeSecond(self):
        self.compareResourceOEO("builtins/timeSecond.poc")
    def testTupleCount(self):
        self.compareResourceOEO("builtins/tupleCount.poc")
    def testTupleJoin(self):
        self.compareResourceOEO("builtins/tupleJoin.poc")
| [
"eric.vergnaud@wanadoo.fr"
] | eric.vergnaud@wanadoo.fr |
b976280add3c7f8d108a2e2b62579a8e3baea2df | 38b8bceafb4d80afc7c77196eb9ee99694191bcf | /wxpython/grid4.py | 75c71ea65b3375dda4e507cea2e9ead47a246b12 | [] | no_license | tangc1986/PythonStudy | f6c5b384874e82fbf0b5f51cfb7a7a89a48ec0ff | 1ed1956758e971647426e7096ac2e8cbcca585b4 | refs/heads/master | 2021-01-23T20:39:23.930754 | 2017-10-08T07:40:32 | 2017-10-08T07:42:38 | 42,122,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | # -*- coding: UTF-8 -*-
__author__ = 'tangchao'
import wx
import wx.grid
class TestFrame(wx.Frame):
    """A 600x300 frame demonstrating wx.grid.Grid cell/row/column sizing."""

    def __init__(self):
        wx.Frame.__init__(self, None, title="Grid Sizes", size=(600, 300))
        cell_grid = wx.grid.Grid(self)
        cell_grid.CreateGrid(5, 5)
        # Label every cell with its own coordinates.
        for r in range(5):
            for c in range(5):
                cell_grid.SetCellValue(r, c, "(%s, %s)" % (r, c))
        # Make the cell at (2, 2) span 2 rows and 3 columns, and enlarge
        # column 1 and row 1.
        cell_grid.SetCellSize(2, 2, 2, 3)
        cell_grid.SetColSize(1, 125)
        cell_grid.SetRowSize(1, 100)
# Build the application object and main frame, then show the window.
# NOTE(review): wx.PySimpleApp is deprecated in modern wxPython (wx.App is
# the usual replacement) -- confirm the installed wxPython version.
app = wx.PySimpleApp()
frame = TestFrame()
frame.Show()
app.MainLoop() | [
"tangc1986@gmail.com"
] | tangc1986@gmail.com |
ab5a264ea0ecf8ebb13f29fe5e851642a4423712 | f4fea67c838444a53492a9193c9f42dcf6ae6bb6 | /AsFarAsFromLand.py | 13f5337ce3a385360b651f26de8599d3aac5e30b | [] | no_license | sainihimanshu1999/Graph-Solutions | 7641919173c1f878bb807bf5709250f30b9d7804 | 1889ebf7c5b04a945b23efbd14f80c191d4fbd4a | refs/heads/main | 2023-04-26T15:15:26.819831 | 2021-05-30T05:17:27 | 2021-05-30T05:17:27 | 370,977,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | '''
Matrix is given and we have to find the maximum distance between land and water
'''
from typing import Deque
def maximumDist(self,grid):
m,n = len(grid),len(grid[0])
q = Deque([(i,j) for i in range(m) for j in range(n)])
if len(q) == m*n and len(q) == 0:
return -1
level = 0
while q:
for _ in range(len(q)):
i,j = q.popleft()
for x,y in [(1,0),(-1,0),(0,1),(0,-1)]:
xi,yj = x+i,y+j
if 0<=xi<m and 0<=yj<n and grid[i][j] == 0:
q.append((xi,yj))
grid[xi][yj] = 1
level += 1
return level - 1 | [
"sainihimanshu.1999@gmail.com"
] | sainihimanshu.1999@gmail.com |
ff45cfdca02b8b29eaeeb906efdef59558db6bb6 | 1a114943c92a5db40034470ff31a79bcf8ddfc37 | /python-2/primer/11/Gui.py | e2e2e5d3f1adbfc9a4720b17ec9f89ce7fa8d8dc | [] | no_license | renwl/mylinux | 1924918599efd6766c266231d66b2a7ed6f6cdd1 | 0602fc6d2b0d254a8503e57310f848fc3e1a73b4 | refs/heads/master | 2020-07-10T22:12:03.259349 | 2017-01-02T12:32:04 | 2017-01-02T12:32:04 | 66,467,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py |
from functools import partial
import tkinter
root=tkinter.Tk()
MyButton = partial(tkinter.Button,root,
fg="white",bg="blue")
b1= MyButton(text="button 1")
b2=MyButton(text="button 2")
#qb=MyButton(text="QUIT",bg="red",
# command=root.quit)
b1.pack()
b2.pack()
#qb.pack(file=Tkinter.X,expand= True)
root.title("PFAs!")
root.mainloop()
| [
"wenliang.ren@quanray.com"
] | wenliang.ren@quanray.com |
f46aa1e56e52a41483bda536769f43fee0100aa9 | d144df1ba39aef6d740bfbb438c8f7aaff195cc9 | /distance.py | 62d1b600a0f3e44ddea373755e1a45939964934a | [] | no_license | weizhixiaoyi/text-similarity | 42018aa1a4f21f213eed7e7baa8bc2390c2db945 | aa971a1810f95e4f12a88eefec12bc7269b06cb6 | refs/heads/master | 2021-10-16T16:12:00.876963 | 2019-02-12T03:36:37 | 2019-02-12T03:36:37 | 170,244,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | py | import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
def edit_distance(str1, str2):
    """Levenshtein edit distance between *str1* and *str2*.

    Returns the distance as a float, except for the empty-string shortcuts
    which return the other string's length as an int (preserved from the
    original implementation). Uses a rolling-row DP, O(min-row) memory.
    """
    m, n = len(str1), len(str2)
    if m == 0: return n
    if n == 0: return m
    previous = list(range(n + 1))
    for i in range(1, m + 1):
        current = [i] + [0] * n
        for j in range(1, n + 1):
            if str1[i - 1] == str2[j - 1]:
                current[j] = previous[j - 1]
            else:
                # min of substitute, delete, insert -- each costs 1.
                current[j] = min(previous[j - 1], previous[j], current[j - 1]) + 1
        previous = current
    return float(previous[n])
def euclidean_distance(str1, str2):
    """Euclidean (L2) distance between the character-count vectors of the
    two strings.

    Each string is split into single characters, vectorized with a shared
    CountVectorizer vocabulary, and the L2 norm of the difference vector is
    returned as a float.

    NOTE(review): np.mat is deprecated in modern NumPy (np.asarray/np.matrix
    replacement) -- confirm the pinned NumPy version. CountVectorizer also
    lowercases input by default, so distances are case-insensitive.
    """
    # Space-separate the characters so the whitespace tokenizer yields
    # one token per character.
    s1, s2 = ' '.join(list(str1)), ' '.join(list(str2))
    cv = CountVectorizer(tokenizer=lambda s: s.split())
    corpus = [s1, s2]
    vectors = cv.fit_transform(corpus).toarray()
    vector1 = np.mat(vectors[0])
    vector2 = np.mat(vectors[1])
    # Matrix product (d * d.T) yields a 1x1 matrix holding sum of squares.
    dis = np.sqrt((vector1 - vector2) * ((vector1 - vector2).T))
    return float(dis)
def manhattan_distance(str1, str2):
    """Manhattan (L1) distance between the character-count vectors of the
    two strings: the characters are tokenized one-by-one, vectorized with a
    shared CountVectorizer vocabulary, and the absolute differences summed."""
    # Space-separate characters so the whitespace tokenizer emits one token
    # per character.
    corpus = [' '.join(list(str1)), ' '.join(list(str2))]
    vectorizer = CountVectorizer(tokenizer=lambda text: text.split())
    counts = vectorizer.fit_transform(corpus).toarray()
    v1 = np.mat(counts[0])
    v2 = np.mat(counts[1])
    total = np.sum(np.abs(v1 - v2))
    return float(total)
def jaro_distance(str1, str2):
    """Jaro similarity of *str1* and *str2* in [0, 1] (1.0 = identical).

    Characters count as matching when equal and within a window of
    (len(longer) // 2) - 1 positions; the score averages the two match
    ratios and the non-transposed fraction of matches.

    NOTE(review): unlike the canonical algorithm, characters of the longer
    string are never marked as consumed, so one character can be matched by
    several positions of the shorter string; and ``longMatch.insert(j, ...)``
    indexes by position in the longer string rather than match order. Both
    make the transposition count approximate -- verify against a reference
    implementation before relying on exact values.
    """
    # Orient the pair so we always scan the shorter string against the longer.
    if len(str1) > len(str2):
        longStr = str1
        shortStr = str2
    else:
        longStr = str2
        shortStr = str1
    allowRange = (len(longStr) // 2) - 1
    mappingIndices = [-1] * len(shortStr)
    longMatch, shortMatch = [], []
    matches = 0
    for i in range(0, len(shortStr)):
        # Search only within the allowed window around position i.
        for j in range(max(0, i - allowRange), min(len(longStr), i + allowRange + 1)):
            if shortStr[i] == longStr[j]:
                matches = matches + 1
                mappingIndices[i] = j
                shortMatch.append(shortStr[i])
                longMatch.insert(j, shortStr[i])
                break
    # Count half-transpositions: matches that landed out of position.
    halfTransPosition = 0
    for i in range(0, len(shortMatch)):
        if (mappingIndices[i] != i) and (shortMatch[i] != longMatch[i]):
            halfTransPosition += 1
    dis = 0
    if matches != 0:
        dis = ((matches / len(longStr)) + (matches / len(shortStr)) +
               ((matches - (halfTransPosition // 2)) / matches)) / 3
    return float(dis)
return float(dis)
def jaro_winkler_distance(str1, str2):
    """Jaro-Winkler similarity: the Jaro score boosted by up to four
    characters of common prefix when the Jaro score exceeds 0.7.

    Fixes an IndexError in the original prefix scan, which indexed both
    strings at positions 0..3 unconditionally and crashed whenever either
    argument was shorter than four characters.
    """
    jaro = jaro_distance(str1, str2)
    # Length of the common prefix, capped at 4 and bounded by the shorter
    # string (zip stops at the shorter of the two 4-char slices).
    prefix = 0
    for ch1, ch2 in zip(str1[:4], str2[:4]):
        if ch1 != ch2:
            break
        prefix += 1
    if jaro > 0.7:
        # Winkler boost: move 0.1 per prefix character of the remaining
        # distance toward 1.0.
        return float(jaro + (prefix * 0.1) * (1 - jaro))
    return float(jaro)
if __name__ == '__main__':
str1 = '你妈妈喊你回家吃饭哦,回家罗回家罗'
str2 = '你妈妈叫你回家吃饭啦,回家罗回家罗'
ans = jaro_winkler_distance(str1, str2)
print(ans)
| [
"zhenhai.gl@gmail.com"
] | zhenhai.gl@gmail.com |
0d4679b918a22de8da38164caf9e8789811ac752 | 3279cea18d23dad027fa5b64c4170926df98f2f4 | /bakerydemo/settings/base.py | af39a915e588b4aadb5b9fe0d5c6651caeb650ee | [
"LicenseRef-scancode-public-domain"
] | permissive | torchbox/dit_directory_cms_poc | 1ed53649f59640eb2d4d9372397a1e8a71f2e44e | 87c54dd35364c66c6f70481148ee2386c784f410 | refs/heads/master | 2020-04-27T07:36:48.299201 | 2019-04-17T16:43:08 | 2019-04-17T16:43:08 | 174,141,444 | 2 | 0 | null | 2019-03-18T12:37:56 | 2019-03-06T12:31:35 | Python | UTF-8 | Python | false | false | 4,639 | py | """
Django settings for temp project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(PROJECT_DIR, ...)
# PROJECT_DIR is the Django project package; BASE_DIR is its parent
# (the repository root).
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = "c6u0-9c!7nilj_ysatsda0(f@e_2mws2f!6m0n^o*4#*q#kzp)"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    "bakerydemo.base",
    "bakerydemo.blog",
    "bakerydemo.breads",
    "bakerydemo.locations",
    "bakerydemo.search",
    "richtext_poc",
    "wagtail.contrib.search_promotions",
    "wagtail.contrib.forms",
    "wagtail.contrib.redirects",
    "wagtail.embeds",
    "wagtail.sites",
    "wagtail.users",
    "wagtail.snippets",
    "wagtail.documents",
    "wagtail.images",
    "wagtail.search",
    "wagtail.admin",
    "wagtail.api.v2",
    "wagtail.contrib.modeladmin",
    "wagtail.contrib.routable_page",
    "wagtail.core",
    "rest_framework",
    "modelcluster",
    "taggit",
    "wagtailfontawesome",
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sitemaps",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "wagtail.core.middleware.SiteMiddleware",
    "wagtail.contrib.redirects.middleware.RedirectMiddleware",
]
ROOT_URLCONF = "bakerydemo.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": ["bakerydemo/templates"],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ]
        },
    }
]
WSGI_APPLICATION = "bakerydemo.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# SQLite file in the repository root -- development only.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "bakerydemodb"),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_FINDERS = [
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATICFILES_DIRS = [os.path.join(PROJECT_DIR, "static")]
STATIC_ROOT = os.path.join(PROJECT_DIR, "collect_static")
STATIC_URL = "/static/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, "media")
MEDIA_URL = "/media/"
# Override in local settings or replace with your own key. Please don't use our demo key in production!
GOOGLE_MAP_API_KEY = "AIzaSyD31CT9P9KxvNUJOwDq2kcFEIG8ADgaFgw"
# Search backend configuration.
# NOTE(review): the original comment advertised Elasticsearch, but the
# configured BACKEND is the database backend -- confirm which is intended.
WAGTAILSEARCH_BACKENDS = {
    "default": {"BACKEND": "wagtail.search.backends.db", "INDEX": "bakerydemo"}
}
# Wagtail settings
WAGTAIL_SITE_NAME = "bakerydemo"
| [
"thibaudcolas@gmail.com"
] | thibaudcolas@gmail.com |
947f76df7c719a493f1c1c0a49e64fc6938efde5 | 176899f934288b6e742146282974cf1de4bf2441 | /scripts/rename.py | 97e74f41310c0d66471402ae44d11d288b7bfcec | [
"Apache-2.0"
] | permissive | PatrickEGorman/scrapi | 565e9c7bf2ff5e1fddbab8f691a287be9e5165d0 | 6bb5a4952a34c7507681e206457c78730c28c2e8 | refs/heads/develop | 2020-12-06T20:36:58.590067 | 2015-05-29T17:02:10 | 2015-05-29T17:02:10 | 36,506,821 | 1 | 0 | null | 2015-05-29T13:45:22 | 2015-05-29T13:45:21 | null | UTF-8 | Python | false | false | 1,630 | py | import logging
from scripts.util import documents
from scrapi import settings
from scrapi.linter import RawDocument
from scrapi.processing.elasticsearch import es
from scrapi.tasks import normalize, process_normalized, process_raw
logger = logging.getLogger(__name__)
def rename(source, target, dry=True):
assert source != target, "Can't rename {} to {}, names are the same".format(source, target)
count = 0
exceptions = []
for doc in documents(source):
count += 1
try:
raw = RawDocument({
'doc': doc.doc,
'docID': doc.docID,
'source': target,
'filetype': doc.filetype,
'timestamps': doc.timestamps,
'versions': doc.versions
})
if not dry:
process_raw(raw)
process_normalized(normalize(raw, raw['source']), raw)
logger.info('Processed document from {} with id {}'.format(source, raw['docID']))
except Exception as e:
logger.exception(e)
exceptions.append(e)
else:
if not dry:
es.delete(index=settings.ELASTIC_INDEX, doc_type=source, id=raw['docID'], ignore=[404])
es.delete(index='share_v1', doc_type=source, id=raw['docID'], ignore=[404])
logger.info('Deleted document from {} with id {}'.format(source, raw['docID']))
if dry:
logger.info('Dry run complete')
for ex in exceptions:
logger.exception(e)
logger.info('{} documents processed, with {} exceptions'.format(count, len(exceptions)))
| [
"fabian@fabianism.us"
] | fabian@fabianism.us |
a32ecc9f82723eaf976ef6d4146540243a1679b8 | cda0bf30efa432eaa22278befd0b145801b53027 | /tests/unit/core/parse/test_rollseries.py | a2a9d336206d9413b3f4f879df2cf2e4d8d82e18 | [] | no_license | pastly/craps-dice-control | e863aaa1bee104ddd403ce45a7409a48fc7e9c65 | f6f69c9220dffd5f7e2ef07c929b15b4a73bdd13 | refs/heads/master | 2020-04-26T15:44:18.346970 | 2019-10-24T01:00:04 | 2019-10-24T01:00:04 | 173,655,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,816 | py | import io
import json
import pytest
from cdc.core.parse import rollseries as rs
from cdc.lib.rollevent import RollEvent
def assert_unreached(msg=None):
if msg:
assert False, msg
else:
assert False, "Unreachable code path was reached"
def event_gen_from_str(s, starting_point=None):
fd = io.StringIO(s)
pair_gen = rs.roll_series_stream_to_dice_pairs(fd)
event_gen = rs.dice_pairs_gen_to_events(
pair_gen, starting_point=starting_point)
return event_gen
def assert_dice_event(event, type_, dice, args):
assert event.type == type_
assert event.dice == dice
assert event.value == sum(dice)
assert event.args == args
def test_simple_roll():
event_gen = event_gen_from_str("11", starting_point=4)
evs = list(event_gen)
assert len(evs) == 1
assert_dice_event(evs[0], 'roll', (1, 1), {})
def test_simple_natural():
event_gen = event_gen_from_str("3456")
evs = list(event_gen)
assert len(evs) == 2
for ev, dice in [(evs[0], (3, 4)), (evs[1], (5, 6))]:
assert_dice_event(ev, 'natural', dice, {})
def test_simple_craps():
event_gen = event_gen_from_str("111266")
evs = list(event_gen)
assert len(evs) == 3
for ev, dice in [(evs[0], (1, 1)), (evs[1], (1, 2)), (evs[2], (6, 6))]:
assert_dice_event(ev, 'craps', dice, {})
def test_simple_point_established():
event_gen = event_gen_from_str("44")
ev = list(event_gen)[0]
assert_dice_event(ev, 'point', (4, 4), {
'is_established': True,
'is_won': False,
'is_lost': False,
'point_value': 8,
})
def test_simple_point_won():
event_gen = event_gen_from_str("4426")
ev = list(event_gen)[1]
assert_dice_event(ev, 'point', (2, 6), {
'is_established': False,
'is_won': True,
'is_lost': False,
'point_value': 8,
})
def test_simple_point_lost():
event_gen = event_gen_from_str("4416")
ev = list(event_gen)[1]
assert_dice_event(ev, 'point', (1, 6), {
'is_established': False,
'is_won': False,
'is_lost': True,
'point_value': 8,
})
def test_simple_comment_1():
event_gen = event_gen_from_str("1#1\n2")
ev = list(event_gen)[0]
assert_dice_event(ev, 'craps', (1, 2), {})
def test_simple_comment_2():
event_gen = event_gen_from_str("1# 1\n2")
ev = list(event_gen)[0]
assert_dice_event(ev, 'craps', (1, 2), {})
def test_simple_comment_3():
event_gen = event_gen_from_str("1 # \n2")
ev = list(event_gen)[0]
assert_dice_event(ev, 'craps', (1, 2), {})
def test_simple_empty_line():
event_gen = event_gen_from_str("\n\n1\n2")
ev = list(event_gen)[0]
assert_dice_event(ev, 'craps', (1, 2), {})
def test_simple_bad_die_1():
event_gen = event_gen_from_str("71")
with pytest.raises(ValueError) as ex_info:
list(event_gen)[0]
assert 'ImpossibleDieValueError' in str(ex_info)
def test_simple_bad_die_2():
event_gen = event_gen_from_str(".1")
with pytest.raises(ValueError) as ex_info:
list(event_gen)[0]
assert 'ImpossibleDieValueError' in str(ex_info)
def test_simple_no_events_1():
event_gen = event_gen_from_str("")
assert not len(list(event_gen))
def test_simple_no_events_2():
event_gen = event_gen_from_str(" # ")
assert not len(list(event_gen))
def test_simple_no_events_3():
event_gen = event_gen_from_str(" \n # 12 ")
assert not len(list(event_gen))
def test_odd_num_dice():
event_gen = event_gen_from_str("666")
try:
list(event_gen)
except rs.IncompleteRollSeriesError:
pass
else:
assert_unreached()
def test_impossible_die_value_1():
event_gen = event_gen_from_str("7")
try:
list(event_gen)
except rs.ImpossibleDieValueError:
pass
else:
assert_unreached()
def test_impossible_die_value_2():
event_gen = event_gen_from_str("0")
try:
list(event_gen)
except rs.ImpossibleDieValueError:
pass
else:
assert_unreached()
def test_impossible_die_value_nonint():
event_gen = event_gen_from_str("q")
try:
list(event_gen)
except rs.ImpossibleDieValueError:
pass
else:
assert_unreached()
def test_stream_identity():
# An event string with all types of events, and all possible flags set on
# point events
ev_str = "11343366242616"
out_fd = io.StringIO()
expected_events = [_ for _ in event_gen_from_str(ev_str)]
rs.do_stream(out_fd, event_gen_from_str(ev_str))
actual_events = []
for line in filter(None, out_fd.getvalue().split('\n')):
actual_events.append(RollEvent.from_dict(json.loads(line)))
for a, b in zip(expected_events, actual_events):
assert a == b
| [
"sirmatt@ksu.edu"
] | sirmatt@ksu.edu |
b26068fd15185f4dda1e070c55faa0ecf66f918b | f662bd04d2f29ef25bbfd7e768b1e57dfbba4d9f | /apps/plmejoras/migrations/0009_auto_20190319_1051.py | 1d2aae719f33271b26db21ea1dd3a842c8776db7 | [] | no_license | DARKDEYMON/sisevadoc | f59b193688f7eca7c140a03ee414f5d20ada78c7 | 9fc0943200986824a2aab2134fdba5c9f3315798 | refs/heads/master | 2020-03-19T03:27:07.907125 | 2019-12-11T13:30:43 | 2019-12-11T13:30:43 | 135,729,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | # Generated by Django 2.0.8 on 2019-03-19 14:51
import apps.plmejoras.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plmejoras', '0008_auto_20190319_1038'),
]
operations = [
migrations.AlterField(
model_name='plan_mejoras',
name='fecha_termino_2',
field=models.DateField(blank=True, null=True, validators=[apps.plmejoras.models.validate_fecha_minima], verbose_name='2.- Fecha limite de cumplimiento a la debilidad: ¿El docente presenta el Plan de asignatura a los estudiantes al inicio de la actividad académica?.(AD-EE-ED)'),
),
]
| [
"darkdeymon04@gmail.com"
] | darkdeymon04@gmail.com |
9dff2c0207a76825d306d6ccde94cbdddb6c3046 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03110/s120799390.py | a85ae752db2895890a6eabb42cd65e8035579610 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | C = 380000.0
ans = 0.0
n = int(input())
for i in range(n):
x, u = input().split()
if u == 'JPY':
ans += int(x)
else:
ans += float(x) * C
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b13da1978e5f3532f9ba976ca7ef9fb82ee3fbd7 | 0f07107b016d2aee64788966b9f0d322ac46b998 | /moya/contextenum.py | 0f14ecc4d22ab2904fcde4c81bb3667ff64ab8a2 | [
"MIT"
] | permissive | fkztw/moya | 35f48cdc5d5723b04c671947099b0b1af1c7cc7a | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | refs/heads/master | 2023-08-09T09:20:21.968908 | 2019-02-03T18:18:54 | 2019-02-03T18:18:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,288 | py | from __future__ import unicode_literals
from __future__ import print_function
from .compat import implements_to_string, string_types, text_type
@implements_to_string
class ContextEnumValue(object):
"""A single value in an enumeration"""
def __init__(self, enum, enum_id, name, description, group=None):
self.enum = enum
self.id = enum_id
self.name = name
self.description = description
self.group = group
def __repr__(self):
return "<enumvalue {}.{} ({})>".format(self.enum.name, self.name, self.id)
def __hash__(self):
return hash((self.id, self.name))
def __int__(self):
return self.id
def __str__(self):
return self.name
def __moyadbobject__(self):
return self.id
def __moyaconsole__(self, console):
console(
"<enumvalue '{}.{}' ({}))>".format(self.enum.name, self.name, self.id),
bold=True,
fg="magenta",
).nl()
def __eq__(self, other):
# Other enum values only compare if they are the same type
if isinstance(other, ContextEnumValue):
return self.enum == other.enum and self.id == other.id
if isinstance(other, string_types):
return self.name == other
try:
return self.id == int(other)
except ValueError:
pass
return False
class ContextEnum(object):
def __init__(self, name, start=1):
self.name = name
self._values = []
self._label_map = {}
self._id_map = {}
self._last_id = start - 1
def __repr__(self):
return '<enum "{}">'.format(self.name)
def __moyaconsole__(self, console):
console.text(repr(self), fg="green", bold=True)
table = []
for value in sorted(self._values, key=int):
table.append([value.name, value.id, value.description or ""])
console.table(table, header_row=("name", "id", "description"))
def __eq__(self, other):
if isinstance(other, ContextEnum):
return self.name == other.name
return False
def add_value(self, name, enum_id=None, description=None, group=None):
if enum_id is None:
enum_id = self._last_id + 1
value = ContextEnumValue(self, enum_id, name, description, group=group)
self._values.append(value)
self._label_map[value.name] = value
self._id_map[value.id] = value
self._last_id = enum_id
return value
def __getitem__(self, key):
enum_value = None
if isinstance(key, string_types):
enum_value = self._label_map[key]
else:
try:
enum_id = int(key)
except:
pass
else:
enum_value = self._id_map[enum_id]
if enum_value is None:
raise KeyError("no enum value {!r} in {!r}".format(key, self))
return enum_value
def __contains__(self, key):
try:
self[key]
except:
return False
else:
return True
def __iter__(self):
return iter(self._values[:])
@property
def choices(self):
return [(e.name, e.description or e.name) for e in self]
@property
def intchoices(self):
return [(e.id, e.description or e.name) for e in self]
def keys(self):
return [int(value) for value in self._values] + [
text_type(value) for value in self._values
]
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
if __name__ == "__main__":
enum = ContextEnum("moya.admin#enum.hobbits")
enum.add_value("bilbo", description="Bilbo Baggins")
enum.add_value("sam", description="Sam")
enum.add_value("isembard", description="Isembard Took")
from moya.console import Console
console = Console()
console.obj(context, enum)
e = enum["sam"]
console.obj(context, e)
print(e)
print(int(e))
print(text_type(e))
print(enum.values())
print(list(enum))
print(e == 2)
print(e == "sam")
print(e == "bilbo")
print(e == 3)
print(list(enum))
| [
"willmcgugan@gmail.com"
] | willmcgugan@gmail.com |
88aace3339a55db26e14395d480d4661d88f9640 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/containerregistry/azure-containerregistry/tests/asynctestcase.py | 58cee32899c2a5290b294f38c920311a56ffe5ef | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 1,649 | py | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.containerregistry.aio import (
# ContainerRepository,
ContainerRegistryClient,
)
from azure.core.credentials import AccessToken
from azure.identity.aio import DefaultAzureCredential
from testcase import ContainerRegistryTestClass
class AsyncFakeTokenCredential(object):
"""Protocol for classes able to provide OAuth tokens.
:param str scopes: Lets you specify the type of access needed.
"""
def __init__(self):
self.token = AccessToken("YOU SHALL NOT PASS", 0)
async def get_token(self, *args):
return self.token
class AsyncContainerRegistryTestClass(ContainerRegistryTestClass):
def __init__(self, method_name):
super(AsyncContainerRegistryTestClass, self).__init__(method_name)
def get_credential(self):
if self.is_live:
return DefaultAzureCredential()
return AsyncFakeTokenCredential()
def create_registry_client(self, endpoint, **kwargs):
return ContainerRegistryClient(
endpoint=endpoint,
credential=self.get_credential(),
**kwargs,
)
def create_container_repository(self, endpoint, name, **kwargs):
return ContainerRepository(
endpoint=endpoint,
name=name,
credential=self.get_credential(),
**kwargs,
)
def create_anon_client(self, endpoint, **kwargs):
return ContainerRegistryClient(endpoint=endpoint, credential=None, **kwargs)
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
52f14de23e662ba3cdfb7f3070411e54602e9706 | defbefa93deb77311ff5f589372a5fafef05d7a3 | /app/simulations/run_single_sim.py | 4729aa545ff64c7580e72ba514a6c5a6ad5e2701 | [] | no_license | mccarvik/poker | 3eeb75cedee2962e33006853c76a852b83cb2ffa | 766be6582cb3b1743995555aa8239c1980178a46 | refs/heads/master | 2021-01-25T04:15:33.306903 | 2017-10-24T02:22:56 | 2017-10-24T02:22:56 | 93,417,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | import sys, json, pdb
sys.path.append("/home/ubuntu/workspace/poker")
from app.deck_utils.card import Card
from app.deck_utils.deck import Deck
from app.deck_utils.hand_rules import HandRules
from app.deck_utils.deck_funcs import getCombinations
from app.deck_utils.stats import Single_Stats
def run_simulation(hand, board):
''' Will simulate the outcome for a given hand'''
hand = [Card(h[0],h[1]) for h in hand]
board = [Card(h[0],h[1]) for h in board]
deck = Deck()
deck.removeCards(hand + board)
hand_combs = getCombinations(deck, 7-len(hand+board))
stats = Single_Stats()
for hc in hand_combs:
# print([hc[0]] + hand + board)
hr = HandRules(list(hc) + hand + board)
stats.addOutCome(hr._result)
# print(stats.returnStats())
stats.printStats()
if __name__ == "__main__":
hand = [('Q','h'), ('K', 'h')]
# board = [('4','s'), ('3', 's'), ('2', 's'), ('5', 's')]
board = [('4','s'), ('3', 's')]
run_simulation(hand, board) | [
"mccarviks@gmail.com"
] | mccarviks@gmail.com |
d7c6a4d01b1fff2af18d2e4f3c5c3004f8f30c38 | d067ba62a1956e1abb571368b02e9158c04966b6 | /xmnlp/sentiment/sentiment.py | 3f3a2b8ce576dc8f09fa27eb16e03e5cb09b1969 | [
"MIT"
] | permissive | wheniseeyou/xmnlp | 29c5ea38ffcb91fa99821788f90f1afc8d8c30bd | ad2d3c0b8875cf415c3adffc10926605da7a458b | refs/heads/master | 2020-04-12T15:17:40.957168 | 2018-09-17T08:19:09 | 2018-09-17T08:19:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,627 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# -------------------------------------------#
# author: sean lee #
# email: xmlee97@gmail.com #
#--------------------------------------------#
"""MIT License
Copyright (c) 2018 Sean
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import sys
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf8')
import io
import os
from ..module import Module
from ..utils import safe_input
from ..postag import seg
from math import log, exp
from collections import defaultdict
class NBayes(Module):
__notsave__ = []
__onlysave__ = ['counter', 'corpus', 'total']
def __init__(self):
self.corpus = {}
self.counter = {}
self.total = 0
def process_data(self, data):
for d in data:
label = d[0]
doc = d[1]
if label not in self.corpus:
self.corpus[label] = defaultdict(int)
self.counter[label] = 0
for word in doc:
self.counter[label] += 1
self.corpus[label][word] += 1
self.total = sum(self.counter.values())
def calc_score(self, sent):
tmp = {}
for k in self.corpus:
tmp[k] = log(self.counter[k]) - log(self.total)
for word in sent:
x = float(self.corpus[k].get(word, 1)) / self.counter[k]
tmp[k] += log(x)
ret, prob = 0, 0
for k in self.corpus:
curr = 0
try:
for kk in self.corpus:
curr += exp(tmp[kk] - tmp[k])
curr = 1.0 / curr
except OverflowError:
curr = 0.0
if curr > prob:
ret, prob = k, curr
return (ret, prob)
class Sentiment(NBayes):
def filter_stopword(self, words, stopword=[]):
if len(stopword) == 0:
return words
ret = []
for word in words:
if word not in stopword:
ret.append(word)
return ret
def load_data(self, posfname, negfname):
def get_file(path):
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
if len(dirs) == 0:
for f in files:
yield os.sep.join([root, f])
else:
yield path
pos_docs = []
neg_docs = []
for fname in get_file(posfname):
with io.open(fname, 'r', encoding='utf-8') as f:
for line in f:
line = safe_input(line)
pos_docs.append(seg(line))
for fname in get_file(negfname):
with io.open(fname, 'r', encoding='utf-8') as f:
for line in f:
line = safe_input(line)
neg_docs.append(seg(line))
return pos_docs, neg_docs
def train(self, posfname, negfname, stopword=[]):
pos_docs, neg_docs = self.load_data(posfname, negfname)
data = []
for sent in neg_docs:
data.append(('neg', self.filter_stopword(sent, stopword=stopword)))
for sent in pos_docs:
data.append(('pos', self.filter_stopword(sent, stopword=stopword)))
self.process_data(data)
def predict(self, doc, stopword=[]):
sent = seg(doc)
ret, prob = self.calc_score(self.filter_stopword(sent, stopword=stopword))
if ret == 'pos':
return prob
return 1 - prob | [
"mitree@sina.com"
] | mitree@sina.com |
061cf308d22ba1f4f327b5a4a9ae3122f55e8d55 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04044/s186322243.py | 1a114e7a5f53247105fcd6cba05f286158a77b09 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | def compare_strings(s1, s2):
if s1 < s2:
return True
return False
n, l = map(int, input().split())
strings = [input() for i in range(n)]
print(''.join(sorted(strings)))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4135f9e1b309b08a79e2a7c5fa6da2848b3d26d1 | 18b3ad3b0e1f7f10969738251e1201d01dfbc6bf | /backup_files/pythonsessions/insert_cmd_data_db.py | 3e87aafa8f2be8768caed764e5532208e37bea80 | [] | no_license | sahthi/backup2 | 11d509b980e731c73733b1399a8143780779e75a | 16bed38f0867fd7c766c2a008c8d43b0660f0cb0 | refs/heads/master | 2020-03-21T12:39:56.890129 | 2018-07-09T08:12:46 | 2018-07-09T08:12:46 | 138,565,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | import MySQLdb, subprocess, re, sys, time
def get_date():
return time.ctime()
def get_ut_fa():
op = subprocess.Popen(['uptime'], stdout=subprocess.PIPE)
#print op.communicate()
u_out, u_err = op.communicate()
if u_err:
print "Error in command execution"
sys.exit(100)
#17:48:12 up 31 min, 1 user, load average: 0.13, 0.19, 0.25
mat = re.search(r"up\s(.+?),.+load\saverage:\s(.+?),", u_out)
utime = mat.group(1)
fm_avg = mat.group(2)
return utime, fm_avg
def insert_to_db():
curr_date = get_date()
utime, fm_avg = get_ut_fa()
conn = MySQLdb.connect(user="root", passwd="root", db="students")
c = conn.cursor()
c.execute('insert into system_info (utime, fma) values(%s, %s)', (utime, fm_avg))
conn.commit()
conn.close()
if __name__ == "__main__":
insert_to_db()
| [
"siddamsetty.sahithi@votarytech.com"
] | siddamsetty.sahithi@votarytech.com |
3256bc9d3399f7c5f9d6ac83a1eed907f6c9ba53 | fc0683e4b9b92b02f2bac73b5a1f9e9e6dac28bf | /Aula22 – Módulos e Pacotes/ex110 – Reduzindo ainda mais seu programa/teste.py | 618b116202551588fab65daa0b7e5c834a21a5a4 | [] | no_license | igorkoury/cev-phyton-exercicios-parte-2 | d31ab83f6f8fbe07ac31974e1a01c338280e6c6c | 1471d8d0c9a98194c3f36e562b68731e95b22882 | refs/heads/main | 2023-08-06T08:46:38.970229 | 2021-10-07T21:28:03 | 2021-10-07T21:28:03 | 414,762,200 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | '''Exercício Python 110: Adicione o módulo moeda.py criado nos
desafios anteriores, uma função chamada resumo(), que mostre
na tela algumas informações geradas pelas funções que já temos
no módulo criado até aqui.'''
import moeda
p = float(input('Digite o preço do produto: '))
moeda.resumo(p, 15, 20)
| [
"noreply@github.com"
] | igorkoury.noreply@github.com |
fa690dcd18e07099417f6e7ae4a66721e71f518e | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /scraper/storage_spiders/giavuvn.py | 7b3674fc44e8c1d0fd830d4f106dfeb8301a5bfa | [
"MIT"
] | permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1",
'price' : "//div[@id='dpro']/div[@class='pdl']/b",
'category' : "//div[@class='dlink']/a",
'description' : "//div[@id='dpro']/div[@id='pdt1']/div[2]",
'images' : "//a[@class='imgdt']/img[@class='m']/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'giavu.vn'
allowed_domains = ['giavu.vn']
start_urls = ['http://giavu.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/\d+/\d+/[a-zA-Z0-9-]+\.html$']), 'parse_item'),
Rule(LinkExtractor(allow=['[^\d]/\d+/[a-zA-Z0-9-]+(/page-\d+)?\.html$']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"nguyenchungthuy.hust@gmail.com"
] | nguyenchungthuy.hust@gmail.com |
37d544cc17f03c6d834b31fecd5185366d176571 | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/ashvin/vae/fixed3/sawyer_pusher/vae_dense_wider2.py | 337be50432ea280d5a02a5728475f17aac45b9f0 | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,643 | py | from rlkit.envs.mujoco.sawyer_push_and_reach_env import \
SawyerPushAndReachXYEnv, SawyerPushAndReachXYEasyEnv
from rlkit.envs.mujoco.sawyer_push_env import SawyerPushXYEnv
from rlkit.envs.multitask.point2d import MultitaskImagePoint2DEnv
from rlkit.envs.multitask.pusher2d import FullPusher2DEnv
from rlkit.images.camera import sawyer_init_camera, \
sawyer_init_camera_zoomed_in
from rlkit.launchers.arglauncher import run_variants
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.vae.relabeled_vae_experiment import experiment
if __name__ == "__main__":
vae_paths = {
"4": "ashvin/vae/fixed3/sawyer-pusher/train-vae-wider/run5/id0/itr_480.pkl",
"16": "ashvin/vae/fixed3/sawyer-pusher/train-vae-wider/run5/id1/itr_480.pkl",
}
variant = dict(
algo_kwargs=dict(
num_epochs=205,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
tau=1e-2,
batch_size=128,
max_path_length=100,
discount=0.99,
# qf_learning_rate=1e-3,
# policy_learning_rate=1e-4,
),
env_kwargs=dict(
hide_goal=True,
# reward_info=dict(
# type="shaped",
# ),
),
replay_kwargs=dict(
fraction_goals_are_rollout_goals=0.2,
fraction_goals_are_env_goals=0.5,
),
vae_wrapped_env_kwargs=dict(
sample_from_true_prior=False,
),
algorithm='HER-TD3',
normalize=False,
rdim=4,
render=False,
env=SawyerPushAndReachXYEasyEnv,
use_env_goals=True,
vae_paths=vae_paths,
wrap_mujoco_env=True,
do_state_based_exp=False,
exploration_noise=0.1,
init_camera=sawyer_init_camera_zoomed_in,
)
n_seeds = 3
search_space = {
'exploration_type': [
'ou',
],
'algo_kwargs.num_updates_per_env_step': [1, 4],
'replay_kwargs.fraction_goals_are_env_goals': [0.0, 0.5, ],
'replay_kwargs.fraction_goals_are_rollout_goals': [0.2, ],
'vae_wrapped_env_kwargs.sample_from_true_prior': [False],
'exploration_noise': [0.2, 0.5],
'algo_kwargs.reward_scale': [1e-4],
'training_mode': ['train'],
'testing_mode': ['test', ],
'rdim': [4],
'seedid': range(n_seeds),
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
run_variants(experiment, sweeper.iterate_hyperparameters(), run_id=1)
| [
"alexanderkhazatsky@gmail.com"
] | alexanderkhazatsky@gmail.com |
84750e0763448e6efd803ff5e0ca28ddb74b0283 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03156/s145919432.py | a3259a2a3c984937831664e6a8145ee9347c7bb2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | n=int(input())
a,b=map(int,input().split())
p = [int(x) for x in input().split()]
A=0
B=0
C=0
for i in range(n):
if p[i] <= a:
A += 1
elif p[i] > a and b >= p[i]:
B += 1
else:
C += 1
print(min(A, B, C)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
06f9d8d71242fcd67622b93693247ed498383e02 | 4d107a97633559963f6510767bb9297febbcbb02 | /applications/MeshingApplication/tests/SmallTests.py | b2e4d1dcd414f04b10b8fc8807739ca9b65b027c | [] | no_license | asroy/Kratos | 45dc4a9ad77a2b203ab2e0c6c5fe030633433181 | e89d6808670d4d645319c7678da548b37825abe3 | refs/heads/master | 2021-03-24T13:28:43.618915 | 2017-12-19T15:38:20 | 2017-12-19T15:38:20 | 102,793,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,836 | py | import os
# Import Kratos
from KratosMultiphysics import *
# Import KratosUnittest
import KratosMultiphysics.KratosUnittest as KratosUnittest
import Kratos_Execute_Meshing_Test as Execute_Test
# This utiltiy will control the execution scope in case we need to acces files or we depend
# on specific relative locations of the files.
# TODO: Should we move this to KratosUnittest?
class controlledExecutionScope:
def __init__(self, scope):
self.currentPath = os.getcwd()
self.scope = scope
def __enter__(self):
os.chdir(self.scope)
def __exit__(self, type, value, traceback):
os.chdir(self.currentPath)
class MeshingTestFactory(KratosUnittest.TestCase):
def setUp(self):
# Within this location context:
with controlledExecutionScope(os.path.dirname(os.path.realpath(__file__))):
# Initialize GiD I/O
parameter_file = open(self.file_name + "_parameters.json", 'r')
ProjectParameters = Parameters(parameter_file.read())
# Creating the model part
self.test = Execute_Test.Kratos_Execute_Test(ProjectParameters)
def test_execution(self):
# Within this location context:
with controlledExecutionScope(os.path.dirname(os.path.realpath(__file__))):
self.test.Solve()
def tearDown(self):
pass
class TwoDDynamicBeamTest(MeshingTestFactory):
file_name = "mmg_lagrangian_test/beam2D_test"
class TwoDDynamicBeamLineLoadTest(MeshingTestFactory):
file_name = "mmg_lagrangian_test/beam2D_line_load_test"
class ThreeDDynamicBeamTest(MeshingTestFactory):
file_name = "mmg_lagrangian_test/beam3D_test"
class TwoDDynamicPlasticBeamTest(MeshingTestFactory):
file_name = "mmg_lagrangian_test/beam2D_internal_variables_interpolation_test"
| [
"vmataix@cimne.upc.edu"
] | vmataix@cimne.upc.edu |
6a5f28ab4b6e958f2922790f0f74673bed384643 | 90c5c9df3d0639e1f8420b592a77df752ab79746 | /tests/migrations/0003_auto_20191220_0911.py | 52222805c9f0df3dc9d219975835dd1fb27cd2e2 | [
"BSD-3-Clause"
] | permissive | intellineers/django-bridger | 4a16e5e63c697671740d965c3fcab02a89b8b27f | ed097984a99df7da40a4d01bd00c56e3c6083056 | refs/heads/master | 2023-08-14T05:41:38.003086 | 2021-09-06T16:51:56 | 2021-09-06T16:51:56 | 221,709,929 | 2 | 1 | BSD-3-Clause | 2023-07-22T21:39:45 | 2019-11-14T13:57:09 | Python | UTF-8 | Python | false | false | 543 | py | # Generated by Django 2.2.9 on 2019-12-20 09:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("tests", "0002_relatedmodeltest"),
]
operations = [
migrations.AddField(
model_name="modeltest", name="float_field", field=models.FloatField(default=1), preserve_default=False,
),
migrations.AddField(
model_name="modeltest", name="percent_field", field=models.FloatField(default=1), preserve_default=False,
),
]
| [
"c.wittlinger@intellineers.com"
] | c.wittlinger@intellineers.com |
9847a45cacd0a7625f93aa8595f9afa5b8a5c16d | cc622e69e45db2e3e3172fcf8598ba7d2b64d5d8 | /taxcalc/_version.py | 56b977ec9aec1b043dce330de18274a2afde9280 | [
"MIT"
] | permissive | SherwinLott/Tax-Calculator | 533c7e381ab5e56bccc03821fe3a4a30a6e6f753 | b5276854031225c004eb795ad5b85f76fe94172d | refs/heads/master | 2020-04-01T23:23:16.691655 | 2015-04-17T14:43:05 | 2015-04-17T14:43:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,420 | py |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.12 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = ""
parentdir_prefix = "taxcalc-"
versionfile_source = "taxcalc/_version.py"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first launchable executable in *commands* with *args*.

    Each candidate name is tried in order; names that do not exist
    (ENOENT) are skipped.  Returns the stripped stdout text on success,
    or None when no candidate could be started or the process exited
    with a non-zero status.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            stderr_target = subprocess.PIPE if hide_stderr else None
            process = subprocess.Popen([candidate] + args, cwd=cwd,
                                       stdout=subprocess.PIPE,
                                       stderr=stderr_target)
        except EnvironmentError:
            # sys.exc_info keeps this compatible with very old Pythons.
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                continue  # candidate not installed; try the next name
            if verbose:
                print("unable to run %s" % args[0])
                print(err)
            return None
        break
    else:
        # Loop finished without a successful Popen: nothing launchable.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    output = process.communicate()[0].strip()
    if sys.version >= '3':
        output = output.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return output
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    """Infer the version from the name of the unpacked source directory.

    Source tarballs conventionally unpack into "<project>-<version>"
    directories; when the basename of *root* starts with
    *parentdir_prefix*, the remainder is taken as the version.  Returns
    None when the directory name does not match.
    """
    leaf = os.path.basename(root)
    if leaf.startswith(parentdir_prefix):
        return {"version": leaf[len(parentdir_prefix):], "full": ""}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
              (root, leaf, parentdir_prefix))
    return None
def git_get_keywords(versionfile_abs):
    """Scrape the git-archive keyword values out of a _version.py file.

    setup.py cannot import _version.py directly, so the assignments to
    ``git_refnames`` and ``git_full`` are extracted with a regexp.
    Returns a dict with up to the keys "refnames" and "full"; missing or
    unreadable files yield an empty dict.
    """
    keywords = {}
    targets = (("refnames", "git_refnames ="), ("full", "git_full ="))
    try:
        fobj = open(versionfile_abs, "r")
        for line in fobj.readlines():
            for key, assignment in targets:
                if line.strip().startswith(assignment):
                    match = re.search(r'=\s*"(.*)"', line)
                    if match:
                        keywords[key] = match.group(1)
        fobj.close()
    except EnvironmentError:
        # Treat a missing/unreadable file the same as one with no keywords.
        pass
    return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Turn expanded git-archive keywords into version information.

    *keywords* holds "refnames" (the %d decoration string) and "full"
    (the %H commit hash).  Returns {} when the keywords are missing or
    still unexpanded; otherwise a {"version": ..., "full": ...} dict,
    preferring the smallest sorted tag that matches *tag_prefix* and
    falling back to the full revision id.
    """
    if not keywords:
        return {}  # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = set(name.strip() for name in refnames.strip("()").split(","))
    # Since git-1.8.3 tags are decorated as "tag: foo-1.0"; prefer those.
    tag_marker = "tag: "
    tags = set(name[len(tag_marker):] for name in refs
               if name.startswith(tag_marker))
    if not tags:
        # Either git < 1.8.3 or there really are no tags.  Heuristic:
        # version tags contain a digit, which filters out branch names
        # like "master", "release" and "HEAD".
        tags = set(name for name in refs if re.search(r'\d', name))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    full_revision = keywords["full"].strip()
    # Sorting prefers e.g. "2.0" over "2.0rc1".
    for candidate in sorted(tags):
        if candidate.startswith(tag_prefix):
            if verbose:
                print("picking %s" % candidate[len(tag_prefix):])
            return {"version": candidate[len(tag_prefix):],
                    "full": full_revision}
    # No suitable tag: fall back to the bare revision id.
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": full_revision, "full": full_revision}
def git_versions_from_vcs(tag_prefix, root, verbose=False):
    """Ask git itself for version info from a checked-out source tree.

    Only reached when the git-archive keywords were *not* expanded and
    _version.py has not been rewritten with a short version string.
    Returns {} when *root* is not a git checkout or git cannot be run.
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False on Windows needs the concrete launcher names.
        GITS = ["git.cmd", "git.exe"]
    # "describe --tags --dirty --always": nearest tag, a -dirty suffix for
    # uncommitted changes, or the bare short hash when no tag exists.
    stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
                         cwd=root)
    if stdout is None:
        return {}
    if not stdout.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
        return {}
    tag = stdout[len(tag_prefix):]
    stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if stdout is None:
        return {}
    full = stdout.strip()
    # Propagate the dirty marker from the describe output onto the hash.
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
    """Compute version info, trying keywords, then git, then the dirname.

    NOTE(review): the mutable default dict is shared across calls; it is
    returned as-is on the fallback path, so a caller that mutates the
    result would poison later calls -- confirm callers treat it as
    read-only.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    keywords = { "refnames": git_refnames, "full": git_full }
    ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
    if ver:
        return ver
    try:
        root = os.path.abspath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in range(len(versionfile_source.split(os.sep))):
            root = os.path.dirname(root)
    except NameError:
        # No __file__ (frozen interpreter): keywords were the only option.
        return default
    return (git_versions_from_vcs(tag_prefix, root, verbose)
            or versions_from_parentdir(parentdir_prefix, root, verbose)
            or default)
| [
"tj.alumbaugh@continuum.io"
] | tj.alumbaugh@continuum.io |
370daefbe01246a1903960925230387d185d47cd | 47a6f4e5f92413fda00ea31a2ac3894749dd76d5 | /cvx/op/postprocessing.py | aff84211b0f16e12cfd22d4370114c5fd9c5a2fb | [] | no_license | jtuyls/cvx | 3e569e73cc3a77226014c2921c2ad752a340931e | ede40c895a48103312aabdecc4ffbc8f97c711c8 | refs/heads/master | 2020-08-05T05:51:07.297147 | 2020-03-17T18:17:19 | 2020-03-17T18:17:19 | 212,419,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | """
CV postprocessing operations
Authors: Jorn Tuyls
"""
import cv2
import numpy as np
from .tools import softmax
def central_crop(height, width, channels):
    """Build a function that extracts the centered region of an HWC image.

    Each size may be an int or a numeric string; all three are coerced to
    int once, up front.  The returned callable takes an image laid out as
    (height, width, channels) and returns its central crop of exactly
    that shape, raising ValueError when any requested size exceeds the
    image's extent along the corresponding axis.
    """
    crop_h, crop_w, crop_c = int(height), int(width), int(channels)

    def _central_crop(img):
        # !! img should be in HWC layout
        img_h, img_w, img_c = img.shape
        checks = ((crop_h, img_h, "height", "height"),
                  (crop_w, img_w, "width", "width"),
                  (crop_c, img_c, "channels value", "channels"))
        for wanted, available, crop_name, img_name in checks:
            if wanted > available:
                raise ValueError("Provided crop %s is larger than provided"
                                 " image %s." % (crop_name, img_name))
        # Center the crop window along every axis.
        off_h = (img_h - crop_h) // 2
        off_w = (img_w - crop_w) // 2
        off_c = (img_c - crop_c) // 2
        return img[off_h:off_h + crop_h,
                   off_w:off_w + crop_w,
                   off_c:off_c + crop_c]

    return _central_crop
"jornt@xilinx.com"
] | jornt@xilinx.com |
22774504cce4883aefe3fdae4ef4056acda15052 | b1d7cf329110f02b8175303ebd09475136e84b0e | /enderecos/migrations/0001_initial.py | 10bdeace7f9208ad618fce5bb3bc078303660d8a | [] | no_license | Aleleonel/projeto_rest | 9df70817f9955399afb75b02121aa9500c9492d1 | a72b4e3b17c22efdbd8001f843c21aa24e9e9ae6 | refs/heads/master | 2022-05-24T10:56:10.818898 | 2020-04-30T15:49:33 | 2020-04-30T15:49:33 | 260,252,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | # Generated by Django 3.0.5 on 2020-04-30 14:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django 3.0.5 migration: creates the Endereco
    # (address) table with optional second line and lat/long columns.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Endereco',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('linha1', models.CharField(max_length=150)),
                # linha2 and the coordinates are optional (blank/null).
                ('linha2', models.CharField(blank=True, max_length=150, null=True)),
                ('cidade', models.CharField(max_length=100)),
                ('estado', models.CharField(max_length=50)),
                ('pais', models.CharField(max_length=70)),
                # NOTE(review): IntegerField for latitude/longitude loses
                # fractional degrees -- presumably intentional here, verify.
                ('latitude', models.IntegerField(blank=True, null=True)),
                ('longitude', models.IntegerField(blank=True, null=True)),
            ],
        ),
    ]
| [
"aleleonel@gmail.com"
] | aleleonel@gmail.com |
1bc4f38343078af5d25b0a62599c3eece7efd669 | c08b96db4551a3cedbc091b9b19f668e8e58e53e | /tests/test_tasks_publishnb.py | 58e30fb2afe980157a20f04a8cc14c4c0ab3d33d | [
"MIT"
] | permissive | lsst-sqre/sqre-uservice-nbreport | efa1163cc58f7388742d0acfbf14a28150a2da59 | e5911ab1a1f2dfae46cdae0337138cbac786872b | refs/heads/master | 2020-03-23T23:12:05.391843 | 2018-08-21T22:20:11 | 2018-08-21T22:20:11 | 142,221,500 | 1 | 0 | null | 2018-08-21T22:27:53 | 2018-07-24T23:07:15 | Python | UTF-8 | Python | false | false | 1,089 | py | """Tests for the `uservice_nbreport.tasks.publishnb` module.
"""
import responses
from uservice_nbreport.tasks.publishnb import get_edition_url
@responses.activate
def test_get_edition_url():
    """get_edition_url should return the edition whose slug equals instance_id."""
    # Mock the LTD Keeper API: one product edition listing plus the two
    # edition resources it links to, registered in request order.
    mocked_endpoints = (
        ('https://keeper.lsst.codes/products/testr-000/editions/',
         {'editions': ['https://keeper.lsst.codes/editions/119',
                       'https://keeper.lsst.codes/editions/120']}),
        ('https://keeper.lsst.codes/editions/120', {'slug': 'test'}),
        ('https://keeper.lsst.codes/editions/119', {'slug': '1'}),
    )
    for url, payload in mocked_endpoints:
        responses.add(responses.GET, url, status=200, json=payload)

    edition_url = get_edition_url(
        keeper_url='https://keeper.lsst.codes',
        ltd_token='testtoken',
        ltd_product='testr-000',
        instance_id='1')
    # Edition 119 has slug '1', matching instance_id.
    assert edition_url == 'https://keeper.lsst.codes/editions/119'
| [
"jsick@lsst.org"
] | jsick@lsst.org |
ef3af1aa439186701c7df404f7ab023da5a62fae | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/sort/c8249785-8c09-49ae-a506-d5303e3f9b3c__sort_words.py | 995c6f3081573379333cfb90301cf89e7624a03d | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # import re
# def sort_words(s):
# results = re.findall("[\w;]+", s)
# return "\n".join(map(str, sorted(results)))
# print sort_words(" one, ,two three,4,")
# ############## This works as well
def sort_words(s):
    """Print the comma/whitespace separated tokens of *s*, one per line, sorted.

    Commas act as separators in addition to whitespace, and empty tokens
    are dropped.  Output goes to stdout; the function returns None.
    """
    # Replace commas with newlines, then let str.split() discard all
    # whitespace-only fragments before sorting lexicographically.
    for token in sorted("\n".join(s.split(',')).split()):
        # BUG FIX: the original used the Python-2-only statement form
        # "print token", a SyntaxError under Python 3.  The call form
        # print(token) behaves identically on both Python 2 and 3 for a
        # single argument.
        print(token)
print sort_words(" one, ,two three,4,") | [
"erickmiller@gmail.com"
] | erickmiller@gmail.com |
63d28708c0e3a847179936dab12755af642dbfbe | 1f71d796efcddf51a46cf74f59584f76d56c664e | /venv/Scripts/easy_install-3.7-script.py | e6b080fdbdd6095b4de6bd52dc13c8cc3e704ce3 | [] | no_license | vunited/flask_studentManagement | 12021c7811af2cf95f04fcf635dd62bac0a5b5fa | 9ae15d0e9fd6d4b9111d4f3b3b90d52b4db8ab7a | refs/heads/master | 2020-11-29T11:26:47.109519 | 2019-12-27T14:03:19 | 2019-12-27T14:03:19 | 230,102,430 | 2 | 0 | null | 2019-12-27T14:07:08 | 2019-12-25T12:51:04 | Python | UTF-8 | Python | false | false | 474 | py | #!C:\Users\Administrator\Desktop\flask_studentManagement\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".pyw"/".exe" suffix that Windows launcher
    # wrappers append, so argv[0] matches the console-script name that
    # pkg_resources registered.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Look up the 'easy_install-3.7' console entry point of the pinned
    # setuptools distribution and exit with its return value.
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"you@example.com"
] | you@example.com |
9c7f3644b9c1ba9169318bd1f02e1f2aa12186d7 | ab8a34e5b821dde7b09abe37c838de046846484e | /twilio/sample-code-master/preview/sync/document_permission/delete-default/delete-default.6.x.py | dbe85db59955ddeaee70650345a046325e275ae2 | [] | no_license | sekharfly/twilio | 492b599fff62618437c87e05a6c201d6de94527a | a2847e4c79f9fbf5c53f25c8224deb11048fe94b | refs/heads/master | 2020-03-29T08:39:00.079997 | 2018-09-21T07:20:24 | 2018-09-21T07:20:24 | 149,721,431 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | # Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
# Placeholder credentials -- replace with the real values from the
# Twilio console before running.
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
# Delete the 'identity' permission on one Sync document (placeholder
# service/document SIDs); performs a live DELETE against the Twilio
# Preview Sync API.
client.preview.sync.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
                   .documents('ETXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
                   .document_permissions('identity') \
                   .delete()
| [
"sekharfly@gmail.com"
] | sekharfly@gmail.com |
79dc80302a4a44c8de34cbaed417dd4234182c32 | 0115a30d4d26932dfde5752b8533d886f182ebfa | /research/plot_data_heatmap.py | 477258764852cf477503227bf68d23351b27e03e | [] | no_license | mattbellis/Siena_College_Physics_2012_2013_Cosmology | 4d8c8282cc875a4b89fe470db7b0d77122262451 | fd05a64e0280cf3b1e7bd13f23eaee0bbe11c132 | refs/heads/master | 2020-04-06T06:38:48.367779 | 2013-10-02T05:53:31 | 2013-10-02T05:53:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,378 | py | ################################################################################
# This is a very fast way of reading in a text file, when
# you know how the data is formatted, e.g. how many columns
# there are.
#
# Depending on the size of the file, this still may take some time (~5-20 sec),
# but is still faster than other traditional ways of reading in files.
#
# The trade-off is that this method works best when you have a good amount of
# memory (RAM) available.
################################################################################
import numpy as np
# Pyplot is module for plotting in matplotlib library.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# We need to give the full path to the directory. This will obviously be
# different on your machine, so you will want to edit this by hand.
#infile = open('/Users/Chris/Desktop/M_Bellis Research/astro_data/wechsler_gals_1M.cat')
# NOTE(review): Python 2 script (print statements below); the hardcoded
# catalog path must exist on the machine running it.
infile = open('/home/bellis/Work/Astronomy/catalogs/Wechsler/wechsler_gals.cat')
# This command will take the entire file, split it into different values using
# whitespace (tab,space,end-of-line), and iterpret the entries as floats
# (as opposed to strings, characters, or integers).
content = np.array(infile.read().split()).astype('float')
# Now we have this *huge* array. We want to pull out the values we want.
# In this case, we know that the columns are RA, Dec, and z.
# First, how big is this array.
nentries = len(content)
# Next, how many galaxies are in this file?
ncolumns = 3
# Python-2 integer division: ngals is an int here.
ngals = nentries/ncolumns
print "# galaxies: %d" % (ngals)
# Now we just need to make an array that has the index of each value we
# want to extract.
index = np.arange(0,nentries,ncolumns)
# So for three columns, this index array looks like
# [0,3,6,9,12,...,nentries-2]
# We can use this now to pull out the columns we want!
ra = content[index]
dec = content[index+1]
z = content[index+2]
# Let's make sure these arrays at least have the same size.
print "\nNumber of entries in coordinate arrays"
print "# ra coords: %d" % (len(ra))
print "# dec coords: %d" % (len(dec))
print "# z coords: %d" % (len(z))
# And just for the heck of it, we can dump the first 5 entries of each array.
print "\nFirst five entries in arrays."
print ra[0:5]
print dec[0:5]
print z[0:5]
print "\n"
# Choose 10k random pts from 1M range.
# NOTE(review): despite the comment, the slice below keeps 100000 (100k)
# points, and shuffle(range(...)) relies on Python-2 range returning a
# list -- confirm the intended sample size.
index = range(100000)
np.random.shuffle(index)
index=index[0:100000]
radius = z[index].copy()
# Convert RA/Dec from degrees to radians for the spherical-to-cartesian
# transform below.
theta = np.deg2rad(ra[index])
phi = np.deg2rad(dec[index])
#radius = z.copy()
#theta = np.deg2rad(ra)
#phi = np.deg2rad(dec)
# Does this free up memory for us?
#del ra
#del dec
#del z
x = radius*np.cos(theta)*np.cos(phi)
y = radius*np.sin(theta)*np.cos(phi)
# NOTE: z is rebound here from the redshift array to the cartesian
# z-coordinate.
z = radius*np.sin(phi)
# Plotting RA vs. Dec as a log-scaled 2D heat map.
#
# BUG FIX: the original called ax.xlabel()/ax.ylabel() after plt.show();
# matplotlib Axes objects have no such methods (they are set_xlabel /
# set_ylabel), so the script crashed with AttributeError before ever
# reaching savefig().  The title/labels are now applied through the
# pyplot interface to the axes that actually hold the heat map (the
# plt.clf() below discards the axes created here).
fig = plt.figure()
#ax = plt.subplot(111,polar=True)
#ax = fig.add_axes([0.1, -0.75, 0.8, 1.6], projection='polar')
ax = fig.add_axes([0.1, -0.75, 0.8, 1.6])
# Heat map: bin the cartesian positions, then log-scale the counts so
# sparse regions remain visible (empty bins become -inf and render as
# the colormap minimum).
heatmap, xedges, yedges = np.histogram2d(x, y, bins=200)
#heatmap, xedges, yedges = np.histogram2d(x, y, bins=100)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
heatmap = np.log(heatmap)
plt.clf()
plt.imshow(heatmap,extent=extent,cmap=plt.cm.winter)
# Label the current (heat-map) axes before displaying and saving.
plt.title('RA v. Dec for slices of Z')
plt.xlabel('Right Ascension')
plt.ylabel('Declination')
# Draw plot
plt.show()
# Save plot file
fig.savefig('Ra_v_Dec_100k.png')
| [
"matthew.bellis@gmail.com"
] | matthew.bellis@gmail.com |
f0d5850e364accfc24295ae3d9d98a0046bde1b2 | d3ce58c4576431df14de0990f45cfd574f0aa45f | /.history/riskCalculator/forms_20201020003540.py | 1ff165ad8f4a597786110d5dd3dd387a85489954 | [] | no_license | rahulsolankib/portfolio | fe93f0e6b0b28990f0b9fad84dbf7c3aa07243c4 | 281ed429e2590376aee4649b2ea7b3e8facaf6f1 | refs/heads/master | 2023-01-02T06:55:21.319094 | 2020-10-26T08:55:22 | 2020-10-26T08:55:22 | 305,586,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | from django import forms
from .models import Que,RiskModel
class QForm(forms.Form):
    """Risk-profile questionnaire.

    Each choice value is the number of points the answer contributes to
    the user's risk score.
    """
    # BUG FIX: the original assigned a second tuple to the same
    # ``age_group`` name, rebinding it.  As a result ques1 ("Which age
    # group do you belong?") displayed the capital-horizon choices, and
    # its initial value of 4 was not even among the valid choice values
    # (3, 2, 1).  The two choice sets now have distinct names.
    age_group = (
        (4, 'Less than 25 years'),
        (3, '25-35 years'),
        (2, '36-40 years'),
        (1, '51 above'),
    )
    capital_horizon = (
        (3, 'more than 5 years'),
        (2, '2-5 years'),
        (1, 'less than 2 years'),
    )
    ques1 = forms.TypedChoiceField(label='Which age group do you belong?', choices=age_group, coerce=int, initial=4)
    ques2 = forms.TypedChoiceField(label='When do you think you need your capital?', choices=capital_horizon, coerce=int, initial=1)
    ques3 = forms.CharField()
    ques4 = forms.CharField()
    ques5 = forms.CharField()
class QueForm(forms.ModelForm):
    # ModelForm that persists the five questionnaire answers on a Que row.
    class Meta:
        model = Que
        fields = ['ques1','ques2','ques3','ques4','ques5']
class RiskForm(forms.ModelForm):
    # ModelForm that stores a user's computed risk score on RiskModel.
    class Meta:
        model = RiskModel
        fields = ['userid','risk_score']
"rahulsolankib@gmail.com"
] | rahulsolankib@gmail.com |
673a4ecf7c8354ffe8868fcaabe971c1ab0b0bed | 36c5770217c104bea5cc1e5e43a9faa803daccec | /2021/Day_10/test_day10.py | 509a257a78a2eee6ec1a3644be31a90002e9419e | [] | no_license | sco1/adventofcode | 3a2ac0905c04e5a42d409d27e71dc7c5b3cf33a4 | cb029bb825f35944f505f8c88346bd2504695821 | refs/heads/main | 2023-04-30T10:25:02.770042 | 2023-04-17T01:07:46 | 2023-04-17T18:11:35 | 160,292,002 | 0 | 1 | null | 2023-04-06T13:17:54 | 2018-12-04T03:37:54 | Python | UTF-8 | Python | false | false | 704 | py | from textwrap import dedent
from .aoc_2021_day10 import parse_subsystem_code, score_autocomplete
SAMPLE_INPUT = dedent(
"""\
[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]
"""
).splitlines()
def test_part_one() -> None:
    # 26397 is the syntax-error score the puzzle statement gives for the
    # sample input.
    syntax_score, _ = parse_subsystem_code(SAMPLE_INPUT)
    assert syntax_score == 26397
def test_part_two() -> None:
    # 288957 is the middle autocomplete score quoted in the puzzle
    # statement for the sample's incomplete lines.
    _, incomplete_lines = parse_subsystem_code(SAMPLE_INPUT)
    assert score_autocomplete(incomplete_lines) == 288957
| [
"sco1.git@gmail.com"
] | sco1.git@gmail.com |
afac01645fddae62858ab76b0947a5a0723f02f3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02675/s941998787.py | 41727de13518ee3d758d33916cfee0252c8a5388 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | N= int(input())
# Digits grouped by how the counter after them is read; the only digit
# in neither set, 3, reads "bon".
_HON_DIGITS = {2, 4, 5, 7, 9}
_PON_DIGITS = {0, 1, 6, 8}


def counter_word(n):
    """Return the reading ("hon", "pon" or "bon") for the counter after n.

    Only the last decimal digit of *n* matters.
    """
    last_digit = n % 10
    if last_digit in _HON_DIGITS:
        return "hon"
    if last_digit in _PON_DIGITS:
        return "pon"
    return "bon"


if __name__ == "__main__":
    # N is read from stdin at module level on the line above this block;
    # the guard keeps the script's stdin/stdout behavior unchanged while
    # making counter_word importable/testable.  The original repeated the
    # same print in a ten-branch if/elif chain; set membership replaces it.
    print(counter_word(N))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
45dfdc4a46f7d22d0bcaa32665e633280c4e5cd3 | 18eee1dc9d6b3e97aa1bd99addb5401bad2a8647 | /apps/goods/filters.py | dde361f4a48c97e84ac6ef82523c0a202e91b7d6 | [
"Apache-2.0"
] | permissive | xxcfun/mxshop-api | 1a2b1e4c7e4ae86b47e27c16f5dde401a0ff4af0 | 1472ad0d959439ea80c1f8d8bfd3629c15d3017d | refs/heads/master | 2023-08-18T19:34:47.941932 | 2021-09-14T10:57:26 | 2021-09-14T10:57:26 | 380,106,131 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | import django_filters
from goods.models import Goods
from django.db.models import Q
class GoodsFilter(django_filters.rest_framework.FilterSet):
    """Filter set for Goods queries: price range, category tree, flags."""
    # field_name is the model field the filter applies to; lookup_expr is
    # the comparison performed on it (gte/lte).
    pricemin = django_filters.NumberFilter(field_name='shop_price', lookup_expr='gte')
    pricemax = django_filters.NumberFilter(field_name='shop_price', lookup_expr='lte')
    top_category = django_filters.NumberFilter(field_name='category', method='top_category_filter')
    def top_category_filter(self, queryset, name, value):
        # Match goods whose category id equals the selected value at any
        # depth (first-, second- or third-level category), so selecting a
        # parent category also returns goods filed under its descendants.
        return queryset.filter(Q(category_id=value) | Q(category__parent_category_id=value) | Q(category__parent_category__parent_category_id=value))
    class Meta:
        model = Goods
        fields = ['pricemin', 'pricemax', 'is_hot', 'is_new']
| [
"55070348+hhdMrLion@users.noreply.github.com"
] | 55070348+hhdMrLion@users.noreply.github.com |
e250315934d3aab16f7dc6c05d0fe65f7ab19055 | 3670f2ca6f5609e14cce8c31cb1348052d0b6358 | /xacro/rqt_runtime_monitor/setup.py | 030f81f149fd6d14d091aef5c2627c152acf4f95 | [] | no_license | jincheng-ai/ros-melodic-python3-opencv4 | b0f4d3860ab7ae3d683ade8aa03e74341eff7fcf | 47c74188560c2274b8304647722d0c9763299a4b | refs/heads/main | 2023-05-28T17:37:34.345164 | 2021-06-17T09:59:25 | 2021-06-17T09:59:25 | 377,856,153 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Build the distutils argument dict from the ROS package manifest
# (package.xml) and the values below, then hand it to setup().
d = generate_distutils_setup(
    packages=['rqt_runtime_monitor'],
    package_dir={'': 'src'}
)
setup(**d)
| [
"shuyuanhao@cetiti.com"
] | shuyuanhao@cetiti.com |
0952c45060b73fc80ede7f00cc3160109fa1c450 | 073c2fd73875ce4e7d061623b8403f8d77c45d92 | /cohesity_management_sdk/models/restore_app_object.py | 9fdd52cdbb80907188731b0c0d483c183d9e339a | [
"Apache-2.0"
] | permissive | naveena-maplelabs/management-sdk-python | b11441b2edccc5a1262785bd559ad4b3ea984c3b | 06ce4119d955dc08cdbc5109c935afcfcd9d65ab | refs/heads/master | 2021-05-20T10:52:12.776816 | 2020-03-10T03:28:08 | 2020-03-10T03:28:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Cohesity Inc.
import cohesity_management_sdk.models.entity_proto
import cohesity_management_sdk.models.restore_app_object_params
class RestoreAppObject(object):
    """Describes one application object being restored.

    Attributes:
        app_entity (EntityProto): attributes and latest statistics of the
            entity.
        display_name (string): proper UI display name when app_entity is
            not set; e.g. for SQL databases this includes the instance
            name.
        restore_params (RestoreAppObjectParams): restore-time options.
    """

    # Maps Python attribute names onto the API (wire) property names.
    _names = {
        "app_entity":'appEntity',
        "display_name":'displayName',
        "restore_params":'restoreParams'
    }

    def __init__(self,
                 app_entity=None,
                 display_name=None,
                 restore_params=None):
        """Store the supplied attribute values on the instance."""
        self.app_entity = app_entity
        self.display_name = display_name
        self.restore_params = restore_params

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Deserialize a server-response dictionary into an instance.

        Args:
            dictionary (dictionary): response payload whose keys match
                the API property names.

        Returns:
            RestoreAppObject, or None when *dictionary* is None.
        """
        if dictionary is None:
            return None

        # Nested objects are deserialized through their own model classes;
        # absent or falsy payloads stay None.
        raw_entity = dictionary.get('appEntity')
        app_entity = None
        if raw_entity:
            app_entity = cohesity_management_sdk.models.entity_proto.EntityProto.from_dictionary(raw_entity)

        display_name = dictionary.get('displayName')

        raw_params = dictionary.get('restoreParams')
        restore_params = None
        if raw_params:
            restore_params = cohesity_management_sdk.models.restore_app_object_params.RestoreAppObjectParams.from_dictionary(raw_params)

        return cls(app_entity,
                   display_name,
                   restore_params)
| [
"ashish@cohesity.com"
] | ashish@cohesity.com |
aa2759ad7133838c170a215aae51575e1f6c5d36 | 603d37a05bada0fae1d468cc36d80d6b9d10ac09 | /randlov1998/balance_lspi.py | a10bc0a144ac92a2ccb1acd8ac405fa24c51b22f | [
"MIT"
] | permissive | eejd/agent-bicycle | 8b8b5162177e21f27889ca0b89348000c1f724d8 | 1ecc3fcad8504385e9e85ccbc539464cb4e6c4e6 | refs/heads/master | 2020-12-31T06:23:01.487407 | 2013-12-11T04:48:16 | 2013-12-11T04:48:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | from pybrain.rl.agents.linearfa import LinearFA_Agent
from pybrain.rl.learners.valuebased.linearfa import LSPI
from pybrain.rl.experiments import EpisodicExperiment
from environment import Environment
from tasks import LSPIBalanceTask
from training import LinearFATraining
# Wire up the bicycle-balance task with an LSPI learner and two agents:
# one that explores/learns online and a greedy, frozen one used only to
# measure performance.
task = LSPIBalanceTask()
learner = LSPI(task.nactions, task.outdim)
# TODO this LSPI does not have eligibility traces.
#learner._lambda = 0.95
task.discount = learner.rewardDiscount
agent = LinearFA_Agent(learner)
# The state has a huge number of dimensions, and the logging causes me to run
# out of memory. We needn't log, since learning is done online.
agent.logging = False
performance_agent = LinearFA_Agent(learner)
performance_agent.logging = False
# Greedy + non-learning: evaluates the current policy without changing it.
performance_agent.greedy = True
performance_agent.learning = False
experiment = EpisodicExperiment(task, agent)
# TODO PyBrain says that the learning rate needs to decay, but I don't see that
# described in Randlov's paper.
# A higher number here means the learning rate decays slower.
learner.learningRateDecay = 100000
# NOTE increasing this number above from the default of 100 is what got the
# learning to actually happen, and fixed the bug/issue where the performance
# agent's performance stopped improving.
tr = LinearFATraining('balance_lspi', experiment,
        performance_agent, verbose=True)
# Train for 55k steps, measuring performance every 10 intervals over 5
# evaluation episodes.
tr.train(55000, performance_interval=10, n_performance_episodes=5)
| [
"cld72@cornell.edu"
] | cld72@cornell.edu |
7e48a295782fb5d9b146dadab137c5711928f165 | 76adbcc676882343e166485f42c4e8fc38b851f8 | /constants/ad.py | defc860fdf9ebbb5d315cfee86c06adb7a30e0bf | [
"MIT"
] | permissive | adWharf/core | 5856b123fccabfc812707a605270015ed0750304 | f7e04db8b9635f0adf67d9f7488ae64f291a564c | refs/heads/master | 2020-03-18T05:35:01.017239 | 2018-06-07T03:52:29 | 2018-06-07T03:52:29 | 134,350,070 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: william
@contact: 1342247033@qq.com
@site: http://www.xiaolewei.com
@file: ad_status.py
@time: 12/04/2018 17:10
"""
from enum import Enum
# Advertisement review/lifecycle status codes.
ADSTATUS_UNKNOW = -1 # unknown
ADSTATUS_NORMAL = 0 # normal (serving)
ADSTATUS_PENDING = 1 # pending review
ADSTATUS_DENIED = 2 # review rejected
ADSTATUS_FROZEN = 3 # frozen
ADSTATUS_SUSPEND = 4 # suspended
ADSTATUS_PREPARE = 5 # in preparation
ADSTATUS_DELETED = 6 # deleted
# Bidding modes: plain cost-per-mille vs optimized CPM.
AD_BID_TYPE = Enum('AD_BID_TYPE', ('CPM', 'OCPM'))
# oCPM optimization goals (platform-defined magic numbers).
AD_BID_TYPE_OCPM_OPT_MORE_CLICK = 2  # optimize for more clicks
AD_BID_TYPE_OCPM_OPT_MORE_ORDER = 7  # optimize for more orders
| [
"1342247033@qq.com"
] | 1342247033@qq.com |
c046458b9836688b0409b199f115260b9bf29216 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_117/1559.py | daf42e84e31ecc0e6ae24ab62ef2ed6872425d11 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | #f = open('B-small-attempt0.in')
f = open('B-large.in')
#f = open('test.in')
# First line of a Code Jam input file is the number of test cases.
count = int(f.readline())
# Accumulates one "Case #i: YES/NO" line per test case.
output = ''
def check():
    """Recursively decide whether the global matrix is 'mowable'.

    Repeatedly finds the global minimum; a row made entirely of that
    minimum is removed, otherwise cells equal to the minimum are removed
    from their column.  Returns True when the matrix empties, False on a
    contradiction.  Mutates the module globals in place.
    """
    global matrix,rowCount,columnCount
    currentRow = 0
    # NOTE(review): 100 as the initial minimum assumes all cell values
    # are <= 100 (the problem's stated height bound) -- verify.
    currentMin = 100
    # Locate the row containing the smallest value in the whole matrix.
    for i in range(0,rowCount):
        tempMin = min(matrix[i])
        if tempMin < currentMin:
            currentMin = tempMin
            currentRow = i
    minIndex = matrix[currentRow].index(currentMin)
    if matrix[currentRow].count(currentMin) == len(matrix[currentRow]):
        # The whole row equals the minimum: drop the row and recurse.
        del matrix[currentRow]
        rowCount -= 1
        if rowCount == 0:
            return True
        return check()
    else:
        for j in range(0,rowCount):
            # Column deletion is only valid if every row holds the
            # minimum at this index.
            if matrix[j][minIndex] != currentMin:
                return False
            del matrix[j][minIndex]
            columnCount -= 1
            if columnCount == 0:
                return True
            # NOTE(review): this return sits INSIDE the loop, so only
            # row j=0's cell is removed before recursing (rows end up
            # with unequal lengths).  It apparently produced accepted
            # answers, but confirm this early recursion is intentional.
            return check()
# Parse each test case into the globals check() consumes, then record
# its verdict.
for i in range(0,count):
    rowAndColumn = f.readline().split()
    rowCount = int(rowAndColumn[0])
    columnCount = int(rowAndColumn[1])
    # [[]]*rowCount aliases one list, but every slot is reassigned below,
    # so the aliasing is harmless here.
    matrix = [[]] * rowCount
    for j in range(0,rowCount):
        matrix[j] = f.readline().split()
        for k in range(0,len(matrix[j])):
            matrix[j][k] = int(matrix[j][k])
    if check():
        output += 'Case #' + str(i+1) + ': YES\n'
    else:
        output += 'Case #' + str(i+1) + ': NO\n'
print(output)
# Write the accumulated answers to disk as well as stdout.
newf = open('output.txt','w')
newf.write(output)
#Case #1: YES
#Case #2: NO
#Case #3: YES
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
042656e281ad8a91f55e1a538bda15e8a457df7e | e0b6f5bd451aa8af3273fbc948799637681342e1 | /scripts/wm_representation/functions/encoding_leave_one_out.py | 255b51c1b420fca68791c82c9ac918ab9f5aeeab | [] | no_license | davidbestue/encoding | 6b304f6e7429f94f97bd562c7544d1fdccf7bdc1 | c27319aa3bb652b3bfc6b7340044c0fda057bc62 | refs/heads/master | 2022-05-05T23:41:42.419252 | 2022-04-27T08:34:52 | 2022-04-27T08:34:52 | 144,248,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,238 | py |
from model_functions import *
from fake_data_generator import *
from Weights_matrixs import *
from Representation import *
from process_encoding import *
from process_wm import *
from data_to_use import *
from bootstrap_functions import *
from leave_one_out import *
from joblib import Parallel, delayed
import multiprocessing
import time
import random
#
# NOTE(review): pd (pandas) is presumably provided by the star-imports
# above -- verify before running standalone.
numcores = multiprocessing.cpu_count() - 3
# Subjects and regions to decode; one reconstruction per combination.
Subjects=['d001', 'n001', 'b001', 'r001', 's001', 'l001']
brain_regions = ['visual', 'ips', 'pfc']
path_save_signal='/home/david/Desktop/target_close/signal_encoding.xlsx'
path_save_shuffle='/home/david/Desktop/target_close/shuffle_encoding.xlsx'
Reconstructions=[]
Reconstructions_shuff=[]
for Subject in Subjects:
    for Brain_region in brain_regions:
        print(Subject + ', ' + Brain_region)
        #plt.figure()
        ### Data to use
        enc_fmri_paths, enc_beh_paths, wm_fmri_paths, wm_beh_paths, masks = data_to_use( Subject, 'together', Brain_region)
        ##### Process training data
        training_dataset, training_targets = process_encoding_files(enc_fmri_paths, masks, enc_beh_paths, sys_use='unix', hd=6, TR=2.335) #4
        error_= Pop_vect_leave_one_out(training_dataset, training_targets) # no need to parallelize: there is no multiple WM here
        Reconstruction = pd.DataFrame([error_]) # there is only 1!
        Reconstruction.columns=['decoding']
        Reconstruction['region'] = Brain_region
        Reconstruction['subject'] = Subject
        Reconstruction['label'] = 'signal'
        Reconstructions.append(Reconstruction)
        # Shuffled baseline: 10 permutations of the same decoding.
        error_shuff = shuff_Pop_vect_leave_one_out2(training_dataset, training_targets, 10)
        Reconstruction_shuff = pd.DataFrame(error_shuff)
        Reconstruction_shuff.columns=['decoding']
        Reconstruction_shuff['region'] = Brain_region
        Reconstruction_shuff['subject'] = Subject
        Reconstruction_shuff['label'] = 'shuffle'
        Reconstructions_shuff.append(Reconstruction_shuff)
### Save signal from the reconstructions and shuffles
Decoding_df = pd.concat(Reconstructions, axis=0)
Decoding_df.to_excel( path_save_signal )
Shuffle_df = pd.concat(Reconstructions_shuff, axis=0)
Shuffle_df.to_excel( path_save_shuffle )
"davidsanchezbestue@hotmail.com"
] | davidsanchezbestue@hotmail.com |
37747e30a88b90ba50ba53fe7451a8b52b8155e2 | 236402efa32923fefc9f3924ba4155142e8052fe | /2017/_10_knot_hash_test.py | c48df44d9d3edca3a18e7a40edb3223d50d2b400 | [
"MIT"
] | permissive | pchudzik/adventofcode | 7c32126948ea57cdef3858ae3eb63cafdd67abb0 | 72304880c6b080d6c177d11fc9b9eb7b58e876b7 | refs/heads/master | 2022-05-08T00:20:58.586672 | 2022-04-29T19:30:34 | 2022-04-29T19:30:34 | 164,089,632 | 0 | 0 | MIT | 2022-04-22T14:29:37 | 2019-01-04T09:51:33 | Python | UTF-8 | Python | false | false | 800 | py | import pytest
from _10_knot_hash import single_round, knot_hash
@pytest.mark.parametrize(
"puzzle, positions, result",
[
([0, 1, 2, 3, 4], [3], [2, 1, 0, 3, 4]),
([0, 1, 2, 3, 4], [3, 4], [4, 3, 0, 1, 2]),
([0, 1, 2, 3, 4], [3, 4, 1], [4, 3, 0, 1, 2]),
([0, 1, 2, 3, 4], [3, 4, 1, 5], [3, 4, 2, 1, 0]),
]
)
def test_rotate(puzzle, positions, result):
assert single_round(puzzle, positions)[0] == result
@pytest.mark.parametrize(
"puzzle, result", [
("", "a2582a3a0e66e6e86e3812dcb672a272"),
("AoC 2017", "33efeb34ea91902bb2f59c9920caa6cd"),
("1,2,3", "3efbe78a8d82f29979031a4aa0b16a9d"),
("1,2,4", "63960835bcdc130f0b66d7ff4f6a5a8e")])
def test_knot_hash(puzzle, result):
assert knot_hash(puzzle) == result
| [
"pawel.chudzik@gmail.com"
] | pawel.chudzik@gmail.com |
70b65b3099824795baa28a830cdabe6a194a359a | aeb4759e515adc4493f8d062011814c9fc749ad8 | /src/desktop/Level/Rating/rating.py | 7a4e53d7c1140920469deadf1d6f4d9b16af9c67 | [] | no_license | cloew/PyMine | 3b67f54168ddfe8a2b0262f929e6688e4797486a | eac57ca9c585ec86befff126d50c9df3614b104f | refs/heads/master | 2021-01-16T18:56:55.431131 | 2014-06-23T02:43:17 | 2014-06-23T02:43:17 | 4,244,619 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 279 | py |
class Rating:
""" Represents the Rating """
def __init__(self, level):
""" Initialize the Rating """
self.level = level
self.awarded = False
def checkAwarded(self):
""" Check if the Rating should be awarded """ | [
"cloew123@gmail.com"
] | cloew123@gmail.com |
a32c536428203a1d2fab7eea522cf5846aa50345 | 73f4a527f2dbe9bcfbceab7cab1370c23bbbfa36 | /lec4_serving/send_url.py | 24be6fa0cc47546b98000ea82c02f272420c3463 | [] | no_license | pai-plznw4me/network_study | c5962706c29c5475badb3d32c8e21f20dd21e67a | 845dd045e68bce670b241cf9f1553c23344fb984 | refs/heads/master | 2022-12-14T02:32:30.427499 | 2020-09-13T15:07:43 | 2020-09-13T15:07:43 | 292,311,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | import json
import base64
from PIL import Image
import numpy as np
import io
import matplotlib.pyplot as plt
import tensorflow as tf
# Image to binary
img_path = "/Users/seongjungkim/PycharmProjects/network_study/lec4_serving/sample.png"
f = open(img_path, mode='rb')
image = f.read()
b_image = base64.encodebytes(image).decode("utf-8")
data = {'image': b_image}
j_image = json.dumps(data)
# binary to Image
raw_image = base64.b64decode(b_image)
img = Image.open(io.BytesIO(raw_image))
#
data = json.dumps({"signature_name": "serving_default", "instances": b_image})
print('Data: {} ... {}'.format(data[:50], data[len(data)-52:]))
| [
"plznw4me@naver.com"
] | plznw4me@naver.com |
b25ec287087535274a5c4e7bd595d08c888d6d73 | dab869acd10a3dc76e2a924e24b6a4dffe0a875f | /Laban/LabanLib/analysis/spreadindAndClosing.py | 66d70d80949167c1383fa65ab43fe1af03e37b9b | [] | no_license | ranBernstein/Laban | d82aff9b0483dd007e03a06e51f7d635f62ed05d | 54c88afa9493deacbdd182904cc5d180ecb208b4 | refs/heads/master | 2021-01-23T13:17:51.777880 | 2017-02-14T09:02:54 | 2017-02-14T09:02:54 | 25,508,010 | 3 | 1 | null | 2017-02-14T09:02:55 | 2014-10-21T07:16:01 | Tcl | UTF-8 | Python | false | false | 1,282 | py | from LabanLib.LabanUtils import AbstractLabanAnalyzer
from LabanLib.LabanUtils import AbstractAnalysis
import mocapUtils.kinect.angleExtraction as ae
class SpreadindAndClosing(AbstractAnalysis.AbstractAnalysis):
def getPositiveAndNegetive(self):
return 'Spreading', 'Closing'
def wrapper(self, lineInFloats, headers, jointsIndices):
return ae.calcAverageDistanceOfIndicesFromLine(lineInFloats, \
jointsIndices, *self.extractor.getLongAxeIndices(headers))
def analyze(inputFile):
extractor = AbstractLabanAnalyzer.getExtractor(inputFile)
analysis = SpreadindAndClosing(extractor)
return analysis.extract(inputFile)
"""
def spreadindAndClosingWrapper(extractor, lineInFloats, headers, jointsIndices):
return ae.calcAverageDistanceOfIndicesFromLine(lineInFloats, \
jointsIndices, *extractor.getLongAxeIndices(headers))
def extractSpreadindAndClosing(extractor, fileName):
return extractor.extractLaban(fileName, extractor.spreadindAndClosingWrapper)
def plotSpreadindAndClosing(extractor, fileName):
input = extractor.extractLaban(fileName, extractor.spreadindAndClosingWrapper)
extractor.plotResults(input, 'Spreading', 'Closing')
""" | [
"bernstein.ran@gmail.com"
] | bernstein.ran@gmail.com |
c557512ab437d92c6ff97db1ed111ba0e9a9e98d | 1f0d46b55fe351dc61436069aca183dfa0d07e92 | /restful01/restful01/settings.py | 6499adabd3b5867434fb5271cd3e6dfa17a9fb17 | [] | no_license | aguncn/DjangoRESTfulWebServices | bfba746fc20f3aaa8bf8cedad6e569d0ea5716b2 | 1ab78478f84178d3eca9de1a262bae242930a9c5 | refs/heads/master | 2020-04-12T04:28:05.056831 | 2018-12-18T14:04:17 | 2018-12-18T14:04:17 | 162,296,613 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,141 | py | """
Django settings for restful01 project.
Generated by 'django-admin startproject' using Django 2.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6y8j^#_uy109&92vcddfse(x^=*#)%lj$k0=rm@y(1rb4j$!3_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'toys.apps.ToysConfig',
'drones.apps.DronesConfig',
'django_filters',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'restful01.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'restful01.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS':
'drones.custompagination.LimitOffsetPaginationWithUpperBound',
'PAGE_SIZE': 4,
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
'rest_framework.filters.OrderingFilter',
'rest_framework.filters.SearchFilter',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'anon': '300/hour',
'user': '100/hour',
'drones': '200/hour',
'pilots': '150/hour',
},
'DEFAULT_VERSIONING_CLASS':
'rest_framework.versioning.NamespaceVersioning',
}
| [
"aguncn@163.com"
] | aguncn@163.com |
57ece42cba9c0ad3533058c32259e55d8a82d896 | 1bd24cc6d3ebd0d57123a89589493ec8a0cfce90 | /cachemagic/client.py | 4e7a4b42650ce62a9b11315dc4dd833f21c86832 | [
"BSD-2-Clause"
] | permissive | ntucker/django-cache-magic | ff6b5892d84147c20d6500972c43ec61cf70dd80 | e6c6195fd80c846f7a49e27d1bc519ee109d80dc | refs/heads/master | 2021-01-16T20:32:44.592682 | 2015-11-26T07:24:29 | 2015-11-26T07:24:29 | 5,353,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,041 | py | import os
from eventlet.greenthread import sleep
from eventlet.queue import LightQueue
from eventlet.queue import Empty
from redis import Redis, ConnectionPool
from redis.exceptions import ConnectionError
from redis.connection import UnixDomainSocketConnection, Connection
from django_redis.client.default import DefaultClient
from django.conf import settings
_connection_pools = {}
class EventletConnectionPool(ConnectionPool):
def __init__(self, connection_class=Connection, max_connections=None,
**connection_kwargs):
self.pid = os.getpid()
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections or 2 ** 31
self._created_connections = 0
self._available_connections = LightQueue()
self._in_use_connections = set()
def get_connection(self, command_name, *keys, **options):
"Get a connection from the pool"
try:
connection = self._available_connections.get_nowait()
except Empty:
if self._created_connections < self.max_connections:
connection = self.make_connection()
else:
try:
connection = self._available_connections.get()
except Empty:
raise ConnectionError("Couldn't find a free connection")
self._in_use_connections.add(connection)
return connection
def release(self, connection):
"Releases the connection back to the pool"
self._checkpid()
if connection.pid == self.pid:
self._in_use_connections.remove(connection)
self._available_connections.put_nowait(connection)
def disconnect(self):
"Disconnects all connections in the pool"
while True:
try:
self._available_connections.get_nowait().disconnect()
except Empty:
break
for connection in self._in_use_connections:
connection.disconnect()
def get_or_create_connection_pool(**params):
global _connection_pools
key = str(params)
if key not in _connection_pools:
_connection_pools[key] = EventletConnectionPool(**params)
return _connection_pools[key]
class EventletConnectionClient(DefaultClient):
def _connect(self, host, port, db):
"""
Creates a redis connection with connection pool.
"""
kwargs = {
"db": db,
"parser_class": self.parser_class,
"password": self._options.get('PASSWORD', None),
"max_connections": settings.REDIS_POOL_SIZE,
}
if host == "unix":
kwargs.update({'path': port, 'connection_class': UnixDomainSocketConnection})
else:
kwargs.update({'host': host, 'port': port, 'connection_class': Connection})
connection_pool = get_or_create_connection_pool(**kwargs)
connection = Redis(connection_pool=connection_pool)
return connection | [
"me@ntucker.me"
] | me@ntucker.me |
4777b979f6c28f8a4170bab3f4fd94f6a7ffafe8 | afa2ebb439e6592caf42c507a789833b9fbf44b2 | /supervised_learning/0x0F-word_embeddings/4-fasttext.py | a146dc1491d8836f475b9aa168ce255865c9b637 | [] | no_license | anaruzz/holbertonschool-machine_learning | 64c66a0f1d489434dd0946193747ed296760e6c8 | 91300120d38acb6440a6dbb8c408b1193c07de88 | refs/heads/master | 2023-07-30T20:09:30.416167 | 2021-09-23T16:22:40 | 2021-09-23T16:22:40 | 279,293,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | #!/usr/bin/env python3
"""
Script that creates and trains a genism fastText model
"""
from gensim.models import FastText
def fasttext_model(sentences, size=100,
min_count=5, negative=5,
window=5, cbow=True,
iterations=5, seed=0, workers=1):
"""
Returns: the trained model
"""
model = FastText(sentences,
size=size,
min_count=min_count,
negative=negative,
window=window,
sg=not cbow,
iter=iterations,
seed=seed,
workers=workers
)
return model
| [
"laabidigh@gmail.com"
] | laabidigh@gmail.com |
b7ba2e20045fe5087b250059c90d811008bb3a0d | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/backup_workload_items_operations.py | 1604739bf2f67ed41caf837bd9dff06514352a54 | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 5,793 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class BackupWorkloadItemsOperations(object):
"""BackupWorkloadItemsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def list(
self, vault_name, resource_group_name, fabric_name, container_name, filter=None, skip_token=None, custom_headers=None, raw=False, **operation_config):
"""Provides a pageable list of workload item of a specific container
according to the query filter and the pagination parameters.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the container.
:type fabric_name: str
:param container_name: Name of the container.
:type container_name: str
:param filter: OData filter options.
:type filter: str
:param skip_token: skipToken Filter.
:type skip_token: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of WorkloadItemResource
:rtype:
~azure.mgmt.recoveryservicesbackup.models.WorkloadItemResourcePaged[~azure.mgmt.recoveryservicesbackup.models.WorkloadItemResource]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.WorkloadItemResourcePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.WorkloadItemResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/items'}
| [
"noreply@github.com"
] | ashirey-msft.noreply@github.com |
c897c38ea593fd6d028a685aaec707ba4871e9b4 | dcce56815dca2b18039e392053376636505ce672 | /dumpscripts/pkgutil_nested.py | b4bb0040cc6ae75c9a0913c71ae015783378d7f1 | [] | no_license | robertopauletto/PyMOTW-it_3.0 | 28ff05d8aeccd61ade7d4107a971d9d2576fb579 | c725df4a2aa2e799a969e90c64898f08b7eaad7d | refs/heads/master | 2021-01-20T18:51:30.512327 | 2020-01-09T19:30:14 | 2020-01-09T19:30:14 | 63,536,756 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | # pkgutil_nested.py
import nested
import nested.shallow
print('nested.shallow:', nested.shallow.__file__)
nested.shallow.func()
print()
import nested.second.deep
print('nested.second.deep:', nested.second.deep.__file__)
nested.second.deep.func()
| [
"roberto.pauletto@gmail.com"
] | roberto.pauletto@gmail.com |
c929f35734c74a445856865534f29bc5fdd8f689 | 5de0c0e76bdde469156d057007a5008a63a0d66b | /buggernaut/Area.py | 5abcc8083016ae2fc276ffc7c8f945084c107c7b | [] | no_license | mattharkness/sixthdev | 6bcfd1c490efafb114dc5f014c6e5f1d91d56b4d | a7df929147d82d225606c216f69c48d898e19ebe | refs/heads/master | 2023-06-08T05:57:38.928657 | 2021-06-15T16:53:15 | 2021-06-15T16:53:15 | 338,441,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | __ver__="$Id$"
from strongbox import *
auto =None
class Area(Strongbox):
ID = attr(int, default=auto)
area = attr(str)
| [
"sabren"
] | sabren |
b33d36dbbdef12c04a89dad0a52e6461d82f31a1 | abf17bef22471c32d9b05be27704ceb7f3877b0d | /namo/multithread_scripts/threaded_test.py | f7f7ceadc0d18c0f8b9b1010d0407a107e1b1aaf | [] | no_license | beomjoonkim/adversarial_actor_critic | a91d608c1da9ac7a9b4ed4b0dce2f08d68456346 | 753a031ff70473003bdcf844c256e28337a8e8c9 | refs/heads/master | 2021-02-24T19:19:56.463064 | 2020-05-17T14:14:03 | 2020-05-17T14:14:03 | 245,438,421 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,231 | py | import numpy as np
import scipy.io as sio
import os
import sys
import threading
from Queue import Queue
from multiprocessing.pool import ThreadPool # dummy is nothing but multiprocessing but wrapper around threading
from multiprocessing import cpu_count
import pickle
import socket
import argparse
import csv
import time
import itertools
import sys
def worker_p(config):
algo = config[0]
n_data = config[1]
n_trial = config[2]
Qloss = config[3]
epoch = config[4]
n_score = config[5]
d_lr = config[6]
g_lr = config[7]
other_pi = config[8]
explr_const = config[9]
command = './test_with_gpu.sh ' + str(n_data) + ' ' + str(n_trial) + ' '\
+ str(algo) + ' ' + str(Qloss) + ' ' + str(n_score) +' '+ str(epoch)+ ' '\
+ str(d_lr) + ' ' + str(g_lr) + ' ' + str(other_pi) + ' ' + str(explr_const)
print command
os.system(command)
def worker_wrapper_multi_input(multi_args):
return worker_p(multi_args)
def main():
n_workers = 4
algo = sys.argv[1]
n_datas = [int(k) for k in sys.argv[2].split(',')]
n_datas = range(int(n_datas[0]),int(n_datas[1])+100,100)
Qloss = sys.argv[3]
epochs = [int(k) for k in sys.argv[4].split(',')]
epochs = range(int(epochs[0]),int(epochs[1])+1)
n_score = sys.argv[5]
d_lr = float(sys.argv[6])
g_lr = float(sys.argv[7])
explr_const = float(sys.argv[8])
trials = [int(k) for k in sys.argv[9].split(',')]
# Other pi???
n_workers = cpu_count()
configs = []
for n_data in n_datas:
otherpi_wfile = 'n_data_'+str(n_data)+'/onlyplace/adv/dg_lr_0.001_0.0001/n_score_5/'
for trial in trials:
otherpi_wfile = 'n_data_'+str(n_data)+'/onlyplace/adv/dg_lr_0.001_0.0001/n_score_5/n_trial_'+str(trial)+'/'
for epoch in epochs:
otherpi_wfile = 'n_data_'+str(n_data)\
+'/onlyplace/adv/dg_lr_0.001_0.0001/n_score_5/n_trial_'\
+str(trial)+'/train_results/'+'a_gen_epoch_'+str(epoch)+'.h5'
configs.append([algo,n_data,trial,Qloss,epoch,n_score,d_lr,g_lr,otherpi_wfile,explr_const])
print configs
pool = ThreadPool(n_workers)
results = pool.map(worker_wrapper_multi_input,configs)
if __name__ == '__main__':
main()
| [
"beomjoon@mit.edu"
] | beomjoon@mit.edu |
8a836540b8bcf7111da0d1aeb1a8ba28f87054d2 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_155/502.py | 2febc7c4005d384624053bf32ddf35dd96d1547e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | __author__ = 'Thanabhat Koomsubha'
def solve(cc):
S, K = input().split()
S = int(S)
sum = 0
sol = 0
for i in range(S + 1):
if int(K[i]) == 0:
continue
if sum < i:
sol += (i - sum)
sum += (i - sum)
sum += int(K[i])
print('Case #%d: %d' % (cc + 1, sol))
def main():
T = int(input())
for i in range(T):
solve(i)
main() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
5ca6d145b7e5a2b67acf13a34f61f65aca3d84d7 | a5a1a4a34d5e404d483cd442527ed154cdc4ab54 | /scripts/lt2_scripts/test_scripts/awg_trigger_jitter2.py | e46dbd0ef3c7acb49ef273d85428f4fa9adb0cb9 | [] | no_license | AdriaanRol/measurement | c0abb9cfb2e7061a060c109f6be61a420ca8586e | 32e0912b83d5ceedf00378df1d6a48feb9ab8f17 | refs/heads/master | 2021-01-20T16:48:03.044302 | 2014-03-26T15:42:09 | 2014-03-26T15:42:09 | 18,175,928 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | import os
import qt
import numpy as np
import msvcrt
from measurement.lib.AWG_HW_sequencer_v2 import Sequence
from measurement.lib.config import awgchannels_lt2 as awgcfg
reload(awgcfg)
AWG = qt.instruments['AWG']
def generate_sequence(do_program=True):
seq = Sequence('Test')
# vars for the channel names
trigger_chan= 'trigger'
trigger2_chan= 'trigger2'
awgcfg.configure_sequence(seq, 'awg_trigger_jitter')
ename='trigger'
seq.add_element(ename,goto_target='trigger')
seq.add_pulse('trigger',trigger_chan,ename,start=0,duration=500, amplitude=1)
seq.add_pulse('wait',trigger_chan,ename,start=0,
start_reference='trigger', link_start_to = 'end',duration=2500, amplitude=0)
seq.add_pulse('trigger2',trigger2_chan,ename,start=0,duration=500, amplitude=1)
seq.add_pulse('wait2',trigger2_chan,ename,start=0,
start_reference='trigger2', link_start_to = 'end',duration=2500, amplitude=0)
#sweep the pulse length
seq.set_instrument(AWG)
seq.set_clock(1e9)
seq.set_send_waveforms(do_program)
seq.set_send_sequence(do_program)
seq.set_program_channels(True)
seq.set_start_sequence(False)
seq.force_HW_sequencing(True)
seq.send_sequence()
return True
if __name__ == "__main__":
generate_sequence()
| [
"wolfgangpfff@gmail.com"
] | wolfgangpfff@gmail.com |
b6ae99dcc6e6e51be66d43ad5943b593979f5014 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinci_v41r2/InstallArea/x86_64-slc6-gcc49-opt/python/CommonParticles/StdVeryLooseJpsi2MuMu.py | a1d496cd15b2307fbcbd8402a282ac808ceb9a75 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | #!/usr/bin/env python
# =============================================================================
# $Id: StdVeryLooseJpsi2MuMu.py,v 1.1 2010-01-18 10:08:49 gcowan Exp $
# =============================================================================
## @file CommonParticles/StdVeryLooseJpsi2MuMu.py
# configuration file for 'Standard Very Loose Jpsi2MuMu'
# @author Greig Cowan
# @date 2009-06-23
# =============================================================================
"""
Configuration file for 'Standard Very Loose Jpsi2MuMu'
"""
__author__ = "Greig Cowan"
__version__ = "CVS tag $Name: not supported by cvs2svn $, version $Revision: 1.1 $"
# =============================================================================
__all__ = (
'StdVeryLooseJpsi2MuMu' ,
'locations'
)
# =============================================================================
from Gaudi.Configuration import *
from Configurables import CombineParticles
from CommonParticles.Utils import *
## ============================================================================
## create the algorithm
StdVeryLooseJpsi2MuMu = CombineParticles ("StdVeryLooseJpsi2MuMu")
StdVeryLooseJpsi2MuMu.Inputs = ["Phys/StdVeryLooseMuons/Particles"]
StdVeryLooseJpsi2MuMu.DecayDescriptor = "J/psi(1S) -> mu+ mu-"
StdVeryLooseJpsi2MuMu.CombinationCut = "(ADAMASS('J/psi(1S)') < 100.*MeV) & (ADOCACHI2CUT(30, ''))"
StdVeryLooseJpsi2MuMu.MotherCut = "(VFASPF(VCHI2) < 25.)"
## configure Data-On-Demand service
locations = updateDoD ( StdVeryLooseJpsi2MuMu )
## ============================================================================
if '__main__' == __name__ :
print __doc__
print __author__
print __version__
print locationsDoD ( locations )
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
f33fd4cc15561829b611c63b8394c988d0708fa6 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/77/usersdata/247/43349/submittedfiles/exercicio24.py | 6f73a9b4da2d7555834aaae02a5105c1ce437493 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # -*- coding: utf-8 -*-
import math
a=int(input('digite a'))
b=int(input('digite b'))
if a>0 and b>0:
d=2
while d<=a:
if a%d==0 and b%d==0:
mdc=d
d=1+d
print("mdc(%d,%d)=%d" %(a,b,mdc))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b6ce54f2fb1f2d0fd8c484522e225a8f27118954 | eaae3d8aa68a37824387ab05201a84f347004e3b | /docs/html/python_example.py | c039a2fb109d51c291515f87d2ba721a2fb82baa | [
"MIT"
] | permissive | apmoore1/rtf2xml | 254e2732e89003bc5622776a82757c20e5fe6999 | b80bd43c91c7e18088489eae9060d4d7a5b96d13 | refs/heads/master | 2021-06-10T09:15:42.018346 | 2017-01-16T19:08:59 | 2017-01-16T19:08:59 | 79,149,815 | 0 | 1 | null | 2017-01-16T19:04:48 | 2017-01-16T19:04:48 | null | UTF-8 | Python | false | false | 1,879 | py | #!/usr/bin/env python
import sys
import rtf2xml.ParseRtf
def Handle_Main():
"""Handles options and creates a parse object """
try:
parse_obj =rtf2xml.ParseRtf.ParseRtf(
in_file = 'in.rtf',
# these are optional
# determine the output file
out_file = 'out.xml',
# determine the run level. The default is 1.
run_level = 3,
# The name of a debug directory, if you are running at
# run level 3 or higer.
debug = 'debug_dir',
# Convert symbol fonts to unicode equivelents. Default
# is 1
convert_symbol = 1,
# Convert Zapf fonts to unicode equivelents. Default
# is 1.
convert_zapf = 1,
# Convert Wingding fonts to unicode equivelents.
# Default is 1.
convert_wingdings = 1,
# Convert RTF caps to real caps.
# Default is 1.
convert_caps = 1,
# Indent resulting XML.
# Default is 0 (no indent).
indent = 1,
# Form lists from RTF. Default is 1.
form_lists = 1,
# Convert headings to sections. Default is 0.
headings_to_sections = 1,
# Group paragraphs with the same style name. Default is 1.
group_styles = 1,
# Group borders. Default is 1.
group_borders = 1,
# Write or do not write paragraphs. Default is 0.
empty_paragraphs = 0,
)
parse_obj.parse_rtf()
except rtf2xml.ParseRtf.InvalidRtfException, msg:
sys.stderr.write(str(msg))
sys.exit(1)
except rtf2xml.ParseRtf.RtfInvalidCodeException, msg:
sys.stderr.write(str(msg))
sys.exit(1)
if __name__=='__main__':
Handle_Main()
| [
"paulhtremblay@gmail.com"
] | paulhtremblay@gmail.com |
f361ea6bd7ea53223419d2c8453f09fc3be2b524 | fb67821b542292fe921c9e628ebe69b9bd1ecb66 | /firstpro/firstpro/settings.py | b324acccee7bfd033b42b30bdecb4c4cc0d89c90 | [] | no_license | smrkhan123/MyDjango | 97cc13e33a686f325be03618b915cf571d4a6fc2 | be106cb64a52bf7bef8b4960089af1afe5480df6 | refs/heads/master | 2020-08-20T07:53:59.285250 | 2019-10-18T10:25:56 | 2019-10-18T10:25:56 | 215,998,313 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,236 | py |
"""
Django settings for firstpro project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*4fn4+pe&9005+*p#llam7-n%7!*!xy%_l)=a(o72kv-8=$(9k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Article',
'sameer',
'Accounts'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'firstpro.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'firstpro.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'static')
]
| [
"sk862147@gmail.com"
] | sk862147@gmail.com |
3f49b661713b37e8fcba1533d294c012ebf6b117 | 238900636ac22ba9776dfd022fcd5e2b6e788279 | /src/brainroller/findemptylocs.py | 09a7074d34a65cd3ca0d2c958b2d589bae786de2 | [] | no_license | ricepaper1/tensorflow_apps | 5b22f928a7283c353bde7242fa2d1060e6778594 | bcbf2873afc13e05cfdd0bf99bc5e222aa3820b8 | refs/heads/master | 2022-02-07T13:38:34.578175 | 2019-05-29T19:19:13 | 2019-05-29T19:19:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,505 | py | #! /usr/bin/env python
'''
Created on May 31, 2016
@author: welling
'''
import sys
import random
import os.path
import cPickle
import math
import spilltree3D
sys.path.extend(['/home/welling/Fiasco/fiasco_final/bin/LINUXX86_64',
'/home/welling/shtools/SHTOOLS-3.2'])
# sys.path.extend(['/home/welling/Fiasco/Fiasco_final/src/fmri',
# '/home/welling/Fiasco/Fiasco_final/bin/LINUXX86_64',
# '/home/welling/git/SHTOOLS'])
from traceneighbors import UsefulVtx, Vtx, loadSkipTable
# from transforms import eulerRzRyRzToTrans, transToEulerRzRyRz, makeAligningRotation
# from writegeom import writeBOV, plotSphere, writeVtkPolylines
# from sampler import ArraySampler
#from yamlblocks import BlockGenerator
# --- Sampling parameters -------------------------------------------------
radPixels = 20        # half-edge of the sampling neighborhood, in pixels/voxels
cutoffRad = 10.0      # required minimum distance from any trace vertex (same units)
baseName = 'block'
# radPixels = 100
# baseName = 'bigblock'
maxL = 48             # presumably max spherical-harmonic degree (SHTOOLS) -- TODO confirm
# --- Fish EM core volume geometry ---------------------------------------
fishCoreFile = '/pylon1/pscstaff/awetzel/ZF-test-files/60nm-cores/V4750_2150_04000-08999.vol'
fishCoreXSize = 1024
fishCoreYSize = 1024
fishCoreZSize = 4900
# Offsets map volume-local indices to global coordinates.  X and Y are
# centered on the core (offset minus half size); Z starts at the slab origin.
fishCoreXOffset = 4750. - (fishCoreXSize/2)
fishCoreYOffset = 2150. - (fishCoreYSize/2)
fishCoreZOffset = 4000
# --- Input file locations ------------------------------------------------
#baseDir = '/pylon2/pscstaff/welling'
baseDir = '/home/welling/brainroller'
usefulVtxFile = os.path.join(baseDir, 'useful_trace_neighborhoods.pkl')
skipFile = os.path.join(baseDir, 'skips.txt')
traceFile = os.path.join(baseDir, 'traces.pkl')
class PlainPt(object):
    """Bare-bones 3-D point exposing the getLoc() accessor that the
    spill-tree code expects from vertex-like objects."""
    def __init__(self, x, y, z):
        # Keep the coordinates as a mutable [x, y, z] list, matching the
        # layout used by the traced-vertex classes.
        self.coords = list((x, y, z))
    def getLoc(self):
        """Return the underlying [x, y, z] list (not a copy)."""
        return self.coords
class BoundedRandomSampler(object):
    """Uniform random point sampler over the fish-core volume, shrunk by
    rMax on each side so a neighborhood of half-edge rMax around any
    sample stays inside the volume.

    Relies on the module-level fishCore*Size / fishCore*Offset constants.
    """
    def __init__(self, rMax):
        self.rMax = rMax
        xMin = rMax
        xMax = fishCoreXSize - rMax
        yMin = rMax
        yMax = fishCoreYSize - rMax
        zMin = rMax
        # BUG FIX: this previously read fishCoreYSize (copy-paste), so the
        # sampled z range covered only ~1024 of the 4900 available slices.
        zMax = fishCoreZSize - rMax
        # Precompute offset + scale so getPt() is a single affine transform
        # of random.random() per axis.
        self.xOffset = xMin + fishCoreXOffset
        self.yOffset = yMin + fishCoreYOffset
        self.zOffset = zMin + fishCoreZOffset
        self.xScale = xMax - xMin
        self.yScale = yMax - yMin
        self.zScale = zMax - zMin
    def getPt(self):
        """Return a PlainPt drawn uniformly from the shrunken volume."""
        return PlainPt(random.random() * self.xScale + self.xOffset,
                       random.random() * self.yScale + self.yOffset,
                       random.random() * self.zScale + self.zOffset)
    def outerClip(self, pt):
        """Return True if pt lies within the core bounds padded by rMax
        on every side; False if it falls outside that padded box."""
        x, y, z = pt.getLoc()
        if z < fishCoreZOffset - self.rMax:
            return False
        elif x < fishCoreXOffset - self.rMax:
            return False
        elif y < fishCoreYOffset - self.rMax:
            return False
        elif z > fishCoreZOffset + fishCoreZSize + self.rMax:
            return False
        elif x > fishCoreXOffset + fishCoreXSize + self.rMax:
            return False
        elif y > fishCoreYOffset + fishCoreYSize + self.rMax:
            return False
        else:
            return True
def main():
    # Sample 5000 random points inside the fish-core volume that are
    # farther than cutoffRad from every traced vertex, and pickle the
    # resulting (x, y, z) tuples to emptySamps.pkl.  (Python 2 script.)
    edgeLen = 2*radPixels + 1  # edge length of a sampling cube, in pixels
    rMax = float(radPixels)
    # transformer = SHTransformer(edgeLen, maxL)
    # with open(usefulVtxFile, 'r') as pklF:
    #     with open(skipFile, 'r') as skipF:
    #         usefulVtxDict = UsefulVtx.load(pklF, 30000, skipF)
    # Load traced vertices/objects and attach the skip table to each vertex.
    with open(traceFile, 'r') as f:
        vtxDict, objDict = cPickle.load(f)
    with open(skipFile, 'rU') as skipF:
        skipTbl = loadSkipTable(skipF, 30000)
    for v in vtxDict.values():
        v.setSkipTable(skipTbl)
    print '%d vertices in %d objects' % (len(vtxDict), len(objDict))
    # print 'Loaded %d useful vertices' % len(usefulVtxDict)
    ptSampler = BoundedRandomSampler(rMax)
    # Keep only trace vertices inside (or near) the core volume; these are
    # the obstacles the random samples must stay away from.
    testPts = []
    for v in vtxDict.values():
        if ptSampler.outerClip(v):
            x, y, z = v.getLoc()
            testPts.append(PlainPt(x, y, z))
    print '%d useful trace points' % len(testPts)
    # Spill tree gives fast approximate nearest-neighbor queries in 3D.
    spilltree = spilltree3D.SpTree(testPts)
    print 'spilltree created'
    random.seed(1234)  # fixed seed for reproducible sampling
    samplePts = []
    ct = 0
    tryCt = 0
    cutSqr = cutoffRad * cutoffRad  # compare squared distances, avoids sqrt
    # Rejection sampling: keep a random point only if its (approximate)
    # nearest trace vertex is farther than cutoffRad.
    while True:
        pt = ptSampler.getPt()
        _, sepsqr = spilltree.findApproxNearest(pt)
#         print 'samplept: %s' % pt.getLoc()
#         print 'nearPt: %s at %s' % (nearPt.id, nearPt.getLoc())
#         print 'sepsqr: %s' % sepsqr
        if sepsqr > cutSqr:
            samplePts.append(tuple(pt.getLoc()))
            ct += 1
        tryCt += 1
        if tryCt % 1000 == 1:
            print '%d samples in %d tries' % (ct, tryCt)
        if ct >= 5000:
            break
    with open('emptySamps.pkl', 'w') as f:
        cPickle.dump(samplePts, f)
#     blockGen = BlockGenerator(rMax, edgeLen, maxL,
#                               usefulVtxDict, fishCoreFile,
#                               fishCoreXSize, fishCoreYSize, fishCoreZSize,
#                               baseName=baseName)
#
#     #sampleVtx = usefulVtxDict[6985]
#     #sampleVtx = usefulVtxDict.values()[17]
#     random.seed(1234)
#     indexList = usefulVtxDict.keys()[:]
#     indexList.sort()
#     for idx, sampleId in enumerate(random.sample(indexList, 5000)):
#         if (idx >= 4968):
#             try:
#                 print 'starting sample %s' % sampleId
#                 blockGen.writeBlock(sampleId,
#                                     {'xOffset': fishCoreXOffset,
#                                      'yOffset': fishCoreYOffset,
#                                      'zOffset': fishCoreZOffset})
#             except Exception, e:
#                 print 'Sample id %s failed: %s' % (sampleId, e)
    print 'completed main loop'
if __name__ == '__main__':
    main()
| [
"welling@psc.edu"
] | welling@psc.edu |
46237aef41eb9f622b8d9a2300c0e36dec363cf0 | e0db9c0559d0cd362a0e3e7b96fd0c1c0fbc68e4 | /string21.py | 6b39115b313556a222d709f14fe90874f5bc1d37 | [] | no_license | BrettMcGregor/w3resource | ba338e91d24db773de6db6aec8c776a7df003ba0 | cea43e3f471edff1ca0843eeab1fa299f491badf | refs/heads/master | 2020-03-13T04:11:10.194964 | 2018-05-22T03:21:13 | 2018-05-22T03:21:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | # Write a Python function to convert a given string to all
# uppercase if it contains at least 2 uppercase characters in the
# first 4 characters.
stringa = "HtTps://www.w3rresource.com"
count = 0
for i in range(5):
if stringa[i].isupper():
count += 1
if count > 1:
print(stringa.upper())
else:
print(stringa)
| [
"brett.w.mcgregor@gmail.com"
] | brett.w.mcgregor@gmail.com |
a1a5068f26f9fcec4afd026d777bab1f3e4795e6 | 3cdf103f66fd032352e96640ed072e30c63e1b74 | /template/__init__.py | 6ee598aa88d19cabed6b7d21aeb4457cc7b52877 | [] | no_license | JayGitH/Bussiness-Monitoring | a193872c08553370c0f4624215a8cbf0f94ea3dc | 0535e26acf4f16a385e0da538178b36dab9bdbc9 | refs/heads/master | 2023-03-16T13:49:23.520708 | 2019-09-17T07:55:24 | 2019-09-17T07:55:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # -*- coding:utf-8 -*-
# __author__ = Amos
# Email = 379833553@qq.com
# Create_at = 2019/1/16 2:34 PM
# FileName = __init__.py
| [
"379833553@qq.com"
] | 379833553@qq.com |
b470fb10194019ee727baacef3b307c45c089584 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep010/Gather1_W_change_C_fix_2blk/ep0_test/pyr_0s/L5/step10_a.py | 068fb2c7bc9f20951d26d26257e3c2b442a0c6ca | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,723 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                         ### path of the step10 script currently being executed
code_exe_path_element = code_exe_path.split("\\")                  ### split the path so we can locate which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")            ### index of the kong_model2 component within the path
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### reconstruct the kong_model2 directory
import sys                                                         ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print("    code_exe_path:", code_exe_path)
# print("    code_exe_path_element:", code_exe_path_element)
# print("    code_dir:", code_dir)
# print("    kong_layer:", kong_layer)
# print("    kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer     ### the -1 converts a length into an index
# print("    kong_to_py_layer:", kong_to_py_layer)
if   (kong_to_py_layer == 0): template_dir = ""
elif (kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] would drop the "step1x_" prefix; keeping meaningful names, so 0 is used instead
elif (kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] would drop the "mask_" prefix (added because Python modules cannot start with a digit); the automatic ordering is acceptable, so 0 is used
elif (kong_to_py_layer >  3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)  ### e.g. template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_0side_L5 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
import Exps_7_v3.doc3d.Ablation4_ch016_ep010.I_w_M_to_W_pyr.pyr_0s.L5.step10_a as I_w_M_to_W_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep010.W_w_M_to_C_pyr.pyr_2s.L5.step10_a import ch032_1side_6__2side_6__ep010 as W_w_M_to_C_p20_2s_L5_Mae_Sob_k09
#############################################################################################################################################################################################################
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
6_mask_unet/自己命的名字/result_a
6_mask_unet/自己命的名字/result_b
6_mask_unet/自己命的名字/...
'''
use_db_obj = type8_blender_kong_doc3d_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### z, y, x 順序是看 step07_b_0b_Multi_UNet 來對應的喔
#############################################################
### 為了resul_analyze畫空白的圖,建一個empty的 Exp_builder
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_0side = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(I_to_Wx_Wy_Wz=I_w_M_to_W_p20_pyr.ch032_0side, W_to_Cx_Cy=W_w_M_to_C_p20_2s_L5_Mae_Sob_k09).set_result_name(result_name="p20_L5-ch032_0side")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
### 直接按 F5 或打 python step10_b1_exp_obj_load_and_train_and_test.py,後面沒有接東西喔!才不會跑到下面給 step10_b_subprocss.py 用的程式碼~~~
ch032_0side.build().run()
# print('no argument')
sys.exit()
### 以下是給 step10_b_subprocess.py 用的,相當於cmd打 python step10_b1_exp_obj_load_and_train_and_test.py 某個exp.build().run()
eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
68dbd7dfa64f57c6443696841085abbebad9111d | d6952f048727add5b54a521d04f6c9b5889bcd35 | /pollination_sdk/models/project_folder.py | 0e150c669fb8aacf50defcdfda3311527b182f19 | [] | no_license | TfedUD/python-sdk | bf719644041c2ab7b741af9c7fb8e5acfe085922 | 7ddc34611de44d2f9c5b217cf9b9e7cec27b2a27 | refs/heads/master | 2023-08-10T21:13:45.270193 | 2021-06-21T14:48:36 | 2021-06-21T14:51:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,473 | py | # coding: utf-8
"""
pollination-server
Pollination Server OpenAPI Definition # noqa: E501
The version of the OpenAPI document: 0.13.0
Contact: info@pollination.cloud
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pollination_sdk.configuration import Configuration
class ProjectFolder(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'annotations': 'dict(str, str)',
        'path': 'str',
        'type': 'str'
    }
    attribute_map = {
        'annotations': 'annotations',
        'path': 'path',
        'type': 'type'
    }
    def __init__(self, annotations=None, path=None, type='ProjectFolder', local_vars_configuration=None):  # noqa: E501
        """ProjectFolder - a model defined in OpenAPI"""  # noqa: E501
        # A default Configuration controls client-side validation behavior
        # (see the `type` setter below).
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._annotations = None
        self._path = None
        self._type = None
        self.discriminator = None
        # Route non-None constructor arguments through the property setters
        # so their validation applies.
        if annotations is not None:
            self.annotations = annotations
        if path is not None:
            self.path = path
        if type is not None:
            self.type = type
    @property
    def annotations(self):
        """Gets the annotations of this ProjectFolder.  # noqa: E501
        An optional dictionary to add annotations to inputs. These annotations will be used by the client side libraries.  # noqa: E501
        :return: The annotations of this ProjectFolder.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._annotations
    @annotations.setter
    def annotations(self, annotations):
        """Sets the annotations of this ProjectFolder.
        An optional dictionary to add annotations to inputs. These annotations will be used by the client side libraries.  # noqa: E501
        :param annotations: The annotations of this ProjectFolder.  # noqa: E501
        :type annotations: dict(str, str)
        """
        self._annotations = annotations
    @property
    def path(self):
        """Gets the path of this ProjectFolder.  # noqa: E501
        The path to a folder where files and folders can be sourced. For a local filesystem this can be \"C:\\Users\\me\\jobs\\test\".  # noqa: E501
        :return: The path of this ProjectFolder.  # noqa: E501
        :rtype: str
        """
        return self._path
    @path.setter
    def path(self, path):
        """Sets the path of this ProjectFolder.
        The path to a folder where files and folders can be sourced. For a local filesystem this can be \"C:\\Users\\me\\jobs\\test\".  # noqa: E501
        :param path: The path of this ProjectFolder.  # noqa: E501
        :type path: str
        """
        self._path = path
    @property
    def type(self):
        """Gets the type of this ProjectFolder.  # noqa: E501
        :return: The type of this ProjectFolder.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this ProjectFolder.
        :param type: The type of this ProjectFolder.  # noqa: E501
        :type type: str
        """
        # `type` is the schema discriminator; when client-side validation is
        # enabled it must match the literal string "ProjectFolder".
        if (self.local_vars_configuration.client_side_validation and
                type is not None and not re.search(r'^ProjectFolder$', type)):  # noqa: E501
            raise ValueError(r"Invalid value for `type`, must be a follow pattern or equal to `/^ProjectFolder$/`")  # noqa: E501
        self._type = type
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize every declared attribute; lists and dicts of
        # models are converted element-wise via their own to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ProjectFolder):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ProjectFolder):
            return True
        return self.to_dict() != other.to_dict()
| [
"antoinedao1@gmail.com"
] | antoinedao1@gmail.com |
3ab90496ea31ecc33f1d7296a54b98f6b01e95a3 | fe3265b72e691c6df8ecd936c25b6d48ac33b59a | /homeassistant/components/livisi/switch.py | bcb9a2044119ad26ab3c66b78575729ed4c684bc | [
"Apache-2.0"
] | permissive | bdraco/home-assistant | dcaf76c0967783a08eec30ce704e5e9603a2f0ca | bfa315be51371a1b63e04342a0b275a57ae148bd | refs/heads/dev | 2023-08-16T10:39:15.479821 | 2023-02-21T22:38:50 | 2023-02-21T22:38:50 | 218,684,806 | 13 | 7 | Apache-2.0 | 2023-02-21T23:40:57 | 2019-10-31T04:33:09 | Python | UTF-8 | Python | false | false | 5,432 | py | """Code to handle a Livisi switches."""
from __future__ import annotations
from typing import Any
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
DOMAIN,
LIVISI_REACHABILITY_CHANGE,
LIVISI_STATE_CHANGE,
LOGGER,
PSS_DEVICE_TYPE,
)
from .coordinator import LivisiDataUpdateCoordinator
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up switch device."""
    coordinator: LivisiDataUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
    @callback
    def handle_coordinator_update() -> None:
        """Add switch."""
        # Runs on every coordinator refresh: create entities only for PSS
        # (pluggable smart switch) devices not seen before, so repeated
        # refreshes never register duplicates.
        shc_devices: list[dict[str, Any]] = coordinator.data
        entities: list[SwitchEntity] = []
        for device in shc_devices:
            if (
                device["type"] == PSS_DEVICE_TYPE
                and device["id"] not in coordinator.devices
            ):
                livisi_switch: SwitchEntity = create_entity(
                    config_entry, device, coordinator
                )
                LOGGER.debug("Include device type: %s", device["type"])
                coordinator.devices.add(device["id"])
                entities.append(livisi_switch)
        async_add_entities(entities)
    # Unsubscribe the listener automatically when the config entry unloads.
    config_entry.async_on_unload(
        coordinator.async_add_listener(handle_coordinator_update)
    )
def create_entity(
    config_entry: ConfigEntry,
    device: dict[str, Any],
    coordinator: LivisiDataUpdateCoordinator,
) -> SwitchEntity:
    """Build a LivisiSwitch entity from a raw device record.

    The device dict supplies identity and configuration; the coordinator
    supplies the room-name lookup for the device's location.
    """
    return LivisiSwitch(
        config_entry,
        coordinator,
        unique_id=device["id"],
        manufacturer=device["manufacturer"],
        device_type=device["type"],
        name=device["config"]["name"],
        capability_id=device["capabilities"][0],
        room=coordinator.rooms[device["location"]],
    )
class LivisiSwitch(CoordinatorEntity[LivisiDataUpdateCoordinator], SwitchEntity):
    """Represents the Livisi Switch."""
    def __init__(
        self,
        config_entry: ConfigEntry,
        coordinator: LivisiDataUpdateCoordinator,
        unique_id: str,
        manufacturer: str,
        device_type: str,
        name: str,
        capability_id: str,
        room: str,
    ) -> None:
        """Initialize the Livisi Switch."""
        self.config_entry = config_entry
        self._attr_unique_id = unique_id
        self._attr_name = name
        # Capability id addresses this switch's on/off channel in the
        # Livisi REST API.
        self._capability_id = capability_id
        self.aio_livisi = coordinator.aiolivisi
        # Unavailable until the first state fetch in async_added_to_hass.
        self._attr_available = False
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, unique_id)},
            manufacturer=manufacturer,
            model=device_type,
            name=name,
            suggested_area=room,
            via_device=(DOMAIN, config_entry.entry_id),
        )
        super().__init__(coordinator)
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn the entity on."""
        response = await self.aio_livisi.async_pss_set_state(
            self._capability_id, is_on=True
        )
        # A None response means the request failed; mark unavailable.
        if response is None:
            self._attr_available = False
            raise HomeAssistantError(f"Failed to turn on {self._attr_name}")
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the entity off."""
        response = await self.aio_livisi.async_pss_set_state(
            self._capability_id, is_on=False
        )
        if response is None:
            self._attr_available = False
            raise HomeAssistantError(f"Failed to turn off {self._attr_name}")
    async def async_added_to_hass(self) -> None:
        """Register callbacks."""
        # Fetch the initial on/off state once when the entity enters hass.
        response = await self.coordinator.async_get_pss_state(self._capability_id)
        if response is None:
            self._attr_is_on = False
            self._attr_available = False
        else:
            self._attr_is_on = response
        # Subscribe to push updates; async_on_remove unsubscribes on removal.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                f"{LIVISI_STATE_CHANGE}_{self._capability_id}",
                self.update_states,
            )
        )
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                f"{LIVISI_REACHABILITY_CHANGE}_{self.unique_id}",
                self.update_reachability,
            )
        )
    @callback
    def update_states(self, state: bool) -> None:
        """Update the states of the switch device."""
        self._attr_is_on = state
        self.async_write_ha_state()
    @callback
    def update_reachability(self, is_reachable: bool) -> None:
        """Update the reachability of the switch device."""
        self._attr_available = is_reachable
        self.async_write_ha_state()
| [
"noreply@github.com"
] | bdraco.noreply@github.com |
84a74f7f1adcb4948a83a6a6bc07fe6fd2aebf37 | 69851e673bad63c54138fd4c6a7532d298b28728 | /test/asyncore_echo_server.py | b12383219e34e278fe9d0fe6ed31f733b0d8f4cc | [] | no_license | ppppdm/mtcpsoft | a23e5b7b5f0144a2bad927824194b9534ee0a2f0 | 3a02474960d2903d4979a89b1c7568932f7ec006 | refs/heads/master | 2020-05-30T15:05:12.850440 | 2013-08-02T09:00:06 | 2013-08-02T09:00:06 | 9,294,934 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,062 | py | import logging
import asyncore
import socket
logging.basicConfig(level=logging.DEBUG, format="%(created)-15s %(msecs)d %(levelname)8s %(thread)d %(name)s %(message)s")
log = logging.getLogger(__name__)
BACKLOG = 5
SIZE = 1024
class EchoHandler(asyncore.dispatcher):
    # Per-connection handler: buffers whatever arrives and echoes it back.
    # One instance is created by EchoServer for each accepted socket.
    def __init__(self, conn_sock, client_address, server):
        self.server = server
        self.client_address = client_address
        self.buffer = ""
        # We don't have anything to write, to start with
        self.is_writable = False
        # Create ourselves, but with an already provided socket
        asyncore.dispatcher.__init__(self, conn_sock)
        log.debug("created handler; waiting for loop")
    def readable(self):
        return True # We are always happy to read
    def writable(self):
        return self.is_writable # But we might not have
                                # anything to send all the time
    def handle_read(self):
        # Called by the asyncore loop when the socket is readable.
        log.debug("handle_read")
        data = self.recv(SIZE)
        log.debug("after recv")
        if data:
            log.debug("got data")
            self.buffer += data
            self.is_writable = True # sth to send back now
        else:
            # recv() returning empty means the peer closed the connection.
            log.debug("got null data")
    def handle_write(self):
        # Called when the socket is writable; send() may accept only part
        # of the buffer, so keep the unsent tail for the next round.
        log.debug("handle_write")
        if self.buffer:
            sent = self.send(self.buffer)
            log.debug("sent data")
            self.buffer = self.buffer[sent:]
        else:
            log.debug("nothing to send")
        if len(self.buffer) == 0:
            self.is_writable = False
    # Will this ever get called? Does loop() call
    # handle_close() if we called close, to start with?
    def handle_close(self):
        log.debug("handle_close")
        log.info("conn_closed: client_address=%s:%s" % \
                     (self.client_address[0],
                      self.client_address[1]))
        self.close()
        #pass
class EchoServer(asyncore.dispatcher):
    """Asyncore-based TCP echo server.

    Listens on `address` and hands every accepted connection to
    `handlerClass` (EchoHandler by default).  The class attributes mirror
    the SocketServer conventions.
    """
    allow_reuse_address = False
    request_queue_size = 5
    address_family = socket.AF_INET
    socket_type = socket.SOCK_STREAM
    def __init__(self, address, handlerClass=EchoHandler):
        self.address = address
        self.handlerClass = handlerClass
        asyncore.dispatcher.__init__(self)
        self.create_socket(self.address_family,
                           self.socket_type)
        if self.allow_reuse_address:
            # BUG FIX: was self.set_resue_addr() (typo), which raised
            # AttributeError whenever allow_reuse_address was enabled.
            self.set_reuse_addr()
        self.server_bind()
        self.server_activate()
    def server_bind(self):
        """Bind the listening socket to self.address."""
        self.bind(self.address)
        log.debug("bind: address=%s:%s" % (self.address[0], self.address[1]))
    def server_activate(self):
        """Start listening with the configured backlog."""
        self.listen(self.request_queue_size)
        log.debug("listen: backlog=%d" % self.request_queue_size)
    def fileno(self):
        return self.socket.fileno()
    def serve_forever(self):
        """Enter the global asyncore event loop (blocks until it exits)."""
        asyncore.loop()
    # TODO: try to implement handle_request()
    # Internal use
    def handle_accept(self):
        # accept() may return None when the connection disappeared between
        # the readiness notification and the accept call; just skip it.
        pair = self.accept()
        if pair is None:
            return
        conn_sock, client_address = pair
        if self.verify_request(conn_sock, client_address):
            self.process_request(conn_sock, client_address)
    def verify_request(self, conn_sock, client_address):
        """Hook for subclasses to reject connections; default accepts all."""
        return True
    def process_request(self, conn_sock, client_address):
        """Wrap the accepted socket in a handler instance."""
        log.info("conn_made: client_address=%s:%s" % \
                 (client_address[0],
                  client_address[1]))
        self.handlerClass(conn_sock, client_address, self)
    def handle_close(self):
        self.close()
if __name__=='__main__':
    # Self-import quirk: when run as a script this re-imports the module
    # under its file name, so EchoServer is reached via the module object.
    import asyncore_echo_server
    interface = 'localhost'
    port = 6001
    server = asyncore_echo_server.EchoServer((interface, port))
    server.serve_forever()  # blocks inside asyncore.loop()
| [
"ppppdm@gmail.com"
] | ppppdm@gmail.com |
58e929b86c5c6af28135527b1d7338671acd45f5 | a8939556f37cbc7313b7e648f0feed143951fc86 | /biblioteca/apps/permisos/migrations/0003_permiso_emma.py | ab9d6c63eda9676785137caa3ae5987b4ba00510 | [] | no_license | miemma/biblioteca | 212336177fd304be7d20f57001fb567e220bb1ef | 0c170cc9ae75f0047a6e1ef6a039d47084989333 | refs/heads/master | 2020-12-25T15:09:17.683860 | 2016-08-02T14:50:56 | 2016-08-02T14:50:56 | 51,967,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-07-27 14:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('permisos', '0002_auto_20160726_1028'),
]
operations = [
migrations.AddField(
model_name='permiso',
name='emma',
field=models.BooleanField(default=True),
),
]
| [
"mauriciodinki@gmail.com"
] | mauriciodinki@gmail.com |
a4d2108824be0ac9fcd647eb84358ec9b51fbaea | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_207/535.py | 49c82db37b6a6d5585e9da8e7da7c644ace9e867 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | # input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
t = int(input()) # read a line with a single integer
for c in range(1, t + 1):
n,r,o,y,g,b,v = map(int, input().split(" ")) # read a list of integers, 2 in this case
l = {'R':r, 'O':o, 'Y': y, 'G': g, 'B': b, 'V': v}
neigh = {'R': 'BGY', 'O': 'VGB', 'Y': 'BVR', 'G': 'VRO', 'B': 'ROY', 'V': 'OYG'}
# pierwszy nie ma znaczenia
can = True
res = ""
m = max(l, key=l.get)
res += m
l[m] = l[m] - 1
# potem najdluzszych cykli
visited = {'R': -1, 'O':-1, 'Y': -1, 'G': -1, 'B': -1, 'V': -1}
visited[m] = 0
for i in range(n-1):
prev = res[-1]
nex = dict((k, l[k]) for k in neigh[prev] if l[k])
if not nex:
can = False
break
m = min(nex, key=visited.get)
res += m
visited[m] = i
l[m] = l[m] - 1
if can and n > 1:
can = res[-1] in neigh[res[0]]
print("Case #{}: {}".format(c, res if can else "IMPOSSIBLE"))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
54b39fb0c7e6ef24872826a2dddd60ede9dae49d | f561a219c57bd75790d3155acac6f54299a88b08 | /city/admin.py | 0519873f37af6e7fa4913e0e2fe60a712456d5c7 | [] | no_license | ujjwalagrawal17/OfferCartServer | 1e81cf2dc17f19fa896062c2a084e6b232a8929e | b3cd1c5f8eecc167b6f4baebed3c4471140d905f | refs/heads/master | 2020-12-30T15:31:04.380084 | 2017-05-24T18:26:20 | 2017-05-24T18:26:20 | 91,155,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from django.contrib import admin
from .models import *
# Register your models here.
class CityDataAdmin(admin.ModelAdmin):
    # Columns shown on the CityData changelist page in the Django admin.
    list_display = ["id", "name", "created", "modified"]
admin.site.register(CityData, CityDataAdmin)
class UserCityDataAdmin(admin.ModelAdmin):
    # Columns shown on the UserCityData changelist page.
    list_display = ["id", "city_id", "user_id"]
admin.site.register(UserCityData, UserCityDataAdmin)
class CityFcmDataAdmin(admin.ModelAdmin):
    # Columns shown on the CityFcmData changelist page.
    list_display = ["id", "city_id", "user_id"]
admin.site.register(CityFcmData, CityFcmDataAdmin)
| [
"ujjwal.iitism@gmail.com"
] | ujjwal.iitism@gmail.com |
bc9c12c96f2b5f6f38674dd2cee18c4c49df274b | a85303ac9116e57d756afd5feb9e0b22f6ebe7a4 | /tools/region_recall.py | 765657c8c9aa1bfcf4fb68703c9d956f4c26c156 | [] | no_license | TWSFar/visdrone | 866b1a80f02bd05183176047ea25a4600d34a3cc | 54bb301cfdd7b0ce44e3e4d168441721776efe11 | refs/heads/master | 2020-07-12T05:14:50.191525 | 2019-08-27T10:33:40 | 2019-08-27T10:33:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | import os, sys
import cv2
import argparse
import numpy as np
from glob import glob
from tqdm import tqdm
import utils
import pdb
from datasets import get_dataset
def parse_args(argv=None):
    """Parse command-line options for the mask-result viewer.

    Args:
        argv: optional list of argument strings. Defaults to ``sys.argv[1:]``
            (passing an explicit list keeps the function testable/embeddable
            and is backward-compatible with the old zero-argument call).

    Returns:
        argparse.Namespace with a single ``dataset`` attribute.
    """
    parser = argparse.ArgumentParser(description="show mask results")
    # nargs='?' makes the positional optional, so the declared default
    # ('VisDrone') can actually take effect; argparse ignores a default on a
    # plain required positional.
    parser.add_argument('dataset', type=str, nargs='?', default='VisDrone',
                        choices=['VisDrone', 'HKB'], help='dataset name')
    args = parser.parse_args(argv)
    return args
if __name__ == '__main__':
    args = parse_args()
    dataset = get_dataset(args.dataset)
    val_list = dataset.get_imglist('val')
    # Directory holding the segmentation masks predicted by DeepLab.
    mask_path = '../pytorch-deeplab-xception/run/mask-hkbval'
    label_object = []      # ground-truth box count per image
    detect_object = []     # ground-truth boxes covered by some region, per image
    mask_object = []       # number of generated crop regions per image
    undetected_img = []    # images with at least one uncovered ground-truth box
    pixel_num = []         # sum of mask pixel values per image (mask "cost")
    for img_path in tqdm(val_list, ncols=80):
        img_name = os.path.basename(img_path)
        raw_file = os.path.join(mask_path, img_name[:-4]+'.png')
        img = cv2.imread(img_path)
        height, width = img.shape[:2]
        mask_img = cv2.imread(raw_file, cv2.IMREAD_GRAYSCALE)
        mask_h, mask_w = mask_img.shape[:2]
        pixel_num.append(np.sum(mask_img))
        label_box, _ = dataset.get_gtbox(img_path)
        # Turn the low-resolution mask into candidate crop regions, then map
        # them back to full-image coordinates.
        region_box, contours = utils.generate_box_from_mask(mask_img)
        region_box = utils.region_postprocess(region_box, contours, (mask_w, mask_h))
        region_box = utils.resize_box(region_box, (mask_w, mask_h), (width, height))
        region_box = utils.generate_crop_region(region_box, (width, height))
        # Count ground-truth boxes that overlap at least one crop region.
        count = 0
        for box1 in label_box:
            for box2 in region_box:
                if utils.overlap(box2, box1):
                    count += 1
                    break
        label_object.append(len(label_box))
        detect_object.append(count)
        mask_object.append(len(region_box))
        if len(label_box) != count:
            undetected_img.append(img_name)
    # Recall = covered ground-truth boxes / all ground-truth boxes.
    print('recall: %f' % (np.sum(detect_object) / np.sum(label_object)))
    # print('cost avg: %f, std: %f' % (np.mean(pixel_num), np.std(pixel_num)))
    print('detect box avg: %f' %(np.mean(mask_object)))
    print(sorted(undetected_img))
"cyfhorse@gmail.com"
] | cyfhorse@gmail.com |
7828582f9ad5c1df41c4957b9ddc46bc8217c64a | ebd24e400986c57b4bb1b9578ebd8807a6db62e8 | /InstaGrade-FormBuilder/xlsxwriter/test/comparison/test_chart_format12.py | 5d79eda350742be44b8247dff8014a096551d270 | [] | no_license | nate-parrott/ig | 6abed952bf32119a536a524422037ede9b431926 | 6e0b6ac0fb4b59846680567150ce69a620e7f15d | refs/heads/master | 2021-01-12T10:15:15.825004 | 2016-12-13T21:23:17 | 2016-12-13T21:23:17 | 76,399,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None
        # Paths of the workbook we generate and of the Excel reference file.
        filename = 'chart_format12.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        # Nothing is excluded from the XLSX comparison for this test.
        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with chart formatting."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'line'})
        # Fixed axis ids so the generated XML matches the reference file.
        chart.axis_ids = [54794880, 56296576]
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])
        # First series carries a formatted moving-average trendline — the
        # feature under test.
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'trendline': {
                'type': 'moving_average',
                'period': 2,
                'line': {
                    'color': 'red',
                    'width': 1,
                    'dash_type': 'long_dash',
                },
            },
        })
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
| [
"nateparro2t@gmail.com"
] | nateparro2t@gmail.com |
1ae9b2b955575614b0511b5ddb39ed26c3e76eb2 | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/domain/KoubeiMerchantKbcloudSubuserinfoQueryModel.py | 6eff376ef9495acbde6543c3e202f036563ce525 | [
"Apache-2.0"
] | permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 896 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMerchantKbcloudSubuserinfoQueryModel(object):
    """Request model for the Koubei kbcloud sub-user-info query API.

    The model carries a single ``user_id`` field and supports round-tripping
    to and from the plain-dict wire format used by the Alipay SDK.
    """

    def __init__(self):
        # Backing storage for the ``user_id`` property.
        self._user_id = None

    @property
    def user_id(self):
        """The user identifier carried by this request."""
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize to a plain dict; falsy/unset user_id is omitted."""
        params = dict()
        value = self.user_id
        if not value:
            return params
        # Nested SDK models know how to serialize themselves.
        if hasattr(value, 'to_alipay_dict'):
            params['user_id'] = value.to_alipay_dict()
        else:
            params['user_id'] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from dict ``d``; empty/missing input yields None."""
        if not d:
            return None
        model = KoubeiMerchantKbcloudSubuserinfoQueryModel()
        # Leave user_id as None when the key is absent.
        model.user_id = d.get('user_id', model.user_id)
        return model
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
class Inventory:
    """A fixed-capacity container of item names."""

    def __init__(self, capacity):
        # Name-mangled attribute keeps the capacity effectively private.
        self.__capacity = capacity
        self.items = []

    def __repr__(self):
        remaining = self.get_capacity() - len(self.items)
        listing = ', '.join(self.items)
        return f"Items: {listing}.\nCapacity left: {remaining}"

    def add_item(self, item):
        """Store ``item`` if there is room; otherwise report the failure.

        Returns None on success (matching the original behaviour) and an
        error string when the inventory is already full.
        """
        if len(self.items) >= self.__capacity:
            return "not enough room in the inventory"
        self.items.append(item)

    def get_capacity(self):
        """Return the maximum number of items this inventory can hold."""
        return self.__capacity
# Demo: fill a 2-slot inventory, then overflow it.
inventory = Inventory(2)
inventory.add_item("potion")
inventory.add_item("sword")
# Third add fails and returns the error string (printed here).
print(inventory.add_item("bottle"))
print(inventory.get_capacity())
print(inventory)
| [
"noreply@github.com"
] | antondelchev.noreply@github.com |
ef72b21dbd259ad4ab9f45b39c810dc2058e2b49 | 46ce3ba4d13a4d6aa20cbfc167937882b18b7f79 | /text-to-speech/caching.py | a590f96f840d3dbaf479ab8954f2f50bc7bb8a20 | [
"MIT"
] | permissive | hsouporto/Bahasa-NLP-Tensorflow | 835645b9cc68b0b69e331298648f820981508be6 | 4e6427230e36c2d79ec951c7f2c3501bf75f9a8a | refs/heads/master | 2022-03-04T17:07:14.443843 | 2019-11-24T17:33:02 | 2019-11-24T17:33:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,272 | py | import numpy as np
import librosa
import os
import scipy
import tqdm
sampling_rate = 22050  # audio sample rate in Hz
n_fft = 2048  # FFT size (the STFT below uses fourier_window_size instead)
frame_shift = 0.0125  # hop between analysis frames, in seconds
frame_length = 0.05  # analysis window length, in seconds
fourier_window_size = 2048  # STFT window size in samples
max_db = 100  # normalisation ceiling in dB
ref_db = 20  # reference level subtracted before normalisation, in dB
preemphasis = 0.97  # pre-emphasis filter coefficient
hop_length = int(sampling_rate * frame_shift)  # frame hop in samples
win_length = int(sampling_rate * frame_length)  # window length in samples
n_mels = 80  # number of mel filterbank channels
resampled = 5  # consecutive mel frames stacked per reduced time step
reduction_factor = 5  # unused in this script — presumably the decoder reduction factor
def get_spectrogram(audio_file):
    """Load a wav file and return normalised (mel, mag) spectrograms.

    Returns:
        mel: float32 array of shape (frames, n_mels), values in (0, 1].
        mag: float32 array of shape (frames, 1 + fourier_window_size // 2),
            values in (0, 1].
    """
    y, sr = librosa.load(audio_file, sr = sampling_rate)
    # Trim leading/trailing silence.
    y, _ = librosa.effects.trim(y)
    # Pre-emphasis: boost high frequencies before analysis.
    y = np.append(y[0], y[1:] - preemphasis * y[:-1])
    linear = librosa.stft(
        y = y,
        n_fft = fourier_window_size,
        hop_length = hop_length,
        win_length = win_length,
    )
    mag = np.abs(linear)
    # Project the linear magnitude spectrogram onto the mel scale.
    mel_basis = librosa.filters.mel(sampling_rate, fourier_window_size, n_mels)
    mel = np.dot(mel_basis, mag)
    # Amplitude -> decibels (floored to avoid log(0)), then clip-normalise.
    mel = 20 * np.log10(np.maximum(1e-5, mel))
    mag = 20 * np.log10(np.maximum(1e-5, mag))
    mel = np.clip((mel - ref_db + max_db) / max_db, 1e-8, 1)
    mag = np.clip((mag - ref_db + max_db) / max_db, 1e-8, 1)
    return mel.T.astype(np.float32), mag.T.astype(np.float32)
def load_file(path):
    """Return (stacked_mel, mag) spectrograms for one wav file.

    The mel spectrogram is zero-padded so its frame count is a multiple of
    ``resampled``, then reshaped so each row stacks ``resampled`` consecutive
    frames: final shape is (frames / resampled, n_mels * resampled).
    """
    mel, mag = get_spectrogram(path)
    t = mel.shape[0]
    # Pad up to the next multiple of ``resampled`` (0 if already aligned).
    num_paddings = resampled - (t % resampled) if t % resampled != 0 else 0
    mel = np.pad(mel, [[0, num_paddings], [0, 0]], mode = 'constant')
    mag = np.pad(mag, [[0, num_paddings], [0, 0]], mode = 'constant')
    return mel.reshape((-1, n_mels * resampled)), mag
# Output directories for the cached spectrograms.
if not os.path.exists('mel'):
    os.mkdir('mel')
if not os.path.exists('mag'):
    os.mkdir('mag')
# Collect all wav files from the three speech corpora.
tolong_sebut = [
    'tolong-sebut/' + i for i in os.listdir('tolong-sebut') if '.wav' in i
]
sebut_perkataan_man = [
    'sebut-perkataan-man/' + i
    for i in os.listdir('sebut-perkataan-man')
    if '.wav' in i
]
sebut_perkataan_woman = [
    'sebut-perkataan-woman/' + i
    for i in os.listdir('sebut-perkataan-woman')
    if '.wav' in i
]
wavs = tolong_sebut + sebut_perkataan_man + sebut_perkataan_woman
for path in tqdm.tqdm(wavs):
    try:
        mel, mag = load_file(path)
        # Flatten the directory structure into the file name ("dir-file.npy").
        root, ext = os.path.splitext(path)
        root = root.replace('/', '-')
        np.save('mel/%s.npy' % (root), mel)
        np.save('mag/%s.npy' % (root), mag)
    except Exception as e:
        # Best effort: report the failing file and keep caching the rest.
        print(e)
        pass
| [
"husein.zol05@gmail.com"
] | husein.zol05@gmail.com |
class Node:
    """A binary-search-tree node holding a value and two child links."""

    def __init__(self, data):
        self.right = None
        self.left = None
        self.data = data


class Solution:
    """BST helpers: insertion plus height (counted in edges)."""

    def insert(self, root, data):
        """Insert ``data`` into the subtree rooted at ``root``.

        Values less than or equal to the current node go left, larger ones
        go right. Returns the (possibly new) subtree root.
        """
        if root is None:
            return Node(data)
        if data <= root.data:
            root.left = self.insert(root.left, data)
        else:
            root.right = self.insert(root.right, data)
        return root

    def getHeight(self, root):
        """Return the tree height in edges; an empty tree or lone leaf is 0."""
        if root is None or (root.left is None and root.right is None):
            return 0
        return 1 + max(self.getHeight(root.left), self.getHeight(root.right))
# Read T values from stdin, insert each into the BST, then print its height.
T = int(input())
myTree = Solution()
root = None
for i in range(T):
    data = int(input())
    root = myTree.insert(root, data)
height = myTree.getHeight(root)
print(height)
"takecian@gmail.com"
] | takecian@gmail.com |
57228292fe41800007162ca6be34000fa2208823 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/topology.py | 0838d2b70b2837816c9f5e03ae5813cc13404e92 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,821 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Topology(Model):
    """Topology of the specified resource group.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: GUID representing the operation id.
    :vartype id: str
    :ivar created_date_time: The datetime when the topology was initially
     created for the resource group.
    :vartype created_date_time: datetime
    :ivar last_modified: The datetime when the topology was last modified.
    :vartype last_modified: datetime
    :param resources:
    :type resources:
     list[~azure.mgmt.network.v2018_10_01.models.TopologyResource]
    """

    # Server-populated fields are marked read-only so msrest drops them from
    # serialized requests.
    _validation = {
        'id': {'readonly': True},
        'created_date_time': {'readonly': True},
        'last_modified': {'readonly': True},
    }

    # Maps Python attribute names to wire names and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'resources': {'key': 'resources', 'type': '[TopologyResource]'},
    }

    def __init__(self, **kwargs):
        super(Topology, self).__init__(**kwargs)
        # Read-only fields start as None and are filled in by deserialization.
        self.id = None
        self.created_date_time = None
        self.last_modified = None
        self.resources = kwargs.get('resources', None)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
f864ade49aab086a7a31f2e135fad9a46cdb13ca | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2425/60631/272083.py | 5ac84ac36934d8e5049118c3115c584027eceaf7 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | t=int(input())
for ti in range(t):
    # First line of each case: n = number of values, k = the query (kept as str).
    si=input().split(' ')
    n=int(si[0])
    k=si[1]
    s=input().split(' ')
    # Scan adjacent pairs; when k lies strictly between s[i] and s[i+1],
    # print the endpoint closer to k.
    for i in range(n):
        if i+1==n:
            break
        lo=int(s[i])-int(k)
        hi=int(s[i+1])-int(k)
        if lo*hi <0:
            # Here lo < 0 < hi; hi+lo > 0 means |lo| < |hi|, i.e. s[i] is nearer.
            if hi+lo>0:
                print(s[i])
            else:
                print(s[i+1])
        #print(n,k,s)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
798069de8b0f9e0ce67fbbced24721a901e7d47c | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5766201229705216_0/Python/bponsler/binary.py | b5e3209f540bab8969cf4715b64d40daee21ea46 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,355 | py | from sys import stdin
def addToTree(items, tree):
    """Record the undirected edge items[0] -- items[1] in the adjacency map.

    Each endpoint's adjacency list is created on first use and the other
    endpoint is appended to it. Mutates and returns ``tree``.
    """
    a = items[0]
    b = items[1]
    # Fetch both lists before appending, mirroring the original ordering.
    adjacent_to_a = tree.get(a, [])
    adjacent_to_b = tree.get(b, [])
    adjacent_to_a.append(b)
    adjacent_to_b.append(a)
    tree[a] = adjacent_to_a
    tree[b] = adjacent_to_b
    return tree
def countChildren(tree, node, visited=None):
    """Count all nodes reachable from ``node`` while skipping ``visited`` ones.

    ``visited`` is shared (mutated) across the recursion so each node is
    counted at most once even in the undirected adjacency map.
    """
    visited = [] if visited is None else visited
    visited.append(node)
    # Ignore parents. Materialized as a list: under Python 3 ``filter``
    # returns a lazy iterator, so the original ``len(filter(...))`` raised
    # TypeError (it only worked on Python 2).
    children = [c for c in tree[node] if c not in visited]
    num = len(children)
    for child in children:
        num += countChildren(tree, child, visited)
    return num


def numChildrenToRemove(tree, node, visited=None):
    """Minimum node removals to make the subtree rooted at ``node`` full binary.

    A leaf is valid as-is; a node with exactly one child must lose that child
    and its entire subtree; a node with more than two children keeps two and
    removes the rest (after fixing each child's subtree recursively).
    """
    visited = [] if visited is None else visited
    visited.append(node)
    # Ignore visited nodes (same Python 3 fix as in countChildren).
    children = [c for c in tree[node] if c not in visited]
    if len(children) == 0:
        return 0  # valid leaf: nothing to remove
    if len(children) == 1:
        # A single child violates "full binary": drop it and its subtree.
        return 1 + countChildren(tree, children[0], visited)
    # Two or more children: fix each child's subtree, then trim down to two.
    child_counts = [numChildrenToRemove(tree, c, visited) for c in children]
    return sum(child_counts) + (len(child_counts) - 2)
def handleTest(case, numNodes, lines):
    """Solve one test case and print the answer in Code Jam format.

    Builds the adjacency map from the edge lines, tries every node with at
    least two neighbours as the root of a full binary tree, and reports the
    minimum number of removals (or ``len(tree)`` when no root is viable).
    """
    tree = {}
    for line in lines:
        # list(...) is required: addToTree indexes the pair, and under
        # Python 3 ``map`` returns a one-shot, non-subscriptable iterator.
        items = list(map(int, line.split(" ")))
        tree = addToTree(items, tree)
    # Roots must have at least 2 nodes to start
    possibleRoots = filter(lambda e: len(tree[e]) >= 2, tree)
    minCount = None
    for root in possibleRoots:
        count = numChildrenToRemove(tree, root)
        minCount = count if minCount is None else min(minCount, count)
    if minCount is None:
        # No viable root: every node has to be removed.
        minCount = len(tree)
    # print() call replaces the original Python 2 print statement, which is a
    # syntax error under Python 3.
    print("Case #%d: %s" % (case, minCount))
if __name__ == '__main__':
    data = stdin.read().strip()
    lines = data.split("\n")
    numTests = int(lines[0])
    case = 1
    index = 1
    while index < len(lines):
        # Each record: one line with the node count, then numNodes-1 edge lines.
        numNodes = int(lines[index])
        testLines = lines[index+1:index + numNodes]
        handleTest(case, numNodes, testLines)
        case += 1
        # Advance past the count line plus the numNodes-1 edge lines.
        index += numNodes
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
b60d9dcf9a73cb0bc79ae108a7050edd0b8d277c | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/glljos003/question2.py | 21e349f32092b4b61fac4704fcb4b74a41414501 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | choice_input = input("Enter the input filename:\n")
choice_output = input("Enter the output filename:\n")
input_file = open(choice_input, "r")
output_file = open(choice_output, "w")
width = eval(input("Enter the line width:\n"))
string = input_file.read()
x = string.splitlines(True)
string = "".join(x)
paragraphs = string.split("\n\n")
for i in range(len(paragraphs)):
paragraphs[i] = paragraphs[i].replace("\n", " ")
formatted_paragraphs = []
for para in paragraphs:
para = para.split(" ")
new_string = []
count = 0
for s in para: #s is each word in the current paragraph
if count + int(len(s)) <= width: #when the length of the new line is under the specified width, the string is just added to the list
new_string.append(s)
new_string.append(" ")
count+= int(len(s)+1)
else:
new_string.append("\n") #when the length of the new line exceeds the specified with, a newline character is added then string is appended
count = 0
new_string.append(s)
new_string.append(" ")
count+= int(len(s)+1)
formatted_paragraphs.append(new_string)
for i in formatted_paragraphs:
if i[-1] == " ":
i[-1] = ""
else:
continue
for para in formatted_paragraphs:
string = "".join(para)
string = string + "\n"
print(string, file=output_file)
input_file.close()
output_file.close()
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
5f19f916ce55cb6e965e8125fbe30a94008013c9 | 88ae8695987ada722184307301e221e1ba3cc2fa | /v8/tools/release/list_deprecated.py | 3549ecd427e785df1a537870920d2a6b5cb18bc2 | [
"BSD-3-Clause",
"SunPro",
"Apache-2.0"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 6,376 | py | #!/usr/bin/env python3
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
from datetime import datetime
import re
import subprocess
import sys
from pathlib import Path
import logging
from multiprocessing import Pool
# Matches the 40-hex-digit commit hash that starts each blame porcelain record.
RE_GITHASH = re.compile(r"^[0-9a-f]{40}")
# "author-time <unix timestamp>" line in `git blame --line-porcelain` output.
RE_AUTHOR_TIME = re.compile(r"^author-time (\d+)$")
# "filename <path>" porcelain line; the source line itself follows it.
RE_FILENAME = re.compile(r"^filename (.+)$")
# Memoisation cache: commit hash -> "major.minor" V8 version string.
VERSION_CACHE = dict()
# Extract the version components from include/v8-version.h.
RE_VERSION_MAJOR = re.compile(r".*V8_MAJOR_VERSION ([0-9]+)")
RE_VERSION_MINOR = re.compile(r".*V8_MINOR_VERSION ([0-9]+)")
# Macro-argument patterns; not referenced elsewhere in this file.
RE_MACRO_END = re.compile(r"\);")
RE_DEPRECATE_MACRO = re.compile(r"\(.*?,(.*)\);", re.MULTILINE)
class HeaderFile(object):
  """One V8 API header together with its parsed ``git blame`` information."""

  def __init__(self, path):
    self.path = path
    self.blame_list = self.get_blame_list()

  @classmethod
  def get_api_header_files(cls, options):
    """Return a HeaderFile for every .h file under options.include_dir."""
    files = subprocess.check_output(
        ['git', 'ls-tree', '--name-only', '-r', 'HEAD', options.include_dir],
        encoding='UTF-8')
    files = map(Path, filter(lambda l: l.endswith('.h'), files.splitlines()))
    # Blaming every header is slow, so the files are parsed in parallel.
    with Pool(processes=24) as pool:
      return pool.map(cls, files)

  def extract_version(self, hash):
    """Return the "major.minor" V8 version at commit ``hash`` (memoised)."""
    if hash in VERSION_CACHE:
      return VERSION_CACHE[hash]
    # The all-zero hash marks lines that are not committed yet.
    if hash == '0000000000000000000000000000000000000000':
      return 'HEAD'
    result = subprocess.check_output(
        ['git', 'show', f"{hash}:include/v8-version.h"], encoding='UTF-8')
    major = RE_VERSION_MAJOR.search(result).group(1)
    minor = RE_VERSION_MINOR.search(result).group(1)
    version = f"{major}.{minor}"
    VERSION_CACHE[hash] = version
    return version

  def get_blame_list(self):
    """Parse ``git blame --line-porcelain`` into a list of per-line dicts.

    Each dict holds the commit ``hash``, the author ``datetime``, the
    porcelain ``filename`` and the stripped source line ``content``.
    """
    logging.info(f"blame list for {self.path}")
    result = subprocess.check_output(
        ['git', 'blame', '-t', '--line-porcelain', self.path],
        encoding='UTF-8')
    line_iter = iter(result.splitlines())
    blame_list = list()
    current_blame = None
    while True:
      line = next(line_iter, None)
      if line is None:
        break
      if RE_GITHASH.match(line):
        # A new record begins; flush the previous one.
        if current_blame is not None:
          blame_list.append(current_blame)
        hash = line.split(" ")[0]
        current_blame = {
            'datetime': 0,
            'filename': None,
            'content': None,
            'hash': hash
        }
        continue
      match = RE_AUTHOR_TIME.match(line)
      if match:
        current_blame['datetime'] = datetime.fromtimestamp(
            int(match.groups()[0]))
        continue
      match = RE_FILENAME.match(line)
      if match:
        # The source line itself follows the "filename" porcelain line.
        current_blame['filename'] = match.groups()[0]
        current_blame['content'] = next(line_iter).strip()
        continue
    # Flush the final record.
    blame_list.append(current_blame)
    return blame_list

  def filter_and_print(self, macro, options):
    """Print every use of ``macro`` that is older than options.before."""
    before = options.before
    index = 0
    re_macro = re.compile(macro)
    deprecated = list()
    while index < len(self.blame_list):
      blame = self.blame_list[index]
      line = blame['content']
      # Skip preprocessor directives and comments.
      if line.startswith("#") or line.startswith("//"):
        index += 1
        continue
      commit_datetime = blame['datetime']
      # Only report deprecations introduced before the cut-off date.
      if commit_datetime >= before:
        index += 1
        continue
      commit_hash = blame['hash']
      match = re_macro.search(line)
      if match:
        # Walk the macro's parenthesised argument list, which may span
        # several source lines, and capture the quoted message inside it.
        pos = match.end()
        start = -1
        parens = 0
        while True:
          if pos >= len(line):
            # Extend to next line
            index = index + 1
            blame = self.blame_list[index]
            line = line + blame['content']
          if line[pos] == '(':
            parens = parens + 1
          elif line[pos] == ')':
            parens = parens - 1
            if parens == 0:
              # Exclude closing ")
              pos = pos - 1
              break
          elif line[pos] == '"' and start == -1:
            start = pos + 1
          pos = pos + 1
        # Extract content and replace double quotes from merged lines
        content = line[start:pos].strip().replace('""', '')
        deprecated.append((index + 1, commit_datetime, commit_hash, content))
      index = index + 1
    for linenumber, commit_datetime, commit_hash, content in deprecated:
      self.print_details(linenumber, commit_datetime, commit_hash, content)

  def print_details(self, linenumber, commit_datetime, commit_hash, content):
    """Print one aligned report line: location, version, date, hash, message."""
    commit_date = commit_datetime.date()
    file_position = (f"{self.path}:{linenumber}").ljust(40)
    v8_version = f"v{self.extract_version(commit_hash)}".rjust(5)
    print(f"{file_position} {v8_version} {commit_date} {commit_hash[:8]}"
          f" {content}")

  def print_v8_version(self, options):
    """Print the version/date of the last commit that touched this file."""
    commit_hash, commit_datetime = subprocess.check_output(
        ['git', 'log', '-1', '--format=%H%n%ct', self.path],
        encoding='UTF-8').splitlines()
    commit_datetime = datetime.fromtimestamp(int(commit_datetime))
    self.print_details(11, commit_datetime, commit_hash, content="")
def parse_options(args):
  """Parse CLI options for the deprecation-statistics tool.

  --verbose enables debug logging, --before (YYYY-MM-DD) defaults to "now",
  and the include directory defaults to the repository's ``include`` dir
  located relative to this script.
  """
  parser = argparse.ArgumentParser(
      description="Collect deprecation statistics")
  parser.add_argument("include_dir", nargs='?', help="Path to includes dir")
  parser.add_argument("--before", help="Filter by date")
  parser.add_argument("--verbose",
                      "-v",
                      help="Verbose logging",
                      action="store_true")
  options = parser.parse_args(args)
  if options.verbose:
    logging.basicConfig(level=logging.DEBUG)
  # Normalise --before into a datetime; absent means "everything up to now".
  options.before = (datetime.strptime(options.before, '%Y-%m-%d')
                    if options.before else datetime.now())
  if options.include_dir is None:
    tool_root = Path(__file__).parent.parent
    options.include_dir = str((tool_root / 'include').relative_to(tool_root))
  return options
def main(args):
  """Entry point: print the current version, then all deprecation-macro uses."""
  options = parse_options(args)
  print("# CURRENT V8 VERSION:")
  # v8-version.h's last commit identifies the checkout's version.
  version = HeaderFile(Path(options.include_dir) / 'v8-version.h')
  version.print_v8_version(options)
  header_files = HeaderFile.get_api_header_files(options)
  print("\n")
  print("# V8_DEPRECATE_SOON:")
  for header in header_files:
    header.filter_and_print("V8_DEPRECATE_SOON", options)
  print("\n")
  print("# V8_DEPRECATED:")
  for header in header_files:
    header.filter_and_print("V8_DEPRECATED", options)


if __name__ == "__main__":
  main(sys.argv[1:])
| [
"jengelh@inai.de"
] | jengelh@inai.de |
35b61c03110c01137c01c1f734774cc7bd7e4811 | 2de1934821e11edaf8c4cbf4993f5138a17b20f2 | /tasks/migrations/0007_remove_project_dedline.py | 8a4e6215bab5d342f332ea44dcdd89d9285d449d | [] | no_license | jonqwerty/taskmanager | e39c0cc5b27619bd21e6d064dda6d779337cf9e0 | 04a8a2672ae50f726bab3f7b9e794e544b9c2bd2 | refs/heads/main | 2023-01-03T09:54:48.832504 | 2020-10-28T19:07:49 | 2020-10-28T19:07:49 | 301,083,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # Generated by Django 3.0.2 on 2020-10-18 09:04
from django.db import migrations
class Migration(migrations.Migration):
    # Must run after the migration that originally added the field.
    dependencies = [
        ('tasks', '0006_project_dedline'),
    ]

    # Drops the "dedline" column from the project model.
    operations = [
        migrations.RemoveField(
            model_name='project',
            name='dedline',
        ),
    ]
| [
"you@example.com"
] | you@example.com |
e3d450aa45d3d9aff94356466a82ee5821f57f30 | 4ac9cf4c921e71ad4a5308b6de4900051fc6e162 | /MAIN/tasks/Others.py | e8a4911ca01a52171db6036d33b84b96eeba58a2 | [] | no_license | heyuantao/ACMTOOLS | 0928cb889222746dc20e677728c8b6816e28b2a0 | cd0c7dee272dc6b14c496cf02bfbbce863acfd59 | refs/heads/master | 2022-12-09T22:43:39.326033 | 2020-05-10T12:46:04 | 2020-05-10T12:46:04 | 172,885,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from celery.task import Task
import time
class HelpTask(Task):
    """Demo Celery task: logs start/end around a 10-second sleep."""
    # Explicit task name used when dispatching this task by name.
    name = 'help_task'
    def run(self,*args,**kwargs):
        print('start course task')
        # Simulates a long-running job.
        time.sleep(10)
        print(kwargs)
        #runTask(1000)
        print('end')
"he_yuan_tao@163.com"
] | he_yuan_tao@163.com |
9698b7d231d1791a36e5749d1078d6cc01552709 | ad1a89d4b3e850b114df494c7d06f312105cd7c8 | /settings/dev_files.py | 7b05f2856fcbb2851bbba8018ea462d49c078f5f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | brenns10/fluffy | 22436bc958ce6c4f2bf06038e426dc74b82a0ee9 | a63f81bde64901416e6c575c4c8db2fbce6c346d | refs/heads/master | 2021-08-11T18:56:31.179119 | 2017-11-14T02:47:00 | 2017-11-14T02:47:00 | 110,591,621 | 0 | 0 | null | 2017-11-13T19:25:15 | 2017-11-13T19:25:14 | null | UTF-8 | Python | false | false | 724 | py | # fluffy-specific configuration options
# storage backend (how are the files stored after being uploaded?)
# The 'file' backend writes objects and HTML pages to local paths; {name}
# is replaced with the stored object's name.
STORAGE_BACKEND = {
    'name': 'file',
    'object_path': 'tmp/object/{name}',
    'html_path': 'tmp/html/{name}',
}
# branding
BRANDING = 'fluffy'
CUSTOM_FOOTER_HTML = None
# URL patterns ({name} is substituted with the object/page name)
HOME_URL = 'http://localhost:5000/'
FILE_URL = 'http://localhost:5001/object/{name}'
HTML_URL = 'http://localhost:5001/html/{name}'
STATIC_ASSETS_URL = 'http://localhost:5000/{name}'
# abuse contact email address
ABUSE_CONTACT = 'abuse@example.com'
# max upload size per file (in bytes)
MAX_UPLOAD_SIZE = 10 * 1048576  # 10 MB
# max size Flask will accept; maybe a little larger?
MAX_CONTENT_LENGTH = MAX_UPLOAD_SIZE * 2
| [
"ckuehl@ocf.berkeley.edu"
] | ckuehl@ocf.berkeley.edu |
8ab10d663bdfbf0e0308ded035ba9e5f154f2c15 | add0bb7a309ea346614d7f560a24e653d3d0ff67 | /test/人机交互/数据交互.py | 03219ac54f95ceed913795f764cdd147bd7f0d56 | [] | no_license | 1572903465/PythonProjects | 935aff08d5b3d3f146393764a856369061513d36 | 73576080174f72ea1df9b36d201cf3949419041b | refs/heads/master | 2023-06-10T15:50:49.178112 | 2021-07-05T15:42:53 | 2021-07-05T15:42:53 | 301,328,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,918 | py | from flask import Flask, render_template, g, request, url_for, session, redirect
from dataclasses import dataclass
app = Flask(__name__, static_url_path="/")
# Secret key enables signed session cookies (required for ``session``).
app.config['SECRET_KEY']="sdfklas0lk42j"

@dataclass
class User:
    # Minimal in-memory user record for the demo login flow.
    id: int
    username:str
    password:str

# Hard-coded demo accounts (this example uses no database).
users = [
    User(1,"Admin","123456"),
    User(2,"Eason","888888"),
    User(3,"Tommy","666666")
]
@app.before_request
def before_request():
    """Before each request, resolve the session's user_id to a User on flask.g."""
    g.user = None
    if 'user_id' in session:
        user = [u for u in users if u.id == session['user_id']]
        g.user=user[0]
    # Debug output of the per-request globals object.
    print(g)
# @app.route('/')
# def begin():
# return render_template("login.html")
@app.route('/login',methods=['GET','POST'])
def login():
    """GET renders the login form; POST checks the submitted credentials.

    On success the user id is stored in the session and the profile page is
    rendered directly; any failure falls through to the login form again.
    """
    if request.method =='POST':
        # Drop any previous login before re-authenticating.
        session.pop('user_id',None)
        username = request.form.get("username",None)
        password = request.form.get("password",None)
        user = [u for u in users if u.username==username]
        if len(user) > 0:
            user = user[0]
            if user and user.password == password:
                session['user_id'] = user.id
                # print(url_for('profile'))
                # return redirect(url_for('profile'),)
                user = {
                    'username':username,
                    'uid':user.id
                }
                return render_template("profile.html",userinfo=user)
    return render_template("login.html")
@app.route("/profile")
def profile():
    """Profile page; anonymous visitors are redirected to the login page."""
    if not g.user:
        return redirect(url_for('login'))
    return render_template('profile.html')
@app.route("/modifyTeim",methods=["GET","POST"])
def modify_item():
    """Debug endpoint: dumps the query string and returns a fixed JSON body."""
    print("111111111111111")
    print(request.args)
    # NOTE(review): the fetched value is discarded — looks unfinished; confirm.
    request.args.get("name","")
    return {"success":0};
if __name__ == '__main__':
    # Run the app (translated comment): host=0.0.0.0 would let other machines
    # reach the site; port selects the listening port. Flask's defaults are
    # host 127.0.0.1 and port 5000.
    app.run(host='127.0.0.1',port=9000)
"1572903465@qq.com"
] | 1572903465@qq.com |
a6b7b62c9f1ac79f10ddf614af9f7bf20439ed33 | 0d2c2ffe431b159a87bcd78c97147422dce8d778 | /GUI学习/01PyQt5快速开发与实战/ch06信号与槽/05事件处理机制01.py | 43465b6595902f3927dcab02d7536babf580a255 | [] | no_license | YuanXianguo/Python-Project-ITC | 9e297fc1e1e8ec2b136e6e8b1db0afaaba81c16c | afd14cbe501147ec66b4aa0c1c7907b3ae41d148 | refs/heads/master | 2020-04-16T13:54:33.727825 | 2019-12-20T02:16:52 | 2019-12-20T02:16:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,225 | py | import sys
from PyQt5.QtCore import QEvent, QTimer, Qt
from PyQt5.QtWidgets import QApplication, QMenu, QWidget
from PyQt5.QtGui import QPainter
class Widget(QWidget):
    """Demo widget that re-implements a number of Qt event handlers."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle('Event Demo')
        self.setGeometry(300, 300, 300, 200)
        self.just_double_clicked = False  # suppress tracking toggle right after a double click
        self.key = ''       # last key press, rendered by paintEvent
        self.text = ''      # main informational text
        self.message = ''   # bottom status message set from the context menu
        # Show the hint text 3 seconds after start-up.
        QTimer.singleShot(3000, self.give_help)

    def give_help(self):
        self.text = '请点击这触发追踪鼠标功能'
        self.update()  # repaint, i.e. trigger paintEvent (translated comment)

    def closeEvent(self, event):
        """Re-implemented close event."""
        print('Closed')

    def contextMenuEvent(self, event):
        """Re-implemented context-menu (right-click) event."""
        menu = QMenu()
        one_action = menu.addAction('&One')
        one_action.triggered.connect(self.one)
        two_action = menu.addAction('&Two')
        two_action.triggered.connect(self.two)
        # "Three" is only offered while no message is currently shown.
        if not self.message:
            menu.addSeparator()
            three_action = menu.addAction('&Three')
            three_action.triggered.connect(self.three)
        menu.exec_(event.globalPos())

    # Context-menu slot functions (original marker string kept below).
    """上下文菜单函数"""
    def one(self):
        self.message = 'Menu option One'
        self.update()

    def two(self):
        self.message = 'Menu option Two'
        self.update()

    def three(self):
        self.message = 'Menu option Three'
        self.update()

    def paintEvent(self, event):
        """Re-implemented paint event."""
        text = self.text
        # Only draw the first paragraph of the informational text.
        i = text.find('\n\n')
        if i >= 0:
            text = text[0:i]
        # If a key was pressed, record it in the painted text (translated).
        if self.key:
            text += '\n\n你按下了:{0}'.format(self.key)
        painter = QPainter(self)
        painter.setRenderHint(QPainter.TextAntialiasing)
        painter.drawText(self.rect(), Qt.AlignCenter, text)  # draw the informational text (translated)
        # If a message exists, draw it bottom-centred, then clear it and
        # repaint after 5 seconds (translated).
        if self.message:
            painter.drawText(self.rect(), Qt.AlignBottom|Qt.AlignCenter, self.message)
            QTimer.singleShot(5000, self.clear_message)
            QTimer.singleShot(5000, self.update)

    def clear_message(self):
        """Slot that clears the status message (translated docstring)."""
        self.message = ''

    def resizeEvent(self, event):
        # Report the new window size in the informational text.
        self.text = '调整窗口的大小为:QSize({},{})'.format(event.size().width(), event.size().height())
        self.update()

    def mouseReleaseEvent(self, event):
        """Re-implemented mouse-release event.

        A double-click release leaves mouse tracking unchanged; a single-click
        release toggles tracking and updates the on-screen hint (translated).
        """
        if self.just_double_clicked:
            self.just_double_clicked = False
        else:
            self.setMouseTracking(not self.hasMouseTracking())  # single click (translated)
            if self.hasMouseTracking():
                self.text = '开启鼠标跟踪功能.\n' + '请移动一下鼠标!\n' + \
                            '单击鼠标可以关闭这个功能'
            else:
                self.text = '关闭鼠标跟踪功能.\n' + '单击鼠标可以开启这个功能'
            self.update()

    def mouseMoveEvent(self, event):
        """Re-implemented mouse-move event."""
        if not self.just_double_clicked:
            globalPos = self.mapToGlobal(event.pos())  # convert window coordinates to screen coordinates (translated)
            self.text = """鼠标位置:
            窗口坐标为:QPoint({}, {})
            屏幕坐标为:QPoint({}, {})""".format(event.pos().x(), event.pos().y(),
                                             globalPos.x(), globalPos.y())
            self.update()

    def mouseDoubleClickEvent(self, event):
        """Re-implemented mouse double-click event."""
        self.just_double_clicked = True
        self.text = '你双击了鼠标'
        self.update()

    def keyPressEvent(self, event):
        """Re-implemented key-press event."""
        self.key = ''
        if event.key() == Qt.Key_Home:
            self.key = 'Home'
        elif event.key() == Qt.Key_End:
            self.key = 'End'
        elif event.key() == Qt.Key_PageUp:
            if event.modifiers() & Qt.ControlModifier:
                self.key = 'Ctrl+PageUp'
            else:
                self.key = 'PageUp'
        elif event.key() == Qt.Key_PageDown:
            if event.modifiers() & Qt.ControlModifier:
                self.key = 'Ctrl+PageDown'
            else:
                self.key = 'PageDown'
        elif Qt.Key_A <= event.key() <= Qt.Key_Z:
            # NOTE(review): the Shift branch records only 'Shift+' without the
            # letter — looks unfinished; confirm intended behaviour.
            if event.modifiers() & Qt.ShiftModifier:
                self.key = 'Shift+'
            else:
                self.key += event.text()
        if self.key:
            self.key = self.key  # no-op kept from the original
            self.update()
        else:
            # Unhandled keys fall through to the default implementation.
            QWidget.keyPressEvent(self, event)
if __name__ == '__main__':
    # Manual smoke test: build the application, show the demo widget and
    # run the Qt event loop until the window is closed.
    app = QApplication(sys.argv)
    my_show = Widget()
    my_show.show()
    sys.exit(app.exec_())
| [
"736913978@qq.com"
] | 736913978@qq.com |
b4818ae7a9622cf19a01a92d9c769226d31d19a8 | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/Test_001/test_link_20201125110915.py | a02ef9ae464982e8e63a84a477479ab6fcfb08ce | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py |
import allure
@allure.link("https://www.baidu.com", name="链接地址")
def test_link_a():
    """Test case carrying an allure link annotation."""
    print("测试连接的测试用例")


testcase = "https://www.baidu.com"


# Fix: allure.testcase is a decorator *factory* and must be called with the
# test-case URL.  The bare `@allure.testcase` form wrapped the function in
# the factory itself, so no test-case link was ever attached to the report.
@allure.testcase(testcase)
def test_testcase():
    print("这个是测试用例地址")
"zhangyingxbba@gmail.com"
] | zhangyingxbba@gmail.com |
65ed6f471c70db25d6ea06054f3ba7f8adeaa18b | fa798e1779af170ee31bfd710a6faca9904a99ef | /7day/7. ex1.py | c141f66dd49c7579bdaf015710ae54329f342ae1 | [] | no_license | itwebMJ/pythonStudy | 1c573f98b78ce8c9273ae17a44d59a5a26c61b2c | 8ea3112c9c587b6aeb8a5fa6ef715053286fbaae | refs/heads/master | 2023-06-28T05:37:29.239010 | 2021-08-06T08:01:54 | 2021-08-06T08:01:54 | 375,879,186 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,657 | py | '''
프로그램 시작되면 memo 디렉토리 없으면 새로 생성.(맨처음만)
메모장
1. 쓰기
파일명 => 중복 : 덮어쓰기 : 기존 내용 지우고 작성
이어쓰기 : 기존 내용 살려서 뒤에 이어씀. =>키보드로 내용입력('/exit':입력 멈춤) 파일에 씀 =>파일 닫고 종료
새이름 : 새 파일 생성
2. 읽기
memo 디렉토리의 파일 목록 출력 => 파일 선택 => 그 파일을 읽기 모드로 오픈해서 파일 내용 읽어와서 출력
3. 삭제
memo 디렉토리의 파일 목록 출력 => 삭제할 파일 선택 => 선택한 파일 삭제
4. 종료
'''
import os
def init(path):
    """Create the memo directory on first run if it does not exist yet."""
    if not os.path.isdir(path):  # only create when missing
        os.mkdir(path)
def selectFile(path):
    """Print the files in *path* and return the name the user picks.

    Returns None (implicitly) when the directory is empty; otherwise keeps
    prompting until a valid index is entered.
    """
    flist = os.listdir(path)  # e.g. ['a.txt', 'b.txt', 'c.txt']
    if len(flist) == 0:
        print('파일이 없다')
        return
    print('메모 파일 목록')
    for i in range(0, len(flist)):  # print "index. filename" lines
        print(i, '. ' + flist[i])
    while True:
        idx = int(input('선택할 파일의 번호를 입력하시오'))
        if 0 <= idx <= len(flist) - 1:
            break
    return flist[idx]
def readFile(path):
    """Let the user pick a memo file and print its contents."""
    fname = selectFile(path)
    if fname is None:  # empty directory / nothing selected
        return
    print('선택한 파일명:', fname)
    f = open(path + fname, 'r', encoding='utf-8')
    content = f.read()
    f.close()
    print('===', fname, '내용===')
    print(content)
def nameCheck(path, fname):
    """Check *fname* against the memo directory and resolve name clashes.

    Returns a ``(mode, newName)`` pair:
    - ``('w', '')``  — no clash (or user chose overwrite): plain write.
    - ``('a', '')``  — user chose to append to the existing file.
    - ``(mode, newName)`` with ``newName != ''`` — user typed a fresh,
      non-clashing file name.
    Returns ``None`` when the user picks an invalid menu option.
    """
    flist = os.listdir(path)  # current memo directory listing
    mode = 'w'                # default: plain write
    if len(flist) == 0:
        return mode, ''
    newName = ''
    if fname in flist:  # name clash: ask the user what to do
        newName = flist[0]  # seed with an existing name so the rename loop runs
        x = int(input('1.덮어쓰기 2.이어쓰기 3.새파일명입력'))
        if x == 1:
            mode = 'w'
            newName = ''
        elif x == 2:
            mode = 'a'
            newName = ''
        elif x == 3:
            while newName in flist:  # keep asking until the name is unique
                newName = input('새파일명:')
        else:
            print('중복처리 중 잘못된 메뉴로 종료')
            return
    return mode, newName
#반환값이 None이면 잘못된 입력 쓰기 중단
#newName=='': mode값만 이용해서 쓰기작업
#newName!='': 새파일명 입력했음
def writeFile(path):
    """Prompt for a file name and write lines typed by the user.

    Name clashes are resolved via nameCheck(); input stops at '/exit'.
    """
    fname = input('파일명 입력')
    res = nameCheck(path, fname)
    mode = ''
    if res is None:  # user picked an invalid clash-resolution option
        print('파일 중복처리 선택을 잘못해서 여기서 끝냄')
        return
    elif res[1] == '':  # keep the typed name; mode decides overwrite/append
        mode = res[0]
    elif res[1] != '':  # user supplied a fresh, non-clashing name
        mode = res[0]
        fname = res[1]
    else:
        return
    f = open(path + fname, mode, encoding='utf-8')
    while True:
        msg = input('내용입력(멈추려면 /exit):')
        if msg == '/exit':
            break
        else:
            f.write(msg + '\n')
    f.close()
def main():
    """Menu loop for the memo application (read / write / quit)."""
    memo_path = 'memo/'
    init(memo_path)  # make sure the memo directory exists
    while True:
        menu = input('1.읽기 2.쓰기 3.삭제 4.종료')
        if menu == '1':
            readFile(memo_path)
        elif menu == '2':
            writeFile(memo_path)
        elif menu == '4':
            break
main() | [
"rlaalwn61@naver.com"
] | rlaalwn61@naver.com |
fdd8a87f35d8ed1df1de0ea2daeaafd48ffa105a | 7a53f6c98c9a15772632dd1346a5507f01cf462c | /brick_server/__init__.py | f03901f54b3bffbb5bff2d157c921b74e481ac92 | [
"MIT"
] | permissive | jbkoh/brick-server | 59c1642665b908b74f344a7a1cacdae66c7caf59 | 945196e4915a7ae65cf60344eab146ee4926d9dd | refs/heads/master | 2020-04-14T21:48:36.888356 | 2019-03-29T23:30:00 | 2019-03-29T23:30:00 | 164,140,827 | 0 | 3 | MIT | 2019-03-26T19:39:37 | 2019-01-04T18:19:52 | Python | UTF-8 | Python | false | false | 2,301 | py | import pdb
import json
from flask import Flask
from flask_injector import FlaskInjector
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec_webframeworks.flask import FlaskPlugin
from .apis import blueprint, entity_api
configs = json.load(open('configs/configs.json'))
API_V1_PREFIX = '/api/v1'
def configure_binding(binder):
    """Bind the shared Brick service singletons for flask-injector.

    The brick_data/brick_server imports are done lazily inside the function
    (as in the original) rather than at module import time.
    """
    from brick_data.timeseries import BrickTimeseries
    from brick_data.sparql import BrickSparql
    from brick_server.extensions.lockmanager import LockManager

    brick_ts_configs = configs['timeseries']
    brick_ts = BrickTimeseries(brick_ts_configs['dbname'],
                               brick_ts_configs['user'],
                               brick_ts_configs['password'],
                               brick_ts_configs['host'],
                               brick_ts_configs['port'],
                               )
    lockmanager_configs = configs['lockmanager']
    lock_manager = LockManager(lockmanager_configs['host'],
                               lockmanager_configs['port'],
                               lockmanager_configs['dbname'],
                               lockmanager_configs['user'],
                               lockmanager_configs['password'],
                               )
    brick_configs = configs['brick']
    if configs['server']['use_hostname_as_ns']:
        # Derive the RDF base namespace from the public hostname plus the
        # entity API mount point.
        base_ns = 'http://{hostname}{api_prefix}{entity_api_prefix}/'.format(
            hostname=configs['server']['hostname'],
            api_prefix=API_V1_PREFIX,
            entity_api_prefix=entity_api.path,
        )
    else:
        base_ns = brick_configs['base_ns']
    brick_sparql = BrickSparql(brick_configs['host'],
                               brick_configs['brick_version'],
                               base_ns=base_ns,
                               load_schema=True,
                               )
    binder.bind(BrickTimeseries, to=brick_ts)
    binder.bind(BrickSparql, to=brick_sparql)
    binder.bind(LockManager, to=lock_manager)
def create_app(**kwargs):
    """Application factory: build the Flask app and wire its dependencies."""
    app = Flask(__name__)
    app.register_blueprint(blueprint, url_prefix=API_V1_PREFIX)
    FlaskInjector(app=app, modules=[configure_binding])
    return app
| [
"bk7749@gmail.com"
] | bk7749@gmail.com |
3a1f2199bc64a36f4049e02a0ae900a3fecdef66 | bae75bf1de75fb1b76e19b0d32c778e566de570a | /smodels-database/13TeV/ATLAS/ATLAS-SUSY-2016-26/orig/InputFile_HepData_Reader.py | b11122a202cb33cc8d5dcf13d5a87699d16bbb08 | [] | no_license | andlessa/RDM | 78ae5cbadda1875c24e1bb726096b05c61627249 | ac6b242871894fee492e089d378806c2c2e7aad8 | refs/heads/master | 2023-08-16T00:47:14.415434 | 2021-09-21T20:54:25 | 2021-09-21T20:54:25 | 228,639,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,611 | py | # This script reads files given as 'input' format in the HEP data website.
# e.g. see ATLAS susy analyses.
"""
This function reads the X,Y and values from the input file.
It returns the three corresponding lists of objects.
First it creates a general list, for which the entries are the lines that are read, in form of a list:
i.e. ListOfLines = [ [x1,y1,z1] , [x2,y2,z2] , ... , [xn,yn,zn] ]
Then it extracts the entries number 0,1 and 2 for each list, and fills the vectors XArrays, YArrays ... ZArrays, that are then returned.
Note that you have to adapt the numbers of verctor that are returned and read according to the numbers of columns present in the file.
num_col is the total number of entrie you need (x,y,z for efficiencies - x,y for exlusion lines)
column is the colomun that you need
"""
def Reading_Values(input, num_col, column):
    """Read X, Y (and optionally Z) columns from a HepData 'input' file.

    Lines starting with '#' or '*' and blank lines are skipped; each kept
    line is split on ';' and spaces are removed from every field (trailing
    newlines are NOT stripped, matching the historical behaviour).

    num_col -- total number of columns needed (3 for efficiencies,
               2 for exclusion lines)
    column  -- 1-based index of the value column to extract (num_col == 3)

    Returns (XArray, YArray, ZArray) when num_col == 3, or (XArray, YArray)
    when num_col == 2 and column == 2; all entries are strings.
    """
    print('Reading the values from the input file: ', input,
          ' .The column containing the chosen values is number: ', column, ' . \n')
    ListOfLines = []  # [[x1, y1, z1], [x2, y2, z2], ...]
    inputFile = open(input, 'r')
    for line in inputFile:
        if line[0] != '#' and line[0] != '*' and line[0] != '\n':
            lineElements = line.split(';')
            elementsList = []
            for element in lineElements:
                if element and element != '\n':
                    element = element.replace(' ', '')
                    elementsList.append(element)
            ListOfLines.append(elementsList)
    inputFile.close()
    XArray = []
    YArray = []
    ZArray = []
    if num_col == 3:
        for row in ListOfLines:
            XArray.append(row[0])
            YArray.append(row[1])
            ZArray.append(row[column - 1])  # the user-selected value column
        return XArray, YArray, ZArray
    elif num_col == 2:
        if column == num_col:
            for row in ListOfLines:
                XArray.append(row[0])
                YArray.append(row[1])
            return XArray, YArray
"""
This function produces the efficiency maps: it multiplies the values for acceptance and efficiency and creates the .txt files for each region
The input parameters are the two name of the Acc and Eff files;
topo and SR are used to create the name of the output files.
BE CAREFUL if you want to divide or not by 10.000 ( i.e. if the values given are in percentage or absolute ): you can state this option
by inputting a normalization value in Norm
"""
def Map_Multiplier(topo, SR, accFile, effFile,num_col,column, Norm):
X1,Y1,Acc = Reading_Values(accFile,num_col,column)
X2,Y2,Eff = Reading_Values(effFile,num_col,column)
outputMap = open('EffMap_'+topo+"_"+SR+".txt",'w')
outputMap.write('# MassX , MassY , Eff*Acc '+'\n')
for x1,y1,acc in zip(X1,Y1,Acc):
for x2,y2,eff in zip(X2,Y2,Eff):
if x1==x2 and y1==y2:
# print x1 + ' ' + x2 + ' ' + y1 + ' ' + y2 + ' \n' # just to check if the selected values from the two files matches
outputMap.write(x1 + ' ' + y1 + ' ' + str(float(acc)*float(eff)/Norm) + '\n')
print "Map ",'EffMap_'+topo+"_"+SR+".txt", ' written!'
"""
This function simply rewrite in a file .dat that you want to plot, in a SModelS friendly format. It takes the values of the arrays from the Reading_Values function.
Give as parameters the two array you want to plot, and the name of the output file.
With 'type of data' you specify what kind of values are you extracting.
"""
def Simple_Map_Producer(X,Y,Z,type_of_data,outputName):
output = open(outputName+'.dat','w')
output.write('# MassX , MassY ' + type_of_data+'\n')
for x,y,z in zip(X,Y,Z):
output.write(x+' '+y+' '+z +'\n')
def Simple_Exclusion_Producer(X, Y, type_of_data, outputName):
    """Write (x, y) exclusion-line pairs to ``<outputName>.dat``."""
    output = open(outputName + '.dat', 'w')
    output.write('# MassX , MassY ' + type_of_data + '\n')
    for x, y in zip(X, Y):
        output.write(x + ' ' + y + '\n')
    output.close()  # was missing: ensure the data is flushed to disk
# Build the efficiency maps for all five signal regions, then convert the
# observed/expected exclusion lines and the observed upper-limit map.
for SR in ['SR1', 'SR2', 'SR3', 'SR4', 'SR5']:
    ACC = 'T2cc_ACC_' + SR + ".csv"
    EFF = 'T2cc_EFF_' + SR + ".csv"
    Map_Multiplier('T2cc', SR, ACC, EFF, 3, 3, 10000)

# NOTE: the producers append '.dat' themselves, so pass the bare base name
# (the original passed '...dat' and therefore produced '...dat.dat' files).
X, Y = Reading_Values("T2cc_Obs_Excl.csv", 2, 2)  # Obs_Line.dat
Simple_Exclusion_Producer(X, Y, "Obs_Excl", "T2cc_Obs_Excl")
X, Y = Reading_Values("T2cc_Exp_Excl.csv", 2, 2)  # Exp_Line.dat
Simple_Exclusion_Producer(X, Y, "Exp_Excl", "T2cc_Exp_Excl")
X, Y, Z = Reading_Values("T2cc_Obs_UL.csv", 3, 3)
Simple_Map_Producer(X, Y, Z, "Obs_UL", "T2cc_Obs_UL")
| [
"wolfgang.waltenberger@gmail.com"
] | wolfgang.waltenberger@gmail.com |
f89ffcd21d944932d0bc3df067c549070844ae55 | 2f9c2bb2c8d32368f90ef798c08848cec4ea2ebd | /jina/types/message/common.py | b3b3245b299fc6ce48d10701153c5fd2fd5037a6 | [
"Apache-2.0"
] | permissive | automation555/jina | 9e0aafd9d894bd5995f091ea0f8566a9ed0f781d | 337526c00265190fc45235b80df10c0a75b51c09 | refs/heads/master | 2023-06-03T04:33:18.460871 | 2021-06-17T08:51:21 | 2021-06-17T08:51:21 | 377,765,051 | 0 | 0 | Apache-2.0 | 2021-06-17T08:55:30 | 2021-06-17T08:50:48 | Python | UTF-8 | Python | false | false | 1,434 | py | from . import Message
from ..request import Request
from ...proto import jina_pb2
_available_commands = dict(
jina_pb2.RequestProto.ControlRequestProto.DESCRIPTOR.enum_values_by_name
)
__all__ = ['ControlMessage']
class ControlMessage(Message):
    """A :class:`Message` wrapping a protobuf control request.

    :param command: control command string, one of ``'IDLE'``, ``'CANCEL'``,
        ``'TERMINATE'``, ``'STATUS'`` (the keys of ``_available_commands``).
    :param pod_name: name of the current pod, used for routing info only.
    :param identity: identity of the current pod.
    :param args: extra positional arguments forwarded to the parent init.
    :param kwargs: extra keyword arguments forwarded to the parent init.
    :raises ValueError: when *command* is not a supported control command.
    """

    def __init__(
        self, command: str, pod_name: str = '', identity: str = '', *args, **kwargs
    ):
        req = Request(jina_pb2.RequestProto())
        if command in _available_commands:
            req.control.command = getattr(
                jina_pb2.RequestProto.ControlRequestProto, command
            )
        else:
            raise ValueError(
                f'command "{command}" is not supported, must be one of {_available_commands}'
            )
        super().__init__(
            None, req, pod_name=pod_name, identity=identity, *args, **kwargs
        )
        req.request_type = 'control'
        # Optionally forward extra request args passed via kwargs.
        args = kwargs.get('args', None)
        if args:
            req.args = args
| [
"rajashree.patil@embold.io"
] | rajashree.patil@embold.io |
a5f75c4b6cd99db91c0f65af43367b7e6670c70b | 2315c570965da85ddb276840ee158319b2fb9df4 | /tests/suggestions/test_suggest_event_webcast_controller.py | e5926bcfccf85118c0e2706f07a1dcd2e02f1fa6 | [
"MIT"
] | permissive | enterstudio/the-blue-alliance | c1779676f809471d39486d077c834c7e78520467 | b53f752fe1f059b4b6f91c841e1865a6c6b81268 | refs/heads/master | 2022-11-26T06:50:11.159102 | 2017-02-03T16:53:26 | 2017-02-03T16:53:26 | 80,987,951 | 0 | 0 | MIT | 2022-11-19T06:05:18 | 2017-02-05T11:19:22 | HTML | UTF-8 | Python | false | false | 4,945 | py | from datetime import datetime
import unittest2
import webapp2
import webtest
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from webapp2_extras.routes import RedirectRoute
from consts.district_type import DistrictType
from consts.event_type import EventType
from controllers.suggestions.suggest_event_webcast_controller import SuggestEventWebcastController
from models.account import Account
from models.event import Event
from models.suggestion import Suggestion
class TestSuggestEventWebcastController(unittest2.TestCase):
def loginUser(self):
self.testbed.setup_env(
user_email="user@example.com",
user_id="123",
user_is_admin='0',
overwrite=True)
Account.get_or_insert(
"123",
email="user@example.com",
registered=True)
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_user_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
app = webapp2.WSGIApplication([
RedirectRoute(r'/suggest/event/webcast', SuggestEventWebcastController, 'suggest-webcast', strict_slash=True),
], debug=True)
self.testapp = webtest.TestApp(app)
self.event = Event(
id="2016necmp",
name="New England District Championship",
event_type_enum=EventType.DISTRICT_CMP,
event_district_enum=DistrictType.NEW_ENGLAND,
short_name="New England",
event_short="necmp",
year=2016,
end_date=datetime(2016, 03, 27),
official=False,
city='Hartford',
state_prov='CT',
country='USA',
venue="Some Venue",
venue_address="Some Venue, Hartford, CT, USA",
timezone_id="America/New_York",
start_date=datetime(2016, 03, 24),
webcast_json="",
website="http://www.firstsv.org",
)
self.event.put()
def tearDown(self):
self.testbed.deactivate()
def getSuggestionForm(self, event_key):
response = self.testapp.get('/suggest/event/webcast?event_key={}'.format(event_key))
self.assertEqual(response.status_int, 200)
form = response.forms.get('suggest_webcast', None)
self.assertIsNotNone(form)
return form
def testLoginRedirect(self):
response = self.testapp.get('/suggest/event/webcast?event_key=2016necmp', status='3*')
response = response.follow(expect_errors=True)
self.assertTrue(response.request.path.startswith("/account/login_required"))
def testNoParams(self):
self.loginUser()
response = self.testapp.get('/suggest/event/webcast', status='3*')
response = response.follow(expect_errors=True)
self.assertEqual(response.request.path, '/')
def testSubmitEmptyForm(self):
self.loginUser()
form = self.getSuggestionForm('2016necmp')
response = form.submit().follow()
self.assertEqual(response.status_int, 200)
request = response.request
self.assertEqual(request.GET.get('status'), 'blank_webcast')
def testSubmitBadUrl(self):
self.loginUser()
form = self.getSuggestionForm('2016necmp')
form['webcast_url'] = 'The Blue Alliance'
response = form.submit().follow()
self.assertEqual(response.status_int, 200)
request = response.request
self.assertEqual(request.GET.get('status'), 'invalid_url')
def testSubmitTBAUrl(self):
self.loginUser()
form = self.getSuggestionForm('2016necmp')
form['webcast_url'] = 'http://thebluealliance.com'
response = form.submit().follow()
self.assertEqual(response.status_int, 200)
request = response.request
self.assertEqual(request.GET.get('status'), 'invalid_url')
def testSubmitWebcast(self):
self.loginUser()
form = self.getSuggestionForm('2016necmp')
form['webcast_url'] = 'https://twitch.tv/frcgamesense'
response = form.submit().follow()
self.assertEqual(response.status_int, 200)
request = response.request
self.assertEqual(request.GET.get('status'), 'success')
# Make sure the Suggestion gets created
suggestion = Suggestion.query().fetch()[0]
self.assertIsNotNone(suggestion)
self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
self.assertEqual(suggestion.target_key, '2016necmp')
self.assertEqual(suggestion.contents['webcast_url'], 'https://twitch.tv/frcgamesense')
self.assertIsNotNone(suggestion.contents.get('webcast_dict'))
| [
"noreply@github.com"
] | enterstudio.noreply@github.com |
4cf4469143aebe731974ed947d501afecb9cceab | 5636cb0c282d03e91a830d30cec3bd54c225bd3b | /TP_SPE_Supplementaires/Mines_Ponts_2015/programmes/TD02_piles_Patricia.py | 23095a301d660d6e8b3c515c2a69d854cfbce056 | [] | no_license | xpessoles/Informatique | 24d4d05e871f0ac66b112eee6c51cfa6c78aea05 | 3cb4183647dc21e3acbcbe0231553a00e41e4e55 | refs/heads/master | 2023-08-30T21:10:56.788526 | 2021-01-26T20:57:51 | 2021-01-26T20:57:51 | 375,464,331 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,542 | py | ### TD02 - piles
# question 1: emptiness predicate
def est_vide(pile):
    """Return True when the stack (a tuple) holds no element.

    >>> est_vide((1, 2, 3))
    False
    >>> est_vide(())
    True
    """
    return len(pile) == 0
# question 2: fullness predicate
def est_pleine(pile, nb):
    """Return True when the stack already holds *nb* elements (its capacity).

    >>> est_pleine((1, 2, 3), 3)
    True
    >>> est_pleine((1, 2, 3), 6)
    False
    """
    return len(pile) == nb
# question 3: push an element
def push(pile, el):
    """Return a new tuple with *el* pushed on top of *pile*.

    A one-element tuple is written ``(el,)``, so no padding/slicing trick
    is needed to concatenate a single element.

    >>> push((1, 2, 3), 94)
    (1, 2, 3, 94)
    """
    return pile + (el,)
def pop(pile):
    """Return ``(top, rest)`` without mutating the (immutable) tuple stack.

    Raises IndexError on an empty stack.

    >>> pop((1, 2, 3, 4, 5))
    (5, (1, 2, 3, 4))
    """
    dernier = pile[-1]   # top of the stack
    reste = pile[:-1]    # stack without its top element
    return dernier, reste
### Exercice 2 : notation polonaise inversee
# est-ce un element au hasard ?
# la pile est un tuple de strings
def est_nombre(pile, i):
    """Return True when element *i* of the string stack is an operand.

    Anything that is not one of '+', '-', '*', '/' counts as a number.

    >>> est_nombre(('+', '1', '3', '*'), 1)
    True
    """
    return pile[i] not in ['+', '-', '*', '/']
def est_operation(pile, i):
    """Return True when element *i* of the string stack is an operator.

    >>> est_operation(('+', '1', '3', '*'), 0)
    True
    >>> est_operation(('+', '1', '3', '*'), 1)
    False
    """
    return pile[i] in ['+', '-', '*', '/']
def evaluer(exp):
    """Evaluate a postfix (RPN) expression and return the result as a float.

    *exp* is any sequence of single-character/string tokens: operands are
    parsed with float(), operators pop their two topmost operands (right
    operand on top).  The duplicated per-operator branches of the original
    are replaced by a dispatch table; a plain list serves as the stack.

    Question 4: evaluer('12+4*3-5+') == 14.0  (single-digit tokens)
    """
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    stack = []
    for token in exp:
        if token in operations:
            b = stack.pop()  # right operand sits on top of the stack
            a = stack.pop()
            stack.append(operations[token](a, b))
        else:
            stack.append(float(token))
    return stack[0]
### Exercice 3 - croisement routier
#creation de listes aléatoires
import random as rd
f1=[rd.randint(0,1) for i in range(10)]
f2=[rd.randint(0,1) for i in range(8)]
def croisement(f1,f2):
f3=[]
while len(f1)!=0 and len(f2)!=0:
if f1[-1]==1: # si un véhicule dans la file 1 il est prioritaire
f3.append(1) # la file 3 reçoit le véhicule de la file 1
f1.pop() #la file 1 est dépilée
if f2[-1]==0:
f2.pop() #si pas de voiture sur la file 2 du stop avancer d'un véhicule
else: # si pas de véhicule sur la file 1 dépiler la file 2
if f2[-1]==1:
f3.append(1)
f1.pop()
f2.pop()
else:
f3.append(0)
f1.pop()
f2.pop()
if len(f1)!=0: #quand une file est vide les véhicules de la file suivant ese vide dans file 3
for i in range(len(f1)):
f3.append(f1.pop())
else:
for i in range(len(f2)):
f3.append(f2.pop())
f3.reverse() #inverser la file 3 pour avoir les véhicules dans l'ordre d'arrivée
return f3
# >>> croisement([0, 1, 1, 0, 0, 1, 1, 0, 1, 1],[0, 1, 0, 1, 1, 1, 1, 0])
# [0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
| [
"xpessoles.ptsi@free.fr"
] | xpessoles.ptsi@free.fr |
26d9469054aaa2d8af40439d3ff87f189436e3f0 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544839/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_58/run_cfg.py | f7490b8599e07eb7ffec34a1228ef176f43b870c | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import FWCore.ParameterSet.Config as cms
import os,sys
# Make this job's directory importable so the shared base configuration
# (base_cfg) generated for the whole batch can be loaded.
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544839/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
# Override the input source of the base configuration with this job's slice
# of the DY3JetsToLL CMG tuples (files 316-32 of the sample).
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_316.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_317.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_318.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_319.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_32.root')
)
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
add097c8c0bbfc990db0229b131cc4d6e9aee2c8 | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /tests/terraform/checks/resource/azure/test_AppServiceJavaVersion.py | b5041c7eaa0dafe935c47390d2a7a832719f6014 | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 1,421 | py | import os
import unittest
from checkov.runner_filter import RunnerFilter
from checkov.terraform.runner import Runner
from checkov.terraform.checks.resource.azure.AppServiceJavaVersion import check
class TestAppServiceJavaVersion(unittest.TestCase):
    """Runs the AppServiceJavaVersion check against its example fixtures."""

    def test(self):
        runner = Runner()
        current_dir = os.path.dirname(os.path.realpath(__file__))
        test_files_dir = os.path.join(current_dir, "example_AppServiceJavaVersion")
        report = runner.run(root_folder=test_files_dir,
                            runner_filter=RunnerFilter(checks=[check.id]))
        summary = report.get_summary()

        passing_resources = {
            'azurerm_app_service.pass',
        }
        failing_resources = {
            'azurerm_app_service.fail',
        }
        # No resource is expected to be skipped (use a set, not a dict,
        # for consistency with the pass/fail collections above).
        skipped_resources = set()

        passed_check_resources = {c.resource for c in report.passed_checks}
        failed_check_resources = {c.resource for c in report.failed_checks}

        self.assertEqual(summary['passed'], len(passing_resources))
        self.assertEqual(summary['failed'], len(failing_resources))
        self.assertEqual(summary['skipped'], len(skipped_resources))
        self.assertEqual(summary['parsing_errors'], 0)
        self.assertEqual(passing_resources, passed_check_resources)
        self.assertEqual(failing_resources, failed_check_resources)
if __name__ == '__main__':
unittest.main() | [
"noreply@github.com"
] | bridgecrewio.noreply@github.com |
461c936aa43dfc116d3a4e6bf313f171ee477ef0 | c05ed32f1ef7e1eb7d73efd674e7d1fd710ad171 | /daily-coding-problems/problem140.py | 840bf38914b4da57f1be1d0cb9995f9fdd704039 | [] | no_license | carlhinderer/python-exercises | c8367517fdf835fa1117f96dbfee3dccc596afa6 | 4e09bbb4c4e2bd5644ed50e997db9f3c289a18f7 | refs/heads/master | 2021-06-01T16:17:00.389134 | 2021-02-09T18:21:01 | 2021-02-09T18:21:01 | 150,902,917 | 0 | 0 | null | 2021-04-20T20:33:11 | 2018-09-29T21:03:36 | Python | UTF-8 | Python | false | false | 386 | py | # Problem 140
# Medium
# Asked by Facebook
#
# Given an array of integers in which two elements appear exactly once and all other elements appear
# exactly twice, find the two elements that appear only once.
#
# For example, given the array [2, 4, 6, 8, 10, 2, 6, 10], return 4 and 8. The order does not matter.
#
# Follow-up: Can you do this in linear time and constant space?
# | [
"carl.hinderer4@gmail.com"
] | carl.hinderer4@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.