blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8fe7534f7a917b9a24339237b65158d3c38d891e | 76c4f199adcc450d1bff5bc1d85d138e99948274 | /riotwatcher/Handlers/__init__.py | 83c58199ed93d7ff523603bb5d20aa3aa359d096 | [
"MIT"
] | permissive | finleylau/Riot-Watcher | 8083cf6d09292264d3f6576bf23b723b59df2102 | 3a4a5d8f708d5bc7d83e5fffaa0388504b111faf | refs/heads/master | 2021-01-22T19:26:00.160641 | 2017-09-05T19:00:45 | 2017-09-05T19:00:45 | 102,420,367 | 0 | 0 | null | 2017-09-05T01:48:24 | 2017-09-05T01:48:24 | null | UTF-8 | Python | false | false | 527 | py |
from .LimitCount import LimitCount
from .RateLimitHeaders import RateLimitHeaders
from .RequestHandler import RequestHandler
from .BaseRateLimitHandler import BaseRateLimitHandler
from .JsonifyHandler import JsonifyHandler
from .ThrowOnErrorHandler import ThrowOnErrorHandler
from .WaitingRateLimitHandler import WaitingRateLimitHandler
# Explicit public API of the Handlers package (mirrors the imports above).
__all__ = [
    'LimitCount',
    'RateLimitHeaders',
    'RequestHandler',
    'BaseRateLimitHandler',
    'JsonifyHandler',
    'ThrowOnErrorHandler',
    'WaitingRateLimitHandler',
]
| [
"godtheresnonamesleft@gmail.com"
] | godtheresnonamesleft@gmail.com |
d440b3127f444d1ed80a2ad574d3cb9c84b5bb5b | 3018a23cfa3b272b9825925407746890b4c53f1e | /TryOutGame/main.py | 5b78a38d833261ce8bd2646e66444932cbadaffd | [
"Apache-2.0"
] | permissive | Ardatufekci/GridPointGameAITry | 75455f2ea44a76fe2486b21896214aa91f41d991 | cc9a879b8d9f67e1202f94080d913c3bbe3101eb | refs/heads/main | 2023-01-07T21:18:06.177074 | 2020-11-08T13:45:52 | 2020-11-08T13:45:52 | 310,737,904 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,815 | py | import tkinter as tk
import numpy as np
from player import Player
from filler import Filler
from random import sample
### VARIABLES
grid = [64*i for i in range(10)]                 # pixel offset of each 64-px board cell
puans = np.zeros((10,10),dtype=np.int)           # per-cell score board ("puan" = score)
data = []                                        # recorded AI runs: [move list, score]
sdata = []                                       # moves recorded from the arrow-key player
playerslastscore = [0,0]                         # [red score, blue score] of last round
### OBJECTS
oyuncu = Player(0,0,"red")
oyuncu.makeDecision(100)
oyuncu2 = Player(grid[9],grid[9],"blue")
fils = [Filler(-100,-100,"black")]               # placed filler tiles; seeded off-screen
# Build concentric score rings: the closer to the centre, the higher the score (1..4).
for i in range(1,5):
    add = np.ones((10-2*i,10-2*i),dtype=np.int)
    puans[i:10-i,i:10-i] += add
root = tk.Tk(className="GAME")
root.wm_maxsize(640,640)
root.wm_minsize(640,640)
canv = tk.Canvas(bg="white",width = 640, height = 640)
canv.pack()
n,resetsayisi=(0,0)                              # tick counter, reset counter
maxhamle=20                                      # current move budget per episode ("hamle" = move)
def isStucked(gamer):
    """Report whether the player still has room around it.

    Returns False when the player sits next to the board edge or when a
    Filler tile occupies any of the four orthogonal 64-px neighbours,
    True otherwise.  NOTE(review): the name suggests the opposite sense;
    callers rely on this exact polarity.
    """
    global fils
    # Any neighbouring cell outside the 640x640 playable area.
    if (gamer.x + 64 > 640 - 64 or gamer.x - 64 < 0
            or gamer.y - 64 < 0 or gamer.y + 64 > 640 - 64):
        return False
    # A filler tile in the cell directly left/right/up/down also counts.
    for blocker in fils:
        if gamer.y == blocker.y and abs(gamer.x - blocker.x) == 64:
            return False
        if gamer.x == blocker.x and abs(gamer.y - blocker.y) == 64:
            return False
    return True
def selectPlayer(datas):
    # Choose which move list the AI should learn from: the arrow-key
    # player's recorded moves (sdata) when player 2 outscored player 1
    # last round, otherwise the recorded run with the best
    # score-per-move ratio.
    global playerslastscore
    print("Puanlar",playerslastscore)
    if playerslastscore[1] > playerslastscore[0]:
        return sdata
    # Score-per-move ratio for each recorded run.
    k = [i[1]/len(i[0]) for i in datas]
    o = 0
    # NOTE(review): implicitly returns None when `datas` is empty --
    # confirm callers never pass an empty list.
    for i in k:
        if i == max(k):
            #print(datas[o])
            #print(k)
            return datas[o][0]
        o+=1
def loop():
    # Main AI tick (every 100 ms): advance the red player one step along
    # its decision list, score the visited cell, and reschedule itself.
    global n,fils,maxhamle,resetsayisi,data,playerslastscore
    n+=1
    print(maxhamle,n)
    # End of an episode: shrink the move budget if the player still has
    # free neighbours, otherwise record the run and restart.
    if n >= maxhamle:
        n=0
        if isStucked(oyuncu):
            maxhamle-=5
            #resetsayisi-=1
        else:
            # Save this run (moves + score) as training data, then reset.
            data.append([oyuncu.direction[0:maxhamle],oyuncu.puan])
            playerslastscore =[oyuncu.puan,oyuncu2.puan]
            reset()
            #oyuncu.direction[:maxhamle] = selectPlayer(data)
            maxhamle+=5
            #print(data)
    # Every third reset, seed the move list from the best recorded run.
    if resetsayisi==3:
        resetsayisi=0
        #oyuncu.makeDecision(100)
        oyuncu.direction[:maxhamle-len(fils)] = selectPlayer(data)
        print(maxhamle-len(fils))
        data=[]
    # Execute the next planned move.  Player.move presumably returns a
    # Filler tile on success and False on an illegal move -- TODO confirm.
    a = oyuncu.move(oyuncu.direction[n],fils)
    if a != False:
        oyuncu.puan += puans[oyuncu.x // 64, oyuncu.y // 64]
        fils.append(a)
    else:
        # Illegal move: drop it from the plan and retry this slot.
        oyuncu.direction.pop(n)
        n-=1
        maxhamle-=(not isStucked(oyuncu))
    # Keep the decision list topped up with fresh random moves.
    [oyuncu.direction.append(sample([0,1,2,3],1)[0]) for i in range(10)]
    root.after(100,loop)
def draw():
    # Full redraw every 50 ms: grid lines, per-cell scores, both players
    # with their scores, and all placed filler tiles.
    canv.delete("all")
    [canv.create_line(i,0,i,640) for i in range(0,640,64)]
    [canv.create_line(0,i,640,i) for i in range(0,640,64)]
    #canv.create_rectangle(grid[5],grid[3],grid[5]+64,grid[3]+64,fill="black")
    # Draw the score value of every non-zero cell.
    for i in range(10):
        for j in range(10):
            if puans[i,j] == 0:
                continue
            canv.create_text(grid[i]+32,grid[j]+32,fill="darkblue",font="Times 20 bold",text=str(puans[i,j]))
    oyuncu.draw(canv)
    oyuncu2.draw(canv)
    canv.create_text(oyuncu.x + 32, oyuncu.y + 32, fill="black", text=str(oyuncu.puan), font="Times 20 bold")
    canv.create_text(oyuncu2.x+32,oyuncu2.y+32,fill="black",text=str(oyuncu2.puan),font="Times 20 bold")
    [i.draw(canv) for i in fils]
    root.after(50,draw)
# Kick off the self-rescheduling AI tick and redraw loops.
loop()
draw()
a = False   # result of the most recent manual move (Filler tile or False)
def yon(e):
    # Keyboard handler (WASD) for the red player.
    # Direction codes 0=up, 1=down, 2=right, 3=left inferred from the
    # key mapping -- confirm against Player.move.
    global a
    a = False
    #print(e.char)
    if e.char=="w":
        a = oyuncu.move(0,fils)
    if e.char=="s":
        a = oyuncu.move(1,fils)
    if e.char=="d":
        a = oyuncu.move(2,fils)
    if e.char=="a":
        a = oyuncu.move(3,fils)
    # A successful move yields a Filler tile; score the newly entered cell.
    if a != False:
        oyuncu.puan += puans[oyuncu.x // 64, oyuncu.y // 64]
        fils.append(a)
# WASD drives the red player manually.
root.bind("<w>",yon)
root.bind("<a>",yon)
root.bind("<s>",yon)
root.bind("<d>",yon)
def yon2(e):
    # Arrow-key handler for the blue player; every accepted key is also
    # recorded into sdata so the AI can imitate the human later.
    # NOTE(review): the codes appended to sdata (1,0,3,2) do not match
    # the move codes used (0,1,2,3) -- looks inverted per axis; verify.
    global a
    a = False
    #print(e.keysym)
    if e.keysym=="Up":
        a = oyuncu2.move(0,fils)
        sdata.append(1)
    if e.keysym=="Down":
        a = oyuncu2.move(1,fils)
        sdata.append(0)
    if e.keysym=="Right":
        a = oyuncu2.move(2,fils)
        sdata.append(3)
    if e.keysym=="Left":
        a = oyuncu2.move(3,fils)
        sdata.append(2)
    # Score the entered cell and keep the returned filler tile.
    if a != False:
        oyuncu2.puan += puans[oyuncu2.x // 64, oyuncu2.y // 64]
        fils.append(a)
def reset(e=False):
    # Restart an episode: red player back to the origin with score 0,
    # blue player recreated at the far corner, filler tiles cleared
    # (seeded with one off-screen Filler so `fils` is never empty).
    # `e` lets this double as a tkinter event handler (bound to <o>).
    global oyuncu2,oyuncu,fils,resetsayisi
    resetsayisi+=1
    #oyuncu = Player(0,0,"red")
    oyuncu.x=0
    oyuncu.y=0
    oyuncu.puan=0
    oyuncu2 = Player(grid[9],grid[9],"blue")
    fils = [Filler(-312,-123,color="black")]
# Arrow keys drive the blue player; <o> restarts the episode.
root.bind("<Up>",yon2)
root.bind("<Down>",yon2)
root.bind("<Left>",yon2)
root.bind("<Right>",yon2)
root.bind("<o>",reset)
tk.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
ee68f3378e82fc55e7c7efd7a2f2cfd008a4b3bf | 83c42b05d59abcfaa7ccc079543a2ddadd2b1393 | /dictionary.py | 58aef8b743ef2589019cd70214393b44d6b692b4 | [] | no_license | MrLibro/e_health_application_prj | 6d3f9306402f082e2631e807f0bea5033606ee1c | 7478f1db05b4721a414b0354b45faaee930491c9 | refs/heads/master | 2023-02-04T09:17:01.017607 | 2020-12-19T16:50:37 | 2020-12-19T16:50:37 | 322,739,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | import text_processing as txt
def creat_dictionary(pubmed):
    # Normalize the PubMed record's abstract via the text-processing
    # helper, then split it on single spaces into a word list.
    abstract = txt.txt_prepration(pubmed.abstract)
    # NOTE(review): str.split already returns a list; list() is redundant.
    abstract = list(str(abstract).split(" "))
    print(abstract)
    return abstract | [
"akaalirezaketabdari@gmail.com"
] | akaalirezaketabdari@gmail.com |
f4a90c423a193dd05d8521b940780699acd28b4d | 36bffa15a4205bfb4601de31f52278ef1f787792 | /requests/views.py | 8c81bc6f31ce6b182fb3fcea1d4c123921660f3b | [] | no_license | christaggart/requestly | 63e0abb2a1e5130fe194a560b84cdb3c5e86042b | f1271d2dc4c2551cd761fe8a7bacad8b4ab16650 | refs/heads/master | 2021-01-10T22:03:04.110611 | 2012-01-06T17:04:29 | 2012-01-06T17:04:29 | 1,856,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | # Create your views here.
from django.http import HttpResponse
from django.template.response import TemplateResponse
import datetime
def home(request):
    # Root URL view: responds with a static plain-text greeting (placeholder).
    return HttpResponse("hello world") | [
"chris@thinkit.ca"
] | chris@thinkit.ca |
49e50d64110e6cda62a541ad93cd6e989f8a06bd | 530b9779035c9f2482f20097c11c2295fbc6248c | /restframework/serializers.py | e8544e9e73aa3cf5909738abb4abff75cbc0cb71 | [] | no_license | sanooptp/MakeDeal | 69ff0e4f07d82d1e1fbf6e8f31bcdf65461818bd | 03d22527a4dcfa1631cd3173235a8fdd85f91ad4 | refs/heads/master | 2023-08-31T12:04:46.927813 | 2021-10-11T15:19:34 | 2021-10-11T15:19:34 | 389,553,544 | 0 | 0 | null | 2021-10-11T15:16:17 | 2021-07-26T07:54:23 | JavaScript | UTF-8 | Python | false | false | 2,271 | py | from product.models import Product
from django.contrib.auth import models
from rest_framework import serializers
from restframework.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES, TestModel
from django.contrib.auth.models import User
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from django.contrib.auth import authenticate
class SnippetSerializer(serializers.ModelSerializer):
    # NOTE(review): exposing *all* snippet PKs on every snippet looks
    # unintentional -- this field usually belongs on the user serializer.
    snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())
    # Read-only owner username, resolved through the owner FK.
    owner = serializers.ReadOnlyField(source='owner.username')
    class Meta:
        model= Snippet
        fields = ['id', 'title', 'code', 'linenos', 'language', 'style', 'owner','snippets']
class UserSerializer(serializers.ModelSerializer):
    # Related snippet primary keys; the queryset is required because the
    # field is writable.
    snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())
    class Meta:
        model = User
        fields = ['id', 'username', 'snippets']
class TestSerializer(serializers.ModelSerializer):
    # Serializes every field of TestModel.
    class Meta:
        model = TestModel
        fields = "__all__"
class MyTokenObtainPairSerializer(TokenObtainPairSerializer):
    """JWT token-pair serializer that embeds the username as a custom claim."""
    @classmethod
    def get_token(cls, user):
        token = super(MyTokenObtainPairSerializer, cls).get_token(user)
        # Add custom claims
        token['username'] = user.username
        return token
class CreateUserSerializer(serializers.ModelSerializer):
    """Registration serializer; the password is accepted but never echoed back."""
    class Meta:
        model = User
        fields = ('id', 'username', 'password')
        extra_kwargs = {'password': {'write_only': True}}
    def create(self, validated_data):
        # create_user hashes the password; its positional args are
        # (username, email, password) -- email is deliberately omitted.
        user = User.objects.create_user(validated_data['username'],
                                        None,
                                        validated_data['password'])
        return user
class LoginUserSerializer(serializers.Serializer):
    """Validates credentials and yields the authenticated User object."""
    username = serializers.CharField()
    password = serializers.CharField()
    def validate(self, data):
        user = authenticate(**data)
        # Reject unknown credentials and deactivated accounts alike.
        if user and user.is_active:
            return user
        raise serializers.ValidationError("Invalid Details.")
# project serializers
class ProductSerializer(serializers.ModelSerializer):
    # Exposes only the displayable fields of Product.
    class Meta:
        model= Product
        fields = ['name','description']
| [
"isanooptp@gmail.com"
] | isanooptp@gmail.com |
5845ad25df07deb2f338228c0feb1eb3bd4936b0 | 6a8f267ae3443e9b1bd1ee14b428d4378b90d87d | /deps/shapely/geometry/multipolygon.py | 3e645de1feac587199ac4b259e5a28045f6c131c | [
"MIT"
] | permissive | meshulam/sly | 713878fd3957a7fcdeee4f5dfcc6325ce08918fc | 9cb0287b9d2bce7e872898365f1dc9bcd37ef5a9 | refs/heads/master | 2021-01-18T22:41:42.548393 | 2015-04-26T18:11:18 | 2015-04-26T18:11:18 | 32,053,249 | 11 | 2 | null | 2015-04-21T01:47:07 | 2015-03-12T02:40:08 | Python | UTF-8 | Python | false | false | 5,745 | py | """Collections of polygons and related utilities
"""
import sys
# Python 2 compatibility: use the lazy xrange as `range` in this module.
if sys.version_info[0] < 3:
    range = xrange
from ctypes import c_void_p, cast
from shapely.geos import lgeos
from shapely.geometry.base import BaseMultipartGeometry, geos_geom_from_py
from shapely.geometry import polygon
from shapely.geometry.proxy import CachingGeometryProxy
__all__ = ['MultiPolygon', 'asMultiPolygon']
class MultiPolygon(BaseMultipartGeometry):
    """A collection of one or more polygons
    If component polygons overlap the collection is `invalid` and some
    operations on it may fail.
    Attributes
    ----------
    geoms : sequence
        A sequence of `Polygon` instances
    """
    def __init__(self, polygons=None, context_type='polygons'):
        """
        Parameters
        ----------
        polygons : sequence
            A sequence of (shell, holes) tuples where shell is the sequence
            representation of a linear ring (see linearring.py) and holes is
            a sequence of such linear rings
        Example
        -------
        Construct a collection from a sequence of coordinate tuples
        >>> ob = MultiPolygon( [
        ...     (
        ...     ((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)),
        ...     [((0.1,0.1), (0.1,0.2), (0.2,0.2), (0.2,0.1))]
        ...     )
        ... ] )
        >>> len(ob.geoms)
        1
        >>> type(ob.geoms[0]) == Polygon
        True
        """
        super(MultiPolygon, self).__init__()
        if not polygons:
            # allow creation of empty multipolygons, to support unpickling
            pass
        elif context_type == 'polygons':
            self._geom, self._ndim = geos_multipolygon_from_polygons(polygons)
        elif context_type == 'geojson':
            self._geom, self._ndim = geos_multipolygon_from_py(polygons)
    def shape_factory(self, *args):
        # Member geometries of this collection are Polygons.
        return polygon.Polygon(*args)
    @property
    def __geo_interface__(self):
        # GeoJSON-style mapping: one (shell, holes...) coordinate tuple
        # per member polygon.
        allcoords = []
        for geom in self.geoms:
            coords = []
            coords.append(tuple(geom.exterior.coords))
            for hole in geom.interiors:
                coords.append(tuple(hole.coords))
            allcoords.append(tuple(coords))
        return {
            'type': 'MultiPolygon',
            'coordinates': allcoords
            }
    def svg(self, scale_factor=1.):
        """
        SVG representation of the geometry. Scale factor is multiplied by
        the size of the SVG symbol so it can be scaled consistently for a
        consistent appearance based on the canvas size.
        """
        parts = []
        for part in self.geoms:
            # Shell first, then holes; even-odd fill rule cuts the holes out.
            exterior_coords = [["{0},{1}".format(*c) for c in part.exterior.coords]]
            interior_coords = [
                ["{0},{1}".format(*c) for c in interior.coords]
                for interior in part.interiors ]
            path = " ".join([
                "M {0} L {1} z".format(coords[0], " L ".join(coords[1:]))
                for coords in exterior_coords + interior_coords ])
            parts.append(
                """<g fill-rule="evenodd" fill="{2}" stroke="#555555"
                stroke-width="{0}" opacity="0.6">
                <path d="{1}" /></g>""".format(
                2. * scale_factor,
                path,
                "#66cc99" if self.is_valid else "#ff3333"))
        return "\n".join(parts)
class MultiPolygonAdapter(CachingGeometryProxy, MultiPolygon):
    """Zero-copy proxy presenting a Python sequence as a MultiPolygon."""
    context = None
    _other_owned = False
    def __init__(self, context, context_type='polygons'):
        self.context = context
        # Pick the GEOS factory matching the layout of `context`.
        if context_type == 'geojson':
            self.factory = geos_multipolygon_from_py
        elif context_type == 'polygons':
            self.factory = geos_multipolygon_from_polygons
    @property
    def _ndim(self):
        # Coordinate dimensionality (2 or 3) of the adapted data.
        try:
            # From array protocol
            array = self.context[0][0].__array_interface__
            n = array['shape'][1]
            assert n == 2 or n == 3
            return n
        except AttributeError:
            # Fall back on list
            return len(self.context[0][0][0])
def asMultiPolygon(context):
    """Adapts a sequence of objects to the MultiPolygon interface"""
    # Returns a zero-copy proxy; `context` is kept by reference.
    return MultiPolygonAdapter(context)
def geos_multipolygon_from_py(ob):
    """ob must provide Python geo interface coordinates."""
    L = len(ob)
    assert L >= 1
    # Coordinate dimensionality from the first vertex of the first shell.
    N = len(ob[0][0][0])
    assert N == 2 or N == 3
    subs = (c_void_p * L)()
    for l in range(L):
        # ob[l][0] is the shell ring; any remaining rings are holes.
        geom, ndims = polygon.geos_polygon_from_py(ob[l][0], ob[l][1:])
        subs[l] = cast(geom, c_void_p)
    # 6 is the GEOS geometry type id for MultiPolygon.
    return (lgeos.GEOSGeom_createCollection(6, subs, L), N)
def geos_multipolygon_from_polygons(ob):
    """
    ob must be either a MultiPolygon, sequence or array of sequences
    or arrays.
    """
    # A real MultiPolygon is simply cloned at the GEOS level.
    if isinstance(ob, MultiPolygon):
        return geos_geom_from_py(ob)
    obs = getattr(ob, 'geoms', None) or ob
    L = len(obs)
    assert L >= 1
    # Determine coordinate dimensionality (2-D or 3-D) from the first part.
    exemplar = obs[0]
    try:
        N = len(exemplar[0][0])
    except TypeError:
        N = exemplar._ndim
    assert N == 2 or N == 3
    subs = (c_void_p * L)()
    for l in range(L):
        # Accept either Polygon objects or raw (shell, holes) sequences.
        shell = getattr(obs[l], 'exterior', None)
        if shell is None:
            shell = obs[l][0]
        holes = getattr(obs[l], 'interiors', None)
        if holes is None:
            holes = obs[l][1]
        geom, ndims = polygon.geos_polygon_from_py(shell, holes)
        subs[l] = cast(geom, c_void_p)
    # 6 is the GEOS geometry type id for MultiPolygon.
    return (lgeos.GEOSGeom_createCollection(6, subs, L), N)
# Test runner
def _test():
import doctest
doctest.testmod()
# Run the embedded doctests when executed as a script.
if __name__ == "__main__":
    _test()
| [
"matt@meshul.am"
] | matt@meshul.am |
660d8fe0314977e4a5fd5748ddcb6c4aef0ca904 | 438d479eac3bff296407ca46b567c9745186222c | /app/admin.py | 2f776f6b63daad8171856e2abaa4a2ba52482f0d | [] | no_license | mclt0568/pyserverman | c6a82c4801a4cc62eb0e33f838d9484f24906614 | 176820488e1fff39b5dee8c6bcf6a7c595fe83f2 | refs/heads/master | 2023-05-08T12:12:51.008630 | 2021-05-31T11:07:32 | 2021-05-31T11:07:32 | 355,734,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | from constants import database
def get_admins():
return database["admins"].fetch_column("user_id")
def is_admin(user_id):
admins = get_admins()
return user_id in admins
#Return True for successful, False for admin not found.
def remove_admin(user_id):
if is_admin(user_id):
database["admins"].delete(f"user_id = '{user_id}'")
return False
#Return True for successful, False for already admin.
def add_admin(user_id):
if is_admin(user_id):
return False
database["admins"].insert([user_id])
return True | [
"tis7bfrankie@gmail.com"
] | tis7bfrankie@gmail.com |
91dbf8f944594010b21f4e33cdd5c303b603daa0 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_02_01/models/outbound_nat_rule.py | 509f9e9922798df037d6dab645f99d2111cc92f6 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,886 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class OutboundNatRule(SubResource):
"""Outbound NAT pool of the load balancer.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:param allocated_outbound_ports: The number of outbound ports to be used
for NAT.
:type allocated_outbound_ports: int
:param frontend_ip_configurations: The Frontend IP addresses of the load
balancer.
:type frontend_ip_configurations:
list[~azure.mgmt.network.v2018_02_01.models.SubResource]
:param backend_address_pool: Required. A reference to a pool of DIPs.
Outbound traffic is randomly load balanced across IPs in the backend IPs.
:type backend_address_pool:
~azure.mgmt.network.v2018_02_01.models.SubResource
:param provisioning_state: Gets the provisioning state of the PublicIP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'backend_address_pool': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'allocated_outbound_ports': {'key': 'properties.allocatedOutboundPorts', 'type': 'int'},
'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[SubResource]'},
'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OutboundNatRule, self).__init__(**kwargs)
self.allocated_outbound_ports = kwargs.get('allocated_outbound_ports', None)
self.frontend_ip_configurations = kwargs.get('frontend_ip_configurations', None)
self.backend_address_pool = kwargs.get('backend_address_pool', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
bedf0931ccef770750040887a803cdba60d8d515 | de1f9d660cfb738afdb66e4a2d63a4577c07d9c6 | /test/webapi/controllers/test_wmts.py | ad52d78c1aaca58531a54c0ef0ecba42c5079c04 | [
"MIT"
] | permissive | rabaneda/xcube | db47eb416db85df891a924063482a7943cae9d4f | 0d38ca513987184dbc4a37da1616e4076964d0f1 | refs/heads/master | 2020-11-24T00:11:17.107630 | 2020-02-11T10:11:34 | 2020-02-11T10:11:34 | 227,877,138 | 0 | 0 | MIT | 2019-12-13T16:14:51 | 2019-12-13T16:14:50 | null | UTF-8 | Python | false | false | 703 | py | import os
import unittest
from test.webapi.helpers import get_res_test_dir, new_test_service_context
from xcube.webapi.controllers.wmts import get_wmts_capabilities_xml
class WmtsControllerTest(unittest.TestCase):
def test_get_wmts_capabilities_xml(self):
self.maxDiff = None
with open(os.path.join(get_res_test_dir(), 'WMTSCapabilities.xml')) as fp:
expected_capabilities = fp.read()
ctx = new_test_service_context()
capabilities = get_wmts_capabilities_xml(ctx, 'http://bibo')
print(80 * '=')
print(capabilities)
print(80 * '=')
self.assertEqual(expected_capabilities.replace(' ', ''), capabilities.replace(' ', ''))
| [
"norman.fomferra@gmail.com"
] | norman.fomferra@gmail.com |
b92a0e4c7afa8244a54b3964bea78bc355235382 | 544c85db4196b279f242a6d1a3838f69b85a8ff6 | /drostelib/drostehelper.py | db44391e85c5e66c536be20607e0a4db2fd419cc | [] | no_license | azer89/Droste_Generator | e0d5c4785608d6db253b9658627f4273cb047035 | f8a81540746eacfed8edbdf39b901edbee99b020 | refs/heads/master | 2023-02-22T12:05:53.246208 | 2021-01-18T11:10:34 | 2021-01-18T11:10:34 | 46,583,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 12:59:31 2015
@author: azer
"""
import numpy as np
"""
Magenta color as the mask color
"""
transp_color = np.array([255, 0, 255])
"""
Determine whether a color is the mask color
"""
def IsMasked(col):
if(col[0] == transp_color[0] and col[1] == transp_color[1] and col[2] == transp_color[2]):
return True
return False
"""
Make sure a coordinate is valid
(or is this because a bug in my program?)
"""
def IsCoordValid(x, y):
# if x is invalid
if(np.isnan(x) or np.isinf(x)):
return False
# if y is invalid
if(np.isnan(y) or np.isinf(y)):
return False
# the coordinate is valid
return True
"""
make sure a coordinate is inside the image
"""
def IsInside(x, y, width, height):
if(x >= 0 and y >= 0 and x < width and y < height):
return True
# is inside
return False
"""
Calculate the center of the mask and the approximate radius
"""
def CalculateCenter(img_col):
r1 = 0.0
center_x = 0.0
center_y = 0.0
pixel_counter = 0.0
xs = [] # list of x-coordinates
ys = [] # list of y-coordinates
height, width, depth = img_col.shape
# sum of the region
for y_iter in xrange(height):
for x_iter in xrange(width):
col = img_col[y_iter][x_iter]
if(IsMasked(col)):
center_x += x_iter
center_y += y_iter
xs.append(x_iter)
ys.append(y_iter)
pixel_counter += 1.0
# get the center
center_x /= pixel_counter
center_y /= pixel_counter
#
for i in xrange(len(xs)):
x = xs[i] - center_x
y = ys[i] - center_y
r = np.sqrt(x * x + y * y)
if (r > r1):
r1 = r
# return the radius and the center
return r1, center_x, center_y
| [
"radhitya@uwaterloo.ca"
] | radhitya@uwaterloo.ca |
e6d1d32bdcd74e78895ec9458610ec4a8c07cbef | 3cf617e510b4da4c90744f60c40d057d887656db | /datatypes/Static.py | dd0d19691d4b3d1f8e65bfb40b15f2da4cddb10e | [] | no_license | Peaches491/typecast | b971a7330494f5a078090065b6180fdd7668b344 | 363d960dd784ca8d20dbc50dcace271cb2015125 | refs/heads/master | 2020-12-24T15:06:06.613738 | 2014-05-10T04:29:30 | 2014-05-10T04:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from utilitites import markdown2
from google.appengine.ext import db
from blog_handler import Blog
class StaticPage(db.Model):
    # Datastore entity for a hand-authored page whose body is Markdown.
    title = db.StringProperty(required = True)
    brief = db.StringProperty(required = True)
    url = db.StringProperty(required = True)
    content = db.TextProperty(required = True)
    created = db.DateTimeProperty(auto_now_add = True)
    last_modified = db.DateTimeProperty(auto_now = True)
    def render(self, key=None):
        # Convert the Markdown body to HTML, then hand off to the shared
        # template renderer.  NOTE(review): `key` is accepted but unused --
        # confirm callers before removing it.
        self._render_text = markdown2.markdown(self.content, extras=["fenced-code-blocks"])
        return Blog.render_str("static.html", static = self) | [
"peaches@TADesktop.(none)"
] | peaches@TADesktop.(none) |
b45caf654c9014fad4ce37ba5d3fe51256a56e14 | 0713d5e8014dfa7bdc28ac79b3e9e35caba81334 | /chapter6/SketchWindow_beauty_ui.py | 65166a3313131d2f25212a68bde143713c098864 | [
"Apache-2.0"
] | permissive | sdphome/wxPython_training | 420bc7a30a197fd54e9d93f2881774de970bb9a1 | eea3a1863d0ab4e3fa5b3920772a37b525cadc49 | refs/heads/master | 2021-01-10T04:08:15.242263 | 2016-01-22T06:22:31 | 2016-01-22T06:22:31 | 49,471,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,642 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import cPickle

import wx
import wx.lib.buttons as buttons
class SketchWindow(wx.Window):
    """Scribble canvas: captures left-button drags and draws them with the
    current pen, keeping an off-screen buffer so the drawing survives
    repaints and resizes.
    """
    def __init__(self, parent, ID):
        wx.Window.__init__(self, parent, ID)
        self.SetBackgroundColour("White")
        self.color = "Black"
        self.thickness = 1
        self.pen = wx.Pen(self.color, self.thickness, wx.SOLID)
        # Finished strokes: list of (color, thickness, [coord tuples]).
        self.lines = []
        # Stroke currently being drawn.
        self.curLine = []
        self.pos = (0, 0)
        self.InitBuffer()
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_IDLE, self.OnIdle)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
    def InitBuffer(self):
        """(Re)create the off-screen bitmap and replay all strokes into it."""
        size = self.GetClientSize()
        self.buffer = wx.EmptyBitmap(size.width, size.height)
        dc = wx.BufferedDC(None, self.buffer)
        dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
        dc.Clear()
        self.DrawLines(dc)
        self.reInitBuffer = False
    def GetLinesData(self):
        """Return a shallow copy of the stroke data (used when saving)."""
        return self.lines[:]
    def SetLinesData(self, lines):
        """Replace the stroke data (used when loading) and refresh."""
        self.lines = lines[:]
        self.InitBuffer()
        self.Refresh()
    def OnLeftDown(self, event):
        # Start a new stroke and capture the mouse so motion events keep
        # arriving even outside the window.
        self.curLine = []
        self.pos = event.GetPositionTuple()
        self.CaptureMouse()
    def OnLeftUp(self, event):
        # Finish the stroke: store it with its pen settings, release mouse.
        if self.HasCapture():
            self.lines.append((self.color,
                               self.thickness,
                               self.curLine))
            self.curLine = []
            self.ReleaseMouse()
    def OnMotion(self, event):
        # Only draw while dragging with the left button held down.
        if event.Dragging() and event.LeftIsDown():
            dc = wx.BufferedDC(wx.ClientDC(self), self.buffer)
            self.drawMotion(dc, event)
        event.Skip()
    def drawMotion(self, dc, event):
        dc.SetPen(self.pen)
        newPos = event.GetPositionTuple()
        coords = self.pos + newPos
        # BUG FIX: record the segment so OnLeftUp/DrawLines can replay it;
        # previously curLine stayed empty and strokes were lost on repaint.
        self.curLine.append(coords)
        dc.DrawLine(*coords)
        self.pos = newPos
    def OnSize(self, event):
        # Defer the expensive buffer rebuild to idle time.
        self.reInitBuffer = True
    def OnIdle(self, event):
        if self.reInitBuffer:
            self.InitBuffer()
            self.Refresh(False)
    def OnPaint(self, event):
        # Blitting the prepared buffer is all the paint handler needs to do.
        dc = wx.BufferedPaintDC(self, self.buffer)
    def DrawLines(self, dc):
        """Replay every saved stroke into the given device context."""
        for colour, thickness, line in self.lines:
            pen = wx.Pen(colour, thickness, wx.SOLID)
            dc.SetPen(pen)
            for coords in line:
                # BUG FIX: was dc.DrawKine (typo) -> AttributeError.
                dc.DrawLine(*coords)
    def SetColor(self, color):
        """Change pen colour for subsequent strokes."""
        self.color = color
        self.pen = wx.Pen(self.color, self.thickness, wx.SOLID)
    def SetThickness(self, num):
        """Change pen thickness for subsequent strokes."""
        self.thickness = num
        self.pen = wx.Pen(self.color, self.thickness, wx.SOLID)
class ControlPanel(wx.Panel):
    """Tool palette: toggle-button grids for pen colour and thickness
    that drive the associated SketchWindow.
    """
    BMP_SIZE = 16
    BMP_BORDER = 3
    NUM_COLS = 4
    SPACING = 4
    colorList = ('Black', 'Yellow', 'Red', 'Green', 'Blue', 'Purple',
            'Brown', 'Aquamarine', 'Forest Green', 'Light Blue',
            'Goldenrod', 'Cyan', 'Orange', 'Navy', 'Dark Grey',
            'Light Grey')
    maxThickness = 16
    def __init__(self, parent, ID, sketch):
        wx.Panel.__init__(self, parent, ID, style=wx.RAISED_BORDER)
        self.sketch = sketch
        buttonSize = (self.BMP_SIZE + 2 * self.BMP_BORDER,
                      self.BMP_SIZE + 2 * self.BMP_BORDER)
        colorGrid = self.createColorGrid(parent, buttonSize)
        thicknessGrid = self.createThicknessGrid(buttonSize)
        self.layout(colorGrid, thicknessGrid)
    def createColorGrid(self, parent, buttonSize):
        """Build the grid of colour toggle buttons."""
        self.colorMap = {}      # button id -> colour name
        self.colorButtons = {}  # colour name -> button
        colorGrid = wx.GridSizer(cols=self.NUM_COLS, hgap=2, vgap=2)
        for eachColor in self.colorList:
            bmp = parent.MakeBitmap(eachColor)
            b = buttons.GenBitmapToggleButton(self, -1, bmp, size=buttonSize)
            b.SetBezelWidth(1)
            b.SetUseFocusIndicator(False)
            self.Bind(wx.EVT_BUTTON, self.OnSetColour, b)
            colorGrid.Add(b, 0)
            self.colorMap[b.GetId()] = eachColor
            self.colorButtons[eachColor] = b
        self.colorButtons[self.colorList[0]].SetToggle(True)
        return colorGrid
    def createThicknessGrid(self, buttonSize):
        """Build the grid of thickness (1..maxThickness) toggle buttons."""
        # BUG FIX: was initialised as self.thicknessMap but read everywhere
        # as self.thicknessIdMap, raising AttributeError on first use.
        self.thicknessIdMap = {}    # button id -> thickness
        self.thicknessButtons = {}  # thickness -> button
        thicknessGrid = wx.GridSizer(cols=self.NUM_COLS, hgap=2, vgap=2)
        for x in range(1, self.maxThickness + 1):
            b = buttons.GenToggleButton(self, -1, str(x), size=buttonSize)
            b.SetBezelWidth(1)
            b.SetUseFocusIndicator(False)
            self.Bind(wx.EVT_BUTTON, self.OnSetThickness, b)
            thicknessGrid.Add(b, 0)
            self.thicknessIdMap[b.GetId()] = x
            self.thicknessButtons[x] = b
        self.thicknessButtons[1].SetToggle(True)
        return thicknessGrid
    def layout(self, colorGrid, thicknessGrid):
        """Stack the two grids vertically inside the panel."""
        box = wx.BoxSizer(wx.VERTICAL)
        box.Add(colorGrid, 0, wx.ALL, self.SPACING)
        # BUG FIX: the thickness grid was created but never added to the
        # sizer, so it was invisible.
        box.Add(thicknessGrid, 0, wx.ALL, self.SPACING)
        self.SetSizer(box)
        box.Fit(self)
    def OnSetColour(self, event):
        """Switch the sketch pen colour; un-toggle the previous button."""
        color = self.colorMap[event.GetId()]
        if color != self.sketch.color:
            self.colorButtons[self.sketch.color].SetToggle(False)
            self.sketch.SetColor(color)
    def OnSetThickness(self, event):
        """Switch the sketch pen thickness; un-toggle the previous button."""
        thickness = self.thicknessIdMap[event.GetId()]
        if thickness != self.sketch.thickness:
            self.thicknessButtons[self.sketch.thickness].SetToggle(False)
            self.sketch.SetThickness(thickness)
class SketchFrame(wx.Frame):
    def __init__(self, parent):
        """Top-level frame wiring together the sketch canvas, status bar,
        menu bar, toolbar and the colour/thickness control panel."""
        wx.Frame.__init__(self, parent, -1, "Sketch Frame", size=(800, 600))
        self.filename = ""
        self.title = "SK"
        # NOTE(review): "cketch" looks like a typo for "Sketch" in the
        # filter label -- confirm before changing the user-visible text.
        self.wildcard = "cketch file (*.sketch)|*.sketch|All files(*.*)|*.*"
        self.sketch = SketchWindow(self, -1)
        self.sketch.Bind(wx.EVT_MOTION, self.OnSketchMotion)
        self.initStatusBar()
        self.createMenuBar()
        self.createToolBar()
        self.createPanel()
    def createPanel(self):
        # Control panel on the left, sketch canvas stretching to fill the rest.
        controlPanel = ControlPanel(self, -1, self.sketch)
        box = wx.BoxSizer(wx.HORIZONTAL)
        box.Add(controlPanel, 0, wx.EXPAND)
        box.Add(self.sketch, 1, wx.EXPAND)
        self.SetSizer(box)
    def createToolBar(self):
        """Populate the toolbar: file tools, a separator, then colour tools."""
        toolbar = self.CreateToolBar()
        for each in self.toolbarData():
            self.createSimpleTool(toolbar, *each)
        toolbar.AddSeparator()
        for each in self.toolbarColorData():
            self.createColorTool(toolbar, each)
        toolbar.Realize()  # lay out and display the toolbar
    def createSimpleTool(self, toolbar, label, filename, help, handler):
        # An empty label marks a separator entry in toolbarData().
        if not label:
            toolbar.AddSeparator()
            return
        bmp = wx.Image(filename, wx.BITMAP_TYPE_BMP).ConvertToBitmap()
        tool = toolbar.AddSimpleTool(-1, bmp, label, help)
        self.Bind(wx.EVT_MENU, handler, tool)
    def toolbarData(self):
        # (label, bitmap file, help string, handler); empty label = separator.
        # NOTE(review): "shetch" is a typo in the user-visible help string.
        return(("New", "new.bmp", "Create new sketch", self.OnNew),
                ("", "", "", ""),
                ("Open", "open.bmp", "Open existing shetch", self.OnOpen),
                ("Save", "save.bmp", "save existing sketch", self.OnSave))
def createColorTool(self, toolbar, color): #创建颜色工具
bmp= self.MakeBitmap(color)
newId = wx.NewId()
tool = toolbar.AddRadioTool(-1, bmp, shortHelp=color)
self.Bind(wx.EVT_MENU, self.OnColor, tool)
    def MakeBitmap(self, color):
        """Create a small solid-colour bitmap used by colour buttons/tools."""
        bmp = wx.EmptyBitmap(16, 15)
        dc = wx.MemoryDC()
        dc.SelectObject(bmp)
        dc.SetBackground(wx.Brush(color))
        dc.Clear()
        dc.SelectObject(wx.NullBitmap)  # detach so the bitmap can be used
        return bmp
    def toolbarColorData(self):
        # Colours exposed as quick-select toolbar radio tools.
        return("Black", "Red", "Green", "Blue")
    def initStatusBar(self):
        # Three fields, relative widths 1:2:3 (position, point count, line count).
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetFieldsCount(3)
        self.statusbar.SetStatusWidths([-1, -2, -3])
    def OnSketchMotion(self, event):
        # Mirror sketch state into the status bar on every mouse move.
        self.statusbar.SetStatusText("Pos: %s" %str(event.GetPositionTuple()), 0)
        self.statusbar.SetStatusText("Current Pts: %s" %len(self.sketch.curLine), 1)
        self.statusbar.SetStatusText("Line Count:%s" %len(self.sketch.lines), 2)
        event.Skip()
    def menuData(self):
        # Nested menu description: (menu label, (items...)); each item is
        # either (label, status text, handler[, kind]) or, for a submenu,
        # (label, (sub-items...)).  Empty labels become separators.
        return [("&File", (
            ("New", "New sketch file", self.OnNew),
            ("Open", "Open sketch file", self.OnOpen),
            ("Save", "Save sketch file", self.OnSave),
            ("", "", ""),
            ("Color", (
                ("Black", "", self.OnColor, wx.ITEM_RADIO),
                ("Red", "", self.OnColor, wx.ITEM_RADIO),
                ("Green", "", self.OnColor, wx.ITEM_RADIO),
                ("Blue", "", self.OnColor, wx.ITEM_RADIO),
                ("Other", "", self.OnOtherColor, wx.ITEM_RADIO))),
            ("", "", ""),
            ("Quit", "Quit", self.OnCloseWindow)))]
    def OnOtherColor(self, event):
        """Let the user pick an arbitrary colour via the colour dialog."""
        dlg = wx.ColourDialog(self)
        dlg.GetColourData().SetChooseFull(True)  # show the full colour picker
        if dlg.ShowModal() == wx.ID_OK:
            self.sketch.SetColor(dlg.GetColourData().GetColour())
        dlg.Destroy()
        event.Skip()
def createMenuBar(self):
    """Build the frame's menu bar from the menuData() description."""
    bar = wx.MenuBar()
    for menu_label, entries in self.menuData():
        bar.Append(self.createMenu(entries), menu_label)
    self.SetMenuBar(bar)
def createMenu(self, menuData):
    """Recursively build a wx.Menu from a sequence of entry tuples."""
    menu = wx.Menu()
    for entry in menuData:
        if len(entry) == 2:
            # Two-element entries are (label, sub-entries) submenus.
            sub_label, sub_entries = entry
            menu.AppendMenu(wx.NewId(), sub_label, self.createMenu(sub_entries))
        else:
            self.createMenuItem(menu, *entry)
    return menu
def createMenuItem(self, menu, label, status, handler, kind=wx.ITEM_NORMAL):
    """Append one menu item (or a separator for an empty label) and bind its handler."""
    if label:
        item = menu.Append(-1, label, status, kind)
        self.Bind(wx.EVT_MENU, handler, item)
    else:
        menu.AppendSeparator()
def SaveFile(self):
    """Pickle the sketch's line data to self.filename (no-op when unset)."""
    print("save file, filename=%s" %self.filename)
    if self.filename:
        data = self.sketch.GetLinesData()
        # why: the context manager closes the file even if dump() raises,
        # where the old open()/close() pair would leak the handle.
        with open(self.filename, "w") as f:
            cPickle.dump(data, f)
def ReadFile(self):
    """Load pickled line data from self.filename into the sketch.

    Shows a message box instead of raising when the file is not a
    valid sketch pickle.
    """
    if self.filename:
        try:
            # why: 'with' guarantees the handle is closed even when load()
            # raises, where the old open()/close() pair would leak it.
            with open(self.filename, 'r') as f:
                data = cPickle.load(f)
            self.sketch.SetLinesData(data)
        except cPickle.UnpicklingError:
            wx.MessageBox("%s is not a sketch file." %self.filename, "oops!",
                style=wx.OK|wx.ICON_EXCLAMATION)
def OnOpen(self, event):  # show the Open dialog
    """Prompt for a sketch file, load it and reflect the name in the title."""
    dlg = wx.FileDialog(self, "open sketch file...", os.getcwd(),
        style=wx.OPEN, wildcard=self.wildcard)
    if dlg.ShowModal() == wx.ID_OK:
        self.filename = dlg.GetPath()
        self.ReadFile()
        self.SetTitle(self.title + ' -- '+ self.filename)
    # why: the dialog was leaked before; OnSaveAs and OnOtherColor already
    # destroy theirs, so do the same here.
    dlg.Destroy()
def OnSave(self, event):
    """Save to the current file, falling back to Save As when no file is set."""
    if self.filename:
        self.SaveFile()
    else:
        self.OnSaveAs(event)
def OnSaveAs(self, event):
    """Prompt for a destination path (defaulting the extension) and save."""
    print("save as")
    dlg = wx.FileDialog(self, "Save sketch as...", os.getcwd(),
        style=wx.SAVE | wx.OVERWRITE_PROMPT,
        wildcard=self.wildcard)
    if dlg.ShowModal() == wx.ID_OK:
        self.filename = dlg.GetPath()
        print("filename=%s" %self.filename)
        if not os.path.splitext(self.filename)[1]:
            # User typed a bare name: append the default extension.
            self.filename = self.filename + '.sketch'
        self.SaveFile()
        self.SetTitle(self.title + ' -- '+ self.filename)
    dlg.Destroy()
def OnNew(self, event):
    """Handle File > New; intentionally a no-op (not implemented yet)."""
    pass
def OnColor(self, event):
    """Switch the drawing color.

    The event may come from a menu item or a toolbar radio tool; menu items
    carry the color in their label, toolbar tools in their short help.
    """
    menubar = self.GetMenuBar()
    itemId = event.GetId()
    item = menubar.FindItemById(itemId)
    if not item:
        # Not a menu item, so it must be a toolbar tool.
        # NOTE(review): classic-wx ToolBar lookup method name — confirm
        # FindById exists in the targeted wx version.
        toolbar = self.GetToolBar()
        item = toolbar.FindById(itemId)
        color = item.GetShortHelp()
    else:
        color = item.GetLabel()
    self.sketch.SetColor(color)
def OnCloseWindow(self, event):
    """Destroy the frame; with no other top-level windows this ends MainLoop."""
    self.Destroy()
if __name__ == '__main__':
    print("main")
    # NOTE(review): wx.PySimpleApp is classic-wx and removed in Phoenix
    # (use wx.App there) — confirm the wx version this targets.
    app = wx.PySimpleApp()
    frame = SketchFrame(None)
    frame.Show(True)
    app.MainLoop()
| [
"sdphome@qq.com"
] | sdphome@qq.com |
25272b331eb766592f1b2361e63de77a581afc78 | 14fac282147a83e5607aceb7f5f589921fbf9329 | /choicer/test/__init__.py | 6fd5466da9544eef5d1869a0fa787b1ab0ca7353 | [
"MIT"
] | permissive | tonimichel/django-choicer | 4079cce7b45bb6781c309c4aa6e2a2e37b0ce291 | 69bce93585c6b3711f90c893d449e3eb3ce44eb5 | refs/heads/master | 2021-01-21T13:04:54.348088 | 2016-05-09T11:20:35 | 2016-05-09T11:20:35 | 54,739,325 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from __future__ import absolute_import, print_function, unicode_literals
import unittest
from choicer.test import core_tests
def suite():
    """Collect the choicer core test cases into a single test suite."""
    loader = unittest.TestLoader()
    core_case = loader.loadTestsFromTestCase(core_tests.ChoicerTests)
    return unittest.TestSuite([core_case])
def run_all():
    """Run the full suite with verbose output and return the result object."""
    runner = unittest.TextTestRunner(verbosity=2)
    return runner.run(suite())
| [
"toni.michel@schnapptack.de"
] | toni.michel@schnapptack.de |
def encrypt_caesar(plaintext, key):
    """Caesar-shift the lowercase letters of *plaintext* by *key* positions.

    Non-letter characters (spaces, digits, punctuation) pass through
    unchanged instead of crashing, and any integer key wraps around the
    26-letter alphabet.
    """
    table = "abcdefghijklmnopqrstuvwxyz"
    encoded = []
    for ch in plaintext:
        if ch in table:
            # why: the whole shifted offset must be reduced mod 26; the old
            # expression `(ord(ch) - 97) + key % 52` applied % only to the
            # key (and with the wrong modulus), indexing past the table.
            encoded.append(table[(ord(ch) - 97 + key) % 26])
        else:
            encoded.append(ch)
    return "".join(encoded)

def main():
    """Read a message and a key from the user and print the ciphertext."""
    p_text = input("Enter the message you'd like encrypted.\n").lower()
    # why: int() instead of eval() — never evaluate raw user input as code.
    key = int(input("What's the key? : "))
    c_text = encrypt_caesar(p_text, key)
    print("Your encoded message is {0}.".format(c_text))

if __name__ == "__main__":
    main()
"dominguezlucio@outlook.com"
] | dominguezlucio@outlook.com |
52e3abab22df23c0d41785c571071dfae963b422 | 532775cf6728c43fb0346c758a02ae9f55651f14 | /main.py | cac66817a8910d71fce6577967555b37f8c9b044 | [] | no_license | adarsh1783/SearchEngine | 9402c517e59eaaed8d9e9082076e5eb6ba5798d1 | f3e527c5f30ee550c8c1527ab97f10f53c3211a3 | refs/heads/master | 2022-09-26T17:14:07.500379 | 2020-06-07T11:13:27 | 2020-06-07T11:13:27 | 270,262,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,513 | py | from sentenceSegmentation import SentenceSegmentation
from tokenization import Tokenization
from inflectionReduction import InflectionReduction
from stopwordRemoval import StopwordRemoval
from informationRetrieval import InformationRetrieval
from evaluation import Evaluation
from sys import version_info
import argparse
import json
import matplotlib.pyplot as plt
# Input compatibility for Python 2 and Python 3
if version_info.major == 3:
pass
elif version_info.major == 2:
try:
input = raw_input
except NameError:
pass
else:
print ("Unknown python version - input function not safe")
class SearchEngine:
    """End-to-end IR pipeline over the Cranfield dataset.

    Wires together preprocessing (segmentation, tokenization, inflection
    reduction, stopword removal), indexing/ranking and evaluation. All
    paths and component choices come from the parsed command-line `args`.
    """

    def __init__(self, args):
        self.args = args

        self.tokenizer = Tokenization()
        self.sentenceSegmenter = SentenceSegmentation()
        self.inflectionReducer = InflectionReduction()
        self.stopwordRemover = StopwordRemoval()

        self.informationRetriever = InformationRetrieval(self.args.IRmodel)
        self.evaluator = Evaluation()

    def segmentSentences(self, text):
        """
        Call the required sentence segmenter
        """
        if self.args.segmenter == "naive":
            return self.sentenceSegmenter.naive(text)
        elif self.args.segmenter == "punkt":
            return self.sentenceSegmenter.punkt(text)

    def tokenize(self, text):
        """
        Call the required tokenizer
        """
        if self.args.tokenizer == "naive":
            return self.tokenizer.naive(text)
        elif self.args.tokenizer == "ptb":
            return self.tokenizer.pennTreeBank(text)

    def reduceInflection(self, text):
        """
        Call the required stemmer/lemmatizer
        """
        return self.inflectionReducer.reduce(text)

    def removeStopwords(self, text):
        """
        Call the required stopword remover
        """
        return self.stopwordRemover.fromList(text)

    def _dumpJson(self, obj, filename):
        """Write *obj* as JSON into the output folder.

        why: the original `json.dump(obj, open(path, 'w'))` pattern never
        closed the file handle; the context manager guarantees it.
        """
        with open(self.args.out_folder + filename, 'w') as out_file:
            json.dump(obj, out_file)

    def _preprocess(self, texts, suffix):
        """Run the full preprocessing pipeline over *texts*.

        Segments, tokenizes, stems/lemmatizes and removes stopwords,
        persisting each intermediate stage as "<stage>_<suffix>" in the
        output folder (same file names as before).
        """
        segmented = [self.segmentSentences(text) for text in texts]
        self._dumpJson(segmented, "segmented_" + suffix)

        tokenized = [self.tokenize(text) for text in segmented]
        self._dumpJson(tokenized, "tokenized_" + suffix)

        reduced = [self.reduceInflection(text) for text in tokenized]
        self._dumpJson(reduced, "reduced_" + suffix)

        stopword_removed = [self.removeStopwords(text) for text in reduced]
        self._dumpJson(stopword_removed, "stopword_removed_" + suffix)

        return stopword_removed

    def preprocessQueries(self, queries):
        """
        Preprocess the queries - segment, tokenize, stem/lemmatize and remove stopwords
        """
        # why: queries and docs used copy-pasted identical pipelines; both
        # now share _preprocess and differ only in the output file suffix.
        return self._preprocess(queries, "queries.txt")

    def preprocessDocs(self, docs):
        """
        Preprocess the documents
        """
        return self._preprocess(docs, "docs.txt")

    def evaluateDataset(self):
        """
        - preprocesses the queries and documents, stores in output folder
        - invokes the IR system
        - evaluates precision, recall, fscore, nDCG and MAP
          for all queries in the Cranfield dataset
        - produces graphs of the evaluation metrics in the output folder
        """
        # why: use self.args (not the module-global `args`) so this method
        # also works when SearchEngine is constructed from another module.
        # Read queries
        with open(self.args.dataset + "cran_queries.json", 'r') as queries_file:
            queries_json = json.load(queries_file)[:]
        query_ids = [item["query number"] for item in queries_json]
        queries = [item["query"] for item in queries_json]
        # Process queries
        processedQueries = self.preprocessQueries(queries)

        # Read documents
        with open(self.args.dataset + "cran_docs.json", 'r') as docs_file:
            docs_json = json.load(docs_file)[:]
        doc_ids = [item["id"] for item in docs_json]
        docs = [item["body"] for item in docs_json]
        # Process documents
        processedDocs = self.preprocessDocs(docs)

        # Build document index
        self.informationRetriever.buildIndex(processedDocs, doc_ids)
        # Rank the documents for each query
        doc_IDs_ordered = self.informationRetriever.rank(processedQueries)

        # Read relevance judgements
        with open(self.args.dataset + "cran_qrels.json", 'r') as qrels_file:
            qrels = json.load(qrels_file)[:]

        # Calculate precision, recall, f-score, MAP and nDCG for k = 1 to 10
        precisions, recalls, fscores, MAPs, nDCGs = [], [], [], [], []
        for k in range(1, 11):
            precision = self.evaluator.meanPrecision(
                doc_IDs_ordered, query_ids, qrels, k)
            precisions.append(precision)
            recall = self.evaluator.meanRecall(
                doc_IDs_ordered, query_ids, qrels, k)
            recalls.append(recall)
            fscore = self.evaluator.meanFscore(
                doc_IDs_ordered, query_ids, qrels, k)
            fscores.append(fscore)
            print("Precision, Recall and F-score @ " +
                str(k) + " : " + str(precision) + ", " + str(recall) +
                ", " + str(fscore))
            MAP = self.evaluator.meanAveragePrecision(
                doc_IDs_ordered, query_ids, qrels, k)
            MAPs.append(MAP)
            nDCG = self.evaluator.meanNDCG(
                doc_IDs_ordered, query_ids, qrels, k)
            nDCGs.append(nDCG)
            print("MAP, nDCG @ " +
                str(k) + " : " + str(MAP) + ", " + str(nDCG))

        # Plot the metrics and save plot
        plt.plot(range(1, 11), precisions, label="Precision")
        plt.plot(range(1, 11), recalls, label="Recall")
        plt.plot(range(1, 11), fscores, label="F-Score")
        plt.plot(range(1, 11), MAPs, label="MAP")
        plt.plot(range(1, 11), nDCGs, label="nDCG")
        plt.legend()
        plt.title("Evaluation Metrics - Cranfield Dataset")
        plt.xlabel("k")
        plt.savefig(self.args.out_folder + "eval_plot.png")

    def handleCustomQuery(self):
        """
        Take a custom query as input and return top five relevant documents
        """
        # Get query
        print("Enter query below")
        query = input()
        # Process query
        processedQuery = self.preprocessQueries([query])[0]

        # Read documents
        with open(self.args.dataset + "cran_docs.json", 'r') as docs_file:
            docs_json = json.load(docs_file)[:]
        doc_ids = [item["id"] for item in docs_json]
        docs = [item["body"] for item in docs_json]
        # Process documents
        processedDocs = self.preprocessDocs(docs)

        # Build document index
        self.informationRetriever.buildIndex(processedDocs, doc_ids)
        # Rank the documents for the query
        doc_IDs_ordered = self.informationRetriever.rank([processedQuery])[0]

        # Print the IDs of first five documents
        print("\nTop five document IDs : ")
        for id_ in doc_IDs_ordered[:5]:
            print(id_)
if __name__ == "__main__":

    # Create an argument parser
    parser = argparse.ArgumentParser(description='main.py')

    # Tunable parameters as external arguments
    parser.add_argument('-dataset', default = "cranfield/",
                        help = "Path to the dataset folder")
    parser.add_argument('-out_folder', default = "output/",
                        help = "Path to output folder")
    parser.add_argument('-segmenter', default = "punkt",
                        help = "Sentence Segmenter Type [naive|punkt]")
    parser.add_argument('-tokenizer',  default = "ptb",
                        help = "Tokenizer Type [naive|ptb]")
    parser.add_argument('-custom', action = "store_true",
                        help = "Take custom query as input")
    parser.add_argument('-IRmodel', default = "gensim",
                        help = "module name for information retrieval [naive|gensim]")

    # Parse the input arguments
    args = parser.parse_args()

    # Create an instance of the Search Engine
    searchEngine = SearchEngine(args)

    # Either handle query from user or evaluate on the complete dataset
    # (-custom switches to interactive single-query mode).
    if args.custom:
        searchEngine.handleCustomQuery()
    else:
        searchEngine.evaluateDataset()
| [
"noreply@github.com"
] | noreply@github.com |
b4ee36238f1638496b350f08c5a2bdceb96da3ee | 764fc4145a4136b48d4b6d95050adbedd3758ce3 | /CircularBuffers.py | a2245ade003ea52bb3cd7c4a7e7958ad9ea9ceec | [] | no_license | cmaher92/algorithms | acf373a4da32c066cd3458a4431ec51676072fc1 | 9ed1531ffc2c874a861f792b7e70b1b39d9ee0d4 | refs/heads/master | 2021-01-10T13:11:01.937289 | 2016-02-03T21:27:59 | 2016-02-03T21:27:59 | 50,763,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # Vectors
# Linked Lists
# Circular
# Resizable Arrays
# Circular Buffers
# The useful property of a circular buffer is that it does not need to
# have its elements shuffled around when one is consumed.
# circular is FIFO
# Alan's definition
# Array with many cells, wraps around
# An example is an array of 30 frames, each object is a single frame
# A circular buffer can be implemented using four pointers,
# or two pointers and two integers:
# buffer start in memory
# buffer end in memory, or buffer capacity
# start of valid data (index or pointer)
# end of valid data (index or pointer), or amount of data currently in the buffer (integer)
# methods
# insert
| [
"connormmaher@gmail.com"
] | connormmaher@gmail.com |
80338f57e4494dc5fd84346bfab8cd6f883a4347 | b5dabe2e6da0e53498650b3c3f3f944c20f3e050 | /dolo/compiler/function_compiler_numexpr.py | e20ec37370ffaf43ad7e04c17d62a3028aaf64d8 | [
"BSD-2-Clause"
] | permissive | christophe-gouel/dolo | 12d582ecf3289aa9168f5d825da83a6284d5a669 | d9aef6d78d19899e2669e49ee6b7ad9aacf0e35d | refs/heads/master | 2020-12-24T09:31:19.389548 | 2018-01-04T20:42:19 | 2018-01-04T20:42:19 | 6,064,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,105 | py | from __future__ import division
from dolo.symbolic.derivatives import DerivativesTree
from dolo.symbolic.symbolic import TSymbol
from dolo.compiler.function_compiler import compile_multiargument_function as compile_multiargument_function_regular
DerivativesTree.symbol_type = TSymbol
def compile_multiargument_function(equations, args_list, args_names, parms, fname='anonymous_function', diff=True, return_text=False, order='rows'):
return compile_multiargument_function_regular(equations, args_list, args_names, parms, fname=fname, diff=diff, return_text=return_text, use_numexpr=True, order=order)
if __name__ == '__main__':
import sympy
from pprint import pprint
[w,x,y,z,t] = vars = sympy.symbols('w, x, y, z, t')
[a,b,c,d] = parms = sympy.symbols('a, b, c, d')
[k_1,k_2] = s_sym = sympy.symbols('k_1, k_2')
[x_1,x_2] = x_sym = sympy.symbols('x_1, x_2')
args_list = [
s_sym,
x_sym
]
from sympy import exp
eqs = [
x + y*k_2 + z*exp(x_1 + t),
(y + z)**0.3,
z,
(k_1 + k_2)**0.3,
k_2**x_1
]
sdict = {s:eqs[i] for i,s in enumerate(vars) }
from dolo.misc.triangular_solver import solve_triangular_system
order = solve_triangular_system(sdict, return_order=True)
ordered_vars = [ v for v in order ]
ordered_eqs = [ eqs[vars.index(v)] for v in order ]
pprint(ordered_vars)
pprint(ordered_eqs)
import numpy
floatX = numpy.float32
s0 = numpy.array( [2,5], dtype=floatX)
x0 = numpy.array( [2,2], dtype=floatX)
p0 = numpy.array( [4,3], dtype=floatX)
N = 2000
s1 = numpy.column_stack( [s0]*N )
x1 = numpy.column_stack( [x0]*N )
p1 = numpy.array( [4,3, 6, 7], dtype=floatX )
# f = create_fun()
#
# test = f(s1,x1,p0)
# print(test)
args_names = ['s','x']
#
#
solution = solve_triangular_system(sdict)
vals = [sympy.sympify(solution[v]) for v in ordered_vars]
from dolo.compiler.compiling import compile_multiargument_function as numpy_compiler
from dolo.compiler.compiling_theano import compile_multiargument_function as theano_compiler
f_numexpr = compile_multiargument_function( vals, args_list, args_names, parms )
f_numpy = numpy_compiler( vals, args_list, args_names, parms )
f_theano = theano_compiler( vals, args_list, args_names, parms )
n_exp = 1000
import time
r = time.time()
for i in range(n_exp):
res_numexpr = f_numexpr(s1,x1,p1)
# res = numpy.row_stack(res)
s = time.time()
print('Time (numexpr) : '+ str(s-r))
r = time.time()
for i in range(n_exp):
res_theano = f_theano(s1,x1,p1)
# res = numpy.row_stack(res)
s = time.time()
print('Time (theano) : '+ str(s-r))
r = time.time()
for i in range(n_exp):
res_numpy = f_numpy(s1,x1,p1)
# res = numpy.row_stack(res)
s = time.time()
print('Time (numpy) : '+ str(s-r))
print( abs(res_numpy - res_theano).max() )
print( abs(res_numexpr - res_numpy).max() ) | [
"pablo.winant@gmail.com"
] | pablo.winant@gmail.com |
eb0ddf382bb720cb968e3c942df4b16d726a4bfa | 42075953df65bdd6b02e02da7dcb3d7fab74fbbd | /superset/charts/schemas.py | bf1b57b321922ea7c9dac7dba8d130b5c4fb3eff | [
"Apache-2.0",
"OFL-1.1"
] | permissive | milimetric/incubator-superset | f61c00aeca7d7aa782816120037e63872df117a1 | 7b11b44abe08c131fb9df6ec479ea29c24c784ec | refs/heads/master | 2022-04-23T19:29:46.935978 | 2020-04-16T09:54:45 | 2020-04-16T09:54:45 | 256,266,649 | 0 | 0 | Apache-2.0 | 2020-04-16T16:14:18 | 2020-04-16T16:14:17 | null | UTF-8 | Python | false | false | 2,465 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Union
from marshmallow import fields, Schema, ValidationError
from marshmallow.validate import Length
from superset.exceptions import SupersetException
from superset.utils import core as utils
# JSON Schemas used to validate query-string / request payloads on the charts API.
get_delete_ids_schema = {"type": "array", "items": {"type": "integer"}}
thumbnail_query_schema = {
    "type": "object",
    "properties": {"force": {"type": "boolean"}},
}
def validate_json(value: Union[bytes, bytearray, str]) -> None:
    """Marshmallow field validator: raise ValidationError when *value* is not valid JSON."""
    try:
        utils.validate_json(value)
    except SupersetException:
        # Translate the internal error into the exception type marshmallow expects.
        raise ValidationError("JSON not valid")
class ChartPostSchema(Schema):
    """Request-body schema for creating a chart (POST)."""

    slice_name = fields.String(required=True, validate=Length(1, 250))
    description = fields.String(allow_none=True)
    viz_type = fields.String(allow_none=True, validate=Length(0, 250))
    owners = fields.List(fields.Integer())
    # Serialized chart configuration; must parse as JSON.
    params = fields.String(allow_none=True, validate=validate_json)
    cache_timeout = fields.Integer(allow_none=True)
    datasource_id = fields.Integer(required=True)
    datasource_type = fields.String(required=True)
    datasource_name = fields.String(allow_none=True)
    dashboards = fields.List(fields.Integer())
class ChartPutSchema(Schema):
    """Request-body schema for updating a chart (PUT); all fields optional."""

    slice_name = fields.String(allow_none=True, validate=Length(0, 250))
    description = fields.String(allow_none=True)
    viz_type = fields.String(allow_none=True, validate=Length(0, 250))
    owners = fields.List(fields.Integer())
    # NOTE(review): unlike ChartPostSchema, params is not validated with
    # validate_json here — confirm whether that is intentional.
    params = fields.String(allow_none=True)
    cache_timeout = fields.Integer(allow_none=True)
    datasource_id = fields.Integer(allow_none=True)
    datasource_type = fields.String(allow_none=True)
    dashboards = fields.List(fields.Integer())
| [
"noreply@github.com"
] | noreply@github.com |
a21dfa9182883f7045cd35880f722f3d9a36a0ab | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /tests/terraform/checks/resource/azure/test_SynapseWorkspaceEnablesDataExfilProtection.py | 2f0a8e8e46b503edb13ed42ed956bc6d6a70830a | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 1,453 | py | import unittest
from pathlib import Path
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.azure.SynapseWorkspaceEnablesDataExfilProtection import check
from checkov.terraform.runner import Runner
class TestSynapseWorkspaceEnablesDataExfilProtection(unittest.TestCase):
    def test(self):
        """Run the check against the example Terraform and verify the verdicts."""
        examples_dir = Path(__file__).parent / "example_SynapseWorkspaceEnablesDataExfilProtection"

        report = Runner().run(
            root_folder=str(examples_dir),
            runner_filter=RunnerFilter(checks=[check.id]),
        )
        summary = report.get_summary()

        expected_passing = {
            "azurerm_synapse_workspace.pass",
        }
        expected_failing = {
            "azurerm_synapse_workspace.fail",
            "azurerm_synapse_workspace.fail2",
        }

        self.assertEqual(summary["passed"], 1)
        self.assertEqual(summary["failed"], 2)
        self.assertEqual(summary["skipped"], 0)
        self.assertEqual(summary["parsing_errors"], 0)
        self.assertEqual(summary["resource_count"], 3)  # 3 unknown

        self.assertEqual(expected_passing, {c.resource for c in report.passed_checks})
        self.assertEqual(expected_failing, {c.resource for c in report.failed_checks})
self.assertEqual(failing_resources, failed_check_resources)
if __name__ == "__main__":
    # Allow running this test module directly (outside the full suite).
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
848b00dce8c68b93c85b751b4d5c57683f6980f1 | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /homeassistant/components/airzone/coordinator.py | ba0296557a1be58bacea112719a507f82be0fb6b | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 1,309 | py | """The Airzone integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any
from aioairzone.exceptions import AirzoneError
from aioairzone.localapi import AirzoneLocalApi
import async_timeout
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import AIOAIRZONE_DEVICE_TIMEOUT_SEC, DOMAIN
# Poll the local Airzone device once per minute.
SCAN_INTERVAL = timedelta(seconds=60)

_LOGGER = logging.getLogger(__name__)
class AirzoneUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
    """Class to manage fetching data from the Airzone device."""

    def __init__(self, hass: HomeAssistant, airzone: AirzoneLocalApi) -> None:
        """Initialize.

        Args:
            hass: the Home Assistant instance.
            airzone: local-API client used to poll the device.
        """
        self.airzone = airzone

        super().__init__(
            hass,
            _LOGGER,
            name=DOMAIN,
            update_interval=SCAN_INTERVAL,
        )

    async def _async_update_data(self) -> dict[str, Any]:
        """Update data via library."""
        # Bound the whole poll so a hung device cannot stall the coordinator.
        async with async_timeout.timeout(AIOAIRZONE_DEVICE_TIMEOUT_SEC):
            try:
                await self.airzone.update()
            except AirzoneError as error:
                # Surface as UpdateFailed so the coordinator applies its
                # standard retry/backoff handling.
                raise UpdateFailed(error) from error
        return self.airzone.data()
| [
"noreply@github.com"
] | noreply@github.com |
5fff7525d5f2305f06f03162747b3f3fad7c7ead | 61a6f6e5756066546b4be09e45c8214fcc6e15ba | /mutvariant_extraction.py | a04dbc75afa19b6fdcd943b282a9a0f50434e32c | [] | no_license | outbreak-info/topic_classifier | 9f07164105a70dcfaeb2edf1aa322688392ecde1 | d5838f9aa4ee55b5a67fcbd77fbfcb0b0937db74 | refs/heads/main | 2023-04-15T11:49:36.392049 | 2022-12-15T02:48:37 | 2022-12-15T02:48:37 | 359,611,695 | 0 | 4 | null | 2021-07-02T22:43:59 | 2021-04-19T22:10:44 | Jupyter Notebook | UTF-8 | Python | false | false | 630 | py | import os
import re
import requests
import json
import pathlib
import pandas as pd
from pandas import read_csv
from datetime import datetime
from src.common import *
from src.extract_variants import *
# Resolve the data/results folders relative to this script's location.
script_path = pathlib.Path(__file__).parent.absolute()
DATAPATH = os.path.join(script_path,'data/')
RESULTSPATH = os.path.join(script_path,'results/')
# Fetch metadata for every publication, then merge the text fields into one frame.
# NOTE(review): token_dict and lineagequerylist come from the star-imports
# (src.common / src.extract_variants) — confirm which module defines each.
allids = get_pub_ids('all')
metadf = batch_fetch_dated_meta(allids)
textdf = merge_texts(metadf)
# Extract mutation mentions and lineage mentions; export=True writes results to disk.
mutationsclean = extract_mutations(RESULTSPATH, textdf, token_dict, export=True)
cleanlineageslist = extract_lineages(DATAPATH, RESULTSPATH, lineagequerylist, textdf, export=True)
"gtsueng@gmail.com"
] | gtsueng@gmail.com |
4542ce8b7fa747af2ca0b266c787f0c496bb198e | 54dc4eb7d4809f902ed8e4aa5a230578c5ab91e7 | /mysocialmedia/mysocialmedia/asgi.py | 8b2b8327642bb2b0a567c329b8b91c3fa18d986a | [] | no_license | lufepama/sinparo-django-react | 5c901d717fe4460753b49aff55a5b0dcf0961907 | ff7042a69194c54421370ad1a4b89933e7256b4c | refs/heads/main | 2023-03-20T23:26:28.895045 | 2021-03-08T23:34:44 | 2021-03-08T23:34:44 | 345,486,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
ASGI config for mysocialmedia project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project's settings module before building the ASGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysocialmedia.settings')
application = get_asgi_application()
| [
"lufepama31@gmail.com"
] | lufepama31@gmail.com |
1d8f54ca4a88c470d03a417bd277657ac061b42a | 6622c5a9eaac04540d7704372edc6050a735f522 | /df_virginica_setosa.py | 4cf53b99c8ab2b7d3420812fdde3cc9eb3ac92ba | [] | no_license | mihirdshirur/NNLS-Assignment-3 | a603d50a6e23fd735e1fba99b15bb4b13e8ea15d | f13df10aef21d4ac2b76381547c6a55d350017b0 | refs/heads/main | 2023-08-29T17:40:25.841161 | 2021-11-12T17:29:06 | 2021-11-12T17:29:06 | 427,441,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,233 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ---- K-means on iris: setosa (label -1) vs virginica (label +1) ----
df = pd.read_csv("/Users/apple/Desktop/Coding/Sem5/Assignment3_Code/iris.data")
# NOTE(review): iris.data has no header row, but read_csv consumes the first
# line as one — confirm whether header=None was intended.

error1 = []  # mean distance-to-nearest-centroid on the test set, one entry per K
x = []       # the K value corresponding to each entry of error1

data_setosa = df.iloc[0:50, 0:4].values
data_virginica = df.iloc[100:150, 0:4].values
# (versicolor rows 50:100 are not used in this binary setosa-vs-virginica task)

# Append class labels as a fifth column: setosa -> -1, virginica -> +1.
data_class_setosa = np.concatenate((data_setosa, np.ones((50, 1)) * (-1)), axis=1)
data_class_virginica = np.concatenate((data_virginica, np.ones((50, 1)) * 1), axis=1)
data_class = np.concatenate((data_class_setosa, data_class_virginica), axis=0)
np.random.shuffle(data_class)

percentage = 0.7                 # Percentage of training set
size = int(percentage * 100)     # why: derive from percentage instead of hard-coding 70
training_set = data_class[0:size, :]        # Training data
test_set = data_class[size:100, :]          # Test data

# We implement K means clustering for K = 1 .. 24
K = 1
while K < 25:
    mu = np.random.rand(K, 4)  # Kx4 centroid matrix
    # Shift the random centroids toward the per-feature data means so they
    # start in the neighborhood of the samples.
    for t in range(K):
        mu[t, 0] = mu[t, 0] + 6
        mu[t, 1] = mu[t, 1] + 2.5
        mu[t, 2] = mu[t, 2] + 4.2
        mu[t, 3] = mu[t, 3] + 1.5
    C = np.zeros((size, 1))  # cluster assignment of each training sample
    for i in range(100):     # fixed number of Lloyd iterations
        # Assignment step: attach each sample to its nearest centroid.
        for j in range(size):
            dist = np.zeros((K, 1))
            for k in range(K):
                dist[k, 0] = np.linalg.norm(training_set[j:j + 1, 0:4] - mu[k:k + 1, 0:4])
            C[j, 0] = int(np.argmin(dist))
        # Update step: move each centroid to the mean of its assigned samples.
        sum_K = np.zeros((K, 4))
        count_K = np.zeros((K, 1))
        for j in range(size):
            k = int(C[j, 0])
            count_K[k, 0] = count_K[k, 0] + 1
            sum_K[k:k + 1, 0:4] = sum_K[k:k + 1, 0:4] + training_set[j:j + 1, 0:4]
        for k in range(K):
            if count_K[k, 0] != 0:
                for t in range(4):
                    mu[k, t] = sum_K[k:k + 1, t:t + 1] / count_K[k:k + 1, 0:1]
    # Mean distance from each test sample to its nearest centroid.
    error = 0
    for j in range(100 - size):
        dist = np.zeros((K, 1))
        for k in range(K):
            dist[k, 0] = np.linalg.norm(test_set[j:j + 1, 0:4] - mu[k:k + 1, 0:4])
        error = error + np.amin(dist)
    error1.append(error / (100 - size))
    # why: record K *before* incrementing — the old code appended K+1,
    # shifting the plot's x-axis one step to the right of the errors.
    x.append(K)
    K = K + 1
    print(K)

plt.plot(x, error1)
plt.show()
"noreply@github.com"
] | noreply@github.com |
784f3b3c4ad379098fcd5976886c425f69bc8941 | f979434a8bf71df06f1d36eeab8397806a530978 | /create_pool_with_different_settings.py | 8284562a15666c47955556371814d821ea9b9f98 | [] | no_license | symlib/gui | 4dc505c3b31d9347ad22d5ad45d43ce6e388c47a | 7d0a259ad3cd9a7b154f1a92b1d5a48fea7cb6e4 | refs/heads/master | 2021-01-20T07:27:49.664039 | 2017-08-03T06:24:38 | 2017-08-03T06:24:38 | 89,999,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,435 | py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, re, random
from login_ds import loginFirefox
from login_ds import loginIE
from VerifyWords import VerifyWords
from time import sleep
from to_log import tolog
from namegenerator import random_key
import time
# Marker substrings used to tag test outcomes (pass/fail) in log records.
Pass = "'result': 'p'"
Fail = "'result': 'f'"
class CreatePool(unittest.TestCase):
def test_create_pool(self):
Failflag = False
self.driver = loginFirefox()
# self.driver.implicitly_wait(30)
self.verificationErrors = []
self.accept_next_alert = True
driver = self.driver
strip_size = ["64 KB", "128 KB", "256 KB", "512 KB", "1 MB"]
sector_size = ["512 Bytes", "1 KB", "2 KB", "4 KB"]
Prefer_ctrl = [1, 2]
disklist = ["1", "3", "4", "5", "6", "8", "9", "10", "11", "12"]
# disklist = [1, 3, 4, 5, 6, 8, 9, 10, 11, 12]
volume_capacity = str(random.randint(16, 10000))
block_size = ['512 Bytes', '1 KB', '2 KB', '4 KB', '8 KB', '16 KB', '32 KB', '64 KB', '128 KB']
volume_sector = ['512 Bytes', '1 KB', '2 KB', '4 KB']
raid_level = ["RAID0", "RAID1", "RAID5", "RAID6", "RAID10", "RAID50", "RAID60"]
tolog("Start to create pool with different settings")
driver.find_element_by_link_text("Pool").click()
#driver.find_element_by_xpath("//div[2]/button").click()
sleep(1)
validatelist = list()
# try:
sleep(1)
# raid = random.choice(raid_level)
for raid in raid_level:
for stripsize in strip_size:
# stripsize=random.choice(strip_size)
for sectorsize in sector_size:
#driver.find_element_by_link_text("Pool").click()
driver.find_element_by_xpath("//div[2]/button").click()
time.sleep(2)
driver.find_element_by_name("name").clear()
pool_name = random_key(10)
driver.find_element_by_name("name").send_keys(pool_name)
sleep(0.5)
Select(driver.find_element_by_name("mediatype")).select_by_visible_text("Hard Disk Drive")
sleep(0.5)
if raid=="RAID0":
disks = random.sample(disklist, 1)
if raid == "RAID1":
disks = random.sample(disklist, 2)
elif raid == "RAID50":
disks = random.sample(disklist, 6)
elif raid == "RAID60":
disks = random.sample(disklist, 8)
else:
disks = random.sample(disklist, 4)
disks.sort()
verifydisk=list()
for disk in disks:
verifydisk.append(int(disk))
# the verifydisk list will be verified in detail list by removing spaces
verifydisk.sort()
disks = verifydisk[::-1]
# print disks
# click disk in reverse order to avoid the unapplicable disk selection
#
for disk in disks:
sleep(1)
driver.find_element_by_xpath("//div[2]/div/div/ul/li[%s]" % (str(disk))).click()
sleep(0.5)
#verifydisk.sort()
Select(driver.find_element_by_name("raidlevel")).select_by_visible_text(raid)
sleep(0.5)
# sectorsize=random.choice(sector_size)
Select(driver.find_element_by_name("strip")).select_by_visible_text(stripsize)
Select(driver.find_element_by_name("sector")).select_by_visible_text(sectorsize)
# sleep(1)
# ctrlid = random.choice(Prefer_ctrl)
# driver.find_element_by_xpath("//label[%d]/span" % ctrlid).click()
sleep(5)
driver.find_element_by_xpath("//button[@type='submit']").click()
for i in range(60):
try:
if re.search(r"^[\s\S]*Pool was added successfully.[\s\S]*$",
driver.find_element_by_css_selector("BODY").text):
tolog("Pool %s was added successfully." % pool_name);
break
except:
pass
time.sleep(1)
else:
self.fail("time out")
time.sleep(3)
validatelist.append(VerifyWords(driver, (pool_name, raid)))
driver.find_element_by_xpath("//pr-gear-button/div/a").click()
time.sleep(2)
try:
driver.find_element_by_link_text("View Detail").click()
sleep(5)
verifydiskstr = str(verifydisk).replace("[", "").replace("]", "").replace(" ", "")
validatelist.append(VerifyWords(driver, (pool_name, raid, stripsize, sectorsize,verifydiskstr)))
except:
pass
#driver.find_element_by_link_text("Pool").click()
#time.sleep(1)
driver.find_element_by_xpath("//div/ul/li[2]/a/span/span").click()
time.sleep(1)
driver.find_element_by_xpath("//button[@type='button']").click()
time.sleep(2)
driver.find_element_by_name("name").clear()
driver.find_element_by_name("name").send_keys("confirm")
time.sleep(1)
driver.find_element_by_xpath("//button[@type='submit']").click()
# time.sleep(5)
for i in range(60):
try:
if re.search(r"^[\s\S]*Pool was deleted successfully.[\s\S]*$",
driver.find_element_by_css_selector("BODY").text):
tolog("Pool %s was deleted successful!" % pool_name);
break
except:
pass
time.sleep(1)
else:
self.fail("time out")
time.sleep(2)
# except:
# driver.get_screenshot_as_file("snapshot at " +
# re.sub(':', '.', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(
# time.time()))) + "create_pool" + "." + "png")
# tolog("Error: please refer to the screen-shot in the folder")
for val in validatelist:
if val:
Failflag = True
break
if Failflag:
tolog(Fail)
else:
tolog(Pass)
def is_element_present(self, how, what):
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException as e:
return False
return True
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException as e:
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
    def tearDown(self):
        # Close the browser, then fail the test if any soft verification
        # errors were collected during the run.
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    # Run the Selenium test case through the standard unittest CLI runner.
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
e6ea4e632b0b731721851c7db5ec5498ae307b76 | 3cb06711ab1a6e379e5778456fce5770ac994ba9 | /python/wait_functions_test_py3.py | 02cab39f268b7e1880b29bbcbcffa372099fe449 | [
"MIT"
] | permissive | glenn-edgar/chain_flow | 7e8238c1f5e5c00f4c5906e2eb356d33c2b4696c | 750a9b126de04e46b71a58c5bd3e7500c4d26459 | refs/heads/master | 2021-01-02T22:41:30.066536 | 2017-09-05T19:34:57 | 2017-09-05T19:34:57 | 99,368,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py |
from py_cf_py3.chain_flow_py3 import CF_Base_Interpreter
def test_function_1( cf_handle, chainObj, parameters, event ):
print("test function 1 ",event)
def wait_test_function(cf_handle, chainObj, parameters, event):
    """Wait predicate for cf.insert.wait_function().

    On INIT, append a tick counter (0) to `parameters`; on each TIME_TICK,
    increment it and report True once it reaches the threshold stored in
    parameters[1].
    """
    print("event", event)
    done = False
    name = event["name"]
    if name == "INIT":
        parameters.append(0)
    if name == "TIME_TICK":
        parameters[-1] += 1
        done = parameters[-1] >= parameters[1]
    return done
# Build four demo chains, each exercising one family of wait primitives.
cf = CF_Base_Interpreter()
cf.define_chain("Chain_1", False) # wait_tod: fire at a fixed time of day
cf.insert.log("Chain 1 started")
cf.insert.wait_tod( "*","*","*",15 ) # wait until the seconds field reaches 15
cf.insert.one_step( test_function_1)
cf.insert.log("Chain 1 is reset")
cf.insert.reset( )
cf.define_chain("Chain_2",False) # wait_tod_ge / wait_tod_le: time-window waits
cf.insert.log("Chain 2 started")
cf.insert.wait_tod_ge( "*","*","*",45 ) # wait until seconds >= 45
cf.insert.check_event( test_function_1, "TIME_TICK" )
cf.insert.wait_tod_le( "*","*","*",15) # wait until seconds <= 15
cf.insert.reset( )
cf.define_chain("Chain_3",False) #wait_event_count: fire after N events
cf.insert.log("Chain 3 started")
cf.insert.wait_event_count(count = 10)
cf.insert.one_step( test_function_1)
cf.insert.reset()
cf.define_chain("Chain_4",True) # wait_function: custom predicate, threshold 10 ticks
cf.insert.log("Chain 4 has started")
cf.insert.wait_function(wait_test_function, 10 )
cf.insert.log("Chain 4 is ended ")
cf.insert.reset()
# Hand control to the interpreter's event loop.
cf.execute()
| [
"glenn-edgar@onyxengr.com"
] | glenn-edgar@onyxengr.com |
4f08c4b5e1285ffe0a86c36c93632b2c2a6cb41a | aa869786dc2108b549acf689a4f6a8369d9eb0b8 | /gaussian.py | a9d286b70a6b6a1cafe44161ff091ee5999fd043 | [] | no_license | ard1498/Pattern_Recognition_Techniques | 8617c065655ffbdee61a51961db3d397e8e88462 | a3542f50eb9333fb278232c526604373b3cdc4d4 | refs/heads/master | 2020-06-02T20:57:34.586648 | 2019-07-10T07:27:57 | 2019-07-10T07:27:57 | 191,307,828 | 0 | 0 | null | 2019-06-11T06:36:38 | 2019-06-11T06:27:27 | Jupyter Notebook | UTF-8 | Python | false | false | 3,359 | py | import numpy as np
from pprint import pprint
import math
class Gaussian:
    """Kernel-density style classifier.

    Training stores the raw feature values per class; scoring a test point
    sums a Gaussian kernel centred on every stored training value, per
    feature, and multiplies the per-feature sums into a class likelihood.
    """

    def __init__(self, sigma=1, m=1):
        # result maps class label -> {feature index -> list of training
        # values, 'total': per-class count}; a top-level 'total' holds the
        # overall sample count.
        self.result = {}
        self.sigma = sigma
        # Normalisation constant of an m-dimensional Gaussian (3.14 kept
        # from the original formula; it is never used in scoring).
        self.K = 1 / ((2 * 3.14) ** (m / 2) * self.sigma ** m)
        self.classes = set()

    def train(self, X, Y):
        """Group the training samples of X by class label and feature index."""
        self.classes = set(Y)
        n_features = len(X[0])
        self.result['total'] = len(X)
        for label in self.classes:
            self.result[label] = {feat: [] for feat in range(n_features)}
        for row, label in zip(X, Y):
            # Per-class sample count (Y is a numpy array, so this vectorises).
            self.result[label]['total'] = (Y == label).sum()
            for feat in range(n_features):
                self.result[label][feat].append(row[feat])
        pprint(self.result)

    def get_gaussian_probab(self, X_test):
        """Return the unnormalised kernel likelihood of X_test per class."""
        n_features = X_test.shape[-1]
        likelihoods = {}
        for label in self.classes:
            score = 1
            for feat in range(n_features):
                kernel_sum = 0
                for value in self.result[label][feat]:
                    kernel_sum += math.exp(-((value - X_test[feat])**2)/(2*(self.sigma**2)))
                score *= kernel_sum
            likelihoods[label] = score
        return likelihoods

    def predict(self, X_test):
        """Print prior * likelihood for every class (no value returned)."""
        priori = {label: self.result[label]['total'] / self.result['total']
                  for label in self.classes}
        likelihoods = self.get_gaussian_probab(X_test)
        for label in self.classes:
            print('probability of class ' + label + ' is :' + str(likelihoods[label] * priori[label]))
def main():
    """Interactive driver: read a small labelled dataset from stdin, train a
    Gaussian classifier on it, then score one test point entered by the user."""
    m = int(input("enter the number of independent attributes:"))
    n = int(input("enter the number of data points:"))
    # Collect the feature (column) names.
    features = []
    for j in range(m):
        features.append(str(input('enter f'+str(j) + ' name:')))
    outputs = []
    X = []
    # Read n data points, each with m float-valued features plus a class label.
    for i in range(n):
        Xi= []
        print('enter the '+str(i)+' data point:')
        for j in range(m):
            Xi.append(float(input('Value of ' + features[j] + ':')))
        X.append(Xi)
        outputs.append(str(input('enter the class for the data point:')))
    print('____________________________________________________________________________')
    X = np.array(X)
    Y = np.array(outputs)
    # Echo the dataset back so the user can verify their input.
    for i in range(n):
        for j in range(m):
            print(X[i][j], end=' ')
        print(outputs[i])
    clf = Gaussian()
    # train() returns None; the print just marks completion on screen.
    print(clf.train(X, Y))
    # clf.printdictionaries()
    print('_____________________________________________________________________________')
    print('enter the test data point:')
    # Read one unlabelled point and score it.
    X_test = []
    for j in range(len(features)):
        X_test.append(float(input('Value of ' + features[j] + ':')))
    X_test = np.array(X_test)
    print("x test is :")
    print(X_test)
    print("__________________Now the prediction and results _________________________")
    clf.predict(X_test)
    # print('class is :' + clf.predict(x, confidence))
# print('class is :' + clf.predict(x, confidence))
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
88510c98e614cffb3f41acaeb92caf8f37774ff5 | fe4af6b73828193684575644010a2cb741db91be | /pandas_practice | 0fdd1eca7386b41bd623460f2bd4c5806e8a649c | [] | no_license | ZheRenoO/Python4DS | 416765b15808438b73294da54127f14fae38c9e9 | 5df4adbe537c47ac683e1841b731b152d9da2adb | refs/heads/master | 2022-08-02T09:32:12.459498 | 2020-06-06T00:44:10 | 2020-06-06T00:44:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,351 | #!/usr/bin/env python
# coding: utf-8
# ___
#
# <a href='http://www.pieriandata.com'> <img src='../../Pierian_Data_Logo.png' /></a>
# ___
# # SF Salaries Exercise
#
# Welcome to a quick exercise for you to practice your pandas skills! We will be using the [SF Salaries Dataset](https://www.kaggle.com/kaggle/sf-salaries) from Kaggle! Just follow along and complete the tasks outlined in bold below. The tasks will get harder and harder as you go along.
# ** Import pandas as pd.**
# In[1]:
import pandas as pd
# ** Read Salaries.csv as a dataframe called sal.**
# In[2]:
sal = pd.read_csv('Salaries.csv')
# ** Check the head of the DataFrame. **
# In[3]:
sal.head()
# In[4]:
sal.info()
# ** Use the .info() method to find out how many entries there are.**
# In[9]:
# **What is the average BasePay ?**
# In[9]:
sal['BasePay'].mean()
# In[10]:
# ** What is the highest amount of OvertimePay in the dataset ? **
# In[10]:
sal['OvertimePay'].max()
# In[11]:
# ** What is the job title of JOSEPH DRISCOLL ? Note: Use all caps, otherwise you may get an answer that doesn't match up (there is also a lowercase Joseph Driscoll). **
# In[17]:
sal[sal['EmployeeName'] == 'JOSEPH DRISCOLL']['JobTitle']
# In[12]:
# ** How much does JOSEPH DRISCOLL make (including benefits)? **
# In[23]:
sal[sal['EmployeeName'] == 'JOSEPH DRISCOLL']['TotalPayBenefits']
# In[13]:
# ** What is the name of highest paid person (including benefits)?**
# In[27]:
sal.sort_values(by='TotalPayBenefits',ascending=False).head(1)['EmployeeName']
# In[14]:
# ** What is the name of lowest paid person (including benefits)? Do you notice something strange about how much he or she is paid?**
# In[28]:
sal.sort_values(by='TotalPayBenefits',ascending=True).head(1)
# In[15]:
# ** What was the average (mean) BasePay of all employees per year? (2011-2014) ? **
# In[42]:
sal.groupby('Year')['BasePay'].mean()
# In[16]:
# ** How many unique job titles are there? **
# In[43]:
sal['JobTitle'].nunique()
# In[17]:
# ** What are the top 5 most common jobs? **
# In[49]:
sal['JobTitle'].value_counts().head()
# In[18]:
# ** How many Job Titles were represented by only one person in 2013? (e.g. Job Titles with only one occurence in 2013?) **
# In[63]:
sal[(sal['JobTitle'].value_counts()==1) & (sal['Year']==2013)]
# In[75]:
df1=sal[sal['Year']==2013]
# In[78]:
df2=sal[sal['Year']==2013]['JobTitle'].value_counts() == 1
# In[87]:
sum(df2)
# In[19]:
# ** How many people have the word Chief in their job title? (This is pretty tricky) **
# In[94]:
def countc(word):
    """Return True if `word` contains 'chief', case-insensitively.

    Replaces the verbose if/return True/else/return False pattern with the
    boolean expression itself; behavior is unchanged.
    """
    return 'chief' in word.lower()
# In[96]:
sum(sal['JobTitle'].apply(lambda word:countc(word)))
# In[97]:
sum(sal['JobTitle'].str.contains('Chief') | sal['JobTitle'].str.contains('chief'))
# In[98]:
def chief_string(title):
    """Return True if `title` contains 'chief', case-insensitively.

    Replaces the verbose if/return True/else/return False pattern with the
    boolean expression itself; behavior is unchanged.
    """
    return 'chief' in title.lower()
# In[100]:
sum(sal['JobTitle'].apply(lambda x: chief_string(x)))
# In[21]:
# ** Bonus: Is there a correlation between length of the Job Title string and Salary? **
# In[103]:
sal['title_len']=sal['JobTitle'].apply(len)
# In[104]:
sal[['title_len','TotalPayBenefits']].corr()
# # Great Job!
| [
"noreply@github.com"
] | noreply@github.com | |
6312c87325c4ac19ffd4a502d580ce4c926c0ab6 | 6f269812a96d47c5670b4a7d5512f01bc7156217 | /manage.py | 18c1b3390a3f1f98873d26d42fc900e32bb82d06 | [] | no_license | kalkins/buk-django | 00a1724c19127840ac19182f003e28ed4f4f4480 | 708071d144b06ab289abdea6046437c40a81d230 | refs/heads/dev | 2022-12-13T05:51:30.664433 | 2019-02-12T03:10:04 | 2019-02-12T03:10:04 | 77,866,135 | 4 | 0 | null | 2022-12-08T01:28:31 | 2017-01-02T22:34:59 | Python | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this project's
    # settings module, then dispatch the command-line arguments to Django.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "buk.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"sindre@sindrestephansen.com"
] | sindre@sindrestephansen.com |
eb0103e62ae9f3d18f040c6db9cc6d787793b6df | 219be32fc6958365deb47e9f1037abfe23be46a2 | /HackerRank/candies.py | 6f5b0439788998668fcdd1dfdd581bd2a4b59c7c | [] | no_license | JustinHoyt/interview-practice | 65179ec56df71b2e807d4b6c548889058e8a3000 | c1608c166290cac892576043f8d3b84f7a756fc8 | refs/heads/master | 2022-06-30T00:25:55.694391 | 2022-05-30T02:37:20 | 2022-05-30T02:37:20 | 73,835,577 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | def give_candies(students, memo, current, prev):
key = str(current)
if key in memo:
return memo[key]
if len(students) <= current or current < 0:
return 0
left = 0
right = 0
if current - 1 != prev and current-1 >= 0:
left = give_candies(students, memo, current-1, current)
if students[current] > students[current-1]:
left += 1
if current + 1 != prev and current+1 < len(students):
right = give_candies(students, memo, current+1, current)
if students[current] > students[current+1]:
right += 1
result = left + right
# result = max(left, right)
memo[key] = result
return result
def candies(students):
    """Return the minimum total candies for the HackerRank "Candies" problem.

    Each student gets at least one candy, and any student rated higher than
    an adjacent student must get more candies than that neighbour.

    The previous recursive helper memoized results keyed only on `current`
    even though the value depends on `prev` (which neighbour is excluded),
    and indexed students[current-1] at current == 0, wrapping around to the
    last element. Both defects are fixed by the standard two-pass greedy:

    1. Left-to-right: a rising neighbour gets one more than its predecessor.
    2. Right-to-left: a student rated above their right neighbour gets at
       least one more than that neighbour.
    """
    if not students:
        return 0
    n = len(students)
    counts = [1] * n
    # Pass 1: enforce the left-neighbour constraint.
    for i in range(1, n):
        if students[i] > students[i - 1]:
            counts[i] = counts[i - 1] + 1
    # Pass 2: enforce the right-neighbour constraint without breaking pass 1.
    for i in range(n - 2, -1, -1):
        if students[i] > students[i + 1]:
            counts[i] = max(counts[i], counts[i + 1] + 1)
    return sum(counts)
testcase1 = [4, 6, 4, 5, 6, 2]
print(candies(testcase1))
| [
"justinhoyt24@gmailcom"
] | justinhoyt24@gmailcom |
73474c8bf71b439bda5aed84f134b363b55c7e2f | 41d23de79d8fd85d31955d9fac0b5228ffca2a29 | /day/3/map_route.py | 7fafa72bbdd8c6792251559b1523fd3536631a42 | [
"MIT"
] | permissive | philparkbot/advent2020 | 652d90e9d192bcd1c444e83516358d9836a1777b | e5ee7e0703ae82d4c773024e4289d7d37879af35 | refs/heads/main | 2023-02-21T16:42:39.065252 | 2021-01-04T00:42:51 | 2021-01-04T00:42:51 | 324,212,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | #!/usr/bin/env python3
'''
Input file represents a map. '#' represents trees, '.' represents free space. Traverse the map down and to the right
in steps of the x/y increments. If you go past the last character in the X direction, you wrap back around. You are
done when you reach the last row. The goal is to count the number of trees in your route.
'''
l_file = 'input.txt'
# l_row / l_col appear to be unused leftovers from an earlier draft.
l_row = 0
l_col = 0
# Slope: move 3 columns right for every 1 row down.
l_x_increment = 3
l_y_increment = 1
# Map width; discovered from the first line of the file.
l_x_size = None
l_debug = True
'''
[ ] Calculate next row/col values. Handle wraparound for column.
[ ] Iterate through each row until you reach next_row.
[ ] Move until you reach the next column.
[ ] When you have arrived, check if you're on a tree or free land, and increment if tree.
[ ] Repeat the first step.
'''
l_curr_col = 0
# Coordinates of the first position to sample (the start itself is not counted).
l_next_row = l_y_increment
l_next_col = l_x_increment
l_tree_count = 0
for l_curr_row, l_line in enumerate(open(l_file).readlines()):
    l_line = l_line.rstrip()
    if l_x_size == None:
        # First line fixes the width used for horizontal wraparound.
        l_x_size = len(l_line)
        if l_debug:
            print("length is {}".format(l_x_size))
    # Skip rows until we reach the next sample row on the slope.
    if l_curr_row < l_next_row: continue
    l_curr_col = l_next_col
    # At this point, we're at the correct coordinates. Read
    l_spot = l_line[l_curr_col]
    l_is_tree = l_spot == '#'
    if l_is_tree:
        l_tree_count += 1
    if l_debug:
        # Mark the sampled cell: X for a tree hit, O for open ground.
        l_list = list(l_line)
        l_list[l_curr_col] = 'X' if l_is_tree else 'O'
        l_line = ''.join(l_list)
        l_line = l_line.rstrip()
        print("{}: [{}][{}]".format(l_line, l_curr_row, l_curr_col))
    # Advance to the next position on the slope, wrapping horizontally.
    l_next_row += l_y_increment
    l_next_col = (l_next_col + l_x_increment) % l_x_size
print("There were {} trees".format(l_tree_count))
| [
"phil.park@gmail.com"
] | phil.park@gmail.com |
8294ffcfd711b3cf43a17380fbbd667b66521dc5 | 412e0178562be3a33916d02679bd84ab83fc4f8a | /FPROPlay/Py05/LongestWord.py | 2c4cd9bb02957146bd074848312d4d9cea8697de | [] | no_license | filiperecharte/FEUP-FPRO | 401a9a9ec5349f1ada928b67bc7e160e40ee6d7a | 7295524f42f6af9cb455957d5b75ce587bb7d4ca | refs/heads/master | 2022-03-24T19:07:24.895356 | 2019-12-19T17:36:34 | 2019-12-19T17:36:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 12:27:04 2019
@author: filipe
"""
def longestInlist(alist):
    """Return the longest string in `alist`; on ties the LAST longest wins
    (mirrors the original >= comparison)."""
    # max() returns the first maximal element, so scan a reversed copy to
    # keep the original "last longest wins" tie-breaking behaviour.
    return max(alist[::-1], key=len)
def longest(s):
    """Return the length of the longest space-separated word in `s`.

    Replaces the original manual character scan (append a trailing space,
    walk the string, slice off one word per space) with str.split(" "),
    which produces the same words — including empty words for consecutive
    spaces — so the maximum length is identical. For an empty or all-space
    string the result is 0, as before.
    """
    # split(" ") always yields at least [""], so max() never sees an
    # empty sequence.
    return max(len(word) for word in s.split(" "))
print(longest("A list with some words")) | [
"filipeteixeira00@hotmail.com"
] | filipeteixeira00@hotmail.com |
e8fb8b6c7a2c7ba04314e431ec618dd22761941e | 612325535126eaddebc230d8c27af095c8e5cc2f | /src/build/android/pylib/utils/device_dependencies.py | c448396fbc0ab0c74370a723afeb7c9fb47be053 | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/proto-quic_1V94 | 1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673 | feee14d96ee95313f236e0f0e3ff7719246c84f7 | refs/heads/master | 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,315 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from pylib import constants
# Regexes matched against absolute host paths; any match is excluded from
# the device data dependencies pushed to the test device.
_BLACKLIST = [
  re.compile(r'.*OWNERS'), # Should never be included.
  re.compile(r'.*\.crx'), # Chrome extension zip files.
  re.compile(r'.*\.so'), # Libraries packed into .apk.
  re.compile(r'.*Mojo.*manifest\.json'), # Some source_set()s pull these in.
  re.compile(r'.*\.py'), # Some test_support targets include python deps.
  re.compile(r'.*\.stamp'), # Stamp files should never be included.
  # Some test_support targets include python deps.
  re.compile(r'.*\.mojom\.js'),
  # Chrome external extensions config file.
  re.compile(r'.*external_extensions\.json'),
  # Exists just to test the compile, not to be run.
  re.compile(r'.*jni_generator_tests'),
  # v8's blobs get packaged into APKs.
  re.compile(r'.*natives_blob.*\.bin'),
  re.compile(r'.*snapshot_blob.*\.bin'),
]
def DevicePathComponentsFor(host_path, output_directory):
  """Returns the device path components for a given host path.

  The device path is returned as a list of joinable path components whose
  first element is None, indicating the path is rooted at $EXTERNAL_STORAGE.

  Two classes of host paths are special-cased:
    - .pak files under the output directory map to a top-level paks/ dir;
    - other files under the output directory are mapped relative to the
      output directory rather than to the source root.

  Args:
    host_path: The absolute path to the host file.
  Returns:
    A list of device path components.
  """
  inside_output_dir = host_path.startswith(output_directory)
  if inside_output_dir and os.path.splitext(host_path)[1] == '.pak':
    return [None, 'paks', os.path.basename(host_path)]
  if inside_output_dir:
    relative = os.path.relpath(host_path, output_directory)
  else:
    relative = os.path.relpath(host_path, constants.DIR_SOURCE_ROOT)
  # Peel path components off the tail and insert each right after the
  # leading None, so they end up in root-to-leaf order.
  components = [None]
  remaining = relative
  while remaining:
    remaining, tail = os.path.split(remaining)
    if tail:
      components.insert(1, tail)
  return components
def GetDataDependencies(runtime_deps_path):
  """Returns a list of device data dependencies.

  Args:
    runtime_deps_path: A str path to the .runtime_deps file (may be falsy).
  Returns:
    A list of (host_path, device_path_components) tuples, excluding any
    host path matched by _BLACKLIST.
  """
  if not runtime_deps_path:
    return []
  with open(runtime_deps_path, 'r') as deps_file:
    rel_paths = [line.strip() for line in deps_file if line]
  out_dir = constants.GetOutDirectory()
  dependencies = []
  for rel_path in rel_paths:
    abs_path = os.path.abspath(os.path.join(out_dir, rel_path))
    if any(pattern.match(abs_path) for pattern in _BLACKLIST):
      continue
    dependencies.append((abs_path, DevicePathComponentsFor(abs_path, out_dir)))
  return dependencies
| [
"2100639007@qq.com"
] | 2100639007@qq.com |
a29bead203db36e725015751cd87e1ad705a7280 | af78246fc438f09f6e17d9a32bc698117edd8954 | /blog/migrations/0001_initial.py | b69dfb87cc66e24be3d9e18cbe0e558301ca11a6 | [] | no_license | deek28/my-first-blog | 968b627147444a2dc0ee2733689a2da65fc3ffce | 6a7cc99eaa7c18918182a5c338e08aff6f00f9e9 | refs/heads/master | 2021-05-24T16:37:52.362891 | 2020-04-13T15:29:07 | 2020-04-13T15:29:07 | 253,658,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.2.4 on 2020-04-07 01:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations: creates the initial Post
    # table for the blog app.
    initial = True
    # Depend on the (swappable) user model so the author FK below can be
    # created against whatever AUTH_USER_MODEL points at.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # created_date defaults to "now"; published_date stays NULL
                # until the post is actually published.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting a user cascades to that user's posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"dkshtbn@gmail.com"
] | dkshtbn@gmail.com |
c833d1c9767af5dcdba2857ebd60cf7735f680aa | 2447637c04a881d6ea9b569232da3460883fb237 | /settings.py | f9c840fac2aa638a95f1ead6b85d70f5d90cfe7b | [] | no_license | Hardysong/web-crawler-spider- | 315078ef7502cf12208106548f0468a7b111ec7d | 8cac837bc7889c2be3fec3962af970adbd5badfe | refs/heads/master | 2021-01-23T00:06:47.115545 | 2017-03-21T12:35:39 | 2017-03-21T12:35:39 | 85,699,871 | 16 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | # -*- coding: utf-8 -*-
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tutorial'
SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
# Throttle: fixed 5 s base delay, randomized per request (0.5x-1.5x) to
# avoid hammering the target site with a regular cadence.
DOWNLOAD_DELAY = 5
RANDOMIZE_DOWNLOAD_DELAY = True
# NOTE(review): this desktop-Chrome UA overrides the commented default above.
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.54 Safari/536.5'
COOKIES_ENABLED = True
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tutorial.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tutorial.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'tutorial.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"sunyibo110@sina.com"
] | sunyibo110@sina.com |
d28590752bf1d79eb7d65ed1f3d70627c6484cb6 | 3056277f38904026704d023a1708c10049eb598e | /Smtp.py | 61e5871f2ccaaa4cc1fbe0cb35b97d27069018ba | [] | no_license | Vampyy/EmailSpammer | fcc0f67ff8531bbdd97dc8aa3bc3299ccc105950 | 117ed4755bc95595b3ac33d83c27beb86581041d | refs/heads/master | 2021-01-11T11:58:25.919699 | 2017-01-22T08:09:42 | 2017-01-22T08:09:42 | 79,703,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | import smtplib
import getpass

# SMTP server of the e-mail provider (Gmail shown; STARTTLS on port 587).
domain_name = 'smtp.gmail.com'
port_address = 587

# Login credentials; getpass reads the password without echoing it.
username = input("Enter your mail address : ")
password = getpass.getpass("Enter your password : ")

# One or more recipient addresses, space separated.
receivers = input('Enter the mail address(s) of the receiver(s) (space separated in case of multiple receivers) : ').split()
nummess = int(input("How many messages do you wish to send? "))
# Fixed prompt typo: "subject if your mail" -> "subject of your mail".
subject = input('Enter the subject of your mail : ')
message = input('Enter the message : ')

smtp_conn = smtplib.SMTP(domain_name, port_address)
print(smtp_conn.ehlo())
# Upgrade the connection to TLS before sending credentials.
print(smtp_conn.starttls())
print(smtp_conn.login(username, password))

# Send `nummess` copies of the message to every receiver.
for receiver in receivers:
    for i in range(nummess):
        smtp_conn.sendmail(username, receiver, 'Subject : ' + subject + '\n' + message)
        print("Mail sent to " + receiver)

print(smtp_conn.quit())
print('All mails sent and disconnected from SMTP server!')
| [
"noreply@github.com"
] | noreply@github.com |
eb95e12f1a17e86ad399d5b8dc910a900f87944b | 83dd196a98fe80fc8521513070c9189ba4fcec62 | /Analysis_Code/Had_Data_43.py | a79628e7d11889ad079b2e4e5673f3efc4089441 | [] | no_license | DarrenBurton/RA1_2012 | e7162e81251c090277d43d0cb7fa2227161b36fd | ab46ff5434b3c2bea2b5d5c0050d19372c74317c | refs/heads/master | 2021-01-16T18:39:59.269229 | 2012-07-27T18:57:06 | 2012-07-27T18:57:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,607 | py | #!/usr/bin/env python
import setupSUSY
from libFrameworkSUSY import *
#from libbryn import *
from libHadronic import *
from libOneLepton import *
from icf.core import PSet,Analysis
from time import strftime
from batchGolden_singlemu import *
from ra1objectid.vbtfElectronId_cff import *
from ra1objectid.vbtfMuonId_cff import *
from ra1objectid.ra3PhotonId_cff import *
from ra1objectid.ra3PhotonId2012_cff import *
vbtfMuonId_cff = Muon_IDFilter( vbtfmuonidps.ps() )
vbtfElectronIdFilter = Electron_IDFilter( vbtfelectronidWP95ps.ps() )
ra3PhotonIdFilter = Photon_IDFilter2012( ra3photonid2012ps.ps() )
CustomEleID = Electron_Egamma_Veto()
CustomMuID = OL_TightMuID(mu_2012_had.ps())
default_common.Jets.PtCut=50.*(325./375.)
# Change the settings from golden to use the lowest scaled bin.
cutTree,blah,blah2,l = MakeDataTree(100.*(325./375.), Muon = None)
def addCutFlowData(a) :
    # Attach the object-ID filters (applied at the "PreCC" stage, i.e.
    # before cross-cleaning) and the shared hadronic cut tree to the
    # given analysis object. Filter/cut objects are module-level globals.
    a.AddMuonFilter("PreCC",CustomMuID)
    a.AddPhotonFilter("PreCC",ra3PhotonIdFilter)
    a.AddElectronFilter("PreCC",CustomEleID)
    a+=cutTree
# AK5 Calo
conf_ak5_caloData = deepcopy(defaultConfig)
conf_ak5_caloData.Ntuple = deepcopy(ak5_calo)
conf_ak5_caloData.XCleaning = deepcopy(default_cc)
conf_ak5_caloData.Common = deepcopy(default_common)
# conf_ak5_calo.Common.print_out()
anal_ak5_caloData=Analysis("AK5Calo")
addCutFlowData(anal_ak5_caloData)
#outDir = "../Split_Jsons_"+strftime("%d_%b")+"/Data43/"
#outDir = "../RA2B_"+strftime("%d_%b")+"/Data43/"
outDir = "../../results_"+strftime("%d_%b")+"//Data43"
ensure_dir(outDir)
anal_ak5_caloData.Run(outDir,conf_ak5_caloData,switches()["data_samples"][0])
| [
"d.burton10@imerial.ac.uk"
] | d.burton10@imerial.ac.uk |
3016c687ec5ae81b1cd9d16c05eb06f58500219f | 968968aa5e81043cad5af6883f23ef077c36b65f | /load_model.py | 87518857933f46b083d4611584a50ca9100d20e9 | [] | no_license | Guya-LTD/profanity-detector | 59dbcb2e3e2fe4eba29cd49f5f028c48413f035f | ba957c42c4d14dd3c68ef2c48fce317e9db17f8f | refs/heads/main | 2023-02-11T18:26:59.205036 | 2021-01-10T06:41:25 | 2021-01-10T06:41:25 | 307,553,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import numpy as np
import joblib
def _get_profane_prob(prob):
    # prob is one row of predict_proba output, i.e. a [clean, profane]
    # probability pair; return the profane-class probability (column 1).
    return prob[1]
def predict(lang, texts):
    """Return hard profanity labels for `texts` using the vectorizer and
    classifier serialized under the `lang` directory."""
    bow_transformer = joblib.load(lang + '/vectorizer.joblib')
    classifier = joblib.load(lang + '/model.joblib')
    features = bow_transformer.transform(texts)
    return classifier.predict(features)
def predict_prob(lang, texts):
vectorizer = joblib.load(lang + '/vectorizer.joblib')
model = joblib.load(lang + '/model.joblib')
return np.apply_along_axis(_get_profane_prob, 1, model.predict_proba(vectorizer.transform(texts))) | [
"simonbelete@gmail.com"
] | simonbelete@gmail.com |
4075efc0ed3a769582d7d67cd20953c61bab212c | 950297160fae36138ada537f87c71e4ba17bb6af | /my_agent.py | 50fe583096b91294edafd1637688333f588eae0f | [] | no_license | Vishal0703/DeepRL_Navigation | c6d217771c7bcdc704bb1daf6c48e0c3e7a30fe2 | 7b2c56cd1cc0bb640a9a6593a35aa9ca534e4179 | refs/heads/master | 2020-04-12T15:50:52.127872 | 2019-01-16T09:32:58 | 2019-01-16T09:32:58 | 162,594,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,899 | py | import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e4)  # replay buffer size (max stored transitions)
BATCH_SIZE = 64         # minibatch size sampled per learning step
GAMMA = 0.99            # discount factor for future rewards
TAU = 1e-3              # for soft update of target network parameters
LR = 5e-4               # Adam learning rate for the local Q-network
UPDATE_EVERY = 4        # how often (in env steps) to run a learning step
# Train on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        # NOTE(review): random.seed() returns None, so self.seed is always
        # None; the call's useful effect is seeding the global `random` RNG
        # used by act() for epsilon-greedy exploration.
        self.seed = random.seed(seed)
        # Q-Network: the local network is trained every learning step; the
        # target network provides stable TD targets.
        self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
        self.criterion = torch.nn.MSELoss()
        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma = GAMMA):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
## TODO: compute and minimize the loss
"*** YOUR CODE HERE ***"
# local_params = self.qnetwork_local.parameters()
# for i in len(experiences):
# aval = self.qnetwork_local(states[i])[actions[i]]
# target_actionval = target_model(next_states[i])
# self.optimizer.zero_grad()
# if(dones[i] == False):
# tval = rewards[i] + gamma*np.max(target_actionval)
# else:
# tval = rewards[i]
# loss = self.criterion(aval, tval)
# loss.backward()
# self.optimizer.step()
# # for f in net.parameters():
# # f.data.sub_(f.grad.data * learning_rate)
# loss.backwards()
self.optimizer.zero_grad()
avals = self.qnetwork_local(states).gather(1, actions)
target_aval = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
tvals = rewards + (gamma*target_aval*(1-dones))
loss = self.criterion(avals, tvals)
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
    """Fixed-size buffer to store experience tuples."""

    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Set up an empty bounded buffer.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
        """
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Append one transition to the buffer (oldest entry evicted when full)."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Draw a random batch of transitions as torch tensors on `device`."""
        batch = [e for e in random.sample(self.memory, k=self.batch_size) if e is not None]
        states = torch.from_numpy(np.vstack([e.state for e in batch])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in batch])).long().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in batch])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in batch])).float().to(device)
        dones = torch.from_numpy(np.vstack([e.done for e in batch]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
"noreply@github.com"
] | noreply@github.com |
1ebd7b2c006bec2429d3ea7c144429ca6a16ab58 | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/235864d6-5cc5-11e4-af55-00155d01fe08.py | 203705756d386be4768e626b13c813ce06acf1fd | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | #!/usr/bin/python
################################################################################
# 235864d6-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """STIG-style check: /etc/group must be group-owned by root."""

    def __init__(self):
        # Raw output lines captured by the most recent check().
        self.output = []
        self.is_compliant = False
        self.uuid = "235864d6-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Run `ls -l /etc/group` through *cli* and return compliance."""
        self.is_compliant = False
        stdout = cli.system("ls -l /etc/group")
        self.output = stdout.split('\n')
        for line in self.output:
            if not line.strip():
                continue
            # Field 4 of `ls -l` output is the owning group.
            fields = line.split(' ')
            if fields[3] == "root":
                self.is_compliant = True
        return self.is_compliant

    def fix(self, cli):
        """Remediate by forcing the group owner back to root."""
        cli.system("chgrp root /etc/group")
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
83d009922a8912f1f5ddf4fecf3fddade6bf61cc | d05bb67ed5fefcb979c7ffa343ec970a3b0f18d8 | /build/numpy_tutorial/catkin_generated/stamps/numpy_tutorial/numpy_talkers52s8.py.stamp | 88707d7b0d6f7f70ffa2089aa7310b027d94564b | [] | no_license | GillMarMad/catkin_ws | eafeedbc49f76ccbec0cf283ef2e113e5354b2de | 550b8a8d815704cf57e423be9580f047770cb8fe | refs/heads/master | 2023-02-10T17:58:27.450209 | 2021-01-09T23:22:12 | 2021-01-09T23:22:12 | 328,265,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | stamp | #!/usr/bin/env python
import rospy
from rospy.numpy_msg import numpy_msg
from rospy_tutorials.msg import Floats
import random
import numpy
def talker():
    """Publish a 4-element random float32 array on topic "s52s8" at 10 Hz."""
    pub = rospy.Publisher('s52s8', numpy_msg(Floats), queue_size=10)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(10)  # 10hz
    while not rospy.is_shutdown():
        # Each element is uniform in [1, 2).
        sample = numpy.array(
            [1 + random.random() for _ in range(4)], dtype=numpy.float32)
        pub.publish(sample)
        rate.sleep()
# Run the publisher only when executed as a script (not on import).
if __name__ == '__main__':
    talker()
"gillmarmad@gmail.com"
] | gillmarmad@gmail.com |
d09e8f1f8f6ce69f17db42f0cc74904c1ba4e74e | e48375c39c0d1fc71742b1964dffdd3af0ff86c0 | /nlu/components/sentence_detectors/deep_sentence_detector/deep_sentence_detector.py | ab4ea95db84960ec483781f792af9daed7b121c3 | [
"Apache-2.0"
] | permissive | ahmedlone127/nlu | b8da5a84f0e47640cb09616559bf8b84c259f278 | 614bc2ff94c80a7ebc34a78720ef29a1bf7080e0 | refs/heads/master | 2023-02-09T05:10:29.631583 | 2022-05-20T15:16:33 | 2022-05-20T15:16:33 | 325,437,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | from sparknlp.annotator import *
class SentenceDetectorDeep:
    """Factory helpers for Spark NLP deep-learning sentence detector models."""

    @staticmethod
    def get_default_model():
        """Return the default pretrained SentenceDetectorDLModel, wired from
        the "document" column to a "sentence" output column."""
        return SentenceDetectorDLModel\
            .pretrained()\
            .setInputCols(["document"]) \
            .setOutputCol("sentence")

    @staticmethod
    def get_pretrained_model(name, lang, bucket=None):
        """Return the named pretrained model for *lang* (optionally from *bucket*).

        BUG FIX: the original chained a second parameterless ``.pretrained()``
        onto the already-loaded model, discarding the requested name/language.
        """
        return SentenceDetectorDLModel.pretrained(name, lang, bucket) \
            .setInputCols(["document"]) \
            .setOutputCol("sentence")
"christian.kasim.loan@gmail.com"
] | christian.kasim.loan@gmail.com |
70568dbd8fea74a804629bbf8c0ba8699ea10aaf | b0d7d91ccb7e388829abddb31b4aa04a2f9365cd | /archive-20200922/uncategorized/quick_palindrome_check.py | 4e1d9675666f0b9bddffa3ece524d351e0e26a37 | [] | no_license | clarkngo/python-projects | fe0e0aa02896debe82d1e9de84b1ae7d00932607 | 139a20063476f9847652b334a8495b7df1e80e27 | refs/heads/master | 2021-07-02T10:45:31.242041 | 2020-10-25T08:59:23 | 2020-10-25T08:59:23 | 188,570,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py |
def reverse(s):
    """Return s with its characters in reverse order."""
    return "".join(reversed(s))
def isPalindrome(s):
    """Return True when s reads the same forwards and backwards."""
    # Comparing directly against the reversed slice removes the temporary
    # variable and the redundant if/else of the original.
    return s == s[::-1]
# Driver code: report whether the sample string is a palindrome.
s = "malayalam"
if isPalindrome(s):
    print("Yes")
else:
    print("No")
| [
"clarkngo@gmail.com"
] | clarkngo@gmail.com |
5a62de54810f05b493445f3c2654a56a4247d18c | 11d051965e2121d60ccedc66dcfc893e4ae505b8 | /lintcode/365 countOnes.py | ba67bdea5bc8fe0f6c09785ef2e64302b9f88df1 | [] | no_license | shuaili8/CodingPractice | f2d519f2a479b11ae82edf4b44f2046226bb77e9 | a04b194703a42d0c5d833d54cc8582e37c3fa151 | refs/heads/master | 2021-06-12T08:23:28.883687 | 2017-03-05T07:21:10 | 2017-03-05T07:21:10 | 43,593,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | import sys
class Solution:
    # @param num: an integer
    # @return: an integer, the number of ones in num
    def countOnes(self, num):
        """Count set bits of *num* interpreted as a 32-bit integer.

        Negative numbers use two's complement, so countOnes(-1) == 32.
        BUG FIX: the original masked with sys.maxsize, which is platform
        dependent (63 bits on 64-bit builds) and over-counted negatives.
        """
        return bin(num & 0xFFFFFFFF).count("1")
| [
"lishuai.sherry@gmail.com"
] | lishuai.sherry@gmail.com |
e20932bf17f71d78ac469d9c003fc8b2ae163ad0 | 283b2a1f39fa1da3daa6bc58990682968b0fdd46 | /src/scene/brick.py | b84a9b44f4056d5f2d41a7137a9741b7cb28ba6c | [
"Apache-2.0"
] | permissive | vlad94568/bricks | c2de4a4c0a2657f14182c4a13371cc03c59898d5 | ffe6b761930fcb592c802cd7fde4b0657b05f951 | refs/heads/master | 2021-04-28T13:35:45.402367 | 2020-12-18T05:14:54 | 2020-12-18T05:14:54 | 122,108,325 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,673 | py | #
# _/_/_/ _/_/_/ _/_/_/ _/_/_/ _/ _/ _/_/_/
# _/ _/ _/ _/ _/ _/ _/ _/ _/
# _/_/_/ _/_/_/ _/ _/ _/_/ _/_/
# _/ _/ _/ _/ _/ _/ _/ _/ _/
# _/_/_/ _/ _/ _/_/_/ _/_/_/ _/ _/ _/_/_/
#
# By Vlad Ivanov, 2018.
# By Nikita Ivanov, 2018
#
# Email: vlad94568@gmail.com
from src.common import *
# Falling brick.
class Brick(SceneElement):
    """Falling brick.

    kinds:
        1 - normal brick (RED)
        2 - ammo brick (WHITE)
        3 - live brick (GREEN)
    """

    def __init__(self, x, y, y_speed, kind):
        SceneElement.__init__(self, x, y)
        self.y_speed = y_speed
        self.kind = kind
        self.state = 1  # 1 - falling (normal), 2 - explosion
        self.frame_cnt = 0
        self.x_adj = 0
        # Pick the color for this brick kind (anything unknown is green).
        self.color = {1: RED_COLOR, 2: WHITE_COLOR}.get(kind, GREEN_COLOR)

    def draw(self, screen):
        """Draw the brick outline plus a kind-specific glyph."""
        # Common rectangle outline shared by all kinds.
        pygame.draw.rect(screen, self.color, (self.x, self.y, 15, 10), 3)
        if self.kind == 2:
            # White (ammo): two short vertical bars.
            for bx in (5, 9):
                pygame.draw.line(screen, self.color, (self.x + bx, self.y + 3), (self.x + bx, self.y + 6), 1)
        elif self.kind == 3:
            # Green (life): a small plus sign.
            pygame.draw.line(screen, self.color, (self.x + 7, self.y + 3), (self.x + 7, self.y + 6), 1)
            pygame.draw.line(screen, self.color, (self.x + 5, self.y + 5), (self.x + 9, self.y + 5), 1)
| [
"nivanov@gridgain.com"
] | nivanov@gridgain.com |
d82a7c81e00fa27c5ad59a4fc4811c1928d2518e | 63daf225819636397fda6ef7e52783331c27f295 | /taobao-sdk/top/api/rest/TmallProductSpecsGetRequest.py | b7150211c3b1b6c63e9ce9e9c0ee66bd56c5f336 | [] | no_license | cash2one/language-Python | e332ecfb4e9321a11407b29987ee64d44e552b15 | 8adb4f2fd2f023f9cc89b4edce1da5f71a3332ab | refs/heads/master | 2021-06-16T15:15:08.346420 | 2017-04-20T02:44:16 | 2017-04-20T02:44:16 | 112,173,361 | 1 | 0 | null | 2017-11-27T09:08:57 | 2017-11-27T09:08:57 | null | UTF-8 | Python | false | false | 356 | py | '''
Created by auto_sdk on 2014.02.28
'''
from top.api.base import RestApi
class TmallProductSpecsGetRequest(RestApi):
    """REST request wrapper for the tmall.product.specs.get TOP API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters, unset until the caller fills them in.
        for param in ('cat_id', 'product_id', 'properties'):
            setattr(self, param, None)

    def getapiname(self):
        """Return the TOP API method name."""
        return 'tmall.product.specs.get'
"a@ie9.org"
] | a@ie9.org |
53837fb5e2aad28f95b52a92f23678b03b832595 | a9bbd4dbcc74eaeddf6ba29c04454c89c7e4a98c | /main.py | 56f50c21480d4d47b46d00809049677014862eb4 | [] | no_license | texcaltech/windmilltownhomes | c5b531c07b95fc751afe3a68a11509dedefdc077 | 8747c75a23304e28f64cc207be9d06b81679c355 | refs/heads/master | 2021-01-19T21:28:47.899739 | 2013-03-20T02:52:51 | 2013-03-20T02:52:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,193 | py | import webapp2
import jinja2
import os
# Jinja environment rooted at this file's directory so "templates/..." resolves.
jinja = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
def getBackground(page):
    """Return the large/small background image URLs for *page*.

    All named pages currently share one Flickr image pair; the original
    duplicated the identical dict across five elif branches.  Unknown pages
    fall back to a slightly different small rendition (behavior preserved
    from the original default branch).
    """
    large = 'http://farm9.staticflickr.com/8382/8552707810_b5110806a8_h.jpg'
    if page in ('contact', 'floorPlans', 'home', 'location', 'preLease'):
        small = 'http://farm9.staticflickr.com/8382/8552707810_e3a58d737f_c.jpg'
    else:
        small = 'http://farm9.staticflickr.com/8382/8552707810_e3a58d737f.jpg'
    return {'large': large, 'small': small}
class Contact(webapp2.RequestHandler):
    """Renders the contact page."""
    def get(self):
        context = {'bg': getBackground('contact')}
        page = jinja.get_template('templates/contact.html')
        self.response.out.write(page.render(context))
class FloorPlans(webapp2.RequestHandler):
    """Renders the floor-plans page."""
    def get(self):
        context = {'bg': getBackground('floorPlans')}
        page = jinja.get_template('templates/floorPlans.html')
        self.response.out.write(page.render(context))
class Home(webapp2.RequestHandler):
    """Renders the landing page."""
    def get(self):
        context = {'bg': getBackground('home')}
        page = jinja.get_template('templates/home.html')
        self.response.out.write(page.render(context))
class Location(webapp2.RequestHandler):
    """Renders the location page."""
    def get(self):
        context = {'bg': getBackground('location')}
        page = jinja.get_template('templates/location.html')
        self.response.out.write(page.render(context))
class PayRent(webapp2.RequestHandler):
    """Renders the rent-payment page."""
    def get(self):
        context = {'bg': getBackground('payRent')}
        page = jinja.get_template('templates/payRent.html')
        self.response.out.write(page.render(context))
class PreLease(webapp2.RequestHandler):
    """Renders the pre-lease page."""
    def get(self):
        context = {'bg': getBackground('preLease')}
        page = jinja.get_template('templates/preLease.html')
        self.response.out.write(page.render(context))
# URL routing table; '/maintenance' deliberately reuses the Contact handler.
routes = [
    ('/contact', Contact),
    ('/floor-plans', FloorPlans),
    ('/location', Location),
    ('/maintenance', Contact),
    ('/pay-rent', PayRent),
    ('/pre-lease', PreLease),
    ('/', Home),
]
app = webapp2.WSGIApplication(routes, debug=True)
"texcal.tech@gmail.com"
] | texcal.tech@gmail.com |
ce502221c2081beadd2ed01aa5ddd02cf7cf7901 | 89a90707983bdd1ae253f7c59cd4b7543c9eda7e | /data_structures_and_algorithms_in_python/ch04/power_fast.py | c7f98d650facb9e5b5bb39c4db5cd09f1ee64c4c | [] | no_license | timothyshull/python_reference_code | 692a7c29608cadfd46a6cc409a000023e95b9458 | f3e2205dd070fd3210316f5f470d371950945028 | refs/heads/master | 2021-01-22T20:44:07.018811 | 2017-03-17T19:17:22 | 2017-03-17T19:17:22 | 85,346,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | def power(x, n):
if n == 0:
return 1
else:
partial = power(x, n // 2) # rely on truncated division
result = partial * partial
if n % 2 == 1: # if n odd, include extra factor of x
result *= x
return result
| [
"timothyshull@gmail.com"
] | timothyshull@gmail.com |
89f4e42b94db78a216da5d14a10ae08a72d8c834 | 13e0ae2c2bb5442a099a5e5acd90a81082de6cea | /entities/entity.py | 2473ea0df75aa19494afccd9c27474102b3cb6f9 | [] | no_license | eseraygun/python-entities | 03345364075c47c7370b898f178420766ab2bbb7 | 9763ef61dacda70cb7d9731f74bfff2e35efe08b | refs/heads/master | 2016-09-08T00:42:13.032623 | 2014-03-24T13:33:20 | 2014-03-24T13:33:20 | 9,154,428 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | from basic import PRIMARY, MultipleErrors, ValidationError
from field import Field
from schema import Schema
class Entity(object):
    """Declarative record whose fields are collected by the Schema metaclass.

    ``_fields`` (mapping of name -> Field) and ``_groups`` are populated by
    the ``Schema`` metaclass; instances lazily materialise field defaults
    into the per-instance ``_values`` dict.  NOTE(review): Python 2 code
    (``iteritems``, old ``except`` syntax).
    """
    __metaclass__ = Schema

    # noinspection PyArgumentList
    def __new__(cls, *args, **kwargs):
        # Create the per-instance value store before __init__ runs.
        self = super(Entity, cls).__new__(cls, *args, **kwargs)
        self._values = dict()
        return self

    def __init__(self, *args, **kwargs):
        """Accept field values positionally (schema order) or by keyword."""
        # Check positional arguments.
        if len(args) > len(self._fields):
            raise TypeError(
                '__init__() takes at most %d arguments (%d given)'
                % (len(self._fields), len(args))
            )
        # Check keyword arguments.
        for name in kwargs:
            if name not in self._fields:
                raise TypeError(
                    '%r is an invalid keyword argument for this function'
                    % name
                )
        # Initialize values.
        for index, (name, field) in enumerate(self._fields.iteritems()):
            if index < len(args):
                self._values[name] = args[index]
            elif name in kwargs:
                self._values[name] = kwargs[name]

    def _get_value(self, name):
        # Lazily create and cache the field's default on first access.
        if name in self._values:
            return self._values[name]
        else:
            value = self._fields[name].make_default()
            self._values[name] = value
            return value

    def validate(self):
        """Validate every field; raise the single error directly, or wrap
        several in MultipleErrors."""
        errors = []
        for name, field in self._fields.iteritems():
            try:
                field.validate(self._get_value(name))
            except ValidationError, ex:
                errors.append(ex)
        if len(errors) == 1:
            raise errors[0]
        elif len(errors) > 1:
            raise MultipleErrors(None, self, errors)

    def keyify(self, group=PRIMARY, child_group=None):
        """Return a hashable key tuple built from the fields of *group*."""
        if child_group is None:
            child_group = group
        return tuple(field.keyify(self._get_value(field.name), child_group)
                     for field in self._groups[group])

    def __repr__(self):
        # Prefer the PRIMARY group for a concise repr; fall back to all fields.
        fields = self._groups.get(PRIMARY)
        if fields is None:
            fields = self._fields.itervalues()
        args = ', '.join('%s=%r' % (field.name, self._get_value(field.name))
                         for field in fields)
        return '%s(%s)' % (self.__class__.__name__, args)
class EntityField(Field):
    """Field whose value is an embedded Entity instance."""

    base_class = Entity

    def __init__(self, entity_class, default=None, null=True, group=None):
        super(EntityField, self).__init__(default, null, group)
        # Narrow the accepted type to the concrete entity class.
        self.base_class = entity_class

    def validate(self, value):
        """Run the base field checks, then validate the embedded entity."""
        super(EntityField, self).validate(value)
        if value is not None:
            value.validate()

    def keyify(self, value, group=PRIMARY):
        """Delegate key generation to the embedded entity (None stays None)."""
        return None if value is None else value.keyify(group)
class ReferenceField(Field):
    """Field that stores a reference (key) to another Entity."""

    base_class = Entity

    def __init__(self, entity_class, reference_group=PRIMARY,
                 default=None, null=True, group=None):
        super(ReferenceField, self).__init__(default, null, group)
        # Accepted entity type plus the group used to key the reference.
        self.base_class = entity_class
        self.reference_group = reference_group

    def make_empty(self):
        """A reference starts out unset."""
        return None

    def keyify(self, value, group=PRIMARY):
        """Key the referenced entity by this field's reference group."""
        return None if value is None else value.keyify(self.reference_group, group)
| [
"eser.aygun@gmail.com"
] | eser.aygun@gmail.com |
fe8c1b110597c61d4d0d0cf4df44e625ada09bf3 | d7c0193f7f81138466ea182aa624bb4305aa65e2 | /Calculator.py | 8f3f0f255d10648bb662341465089242a1899537 | [] | no_license | Avilash2001/Calculator-Python | 9103c4631b8f27f7f989b55a78d6ce61f1603915 | 3dd4fa61f4faa90a500bcfa7a45b93f90cd4a0c8 | refs/heads/master | 2022-11-21T20:14:40.870119 | 2020-07-25T15:52:00 | 2020-07-25T15:52:00 | 282,469,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,219 | py | from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui, QtWidgets
from math import sqrt
def is_integer(n):
    """Return True if *n* (a number or numeric string) is a whole number.

    The original converted with ``float(n)`` twice and only caught
    ValueError, so non-numeric objects (e.g. None) raised TypeError.
    """
    try:
        value = float(n)
    except (TypeError, ValueError):
        return False
    return value.is_integer()
class Ui_Calculator(object):
def setupUi(self, Calculator):
Calculator.setObjectName("Calculator")
Calculator.resize(800, 600)
Calculator.setMinimumSize(QtCore.QSize(800, 600))
Calculator.setMaximumSize(QtCore.QSize(800, 600))
Calculator.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
Calculator.setStyleSheet("")
Calculator.setTabShape(QtWidgets.QTabWidget.Rounded)
Calculator.setWindowIcon(QIcon("C:/Users/avila/Desktop/Python/Calculator/calc.png"))
self.centralwidget = QtWidgets.QWidget(Calculator)
self.centralwidget.setObjectName("centralwidget")
self.btEqual = QtWidgets.QPushButton(self.centralwidget)
self.btEqual.setGeometry(QtCore.QRect(0, 500, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btEqual.setFont(font)
self.btEqual.setStyleSheet("background-color: rgb(255, 170, 0);\n""color: rgb(255, 255, 255);")
self.btEqual.setObjectName("btEqual")
self.btEqual.clicked.connect(self.equal)
self.btZero = QtWidgets.QPushButton(self.centralwidget)
self.btZero.setGeometry(QtCore.QRect(200, 500, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btZero.setFont(font)
self.btZero.setStyleSheet("color: rgb(255, 255, 255);background-color: rgb(50, 50, 50);")
self.btZero.setObjectName("btZero")
self.btZero.clicked.connect(self.zero)
self.btDec = QtWidgets.QPushButton(self.centralwidget)
self.btDec.setGeometry(QtCore.QRect(400, 500, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btDec.setFont(font)
self.btDec.setStyleSheet("color: rgb(255, 255, 255);background-color: rgb(50, 50, 50);")
self.btDec.setObjectName("btDec")
self.btDec.clicked.connect(self.dec)
self.btSub = QtWidgets.QPushButton(self.centralwidget)
self.btSub.setGeometry(QtCore.QRect(600, 500, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btSub.setFont(font)
self.btSub.setStyleSheet("background-color: rgb(95, 95, 95);color: rgb(255, 255, 255);")
self.btSub.setObjectName("btSub")
self.btSub.clicked.connect(self.sub)
self.btThree = QtWidgets.QPushButton(self.centralwidget)
self.btThree.setGeometry(QtCore.QRect(400, 400, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btThree.setFont(font)
self.btThree.setStyleSheet("color: rgb(255, 255, 255);background-color: rgb(50, 50, 50);\n""")
self.btThree.setObjectName("btThree")
self.btThree.clicked.connect(self.three)
self.btAdd = QtWidgets.QPushButton(self.centralwidget)
self.btAdd.setGeometry(QtCore.QRect(600, 400, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btAdd.setFont(font)
self.btAdd.setStyleSheet("background-color: rgb(95, 95, 95);color: rgb(255, 255, 255);")
self.btAdd.setObjectName("btAdd")
self.btAdd.clicked.connect(self.add)
self.btOne = QtWidgets.QPushButton(self.centralwidget)
self.btOne.setGeometry(QtCore.QRect(0, 400, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btOne.setFont(font)
self.btOne.setStyleSheet("color: rgb(255, 255, 255);background-color: rgb(50, 50, 50);")
self.btOne.setObjectName("btOne")
self.btOne.clicked.connect(self.one)
self.btTwo = QtWidgets.QPushButton(self.centralwidget)
self.btTwo.setGeometry(QtCore.QRect(200, 400, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btTwo.setFont(font)
self.btTwo.setStyleSheet("color: rgb(255, 255, 255);background-color: rgb(50, 50, 50);\n""")
self.btTwo.setObjectName("btTwo")
self.btTwo.clicked.connect(self.two)
self.btSix = QtWidgets.QPushButton(self.centralwidget)
self.btSix.setGeometry(QtCore.QRect(400, 300, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btSix.setFont(font)
self.btSix.setStyleSheet("color: rgb(255, 255, 255);background-color: rgb(50, 50, 50);")
self.btSix.setObjectName("btSix")
self.btSix.clicked.connect(self.six)
self.btMul = QtWidgets.QPushButton(self.centralwidget)
self.btMul.setGeometry(QtCore.QRect(600, 300, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btMul.setFont(font)
self.btMul.setStyleSheet("background-color: rgb(95, 95, 95);color: rgb(255, 255, 255);\n""")
self.btMul.setObjectName("btMul")
self.btMul.clicked.connect(self.mul)
self.btFour = QtWidgets.QPushButton(self.centralwidget)
self.btFour.setGeometry(QtCore.QRect(0, 300, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btFour.setFont(font)
self.btFour.setStyleSheet("color: rgb(255, 255, 255);background-color: rgb(50, 50, 50);")
self.btFour.setObjectName("btFour")
self.btFour.clicked.connect(self.four)
self.btFive = QtWidgets.QPushButton(self.centralwidget)
self.btFive.setGeometry(QtCore.QRect(200, 300, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btFive.setFont(font)
self.btFive.setStyleSheet("color: rgb(255, 255, 255);background-color: rgb(50, 50, 50);")
self.btFive.setObjectName("btFive")
self.btFive.clicked.connect(self.five)
self.btNine = QtWidgets.QPushButton(self.centralwidget)
self.btNine.setGeometry(QtCore.QRect(400, 200, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btNine.setFont(font)
self.btNine.setStyleSheet("color: rgb(255, 255, 255);background-color: rgb(50, 50, 50);")
self.btNine.setObjectName("btNine")
self.btNine.clicked.connect(self.nine)
self.btDiv = QtWidgets.QPushButton(self.centralwidget)
self.btDiv.setGeometry(QtCore.QRect(600, 200, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btDiv.setFont(font)
self.btDiv.setStyleSheet("background-color: rgb(95, 95, 95);color: rgb(255, 255, 255);")
self.btDiv.setObjectName("btDiv")
self.btDiv.clicked.connect(self.div)
self.btSeven = QtWidgets.QPushButton(self.centralwidget)
self.btSeven.setGeometry(QtCore.QRect(0, 200, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btSeven.setFont(font)
self.btSeven.setStyleSheet("color: rgb(255, 255, 255);background-color: rgb(50, 50, 50);")
self.btSeven.setObjectName("btSeven")
self.btSeven.clicked.connect(self.seven)
self.btEight = QtWidgets.QPushButton(self.centralwidget)
self.btEight.setGeometry(QtCore.QRect(200, 200, 200, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btEight.setFont(font)
self.btEight.setStyleSheet("color: rgb(255, 255, 255);\n""background-color: rgb(50, 50, 50);")
self.btEight.setObjectName("btEight")
self.btEight.clicked.connect(self.eight)
self.labRes = QtWidgets.QLabel(self.centralwidget)
self.labRes.setGeometry(QtCore.QRect(0, 50, 800, 100))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.labRes.setFont(font)
self.labRes.setStyleSheet("background-color: rgb(255, 255, 255);")
self.labRes.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.labRes.setObjectName("labRes")
self.labHis = QtWidgets.QLabel(self.centralwidget)
self.labHis.setGeometry(QtCore.QRect(0, 0, 800, 50))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(12)
self.labHis.setFont(font)
self.labHis.setStyleSheet("background-color: rgb(255, 255, 255);")
self.labHis.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing)
self.labHis.setObjectName("labHis")
self.btNeg = QtWidgets.QPushButton(self.centralwidget)
self.btNeg.setGeometry(QtCore.QRect(0, 150, 160, 50))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btNeg.setFont(font)
self.btNeg.setStyleSheet("background-color: rgb(95, 95, 95);color: rgb(255, 255, 255);")
self.btNeg.setObjectName("btNeg")
self.btNeg.clicked.connect(self.neg)
self.btSq = QtWidgets.QPushButton(self.centralwidget)
self.btSq.setGeometry(QtCore.QRect(160, 150, 160, 50))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btSq.setFont(font)
self.btSq.setStyleSheet("background-color: rgb(95, 95, 95);color: rgb(255, 255, 255);")
self.btSq.setObjectName("btSq")
self.btSq.clicked.connect(self.sq)
self.btRoot = QtWidgets.QPushButton(self.centralwidget)
self.btRoot.setGeometry(QtCore.QRect(320, 150, 160, 50))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btRoot.setFont(font)
self.btRoot.setStyleSheet("background-color: rgb(95, 95, 95);color: rgb(255, 255, 255);")
self.btRoot.setObjectName("btRoot")
self.btRoot.clicked.connect(self.root)
self.btCls = QtWidgets.QPushButton(self.centralwidget)
self.btCls.setGeometry(QtCore.QRect(640, 150, 160, 50))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btCls.setFont(font)
self.btCls.setStyleSheet("background-color: rgb(95, 95, 95);color: rgb(255, 255, 255);")
self.btCls.setObjectName("btCls")
self.btCls.clicked.connect(self.cls)
self.btBksp = QtWidgets.QPushButton(self.centralwidget)
self.btBksp.setGeometry(QtCore.QRect(480, 150, 160, 50))
font = QtGui.QFont()
font.setFamily("Verdana")
font.setPointSize(16)
self.btBksp.setFont(font)
self.btBksp.setStyleSheet("background-color: rgb(95, 95, 95);color: rgb(255, 255, 255);")
self.btBksp.setObjectName("btBksp")
self.btBksp.clicked.connect(self.bksp)
Calculator.setCentralWidget(self.centralwidget)
self.retranslateUi(Calculator)
QtCore.QMetaObject.connectSlotsByName(Calculator)
def dec(self):
prev = self.labRes.text()
res=prev+"."
self.labRes.setText(res)
def one(self):
prev = self.labRes.text()
if prev == "0" or prev == "Cannot Divide By Zero" or prev == "Invalid Input":
res="1"
self.labRes.setText(res)
else:
res=prev+"1"
self.labRes.setText(res)
def two(self):
prev = self.labRes.text()
if prev == "0" or prev == "Cannot Divide By Zero" or prev == "Invalid Input":
res="2"
self.labRes.setText(res)
else:
res=prev+"2"
self.labRes.setText(res)
def three(self):
prev = self.labRes.text()
if prev == "0" or prev == "Cannot Divide By Zero" or prev == "Invalid Input":
res="3"
self.labRes.setText(res)
else:
res=prev+"3"
self.labRes.setText(res)
def four(self):
prev = self.labRes.text()
if prev == "0" or prev == "Cannot Divide By Zero" or prev == "Invalid Input":
res="4"
self.labRes.setText(res)
else:
res=prev+"4"
self.labRes.setText(res)
def five(self):
prev = self.labRes.text()
if prev == "0" or prev == "Cannot Divide By Zero" or prev == "Invalid Input":
res="5"
self.labRes.setText(res)
else:
res=prev+"5"
self.labRes.setText(res)
def six(self):
prev = self.labRes.text()
if prev == "0" or prev == "Cannot Divide By Zero" or prev == "Invalid Input":
res="6"
self.labRes.setText(res)
else:
res=prev+"6"
self.labRes.setText(res)
def seven(self):
prev = self.labRes.text()
if prev == "0" or prev == "Cannot Divide By Zero" or prev == "Invalid Input":
res="7"
self.labRes.setText(res)
else:
res=prev+"7"
self.labRes.setText(res)
def eight(self):
prev = self.labRes.text()
if prev == "0" or prev == "Cannot Divide By Zero" or prev == "Invalid Input":
res="8"
self.labRes.setText(res)
else:
res=prev+"8"
self.labRes.setText(res)
def nine(self):
prev = self.labRes.text()
if prev == "0" or prev == "Cannot Divide By Zero" or prev == "Invalid Input":
res="9"
self.labRes.setText(res)
else:
res=prev+"9"
self.labRes.setText(res)
def zero(self):
prev = self.labRes.text()
if prev == "0" or prev == "Cannot Divide By Zero" or prev == "Invalid Input":
res="0"
self.labRes.setText(res)
else:
res=prev+"0"
self.labRes.setText(res)
def add(self):
prev = self.labRes.text()
prev1= self.labHis.text()
if prev != "0":
calc=False
check=list(self.labHis.text())
flag=True
for j in check:
if j == "=":
flag=False
break
for i in check:
if i == "+" or i == "-" or i == "*" or i == "/":
calc=True
break
if calc and flag:
res=0
num1=""
for x in check:
if x== " ":
break
else:
num1=num1+x
if is_integer(prev):
num2=int(prev)
else:
num2=float(prev)
if is_integer(num1):
num1=int(num1)
else:
num1=float(num1)
if check[-2] == "+":
res= num1+num2
elif check[-2] == "-":
res= num1-num2
elif check[-2] == "*":
res= num1*num2
elif check[-2] == "/":
if num2 == 0:
self.labRes.setText("Cannot Divide By Zero")
self.labHis.setText("0")
return
else:
res= num1/num2
if is_integer(res):
res= int(res)
else:
res=float(num1)/float(num2)
res=str(res)
res= res+" "+"+"+" "
self.labRes.setText("0")
self.labHis.setText(res)
elif not flag:
res = prev+" "+"+"+" "
self.labHis.setText(res)
self.labRes.setText("0")
else:
res=prev+" "+"+"+" "
self.labRes.setText("0")
if prev1 !="0":
self.labHis.setText(prev1+res)
else:
self.labHis.setText(res)
else:
if prev1 =="0":
res="0 + "
self.labHis.setText(res)
else:
check=list(self.labHis.text())
check.pop(-2)
check.insert(-1,"+")
new=""
for i in check:
new=new+i
self.labHis.setText(new)
def sub(self):
prev = self.labRes.text()
prev1= self.labHis.text()
if prev != "0":
calc=False
check=list(self.labHis.text())
flag=True
for j in check:
if j == "=":
flag=False
break
for i in check:
if i == "+" or i == "-" or i == "*" or i == "/":
calc=True
break
if calc and flag:
res=0
num1=""
for x in check:
if x== " ":
break
else:
num1=num1+x
if is_integer(prev):
num2=int(prev)
else:
num2=float(prev)
if is_integer(num1):
num1=int(num1)
else:
num1=float(num1)
if check[-2] == "+":
res= num1+num2
elif check[-2] == "-":
res= num1-num2
elif check[-2] == "*":
res= num1*num2
elif check[-2] == "/":
if num2 == 0:
self.labRes.setText("Cannot Divide By Zero")
self.labHis.setText("0")
return
else:
res= num1/num2
if is_integer(res):
res= int(res)
else:
res=float(num1)/float(num2)
res=str(res)
res= res+" "+"-"+" "
self.labRes.setText("0")
self.labHis.setText(res)
elif not flag:
res = prev+" "+"-"+" "
self.labHis.setText(res)
self.labRes.setText("0")
else:
res=prev+" "+"-"+" "
self.labRes.setText("0")
if prev1 !="0":
self.labHis.setText(prev1+res)
else:
self.labHis.setText(res)
else:
if prev1 =="0":
res="0 - "
self.labHis.setText(res)
else:
check=list(self.labHis.text())
check.pop(-2)
check.insert(-1,"-")
new=""
for i in check:
new=new+i
self.labHis.setText(new)
def mul(self):
prev = self.labRes.text()
prev1= self.labHis.text()
if prev != "0":
calc=False
check=list(self.labHis.text())
flag=True
for j in check:
if j == "=":
flag=False
break
for i in check:
if i == "+" or i == "-" or i == "*" or i == "/":
calc=True
break
if calc and flag:
res=0
num1=""
for x in check:
if x== " ":
break
else:
num1=num1+x
if is_integer(prev):
num2=int(prev)
else:
num2=float(prev)
if is_integer(num1):
num1=int(num1)
else:
num1=float(num1)
if check[-2] == "+":
res= num1+num2
elif check[-2] == "-":
res= num1-num2
elif check[-2] == "*":
res= num1*num2
elif check[-2] == "/":
if num2 == 0:
self.labRes.setText("Cannot Divide By Zero")
self.labHis.setText("0")
return
else:
res= num1/num2
if is_integer(res):
res= int(res)
else:
res=float(num1)/float(num2)
res=str(res)
res= res+" "+"*"+" "
self.labRes.setText("0")
self.labHis.setText(res)
elif not flag:
res = prev+" "+"*"+" "
self.labHis.setText(res)
self.labRes.setText("0")
else:
res=prev+" "+"*"+" "
self.labRes.setText("0")
if prev1 !="0":
self.labHis.setText(prev1+res)
else:
self.labHis.setText(res)
else:
if prev1 =="0":
res="0 * "
self.labHis.setText(res)
else:
check=list(self.labHis.text())
check.pop(-2)
check.insert(-1,"*")
new=""
for i in check:
new=new+i
self.labHis.setText(new)
def div(self):
prev = self.labRes.text()
prev1= self.labHis.text()
if prev != "0":
calc=False
check=list(self.labHis.text())
flag=True
for j in check:
if j == "=":
flag=False
break
for i in check:
if i == "+" or i == "-" or i == "*" or i == "/":
calc=True
break
if calc and flag:
res=0
num1=""
for x in check:
if x== " ":
break
else:
num1=num1+x
if is_integer(prev):
num2=int(prev)
else:
num2=float(prev)
if is_integer(num1):
num1=int(num1)
else:
num1=float(num1)
if check[-2] == "+":
res= num1+num2
elif check[-2] == "-":
res= num1-num2
elif check[-2] == "*":
res= num1*num2
elif check[-2] == "/":
if num2 == 0:
self.labRes.setText("Cannot Divide By Zero")
self.labHis.setText("0")
return
else:
res= num1/num2
if is_integer(res):
res= int(res)
else:
res=float(num1)/float(num2)
res=str(res)
res= res+" "+"/"+" "
self.labRes.setText("0")
self.labHis.setText(res)
elif not flag:
res = prev+" "+"/"+" "
self.labHis.setText(res)
self.labRes.setText("0")
else:
res=prev+" "+"/"+" "
self.labRes.setText("0")
if prev1 !="0":
self.labHis.setText(prev1+res)
else:
self.labHis.setText(res)
else:
if prev1 =="0":
res="0 / "
self.labHis.setText(res)
else:
check=list(self.labHis.text())
check.pop(-2)
check.insert(-1,"/")
new=""
for i in check:
new=new+i
self.labHis.setText(new)
def cls(self):
self.labRes.setText("0")
self.labHis.setText("0")
def bksp(self):
prev=list(self.labRes.text())
if self.labRes.text() !="0":
prev.pop()
new=""
for i in prev:
new=new+i
self.labRes.setText(new)
else:
pass
def neg(self):
prev=list(self.labRes.text())
if self.labRes.text() !="0":
if prev[0] == "-":
prev.pop(0)
else:
prev.insert(0,"-")
new=""
for i in prev:
new=new+i
self.labRes.setText(new)
def sq(self):
prev=self.labRes.text()
if is_integer(prev):
prev=int(prev)
else:
prv=float(prev)
prev=prev*prev
prev=str(prev)
self.labRes.setText(prev)
def root(self):
prev=self.labRes.text()
if is_integer(prev):
prev=int(prev)
else:
prev=float(prev)
if prev >= 0:
prev=sqrt(prev)
if is_integer(prev):
prev=int(prev)
else:
prev=float(prev)
prev=str(prev)
self.labRes.setText(prev)
else:
self.labHis.setText("0")
self.labRes.setText("Invalid Input")
def equal(self):
prev = self.labRes.text()
prev1= self.labHis.text()
if prev != "0" and prev1 != "0":
check=list(self.labHis.text())
res=0
num1=""
for x in check:
if x== " ":
break
else:
num1=num1+x
if is_integer(prev):
num2=int(prev)
else:
num2=float(prev)
if is_integer(num1):
num1=int(num1)
else:
num1=float(num1)
if check[-2] == "+":
res= num1+num2
elif check[-2] == "-":
res= num1-num2
elif check[-2] == "*":
res= num1*num2
elif check[-2] == "/":
if num2 == 0:
self.labRes.setText("Cannot Divide By Zero")
self.labHis.setText("0")
else:
res= num1/num2
if is_integer(res):
res= int(res)
else:
res=float(num1)/float(num2)
res=str(res)
self.labRes.setText(res)
res= prev1+prev+" = "
self.labHis.setText(res)
def retranslateUi(self, Calculator):
_translate = QtCore.QCoreApplication.translate
Calculator.setWindowTitle(_translate("Calculator", "Avilash Calculator"))
self.btEqual.setText(_translate("Calculator", "="))
self.btZero.setText(_translate("Calculator", "0"))
self.btDec.setText(_translate("Calculator", "."))
self.btSub.setText(_translate("Calculator", "-"))
self.btThree.setText(_translate("Calculator", "3"))
self.btAdd.setText(_translate("Calculator", "+"))
self.btOne.setText(_translate("Calculator", "1"))
self.btTwo.setText(_translate("Calculator", "2"))
self.btSix.setText(_translate("Calculator", "6"))
self.btMul.setText(_translate("Calculator", "*"))
self.btFour.setText(_translate("Calculator", "4"))
self.btFive.setText(_translate("Calculator", "5"))
self.btNine.setText(_translate("Calculator", "9"))
self.btDiv.setText(_translate("Calculator", "/"))
self.btSeven.setText(_translate("Calculator", "7"))
self.btEight.setText(_translate("Calculator", "8"))
self.labRes.setText(_translate("Calculator", "0"))
self.labHis.setText(_translate("Calculator", "0"))
self.btNeg.setText(_translate("Calculator", "+/-"))
self.btSq.setText(_translate("Calculator", "Square"))
self.btRoot.setText(_translate("Calculator", "Sq root"))
self.btCls.setText(_translate("Calculator", "Clear"))
self.btBksp.setText(_translate("Calculator", "Bksp"))
if __name__ == "__main__":
    # Launch the calculator as a standalone Qt application.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Calculator = QtWidgets.QMainWindow()
    ui = Ui_Calculator()
    ui.setupUi(Calculator)
    Calculator.show()
    # Hand control to the Qt event loop; propagate its exit status.
    sys.exit(app.exec_())
| [
"noreply@github.com"
] | noreply@github.com |
87e458170ac80e1d168ac33dc51962ad66b4f833 | 9134990653ca039918ad14a2c345f0ee4c421fe1 | /setup.py | 7ecc05f9d920cdb9bc37e3340e20015b75fed2b6 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | soderluk/bottle-oauthlib | 31e2bfba817f04931de5f0db4cd0fbdd2bce2e99 | 92bd9ed6a7b6c23b073625d45c284e293b70b28c | refs/heads/master | 2020-04-08T20:01:31.077961 | 2018-11-06T08:31:14 | 2018-11-06T08:31:14 | 159,681,033 | 0 | 0 | BSD-3-Clause | 2018-11-29T14:43:56 | 2018-11-29T14:43:56 | null | UTF-8 | Python | false | false | 1,028 | py | from setuptools import setup
import inspect
from os import path
import os
# ``requirements.in`` sits next to this setup script; one dependency per line.
with open(path.join(path.dirname(path.abspath(inspect.getfile(inspect.currentframe()))), "requirements.in")) as fd:
    dependencies = fd.read().split('\n')
# On Travis CI the git tag names the release; fall back to a fixed default.
# (Replaces the original try/except KeyError with the idiomatic .get().)
version_tag = os.environ.get("TRAVIS_TAG", "1.0.0")
setup(
    name='bottle-oauthlib',
    version=version_tag,
    description='Bottle adapter for OAuthLib framework (OAuth2.0)',
    url='https://github.com/thomsonreuters/bottle-oauthlib',
    license='BSD-3-Clause',
    author="Thomson Reuters",
    author_email="EikonEdge.Infra-Dev@thomsonreuters.com",
    packages=['bottle_oauthlib'],
    install_requires=dependencies,
    test_suite='tests',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Programming Language :: Python :: 3.6',
    ]
)
| [
"jonathan.huot@thomsonreuters.com"
] | jonathan.huot@thomsonreuters.com |
cb20e9e52b32e9c6326a763015d867ba85acb885 | 7b5f7dc5b6a0fc063aeabc9f2408dc867586c129 | /env/lib/python2.7/site-packages/sure/old.py | 59cef893c4075eff83a691a530ebb8822e678328 | [] | no_license | kingbifoe/django-calendar-reminder | 687dfa419895cfc67f5fad542179d9d4a716e75d | 3325717f53fd9825e036f21f391510f6a754aa93 | refs/heads/master | 2023-01-02T05:10:47.418059 | 2016-04-10T19:04:44 | 2016-04-10T19:04:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,705 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <sure - utility belt for automated testing in python>
# Copyright (C) <2010-2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import traceback
import inspect
from copy import deepcopy
from pprint import pformat
from functools import wraps
try:
from collections import Iterable
except ImportError:
Iterable = (list, dict, tuple, set)
try:
import __builtin__ as builtins
except ImportError:
import builtins
from six import string_types, text_type
from sure.core import DeepComparison
from sure.core import _get_file_name
from sure.core import _get_line_number
from sure.core import itemize_length
def is_iterable(obj):
    """Return True when *obj* is iterable but not a string type."""
    if isinstance(obj, string_types):
        return False
    return hasattr(obj, '__iter__')
def all_integers(obj):
    """Check that *obj* is an iterable whose members are all ints.

    Returns True on success and a falsy value otherwise; callers only rely
    on truthiness, exactly as with the original implicit ``return``s.
    """
    if not is_iterable(obj):
        return None
    return all(isinstance(element, int) for element in obj)
def explanation(msg):
    """Decorator factory: turn a falsy predicate result into an
    AssertionError whose message is ``msg % (self._src, what)``."""
    def dec(func):
        @wraps(func)
        def wrap(self, what):
            outcome = func(self, what)
            assert outcome, msg % (self._src, what)
            return True
        return wrap
    return dec
class AssertionHelper(object):
    # Fluent assertion wrapper around a source value (``self._src``); the
    # predicate methods below either return True or raise AssertionError.
    def __init__(self, src,
                 within_range=None,
                 with_args=None,
                 with_kwargs=None,
                 and_kwargs=None):
        self._src = src
        self._attribute = None
        self._eval = None
        self._range = None
        if all_integers(within_range):
            if len(within_range) != 2:
                raise TypeError(
                    'within_range parameter must be a tuple with 2 objects',
                )
            self._range = within_range
        self._callable_args = []
        if isinstance(with_args, (list, tuple)):
            self._callable_args = list(with_args)
        self._callable_kw = {}
        if isinstance(with_kwargs, dict):
            self._callable_kw.update(with_kwargs)
        if isinstance(and_kwargs, dict):
            self._callable_kw.update(and_kwargs)
    @classmethod
    def is_a_matcher(cls, func):
        # Registers *func* as a new matcher method on the class; the copy
        # keeps the original function's name for introspection.
        def match(self, *args, **kw):
            return func(self._src, *args, **kw)
        new_matcher = deepcopy(match)
        new_matcher.__name__ = func.__name__
        setattr(cls, func.__name__, new_matcher)
        return new_matcher
    def raises(self, exc, msg=None):
        # Asserts that calling the wrapped callable raises *exc* (either an
        # exception class or an expected-message string), optionally with
        # *msg* contained in the exception text.
        if not callable(self._src):
            raise TypeError('%r is not callable' % self._src)
        try:
            self._src(*self._callable_args, **self._callable_kw)
        except BaseException as e:
            if isinstance(exc, string_types):
                msg = exc
                exc = type(e)
            err = text_type(e)
            if isinstance(exc, type) and issubclass(exc, BaseException):
                if not isinstance(e, exc):
                    raise AssertionError(
                        '%r should raise %r, but raised %r:\nORIGINAL EXCEPTION:\n\n%s' % (
                            self._src, exc, e.__class__, traceback.format_exc(e)))
                if isinstance(msg, string_types) and msg not in err:
                    raise AssertionError('''
%r raised %s, but the exception message does not
match.\n\nEXPECTED:\n%s\n\nGOT:\n%s'''.strip() % (
                        self._src,
                        type(e).__name__,
                        msg, err))
            elif isinstance(msg, string_types) and msg not in err:
                raise AssertionError(
                    'When calling %r the exception message does not match. ' \
                    'Expected: %r\n got:\n %r' % (self._src, msg, err))
            else:
                raise e
        else:
            # Nothing was raised: build a helpful failure message that names
            # the callable and, for plain functions, its file and line.
            if inspect.isbuiltin(self._src):
                _src_filename = '<built-in function>'
            else:
                _src_filename = _get_file_name(self._src)
            if inspect.isfunction(self._src):
                _src_lineno = _get_line_number(self._src)
                raise AssertionError(
                    'calling function %s(%s at line: "%d") with args %r and kwargs %r did not raise %r' % (
                        self._src.__name__,
                        _src_filename, _src_lineno,
                        self._callable_args,
                        self._callable_kw, exc))
            else:
                raise AssertionError(
                    'at %s:\ncalling %s() with args %r and kwargs %r did not raise %r' % (
                        _src_filename,
                        self._src.__name__,
                        self._callable_args,
                        self._callable_kw, exc))
        return True
    def deep_equals(self, dst):
        # Structural comparison; DeepComparison returns either a bool or an
        # object describing the mismatch.
        deep = DeepComparison(self._src, dst)
        comparison = deep.compare()
        if isinstance(comparison, bool):
            return comparison
        raise comparison.as_assertion(self._src, dst)
    def equals(self, dst):
        # With ``the_attribute`` set, compares that attribute of every item
        # (restricted to ``self._range`` when given); otherwise deep-compares.
        if self._attribute and is_iterable(self._src):
            msg = '%r[%d].%s should be %r, but is %r'
            for index, item in enumerate(self._src):
                if self._range:
                    if index < self._range[0] or index > self._range[1]:
                        continue
                attribute = getattr(item, self._attribute)
                error = msg % (
                    self._src, index, self._attribute, dst, attribute)
                if attribute != dst:
                    raise AssertionError(error)
        else:
            return self.deep_equals(dst)
        return True
    def looks_like(self, dst):
        # Case- and whitespace-insensitive string comparison.
        old_src = pformat(self._src)
        old_dst = pformat(dst)
        self._src = re.sub(r'\s', '', self._src).lower()
        dst = re.sub(r'\s', '', dst).lower()
        error = '%s does not look like %s' % (old_src, old_dst)
        assert self._src == dst, error
        return self._src == dst
    def every_one_is(self, dst):
        msg = 'all members of %r should be %r, but the %dth is %r'
        for index, item in enumerate(self._src):
            if self._range:
                if index < self._range[0] or index > self._range[1]:
                    continue
            error = msg % (self._src, dst, index, item)
            if item != dst:
                raise AssertionError(error)
        return True
    @explanation('%r should differ to %r, but is the same thing')
    def differs(self, dst):
        return self._src != dst
    @explanation('%r should be a instance of %r, but is not')
    def is_a(self, dst):
        return isinstance(self._src, dst)
    def at(self, key):
        # Drill into a dict entry or an object attribute, wrapped again.
        assert self.has(key)
        if isinstance(self._src, dict):
            return AssertionHelper(self._src[key])
        else:
            return AssertionHelper(getattr(self._src, key))
    @explanation('%r should have %r, but have not')
    def has(self, that):
        return that in self
    def _get_that(self, that):
        # Normalizes *that* to an int: either int(that) or len(that).
        try:
            that = int(that)
        except TypeError:
            that = len(that)
        return that
    def len_greater_than(self, that):
        that = self._get_that(that)
        length = len(self._src)
        if length <= that:
            error = 'the length of the %s should be greater then %d, but is %d' % (
                type(self._src).__name__,
                that,
                length,
            )
            raise AssertionError(error)
        return True
    def len_greater_than_or_equals(self, that):
        that = self._get_that(that)
        length = len(self._src)
        if length < that:
            error = 'the length of %r should be greater then or equals %d, but is %d' % (
                self._src,
                that,
                length,
            )
            raise AssertionError(error)
        return True
    def len_lower_than(self, that):
        original_that = that
        if isinstance(that, Iterable):
            that = len(that)
        else:
            that = self._get_that(that)
        length = len(self._src)
        if length >= that:
            error = 'the length of %r should be lower then %r, but is %d' % (
                self._src,
                original_that,
                length,
            )
            raise AssertionError(error)
        return True
    def len_lower_than_or_equals(self, that):
        that = self._get_that(that)
        length = len(self._src)
        error = 'the length of %r should be lower then or equals %d, but is %d'
        if length > that:
            msg = error % (
                self._src,
                that,
                length,
            )
            raise AssertionError(msg)
        return True
    def len_is(self, that):
        that = self._get_that(that)
        length = len(self._src)
        if length != that:
            error = 'the length of %r should be %d, but is %d' % (
                self._src,
                that,
                length,
            )
            raise AssertionError(error)
        return True
    def len_is_not(self, that):
        that = self._get_that(that)
        length = len(self._src)
        if length == that:
            error = 'the length of %r should not be %d' % (
                self._src,
                that,
            )
            raise AssertionError(error)
        return True
    def like(self, that):
        return self.has(that)
    def the_attribute(self, attr):
        # Selects the attribute that ``equals``/``matches`` will inspect.
        self._attribute = attr
        return self
    def in_each(self, attr):
        # Selects the expression that ``matches`` will evaluate per item.
        self._eval = attr
        return self
    def matches(self, items):
        msg = '%r[%d].%s should be %r, but is %r'
        # Evaluates "<item>.<self._eval>" against each item of the source.
        get_eval = lambda item: eval(
            "%s.%s" % ('current', self._eval), {}, {'current': item},
        )
        if self._eval and is_iterable(self._src):
            if isinstance(items, string_types):
                items = [items for x in range(len(items))]
            else:
                if len(items) != len(self._src):
                    source = list(map(get_eval, self._src))
                    source_len = len(source)
                    items_len = len(items)
                    raise AssertionError(
                        '%r has %d items, but the matching list has %d: %r'
                        % (source, source_len, items_len, items),
                    )
            for index, (item, other) in enumerate(zip(self._src, items)):
                if self._range:
                    if index < self._range[0] or index > self._range[1]:
                        continue
                value = get_eval(item)
                error = msg % (self._src, index, self._eval, other, value)
                if other != value:
                    raise AssertionError(error)
        else:
            return self.equals(items)
        return True
    @builtins.property
    def is_empty(self):
        try:
            lst = list(self._src)
            length = len(lst)
            assert length == 0, \
                '%r is not empty, it has %s' % (self._src,
                                                itemize_length(self._src))
            return True
        except TypeError:
            raise AssertionError("%r is not iterable" % self._src)
    @builtins.property
    def are_empty(self):
        return self.is_empty
    def __contains__(self, what):
        # NOTE(review): the first ``if`` is shadowed by the second (dicts are
        # also Iterable), so dict lookups effectively search the dict itself.
        if isinstance(self._src, dict):
            items = self._src.keys()
        if isinstance(self._src, Iterable):
            items = self._src
        else:
            items = dir(self._src)
        return what in items
    def contains(self, what):
        assert what in self._src, '%r should be in %r' % (what, self._src)
        return True
    def does_not_contain(self, what):
        assert what not in self._src, \
            '%r should NOT be in %r' % (what, self._src)
        return True
    # Alias kept for API compatibility.
    doesnt_contain = does_not_contain
# Public entry point: ``that(x)`` wraps ``x`` in an AssertionHelper.
that = AssertionHelper
| [
"cndeti@gmail.com"
] | cndeti@gmail.com |
5d93ce633829dede636e08e12802f99ad3b751d6 | b806c351b5b09c68fb82edd26b7f48ac60124293 | /examples/tutorials/04_cubes_and_objects/13_pop_a_wheelie.py | f96809dd4c591c60124b58427a2a22972974c1e5 | [
"Apache-2.0"
] | permissive | cashcat/cozmo-python-sdk | ddd9ac042fe5b254ba2d721d89b552a4931d913b | c4dcfb2c323bbfdce0c3affed2b64a59edfd0f68 | refs/heads/master | 2022-07-08T13:28:19.862721 | 2022-07-03T14:10:02 | 2022-07-03T14:10:02 | 172,061,034 | 1 | 0 | NOASSERTION | 2019-02-22T12:15:38 | 2019-02-22T12:15:38 | null | UTF-8 | Python | false | false | 1,214 | py | #!/usr/bin/env python3
# Copyright (c) 2017 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Tell Cozmo to pop a wheelie on a cube that is placed in front of him.
This example demonstrates Cozmo driving to a cube and pushing himself onto
his back by pushing his lift against that cube.
'''
import cozmo
async def pop_a_wheelie(robot: cozmo.robot.Robot):
    """Wait until a light cube is visible, then pop a wheelie against it."""
    print("Cozmo is waiting until he sees a cube")
    cube = await robot.world.wait_for_observed_light_cube()
    print("Cozmo found a cube, and will now attempt to pop a wheelie on it")
    await robot.pop_a_wheelie(cube, num_retries=2).wait_for_completed()
# Connect to a Cozmo robot and run the routine until it finishes.
cozmo.run_program(pop_a_wheelie)
| [
"noreply@github.com"
] | noreply@github.com |
ab18d390675297b3dc7088e097b39afb5af818fd | 45619f832fbb3939098f2d0e890f7f81e6dd19a3 | /hornet/settings/production.py | 0ef5a7307d42591a2e16a866bfbadecbaea57894 | [] | no_license | coding-buzz/hornet | bba6b4db59434e1ee4e29284752cf7fcc244873d | 5054eddac1edfc427ac07ea7dc419853cf33f0a4 | refs/heads/master | 2021-01-17T10:19:10.202806 | 2017-12-07T10:38:13 | 2017-12-07T10:38:13 | 59,466,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | import os
from base import *
# Secrets come from the environment; a missing variable raises KeyError at
# import time, which is intentional for production.
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
ALLOWED_HOSTS = ['coding.buzz']
# django-compressor: run autoprefixer over the compressed CSS.
COMPRESS_CSS_FILTERS = (
    'django_compressor_autoprefixer.AutoprefixerFilter',
)
# BASE_DIR is provided by the ``from base import *`` settings module.
COMPRESS_AUTOPREFIXER_BINARY = os.path.join(BASE_DIR, '..', 'node_modules/postcss-cli/bin/postcss')
COMPRESS_AUTOPREFIXER_ARGS = ' --use autoprefixer'
COMPRESS_ENABLED = True
BASE_URL = 'http://coding.buzz'
# Mailchimp newsletter integration (real sends are enabled in production).
MAILCHIMP_SEND_EMAILS = True
MAILCHIMP_API_KEY = os.environ['MAILCHIMP_API_KEY']
MAILCHIMP_SUB_LIST_NAME = 'Coding Buzz Subscriptions'
MAILCHIMP_SUBSCRIPTION_FORM_URL = os.environ['MAILCHIMP_SUBSCRIPTION_FORM_URL']
# reCAPTCHA site/secret key pair.
RECAPTCHA_PUBLIC_KEY = os.environ['RECAPTCHA_PUBLIC_KEY']
RECAPTCHA_PRIVATE_KEY = os.environ['RECAPTCHA_PRIVATE_KEY']
ENABLE_GOOGLE_ANALYTICS = True
"marcinskiba91@gmail.com"
] | marcinskiba91@gmail.com |
8f625038c620cfe6780ab65acbcdcdc12082f039 | 8df9a0b12b8451d6c198164b85f8776fd4fccc31 | /casesrc/test5updateBook.py | abc1873aa5785eebc6cc87417ade51e1e8a2a26c | [] | no_license | alexxu56/httpbook | aa4cd295e9269fda9d3598f674e65871bf6453e0 | c5f3708d8e8584926273ba2ae154e5565b339884 | refs/heads/master | 2020-03-08T05:12:52.905480 | 2018-04-03T17:19:06 | 2018-04-03T17:19:06 | 127,942,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | # -*- coding: utf-8 -*-
from updateBook import UpdateBook
from getBookList import GetBookList
import unittest
class testUpdateBook(unittest.TestCase):
    # Integration test (Python 2 file: note the ``print`` statement below):
    # drive UpdateBook with spreadsheet cases, then verify each updated
    # record through GetBookList.
    def testupdatebook(self):
        # addbook: apply the update cases listed in the workbook.
        filename = 'caseUpdate.xlsx'
        begincase = '00301'
        casenum = 2
        updatebooklist = UpdateBook()
        booklist = updatebooklist.update_book(filename, begincase, casenum)
        # query: re-read each updated book by id to confirm the change.
        filenameq = 'caseData.xlsx'
        casenumq = 1
        for book in booklist:
            inbookid = int(book['id'])
            begincase = book['caseid']
            print int(inbookid)
            if inbookid > 0:
                books = GetBookList()
                books.get_book_list(filenameq, begincase, casenumq, inbookid)
#if __name__ == '__main__':
| [
"156461040@youremail.com"
] | 156461040@youremail.com |
14e91cfd8dbbec97c9ab3e312f3b5779cd6e22fb | ed7907d39df5ecfa2ed40f5dba77646dd680c0ba | /exceptionTest3.py | 28645142fe713c83a936ec0f81af7b12a677c398 | [] | no_license | neighborpil/Py_EasyPython | 2cc0a7b7914c6a720f9c2da426f5bceba2bf8f2d | 042e82e6c67549da307800c87492145b731319ec | refs/heads/master | 2021-07-06T05:28:43.364552 | 2017-10-01T04:41:10 | 2017-10-01T04:41:10 | 103,515,499 | 0 | 0 | null | 2017-10-01T04:41:11 | 2017-09-14T09:46:24 | null | UTF-8 | Python | false | false | 438 | py | #finally 처리
# Use ``finally`` for work that must always run, whether or not an
# exception occurred — e.g. closing a file.
try:
    # Create the file so the read-only open below succeeds.
    open('__test.txt', 'w').close()
    # Open the file for reading.
    f = open('__test.txt', 'r')
    # Writing to a read-only handle raises an exception.
    f.write('xxx')
except OSError as e:
    print('Exception : {}'.format(e))
finally:
    f.close()
    print('release file')
| [
"neighborpil@naver.com"
] | neighborpil@naver.com |
60e1fae3e610401595ac4c325afa9c1d4c049bfa | 7c0369b719201ac461a053bf385c191faf82adb9 | /cnngeometric_yong/DataSet_old/venv/bin/easy_install | d1bd7d69b4efe14d31ced0cca45c7659b168c1d5 | [] | no_license | yoyongbo/Thesis-Image-Alignment-Using-Machine-Learning- | ab9f692450ea2d408fccc8b56da7baae9617ac92 | 3a6abf9b5e0082a2b4a9786d6f7f0960955e6c91 | refs/heads/master | 2023-08-29T21:14:55.282716 | 2021-10-04T22:41:24 | 2021-10-04T22:41:24 | 413,600,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | #!/Users/yongboyu/Desktop/yong_honors/DataSet/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so setuptools sees the real
    # command name, then delegate to easy_install's entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"yoyu@upei.ca"
] | yoyu@upei.ca | |
b1dea3c4983f09b3a6dc08bf597ea9ff4f8bd617 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2158/60876/250371.py | 2410110cab9dc4be3dd9ff187554b0e5447e6868 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | string=input()
# Parse a leading, optionally negative, integer from ``string`` and print it
# clamped to the signed 32-bit range (LeetCode "atoi"-style judge solution).
index=0
# Skip leading spaces.  NOTE(review): all-blank input would raise
# IndexError here — presumably the judge guarantees non-blank input.
while string[index]==' ':
    index+=1
temp=""
if string[index]=='-':
    # Negative number: collect "-" plus the following digit run.
    temp+="-"
    index+=1
    while index<len(string) and string[index].isdigit():
        temp+=string[index]
        index+=1
    # Clamp below at -2**31.
    if int(temp)<-2**(31):
        print( -2**(31))
    else:
        print(temp)
elif not string[index].isdigit():
    # First non-space character is not a digit (a "+" sign is not handled).
    print(0)
else:
    # Positive number: collect the digit run and clamp above at 2**31-1.
    while index<len(string) and string[index].isdigit():
        temp+=string[index]
        index+=1
    if int(temp)>2**31-1:
        print(2**31-1)
    else:
        print(temp)
print(temp) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
68c248e166090f6f4c128958819a7b23e91b52f8 | f7d686ff80f262cb3812980dc7e3e9b1d142b368 | /venv/nums.py | 1f5d1c782c5949aba3cd9287a2a6e0a12979d760 | [] | no_license | orDasha/Python_Algorithm_Home_Work | b569525cea267b77b6c1952797e3f26b62be2fe1 | b1c4fcb07410f5374036580dcc0ad58f6c895b1e | refs/heads/master | 2021-02-26T05:58:34.785475 | 2020-03-06T19:30:09 | 2020-03-06T19:30:09 | 245,500,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,871 | py | #Функция приведения вводимого значения к числовому (FLOAT), на входе число, на выходе float
def digitalize_prompt(num):
    """Convert the user-entered string *num* to a float.

    Prints an error message (in Russian, as displayed to the user) and
    exits when the text is not numeric.  The original's separate
    ``isdigit`` branch was redundant — ``float()`` already handles plain
    digit strings — so the conversion is done once.
    """
    try:
        return float(num)
    except ValueError:
        print('Неверно введено число')
        exit(0)
# Sort the entered array in ascending order; takes a list, returns it sorted.
def order_elements(sort_arr):
    """Bubble-sort *sort_arr* in place (ascending) and return it."""
    size = len(sort_arr)
    for done in range(1, size):
        # After each sweep the largest remaining value has bubbled to the end.
        for k in range(size - done):
            if sort_arr[k] > sort_arr[k + 1]:
                sort_arr[k], sort_arr[k + 1] = sort_arr[k + 1], sort_arr[k]
    return sort_arr
# Ask the user for numbers; takes the count, returns a list of floats.
def ask_dig(size):
    """Prompt for *size* numbers and return them as a list of floats."""
    collected = []
    for j in range(size):
        raw = input(f'Введите число №{j+1}: ')
        collected.append(digitalize_prompt(raw))
    return collected
# Print the result; takes the (unsorted) list of entered numbers.
def res_func(user_digits):
    """Sort the numbers and report the middle one when it lies strictly
    between the minimum and the maximum."""
    size = len(user_digits)
    print(f'Введено: {user_digits}')  # debug
    user_digits = order_elements(user_digits)
    middle = user_digits[round((size - 1) / 2)]
    if user_digits[0] < middle < user_digits[size - 1]:
        print(f'Число между Min и Max = {middle}: ')
    else:
        print('Числа между Min и Max не обнаружено')
if __name__ == '__main__':
    # Read three numbers and report the one lying between min and max.
    res_func(ask_dig(3))
"21261637+orDasha@users.noreply.github.com"
] | 21261637+orDasha@users.noreply.github.com |
08a3fe7445f742ea3b4da8caf57c52bbc9acc66b | 13da6acd43e63b8be0e6e6971d06bd4706b2be71 | /tests/test_octodns_record_geo.py | 6b9cd4eaa90c74a52ff2cd2b1e5d329ac137ef04 | [
"MIT"
] | permissive | grahamhayes/octodns | 4db23d47c6397dec2dee5f75285bfde5b30c7791 | ea81b7a535d722f81cb862767a295f277960e257 | refs/heads/master | 2023-01-11T00:03:03.461018 | 2019-01-28T17:51:40 | 2019-01-28T17:51:40 | 168,031,154 | 0 | 0 | MIT | 2022-12-27T15:33:45 | 2019-01-28T20:24:02 | Python | UTF-8 | Python | false | false | 2,110 | py | #
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
from octodns.record.geo import GeoCodes
class TestRecordGeoCodes(TestCase):
    """Exercises GeoCodes.validate() over valid and invalid geo codes;
    validate() returns a list of prefixed error strings (empty on success)."""
    def test_validate(self):
        prefix = 'xyz '
        # All valid
        self.assertEquals([], GeoCodes.validate('NA', prefix))
        self.assertEquals([], GeoCodes.validate('NA-US', prefix))
        self.assertEquals([], GeoCodes.validate('NA-US-OR', prefix))
        # Just plain bad
        self.assertEquals(['xyz invalid geo code "XX-YY-ZZ-AA"'],
                          GeoCodes.validate('XX-YY-ZZ-AA', prefix))
        self.assertEquals(['xyz unknown continent code "X-Y-Z"'],
                          GeoCodes.validate('X-Y-Z', prefix))
        self.assertEquals(['xyz unknown continent code "XXX-Y-Z"'],
                          GeoCodes.validate('XXX-Y-Z', prefix))
        # Bad continent
        self.assertEquals(['xyz unknown continent code "XX"'],
                          GeoCodes.validate('XX', prefix))
        # Bad continent good country
        self.assertEquals(['xyz unknown continent code "XX-US"'],
                          GeoCodes.validate('XX-US', prefix))
        # Bad continent good country and province
        self.assertEquals(['xyz unknown continent code "XX-US-OR"'],
                          GeoCodes.validate('XX-US-OR', prefix))
        # Bad country, good continent
        self.assertEquals(['xyz unknown country code "NA-XX"'],
                          GeoCodes.validate('NA-XX', prefix))
        # Bad country, good continent and state
        self.assertEquals(['xyz unknown country code "NA-XX-OR"'],
                          GeoCodes.validate('NA-XX-OR', prefix))
        # Good country, good continent, but bad match
        self.assertEquals(['xyz unknown country code "NA-GB"'],
                          GeoCodes.validate('NA-GB', prefix))
        # Bad province code, good continent and country
        self.assertEquals(['xyz unknown province code "NA-US-XX"'],
                          GeoCodes.validate('NA-US-XX', prefix))
| [
"ross@github.com"
] | ross@github.com |
b69f8beff48dd50cb0d5282793730a6584dc94dc | 3c2b7c4f632697b382cbd55bf0e4652c11cbf685 | /python-Exp3/Q3.py | 87a97054c957025233ea449d5e9a093f46240468 | [] | no_license | ajitjha393/PYTHON-EXP | 0b7c56157ff1054d84cfbd5b9820cae46567adc1 | 1ae42a9bc4f163fc512b887f5ff8c2734f6d62ab | refs/heads/master | 2020-04-22T22:04:36.314429 | 2019-04-07T17:30:03 | 2019-04-07T17:30:03 | 170,695,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,628 | py | # ---------Simple Calculator
def add(*args):
    """Return the sum of all arguments (0 when called with none).

    Uses the builtin ``sum`` instead of a manual loop; the original also
    shadowed the builtin with a local named ``sum``.
    """
    return sum(args)
def sub(*args):
    """Return args[0] minus every remaining argument.

    Bug fix: the original skipped elements by *value* (``if i == args[0]``),
    so any later argument equal to the first was silently ignored — e.g.
    ``sub(5, 5)`` returned 5 instead of 0.  Skipping is now positional.
    """
    result = args[0]
    for value in args[1:]:
        result -= value
    return result
def Multiply(*args):
    """Return the product of all arguments (1 when called with none)."""
    product = 1
    for factor in args:
        product *= factor
    return product
def Divide(*args):
    """Floor-divide args[0] by each remaining argument in turn.

    Returns the original error string when any divisor is zero.
    Bug fix: the original skipped elements by *value* (``if i == args[0]``),
    so any later argument equal to the first was silently ignored — e.g.
    ``Divide(6, 6)`` returned 6 instead of 1.  Skipping is now positional.
    """
    result = args[0]
    for divisor in args[1:]:
        if divisor == 0:
            return "Error Cannot Divide by Zero , Try Again"
        result //= divisor
    return result
# Simple REPL menu: loop until the user picks "5" (Exit).  Each branch reads
# a comma-separated list of integers and prints the corresponding result;
# any unrecognized choice simply re-displays the menu.
myChoice = True
while(myChoice != '5'):
    print('''
    *************MENU*************
    1.Add
    2.Subtract
    3.Multiply
    4.Divide
    5.Exit
    ''')
    myChoice = input("Enter a choice")
    if myChoice == '1':
        myNumber = input("Enter list of numbers to add : ").split(',')
        myNumber = list(map(int, myNumber))
        print(f"Addition => {add(*myNumber)}")
    elif myChoice == '2':
        myNumber = input("Enter list of numbers to Subtract : ").split(',')
        myNumber = list(map(int, myNumber))
        print(f"Subtraction => {sub(*myNumber)}")
    elif myChoice == '3':
        myNumber = input("Enter list of numbers to Multiply : ").split(',')
        myNumber = list(map(int, myNumber))
        print(f"Multiplication => {Multiply(*myNumber)}")
    elif myChoice == '4':
        myNumber = input("Enter list of numbers to Divide : ").split(',')
        myNumber = list(map(int, myNumber))
        print(f"Division => {Divide(*myNumber)}")
| [
"noreply@github.com"
] | noreply@github.com |
d1c4dcce1c4d93f639798a0b8d7b8ac902d9ad74 | 4d90a1f16b90589c893c250d59b3edf2913ebdf2 | /Coding Topics/NestedInteger/Nest weight sum.py | 71875c33f380c741c74581d9f30745ba0a099844 | [] | no_license | cliu0507/CodeForFun | 76cfd63f7eca9e6791be6fdda569d6d1d52c9405 | 5751a5176f86ac03b351d47efb5cfd9c2f6c886d | refs/heads/master | 2021-08-18T09:33:10.046130 | 2021-08-13T17:00:50 | 2021-08-13T17:00:50 | 62,689,547 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15 | py | Nest weight sum | [
"cliu0507@yahoo.com"
] | cliu0507@yahoo.com |
a136fc23e69ebae89afe6c5d639ca6272310d0dc | 3c3fccf2087fb1a7057485fb5c0205ed4c7a5bb2 | /luffy_strak/wsgi.py | 2dfce00ac61a81ab1f45566387632c10632a5968 | [] | no_license | heehoo-ai/stark | 6ae22b4072f92d5ab753242611432837e43ac944 | d0824590dbfa2c7ae7cb8e7fca7ff96a88b4e294 | refs/heads/master | 2022-12-08T06:06:37.203234 | 2020-09-03T08:00:03 | 2020-09-03T08:00:03 | 290,717,157 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for luffy_strak project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'luffy_strak.settings')
application = get_wsgi_application()
| [
"heehoo@qq.com"
] | heehoo@qq.com |
5c5fdc9dd71759badee24f5bc5a70e72ab1d9938 | 78beced1a6a96380077059e3b1520fa6a7faed95 | /compare_classifiers_DATA401/base_model.py | 610411d0ac0e3cc221e3de5669042a57289e4772 | [] | no_license | markellekelly/portfolio | da59e0e39eea4396819d8acb385fac04bfdbf890 | 787bfaf10976af038879aebe21556c7240027c31 | refs/heads/master | 2020-08-31T04:56:43.193459 | 2020-02-10T00:34:02 | 2020-02-10T00:34:02 | 218,595,183 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,326 | py | import abc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from abc import abstractmethod
def _convert_dataframe(x):
"""
Converts x from a Pandas DataFrame to a Numpy ndarray if x is a DataFrame. If x is already a
ndarray, then x is returned
:param x: Input DataFrame
:return: ndarray
"""
try:
if isinstance(x, np.ndarray):
return x
elif isinstance(x, list) or isinstance(x, pd.Series):
return np.asarray(x).reshape((len(x), 1))
else:
return x.to_numpy().reshape(len(x), len(x.columns))
except NameError:
print(f'Invalid input. Input must be either Pandas DataFrame or Numpy ndarray.')
class BaseModel(abc.ABC):
def __init__(self, **kwargs):
# Model Definition
self.coef_ = None
self.intercept_ = None
# Model Parameters
self.method = None
self.learning_rate = None
self.epsilon = None
self.penalty = None
self.max_iters = None
self.max_iters_no_change = None
self.fit_intercept = None
self.num_workers = None
self.loss = None
# Extra metrics
self.errors = []
self.iterations = []
def fit(self, x, y, **kwargs):
"""
This method actually builds the model.
:parameter x - Pandas DataFrame or numpy ndarray containing explanatory variables
:parameter y - Pandas DataFrame or numpy ndarray containing response variables
:parameter method - String that determines the method used to build the model.
"""
x0 = _convert_dataframe(x)
y0 = _convert_dataframe(y)
return self._fit(x0, y0, **kwargs)
@abstractmethod
def _fit(self, x, y, **kwargs):
pass
def generate_2d_plot(self, x, y):
"""
Method used for sanity checking fit functions. It will generate a 2d plot of x and y
with the best fit generated by our fit function.
:parameter x - X values
:parameter y - y values
"""
if self.coef_ is None:
print(f'Model must be fit before a plot can be generated. Please use fit() first.')
domain = pd.DataFrame(np.arange(np.min(x).iloc[0], np.max(x).iloc[0] + 1))
predictions = self.predict(domain)
plt.plot(domain, predictions)
plt.text(np.mean(x).iloc[0], np.max(predictions),
f'y = {np.round(self.intercept_, 3)} + {np.round(self.coef_[0], 3)}x',
horizontalalignment='center',
verticalalignment='center',
bbox=dict(facecolor='blue', alpha=0.25))
plt.title('Line of Best Fit')
plt.xlabel('X')
plt.ylabel('Y')
plt.scatter(x, y)
def plot_error(self):
"""
Plots the error vs iterations of the fit algorithm. Therefore, fit() must be called before this.
:return - None
"""
plt.title('Errors vs. Iterations')
plt.xlabel('Iterations')
plt.ylabel('Error')
plt.plot(self.iterations, self.errors)
def plot_loss(self):
plt.title('Training Loss')
plt.xlabel('Training Iterations')
plt.ylabel('SSE Loss')
plt.plot(self.iterations, self.losses)
def predict(self, x):
"""
Makes predictions based on fit data. This means that fit() must be called before predict()
:parameter x - Pandas DataFrame of data you want to make predictions about.
"""
if self.intercept_ is None or self.coef_ is None:
print(f'Unable to make predictions until the model is fit. Please use fit() first.')
return
elif len(x[0]) != len(self.coef_):
print(f'Column mismatch. Expected(,{len(self.coef_)}) but was {np.shape(x)}')
return
else:
slopes = self.coef_
return [(self.intercept_ + row.dot(slopes)) for row in x]
def score(self, x, y, metric='adj'):
"""
Method for calculating the score of a prediction.
:param x - Pandas DataFrame containing data you want to make predictions about.
:param y - Pandas DataFrame dependent variables.
:param metric - Scoring metric to use. Default is adjusted R^2. Can be one of: 'adj', 'r2', 'aic', 'bic'
"""
x0 = _convert_dataframe(x)
y0 = _convert_dataframe(y).T
predicted = [pred[0] for pred in self.predict(x0)]
if metric == 'adj':
# 1 - (1 - R^2)(n-1/n-k-1)
ssr = ((y0 - predicted) ** 2).sum()
sst = ((y0 - y0.mean()) ** 2).sum()
r2 = 1 - (ssr / sst)
n = len(x0) # Number of observations
p = len(x0[0]) # Number of predictor variables
return 1 - (1 - r2 * (n - 1) / (n - p - 1))
elif metric == 'r2':
ssr = ((y0 - predicted) ** 2).sum()
sst = ((y0 - y0.mean()) ** 2).sum()
return 1 - ssr / sst
elif metric == 'aic':
# Not implemented yet since these rely on MLE
pass
elif metric == 'bic':
pass
else:
print(f'Unsupported score metric: {metric}')
pass
@abstractmethod
def _loss(self, x, y):
pass
| [
"mkelly23@calpoly.edu"
] | mkelly23@calpoly.edu |
d09ba860d2dd4bfc110801c221ab765641fde85a | b1ca83526e516b173adac193fb988f66f14ed65d | /halotools_ia/correlation_functions/tests/test_ed_3d.py | 30d36c4fe39c51678989b1ac0a60bfb956ea8513 | [
"BSD-3-Clause"
] | permissive | nehapjoshi/halotools_ia | a520149ce208e9b5b2bba10d1628115de9190b70 | 36b228bf788354fac692cb4d55d32bfd4e39a998 | refs/heads/master | 2023-06-13T00:17:31.123130 | 2020-06-08T19:09:03 | 2020-06-08T19:09:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,070 | py | """
Module providing unit-testing for the `~halotools.mock_observables.alignments.w_gplus` function.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import warnings
import pytest
from astropy.utils.misc import NumpyRNGContext
from ..ed_3d import ed_3d
from halotools.custom_exceptions import HalotoolsError
slow = pytest.mark.slow
__all__ = ('test_shape', 'test_threading', 'test_pbcs', 'test_random_result')
fixed_seed = 43
def test_shape():
"""
make sure the result that is returned has the correct shape
"""
ND = 1000
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((ND, 3))
random_orientation = np.random.random((len(sample1), 3))*2 - 1.0
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
# analytic randoms
result_1 = ed_3d(sample1, random_orientation, sample1,
rbins, period=period, num_threads=1)
assert np.shape(result_1) == (len(rbins)-1, )
result_2 = ed_3d(sample1, random_orientation, sample1,
rbins, period=period, num_threads=3)
assert np.shape(result_2) == (len(rbins)-1, )
def test_threading():
"""
test to make sure the results are consistent when num_threads=1 or >1
"""
ND = 1000
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((ND, 3))
random_orientation = np.random.random((len(sample1), 3))*2 - 1.0
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
result_1 = ed_3d(sample1, random_orientation, sample1,
rbins, period=period, num_threads=1)
result_2 = ed_3d(sample1, random_orientation, sample1,
rbins, period=period, num_threads=3)
assert np.allclose(result_1, result_2)
def test_pbcs():
"""
test to make sure the results are consistent with and without PBCs
"""
ND = 1000
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((ND, 3))
random_orientation = np.random.random((len(sample1), 3))*2 - 1.0
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
result_1 = ed_3d(sample1, random_orientation, sample1,
rbins, period=period, num_threads=1)
result_2 = ed_3d(sample1, random_orientation, sample1,
rbins, period=None, num_threads=1)
tol = 10.0/ND
assert np.allclose(result_1, result_2, atol=tol)
@slow
def test_random_result():
"""
test to make sure the correlation function returns the expected result for a random distribution of orientations
"""
ND = 1000
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((ND, 3))
sample2 = np.random.random((ND, 3))
random_orientation1 = np.random.random((len(sample1), 3))*2 - 1.0
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
result_1 = ed_3d(sample1, random_orientation1, sample2,
rbins, period=period, num_threads=1)
tol = 10.0/ND
assert np.allclose(result_1, 0.0, atol=tol)
| [
"duncan@donut-hole.local"
] | duncan@donut-hole.local |
683ce542c7bd81520833cab65d8c4764426527d6 | a90d3018fc1ca827c79c169259b9b071c4548864 | /Python_edition/frame2d_eg/frame2d_ele_info.py | c0dc94ada2d471fe98f5d8414ad78c1dbde6a45a | [] | no_license | yiphon/FEM_code | 706958c1784a25392134b06d9fbabfb373a8ae99 | 2c9cabc1dd3a37efc418d5ecdcb71a04374ce6f1 | refs/heads/master | 2020-03-16T15:58:31.591619 | 2018-05-25T02:56:00 | 2018-05-25T02:56:00 | 132,766,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | import numpy as np
from math import *
def frame2d_ele_info(node_coordinate):
#利用各单元对应节点坐标,完成ele_info矩阵。
eles = node_coordinate.shape[1]
a = np.zeros((3, eles))
for i in range(eles):
x1 = node_coordinate[0, i]
y1 = node_coordinate[1, i]
x2 = node_coordinate[2, i]
y2 = node_coordinate[3, i]
l = sqrt(pow((x2 - x1),2) + pow((y2 - y1) ,2))
a[0, i] = l
c = (x2 - x1) / l
s = (y2 - y1) / l
a[1, i] = c
a[2, i] = s
return a | [
"noreply@github.com"
] | noreply@github.com |
8d1fa11057a3eb756835e5592f14169c1e60d7ef | 12aed04fd2bcedb7405c8fc70e4312907fdb30b5 | /comparator/rms_final_comparator.py | bd6abd124f86f3ef91ff6e52cd377021e809e9f9 | [] | no_license | rogerdenisvieira/myoplotter | 8387f99e01c473d9d72ef2b09374e3586514d17c | 64bd2d82b02cc84aebe779aaea1ff6a565843a96 | refs/heads/master | 2020-03-22T22:36:57.543475 | 2019-06-27T13:18:42 | 2019-06-27T13:18:42 | 140,761,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | import matplotlib.pyplot as plt
import numpy as np
from numpy import mean
from comparator.base_comparator import BaseComparator
DPI = 100
class RMSFinalComparator(BaseComparator):
def __init__(self):
BaseComparator.__init__(self)
pass
def compare(self, rep_rms_list, cvm_rms_list, output_path, person_name, show_charts, save_charts):
print('Calculating mean cvm signal by channel')
ch4_mean_cvm = mean([cvm_rms_list[0][1], cvm_rms_list[1][1], cvm_rms_list[2][1]])
ch5_mean_cvm = mean([cvm_rms_list[0][2], cvm_rms_list[1][2], cvm_rms_list[2][2]])
ch6_mean_cvm = mean([cvm_rms_list[0][3], cvm_rms_list[1][3], cvm_rms_list[2][3]])
ch7_mean_cvm = mean([cvm_rms_list[0][4], cvm_rms_list[1][4], cvm_rms_list[2][4]])
print('Calculating mean repetition signal by channel')
ch4_mean_rep = mean([rep_rms_list[0][2], rep_rms_list[1][2], rep_rms_list[2][2]])
ch5_mean_rep = mean([rep_rms_list[0][3], rep_rms_list[1][3], rep_rms_list[2][3]])
ch6_mean_rep = mean([rep_rms_list[0][4], rep_rms_list[1][4], rep_rms_list[2][4]])
ch7_mean_rep = mean([rep_rms_list[0][5], rep_rms_list[1][5], rep_rms_list[2][5]])
n = 4
ind = np.arange(n)
width = 0.15
plt.bar(
ind,
(ch4_mean_cvm, ch5_mean_cvm, ch6_mean_cvm, ch7_mean_cvm),
width,
label='CVM'
)
plt.bar(ind + width,
(ch4_mean_rep, ch5_mean_rep, ch6_mean_rep, ch7_mean_rep),
width,
label='Repetição')
plt.xticks(ind + width, ('CH4', 'CH5', 'CH6', 'CH7'))
plt.legend(loc='best')
plt.title('Médias RMS CVM e Repetição')
plt.xlabel('Canais')
plt.ylabel('RMS')
plt.grid()
if save_charts:
plt.savefig('{}/[BAR]_Final_{}'.format(output_path, person_name), dpi=DPI)
plt.clf()
if show_charts:
plt.show()
| [
"roger_v@dell.com"
] | roger_v@dell.com |
6b13c73368d607436536d7bb16a10162a3ac9dc2 | 1c9fadd68fe1a22d873c9b3d9d50e54ca67614cb | /plaid_integration/urls.py | e823bbb4a90aa8a6923cac0758ca97bb57a7c833 | [] | no_license | vijayhk94/plaid_integration | aacda2c350d58710d9f6bb2c7429c41d998bcfb7 | 266ab0d6f9c9ba799ab773d40d6312138b66c16a | refs/heads/master | 2022-11-28T17:03:07.378996 | 2020-08-06T11:44:09 | 2020-08-06T11:44:09 | 285,233,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | """plaid_integration URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework.routers import SimpleRouter
from plaid_integration.Item.views import ItemViewSet
router = SimpleRouter()
router.register(r'item', ItemViewSet, basename='item')
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('rest_auth.urls')),
path('accounts/signup/', include('rest_auth.registration.urls')),
path('', include(router.urls))]
| [
"vijay.kahalekar@belong.co"
] | vijay.kahalekar@belong.co |
3767e3ce1ee0d228ed60b849ea6b096818fdab1b | 764b0e8f4175661c1e26d4d2a1f8a70ecf2899b9 | /Find the Runner-up score HackerRank/Find the Runner-up Score.py | 1d6272c6e96875205071f932ee514626050ad3fb | [] | no_license | Abishek-Git/HackerRank | 5a427a2ce6d287c738f1935954ec952b7771dcb9 | 436d58b277f8ecb5cb65ec1f950717e4d45a2570 | refs/heads/main | 2023-07-23T09:57:03.269478 | 2021-08-30T13:48:06 | 2021-08-30T13:48:06 | 397,639,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
Given the participants' score sheet for your University Sports Day,
you are required to find the runner-up score. You are given scores.
Store them in a list and find the score of the runner-up.
"""
total = int(input())
marks = list(map(int, input().split()))
marks.sort()
runner = 0
for i in marks[::-1]:
if(i < marks[-1]):
runner = i
break
print(i)
| [
"noreply@github.com"
] | noreply@github.com |
745598179a1f4f9fc81a7259c54c7e70776b17c7 | 7083f3e6121a63b0c21e15d8b23c32067d7e4e5e | /scripts/debugging/test_data_utils.py | 471d21749014c77fe08ae8e78ca5ffb1b5c2ab15 | [] | no_license | nathanin/milk | e46317cf0d1b2162fd301a144f5aa3b889cf5d27 | 9afb3b01715a4f65a03b7cd45dcd121745b117f8 | refs/heads/master | 2023-02-22T05:23:06.850961 | 2019-10-26T19:08:20 | 2019-10-26T19:08:20 | 154,750,122 | 2 | 0 | null | 2023-02-15T21:36:19 | 2018-10-25T23:29:33 | Jupyter Notebook | UTF-8 | Python | false | false | 79 | py | import sys
sys.path.insert(0, '../..')
from milk.utilities import data_utils
| [
"ing.nathany@gmail.com"
] | ing.nathany@gmail.com |
978111f2aee347cc7bb6b68d5008784cbd0cfd2e | 9b286a676f3db10da382ef165b747766cad903d5 | /02.Programming Fundamentals/11.text-processing/emoticon_finder.py | 3b8f332d0214ca4e970910d11dab24ff8b53c9d4 | [] | no_license | moshu1337/SoftUni | 77213cd12fce989557fb3891ce0befb53a6a85e9 | 262351b9def88a2810c7fc0d05d161671a0a89e9 | refs/heads/master | 2022-12-01T13:50:40.785701 | 2020-08-19T18:18:22 | 2020-08-19T18:18:22 | 281,163,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | emoticon = ':'
text = input()
for char in range(len(text)):
if text[char] == emoticon:
if text[char + 1] != " ":
print(f":{text[char + 1]}")
| [
"68555478+moshu1337@users.noreply.github.com"
] | 68555478+moshu1337@users.noreply.github.com |
9fb17ce7b6fb0a7b73112825f591381e23c30c80 | fe70774ff6898c5bdb0c941b4f335de576abfdb6 | /autotest/test_flopy_io.py | bb09cd2207661c1b0258d7feb56b3d6788f12990 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | robinthibaut/flopy | 35af468415d1ba6e1de119a7cb335381304fada9 | 22ef330bcfb9259fc23735d6b174d27804b624a0 | refs/heads/develop | 2023-06-30T21:43:24.101593 | 2023-06-13T19:46:03 | 2023-06-13T19:46:03 | 255,560,877 | 0 | 0 | BSD-3-Clause | 2022-10-10T12:23:38 | 2020-04-14T09:05:42 | null | UTF-8 | Python | false | false | 3,153 | py | import os
import platform
from os import getcwd
from os.path import relpath, splitdrive
from pathlib import Path
from shutil import which
import pytest
from modflow_devtools.markers import requires_exe
from modflow_devtools.misc import set_dir
from flopy.utils.flopy_io import line_parse, relpath_safe
def test_line_parse():
"""t027 test line_parse method in MNW2 Package class"""
# ensure that line_parse is working correctly
# comment handling
line = line_parse("Well-A -1 ; 2a. WELLID,NNODES")
assert line == ["Well-A", "-1"]
@requires_exe("mf6")
@pytest.mark.parametrize("scrub", [True, False])
@pytest.mark.parametrize("use_paths", [True, False])
def test_relpath_safe(function_tmpdir, scrub, use_paths):
if (
platform.system() == "Windows"
and splitdrive(function_tmpdir)[0] != splitdrive(getcwd())[0]
):
if use_paths:
assert (
Path(relpath_safe(function_tmpdir))
== function_tmpdir.absolute()
)
assert relpath_safe(Path(which("mf6"))) == str(
Path(which("mf6")).absolute()
)
else:
assert (
Path(relpath_safe(str(function_tmpdir)))
== function_tmpdir.absolute()
)
assert relpath_safe(which("mf6")) == str(
Path(which("mf6")).absolute()
)
else:
if use_paths:
assert Path(
relpath_safe(function_tmpdir, function_tmpdir.parent)
) == Path(function_tmpdir.name)
assert (
Path(
relpath_safe(
function_tmpdir, function_tmpdir.parent.parent
)
)
== Path(function_tmpdir.parent.name) / function_tmpdir.name
)
assert relpath_safe(Path(which("mf6"))) == relpath(
Path(which("mf6")), Path(getcwd())
)
else:
assert Path(
relpath_safe(str(function_tmpdir), str(function_tmpdir.parent))
) == Path(function_tmpdir.name)
assert (
Path(
relpath_safe(
str(function_tmpdir),
str(function_tmpdir.parent.parent),
)
)
== Path(function_tmpdir.parent.name) / function_tmpdir.name
)
assert relpath_safe(which("mf6")) == relpath(
which("mf6"), getcwd()
)
# test user login obfuscation
with set_dir("/"):
try:
login = os.getlogin()
if use_paths:
p = relpath_safe(Path.home(), scrub=scrub)
else:
p = relpath_safe(str(Path.home()), scrub=scrub)
if login in str(Path.home()) and scrub:
assert "***" in p
assert login not in p
except OSError:
# OSError is possible in CI, e.g. 'No such device or address'
pass
| [
"noreply@github.com"
] | noreply@github.com |
ecab9c466f0cd944b8390cc5ff0302f9d8b7cbd3 | 7a1b0ac96ab98666ba29092c12378aca583e090e | /Content/Jay's Oled+Ultrasonic Sensor.py | c37979a1f0493c767b173a80421d3f807c674498 | [] | no_license | wendahere/JAWS | 12a2e571996c7b6ff81ebf13a8fc37c62a6e161b | 303800c42fa159bbb969eba475b99f49cc65dd16 | refs/heads/master | 2021-07-17T03:16:05.307792 | 2020-10-01T08:03:36 | 2020-10-01T08:03:36 | 215,438,213 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | import machine
import sys
import time
from machine import Pin
import utime
from machine import Pin, I2C
import ssd1306
from time import sleep
trig=machine.Pin(25, machine.Pin.OUT) #Purple wire
echo=machine.Pin(26, machine.Pin.IN) #Blue wire
jaws_state="Stop" #Starting position is Stop, this will be reflected on the ESP html page
#Transducer portion
def depth(): #default is air
t1=0
t2=0
trig.off() #//stop reading
utime.sleep_us(2)
trig.on()
utime.sleep_us(10)
trig.off()
while echo.value() == 0:
pass
t1 = utime.ticks_us()
while echo.value() == 1:
pass
t2 = utime.ticks_us()
cm = ((t2 - t1)*(340/10000))/2;
print(cm)
utime.sleep(2)
return cm
cm = depth() #Run transducer once
def depthwood(): #wood depth
t1=0
t2=0
#Transducer portion
trig.off() #//stop reading
utime.sleep_us(2)
trig.on()
utime.sleep_us(10)
trig.off()
while echo.value() == 0:
pass
t1 = utime.ticks_us()
while echo.value() == 1:
pass
t2 = utime.ticks_us()
cmw = ((t2 - t1)*(12.25/10000))/2;
print(cmw)
utime.sleep(2)
return cmw
def depthice(): #ice depth
t1=0
t2=0
#Transducer portion
trig.off() #//stop reading
utime.sleep_us(2)
trig.on()
utime.sleep_us(10)
trig.off()
while echo.value() == 0:
pass
t1 = utime.ticks_us()
while echo.value() == 1:
pass
t2 = utime.ticks_us()
cmi = ((t2 - t1)*(4000/10000))/2;
print(cmi)
utime.sleep(2)
return cmi
i2c = I2C(-1, scl=Pin(22), sda=Pin(21))#SDA Yellow & SCL White; 3.3V Red & Gnd Black
oled_width = 128
oled_height = 64
oled = ssd1306.SSD1306_I2C(oled_width, oled_height, i2c)
cm = round(depth())
def display():
oled.text(' '+ str(cm), 0, 0)
oled.text('NOT SAFE', 0, 10)
oled.text('NOT SAFE', 0, 20)
oled.text('NOT SAFE', 0, 30)
oled.show()
display()
| [
"noreply@github.com"
] | noreply@github.com |
efb162f0cd66acb5dedda151c7f0b21528f0fffe | bee7dc3fd989fa3b7d79a1a4d58255d8a2019cb9 | /categories/utils.py | 8b372ce17ce801dd7fb730195e8c80f5abdec229 | [] | no_license | philipp94831/mmds | c7a10dabc204b6c9f640c508508068ef298645a8 | 6d1ae5776996c4235a3c9a517286d72eeaf6237a | refs/heads/master | 2020-04-06T07:06:45.144039 | 2016-08-31T08:43:12 | 2016-08-31T08:43:12 | 59,962,975 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | import sys
def uprint(*objects, sep=' ', end='\n', file=sys.stdout):
enc = file.encoding
if enc == 'UTF-8':
print(*objects, sep=sep, end=end, file=file)
else:
f = lambda obj: str(obj).encode(enc, errors='backslashreplace').decode(enc)
print(*map(f, objects), sep=sep, end=end, file=file)
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix) | [
"richard.trebichavsky@gmail.com"
] | richard.trebichavsky@gmail.com |
06746b9cdd028e85cf95b1206bf2464f527454c1 | f5533537b9e1b79afe8f034ebcd453bfe8adab5d | /0x07-python-test_driven_development/6-main.py | 9f21b3c501094ffe4261da7c20e7fca8c21d9010 | [] | no_license | Matteo-lu/holbertonschool-higher_level_programming | 99d06bf4ed6b4ca770a45d30e3158a18ad7d5a0b | 77311f452e8d62145b5e7afe151557ed7a6d210a | refs/heads/main | 2023-09-03T09:55:12.144861 | 2021-09-25T14:47:47 | 2021-09-25T14:47:47 | 361,899,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | #!/usr/bin/python3
max_integer = __import__('6-max_integer').max_integer
print(max_integer([1.1, 2.2, 4.4, 6.6]))
print(max_integer([1, 3, 4, 2]))
| [
"mateolondono.u@gmail.com"
] | mateolondono.u@gmail.com |
d9642c6dc06e274de14cd94b6e2fc5eaadf3a6fb | a98f696107817c68f058ff4148276372078940e0 | /BERT/src/dataset.py | 8005aefa81ec9c3f7210bb6a58922763c81eba29 | [] | no_license | MiuLab/CQA-Study | 90d57e4356d39cc9b82039dd544149fdf25c8866 | 4a8f72dd43c5cf1937883336670180b6a11e4762 | refs/heads/master | 2022-04-01T17:46:21.066198 | 2020-02-03T12:38:52 | 2020-02-03T12:38:52 | 222,895,519 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,763 | py | import numpy as np
import torch
from torch.utils.data import Dataset
class QuACDataset(Dataset):
def __init__(self, data, padding=0,
context_truncated_len=400,
utterance_truncated_len=100):
self.padding = padding
self.context_truncated_len = context_truncated_len
self.turn_truncated_len = utterance_truncated_len
self.data = []
for sample in data:
for paragraph in sample['paragraphs']:
for i, qa in enumerate(paragraph['qas']):
self.data.append({
'id': qa['id'],
'context_raw': paragraph['context_raw'],
'context_tokenized': paragraph['context_tokenized'],
'context': paragraph['context'],
'context_offset': paragraph['context_offset'],
'context_len': len(paragraph['context']),
'followup': 'mny'.index(qa['followup']),
'yesno': 'xny'.index(qa['yesno']),
'questions_tokenized': [
qa['question_tokenized']
for qa in paragraph['qas'][:i + 1]
],
'questions': [
qa['question']
for qa in paragraph['qas'][:i + 1]
],
'question_lens': [
len(qa['question'])
for qa in paragraph['qas'][:i + 1]
],
'answers_raw': [paragraph['qas'][i]['orig_answer_raw']],
'answers': [
qa['orig_answer_text']
for qa in paragraph['qas'][:i + 1]
],
'answer_lens': [
len(qa['orig_answer_text'])
for qa in paragraph['qas'][:i + 1]
],
'answer_spans': [
[qa['orig_answer_start'], qa['orig_answer_end']]
for qa in paragraph['qas'][:i + 1]
],
'answer_indicator': qa['answer_indicator'],
'turn_offset': i
})
if 'answers' in qa:
self.data[-1]['answers_raw'] += \
[ans['raw'] for ans in qa['answers']]
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def collate_fn(self, samples):
samples = [s for s in samples
if s['answer_spans'][-1][0] < self.context_truncated_len
and s['answer_spans'][-1][1] < self.context_truncated_len]
batch = {}
for key in ['id', 'context_raw', 'context_offset', 'context_len',
'answers_raw', 'answers', 'answer_lens', 'answer_spans',
'question_lens', 'questions', 'turn_offset', 'context_tokenized',
'questions_tokenized']:
if key == 'answers' and (len(samples) == 0 or key not in samples[0]):
continue
batch[key] = [sample[key]
for sample in samples]
for key in ['followup', 'yesno']:
batch[key] = torch.tensor(
[sample[key]
for sample in samples]
).long()
for key in ['context', 'answer_indicator']:
# compatible with older code
if key in ['answer_indicator'] and key not in samples[0]:
continue
batch[key] = torch.tensor([
pad_to_len(
sample[key],
min(
self.context_truncated_len,
max(batch['context_len'])
),
self.padding
)
for sample in samples
]).long()
for key in ['context_tokenized']:
batch[key] = [sample['context_tokenized'][:self.context_truncated_len]
for sample in samples]
return batch
def pad_to_len(seq, padded_len, padding=0):
""" Pad sequences to min(max(lens), padded_len).
Sequence will also be truncated if its length is greater than padded_len.
Args:
seqs (list)
padded_len (int)
padding (int)
Return:
seqs (list)
"""
padded = [padding] * padded_len
n_copy = min(len(seq), padded_len)
padded[:n_copy] = seq[:n_copy]
return padded
| [
"b03902072@ntu.edu.tw"
] | b03902072@ntu.edu.tw |
4371051b460fbdb7f7e35435ddd12876a32f7a6e | 21b0b4c27193898207751c91b8b2ed168a1b1638 | /py/py_0198_ambiguous_numbers.py | ca312ac6f6f03c67908c6bd6ae8705a25e557c7b | [
"MIT"
] | permissive | lcsm29/project-euler | 67560a4e66968f1671a3d7ecf2dda6c956893dca | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | refs/heads/main | 2023-07-04T11:45:24.374841 | 2021-08-07T08:20:41 | 2021-08-07T08:20:41 | 371,808,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | # Solution of;
# Project Euler Problem 198: Ambiguous Numbers
# https://projecteuler.net/problem=198
#
# A best approximation to a real number $x$ for the denominator bound $d$ is a
# rational number $\frac r s$ (in reduced form) with $s \le d$, so that any
# rational number $\frac p q$ which is closer to $x$ than $\frac r s$ has $q >
# d$. Usually the best approximation to a real number is uniquely determined
# for all denominator bounds. However, there are some exceptions, e. g. $\frac
# 9 {40}$ has the two best approximations $\frac 1 4$ and $\frac 1 5$ for the
# denominator bound $6$. We shall call a real number $x$ ambiguous, if there
# is at least one denominator bound for which $x$ possesses two best
# approximations. Clearly, an ambiguous number is necessarily rational. How
# many ambiguous numbers $x=\frac p q, 0 < x < \frac 1 {100}$, are there whose
# denominator $q$ does not exceed $10^8$?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 198
timed.caller(dummy, n, i, prob_id)
| [
"lcsm29@outlook.com"
] | lcsm29@outlook.com |
cac2318a8b307ad741c58dda75e970b204bed67a | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_4/bgrtej001/piglatin.py | 8a3ae80180102da97b97c2eee4594a3e8512b2c3 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | #Tejasvin Bagirathi
#Assignment 4, Question 3
def toPigLatin(s):
wrdno = 1
new = ""
for i in range(len(s)):
if s[i] == " ":
wrdno+=1
string = s.split(" ")
for i in range(wrdno):
wrd = string[i]
#If word starts with vowel
if wrd[0] in "aeiou":
wrd = wrd + "way"
if wrdno == i:
new += wrd
else:
new += wrd + " "
else:
k = 0
for c in wrd[:]:
if c not in "aeiou":
k+=1
else: break
wrd = wrd[k:len(wrd)] + "a" + wrd[0:k] + "ay"
if wrdno == i:
new += wrd
else:
new += wrd + " "
return new
def toEnglish(s):
sentence=s.split()
newsentence=""
for word in range(len(sentence)):
if sentence[word][-3:]=="way":
newsentence+=sentence[word][:-3]+" "
elif sentence[word][-2:]=="ay":
nWord=sentence[word][:-2]
aPos=nWord.rfind("a")
newsentence+=nWord[aPos+1:]+nWord[:aPos]+" "
return(newsentence)
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
0ea8b20ca248bdf1880742e069342724e1b07a53 | 21f01e2a697f1ec37d49934a46084882d700d9f8 | /nhom-3/source/item/models.py | 2b1731a0c869b9c32b2318dfa252fc6e856cfeb0 | [] | no_license | trunganhvu/INT2208-8-2019 | 362b1238e8568876fa0b09cfccfac0b3edb47225 | 58867a3844cbc2633516082f8dae198523b76d2d | refs/heads/master | 2020-04-28T08:57:21.570832 | 2019-05-12T17:58:07 | 2019-05-12T17:58:07 | 170,449,725 | 1 | 0 | null | 2019-02-13T06:01:43 | 2019-02-13T06:01:40 | null | UTF-8 | Python | false | false | 563 | py | from django.db import models
from django.contrib.auth.models import User
from user.models import Restaurant
# Create your models here.
class Comment(models.Model):
userId = models.OneToOneField(User, on_delete=models.CASCADE)
content = models.CharField
def __str__(self):
return content
class Item(models.Model):
name = models.CharField(max_length=50)
restaurant_id = models.ForeignKey(Restaurant, on_delete=models.CASCADE)
price = models.IntegerField
rating = models.FloatField
def __str__(self):
return name
| [
"conglb@yahoo.com"
] | conglb@yahoo.com |
80581b80f9d902d4e72466d2931fc4f87459aa48 | e672677f921d11f59c6a83dcbb8b947330730c5d | /source/incqueryserver-jupyter/iqs_jupyter/mms_extensions.py | 3d23072c7e2f4ddde1ad7f11044a8666dca919d1 | [
"Apache-2.0"
] | permissive | IncQueryLabs/incquery-server-jupyter | 4e1cc36c2643c5ba324e2f98618a5d4912bbe6bf | 40072eb8cdfd60ba36783faf5bef4f88e5342f8a | refs/heads/master | 2023-08-17T01:10:06.501516 | 2021-10-21T09:51:41 | 2021-10-21T09:51:41 | 187,173,984 | 2 | 1 | Apache-2.0 | 2023-02-02T08:43:24 | 2019-05-17T08:04:37 | Python | UTF-8 | Python | false | false | 10,410 | py | # Copyright 2019 IncQuery Labs Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Created on 24 May 2019

@author: Gabor Bergmann
'''
from IPython.display import display
import iqs_client
import iqs_jupyter.config_defaults as defaults
import iqs_jupyter.tool_extension_point as ext_point
class MMCCommitSelectorWidget:
def __init__(
self,
iqs,
initial_org = defaults.default_mms_org,
initial_project = defaults.default_mms_project,
initial_ref = defaults.default_mms_ref,
initial_commit = defaults.default_mms_commit,
auto_display=True
):
self.iqs = iqs
self.org_widget = widgets.Dropdown(description='')
self.project_widget = widgets.Dropdown(description='')
self.ref_widget = widgets.Dropdown(description='')
self.commit_widget = widgets.Dropdown(description='')
self.box = widgets.HBox([
widgets.VBox([widgets.Label(value="org"), widgets.Label(value="project"), widgets.Label(value="ref"),
widgets.Label(value="commit")]),
widgets.VBox([self.org_widget, self.project_widget, self.ref_widget, self.commit_widget])
])
self.fetch_contents()
def org_handler(change):
new_idx = change['new']
old_idx = change['old']
owner = change['owner']
if (new_idx == old_idx):
pass
elif (not new_idx):
self._reset_project()
elif (owner.disabled):
pass
else:
label, _id = owner.options[new_idx] # @UnusedVariable
org = self.org_map[_id]
self._setup_project(org.projects)
def project_handler(change):
new_idx = change['new']
old_idx = change['old']
owner = change['owner']
if (new_idx == old_idx):
pass
elif (not new_idx):
self._reset_ref()
elif (owner.disabled):
pass
else:
label, _id = self.project_widget.options[new_idx] # @UnusedVariable
project = self.project_map[_id]
self._setup_ref(project.refs)
def ref_handler(change):
new_idx = change['new']
old_idx = change['old']
owner = change['owner']
if (new_idx == old_idx):
pass
elif (not new_idx):
self._reset_commit()
elif (owner.disabled):
pass
else:
label, _id = self.ref_widget.options[new_idx] # @UnusedVariable
ref = self.ref_map[_id]
self._setup_commit(ref.commits)
self.org_widget.observe(names='index', handler=org_handler)
self.project_widget.observe(names='index', handler=project_handler)
self.ref_widget.observe(names='index', handler=ref_handler)
if initial_org:
self.org_widget.value = initial_org
if initial_project:
self.project_widget.value = initial_project
if initial_ref:
self.ref_widget.value = initial_ref
if initial_commit:
self.commit_widget.value = initial_commit
if auto_display: self.display()
def _reset_project(self):
self.project_map = None
self.project_widget.disabled = True
self.project_widget.options = [('', None)]
self.project_widget.index = 0
# self._reset_ref()
def _reset_ref(self):
self.ref_map = None
self.ref_widget.disabled = True
self.ref_widget.options = [('', None)]
self.ref_widget.index = 0
# self._reset_commit()
def _reset_commit(self):
self.commit_map = None
self.commit_widget.disabled = True
self.commit_widget.options = [('', None)]
self.commit_widget.index = 0
def _setup_org(self, org_list):
import collections
org_list_sorted = sorted(org_list, key=lambda org: org.name)
self.org_map = collections.OrderedDict(
[(org.org_id, org) for org in org_list_sorted])
self.org_widget.options = [('---- Select org', None)] + [
(
"{} (ID: {})".format(org.name, _id),
_id
)
for _id, org in self.org_map.items()
]
self.org_widget.index = 0
self.org_widget.disabled = False
# self._reset_project()
def _setup_project(self, project_list):
import collections
project_list_sorted = sorted(project_list, key=lambda project: project.name)
self.project_map = collections.OrderedDict([(project.project_id, project) for project in project_list_sorted])
self.project_widget.options = [('---- Select project', None)] + [
(
"{} (ID: {})".format(project.name, _id),
_id
)
for _id, project in self.project_map.items()
]
self.project_widget.index = 0
self.project_widget.disabled = False
# self._reset_ref()
def _setup_ref(self, ref_list):
import collections
ref_list_sorted = sorted(ref_list, key=lambda ref: ref.name)
self.ref_map = collections.OrderedDict([(ref.ref_id, ref) for ref in ref_list_sorted])
self.ref_widget.options = [('---- Select ref', None)] + [
(
"{} (ID: {})".format(ref.name, _id),
_id
)
for _id, ref in self.ref_map.items()
]
self.ref_widget.index = 0
self.ref_widget.disabled = False
# self._reset_commit()
def _setup_commit(self, commit_list):
import collections
commit_list_sorted = sorted(commit_list, key=lambda commit: commit['name'], reverse=True)
self.commit_map = collections.OrderedDict([(commit['commitId'], commit) for commit in commit_list_sorted])
self.commit_widget.options = [('---- Select commit', None)] + [
(
"{} (ID: {})".format(commit['name'], _id),
_id
)
for _id, commit in self.commit_map.items()
]
self.commit_widget.index = 0
self.commit_widget.disabled = False
def fetch_contents(self):
repo_info = self.iqs.mms_repository.get_mms_repository_info()
org_list = repo_info.repository_structure.orgs
self._setup_org(org_list)
def display(self):
display(self.box)
def _repr_html_(self):
self.display()
def value(self):
if (self.commit_widget.index != 0 and
self.ref_widget.index != 0 and
self.project_widget.index != 0 and
self.org_widget.index != 0
):
return iqs_client.MMSCommitDescriptor.from_fields(
name=self.commit_map[self.commit_widget.value]['name'],
commit_id=self.commit_widget.value,
ref_id=self.ref_widget.value,
project_id=self.project_widget.value,
org_id=self.org_widget.value
)
else:
return None
def value_as_model_compartment(self):
value = self.value()
if value:
return value.to_model_compartment()
else:
return None
# monkey patch section
def _monkey_patch_static_mms_commit_descriptor_from_compartment_uri_or_none(compartment_uri):
    """Parse an mms-index compartment URI into an MMSCommitDescriptor.

    Expected shape:
    ``mms-index:/orgs/<org>/projects/<project>/refs/<ref>/commits/<commit>``.
    Returns None for any URI that does not match that shape exactly.
    """
    parts = compartment_uri.split('/')
    if len(parts) == 9:
        markers = (parts[0], parts[1], parts[3], parts[5], parts[7])
        if markers == ("mms-index:", "orgs", "projects", "refs", "commits"):
            return iqs_client.MMSCommitDescriptor(
                org_id=parts[2],
                project_id=parts[4],
                ref_id=parts[6],
                commit_id=parts[8],
                compartment_uri=compartment_uri,
            )
    return None
def _monkey_patch_mms_commit_to_compartment_uri(self):
    """Render this MMSCommitDescriptor's four ids as an mms-index compartment URI."""
    path_pairs = (
        ("orgs", self.org_id),
        ("projects", self.project_id),
        ("refs", self.ref_id),
        ("commits", self.commit_id),
    )
    return "mms-index:" + "".join("/{}/{}".format(kind, value) for kind, value in path_pairs)
def _mms_compartment_uri(org_id, project_id, ref_id, commit_id):
    """Build the canonical mms-index compartment URI from the four MMS ids."""
    return f"mms-index:/orgs/{org_id}/projects/{project_id}/refs/{ref_id}/commits/{commit_id}"
def _monkey_patch_static_mms_commit_descriptor_from_fields(org_id, project_id, ref_id, commit_id, name = None):
    """Construct an MMSCommitDescriptor from its ids, deriving the compartment URI."""
    uri = _mms_compartment_uri(org_id, project_id, ref_id, commit_id)
    return iqs_client.MMSCommitDescriptor(
        org_id=org_id,
        project_id=project_id,
        ref_id=ref_id,
        commit_id=commit_id,
        name=name,
        compartment_uri=uri,
    )
def _monkey_patch_mms_commit_to_model_compartment(self):
    # Wrap this descriptor's compartment URI in a ModelCompartment handle.
    return iqs_client.ModelCompartment(compartment_uri=self.to_compartment_uri())
def _monkey_patch_jupytertools_mms_commit_selector_widget(self, **kwargs):
    # Convenience factory on IQSJupyterTools: open a commit selector bound to
    # its client. NOTE(review): relies on the private _iqs attribute.
    return MMCCommitSelectorWidget(iqs=self._iqs, **kwargs)
def _do_monkey_patching():
    # Attach the helpers above to the generated iqs_client classes and the
    # extension point, so API objects gain URI conversion and widget creation.
    iqs_client.MMSCommitDescriptor.from_compartment_uri_or_none = staticmethod(_monkey_patch_static_mms_commit_descriptor_from_compartment_uri_or_none)
    iqs_client.MMSCommitDescriptor.from_fields = staticmethod(_monkey_patch_static_mms_commit_descriptor_from_fields)
    iqs_client.MMSCommitDescriptor.to_compartment_uri = _monkey_patch_mms_commit_to_compartment_uri
    iqs_client.MMSCommitDescriptor.to_model_compartment = _monkey_patch_mms_commit_to_model_compartment
    ext_point.IQSJupyterTools.mms_commit_selector_widget = _monkey_patch_jupytertools_mms_commit_selector_widget
# Applied at import time: importing this module activates the patches.
_do_monkey_patching()
| [
"gabor.bergmann@incquerylabs.com"
] | gabor.bergmann@incquerylabs.com |
929415e28cd27f08856ade898c069d066e6a7851 | 80000409c75d68f15f04d10075df7abae6b1c244 | /stamper_targets/Wedge100B65/bfrt_grpc/bfruntime_pb2_grpc.py | 0178b1671a5179a99f03ff074ff10281f3401dcb | [
"Apache-2.0"
] | permissive | ralfkundel/P4STA | a778ca1b9bbe2f4063da5fa25affec90c6d7ca85 | a9fea4d7e48de05e17b9da14e5c31455a9f00f9d | refs/heads/master | 2023-07-21T18:37:57.558802 | 2023-04-13T11:58:42 | 2023-04-13T11:58:42 | 186,822,141 | 27 | 7 | Apache-2.0 | 2023-07-05T21:30:57 | 2019-05-15T12:30:40 | JavaScript | UTF-8 | Python | false | false | 9,014 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import bfruntime_pb2 as bfruntime__pb2
class BfRuntimeStub(object):
    """Missing associated documentation comment in .proto file."""
    # NOTE(review): machine-generated gRPC stub (file header says DO NOT EDIT);
    # changes belong in the .proto file / generator invocation, not here.
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        self.Write = channel.unary_unary(
                '/bfrt_proto.BfRuntime/Write',
                request_serializer=bfruntime__pb2.WriteRequest.SerializeToString,
                response_deserializer=bfruntime__pb2.WriteResponse.FromString,
                )
        self.Read = channel.unary_stream(
                '/bfrt_proto.BfRuntime/Read',
                request_serializer=bfruntime__pb2.ReadRequest.SerializeToString,
                response_deserializer=bfruntime__pb2.ReadResponse.FromString,
                )
        self.SetForwardingPipelineConfig = channel.unary_unary(
                '/bfrt_proto.BfRuntime/SetForwardingPipelineConfig',
                request_serializer=bfruntime__pb2.SetForwardingPipelineConfigRequest.SerializeToString,
                response_deserializer=bfruntime__pb2.SetForwardingPipelineConfigResponse.FromString,
                )
        self.GetForwardingPipelineConfig = channel.unary_unary(
                '/bfrt_proto.BfRuntime/GetForwardingPipelineConfig',
                request_serializer=bfruntime__pb2.GetForwardingPipelineConfigRequest.SerializeToString,
                response_deserializer=bfruntime__pb2.GetForwardingPipelineConfigResponse.FromString,
                )
        self.StreamChannel = channel.stream_stream(
                '/bfrt_proto.BfRuntime/StreamChannel',
                request_serializer=bfruntime__pb2.StreamMessageRequest.SerializeToString,
                response_deserializer=bfruntime__pb2.StreamMessageResponse.FromString,
                )
class BfRuntimeServicer(object):
    """Missing associated documentation comment in .proto file."""
    # NOTE(review): machine-generated service base class; subclass and override
    # the methods below to implement the server side of BfRuntime.
    def Write(self, request, context):
        """Update one or more P4 entities on the target.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def Read(self, request, context):
        """Read one or more P4 entities from the target.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def SetForwardingPipelineConfig(self, request, context):
        """Sets the P4 fowarding-pipeline config.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetForwardingPipelineConfig(self, request, context):
        """Gets the current P4 fowarding-pipeline config.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def StreamChannel(self, request_iterator, context):
        """Represents the bidirectional stream between the controller and the
        switch (initiated by the controller).
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_BfRuntimeServicer_to_server(servicer, server):
    # Generated registration helper: binds each servicer method to its RPC
    # handler (with the matching (de)serializers) and attaches them to server.
    rpc_method_handlers = {
            'Write': grpc.unary_unary_rpc_method_handler(
                    servicer.Write,
                    request_deserializer=bfruntime__pb2.WriteRequest.FromString,
                    response_serializer=bfruntime__pb2.WriteResponse.SerializeToString,
            ),
            'Read': grpc.unary_stream_rpc_method_handler(
                    servicer.Read,
                    request_deserializer=bfruntime__pb2.ReadRequest.FromString,
                    response_serializer=bfruntime__pb2.ReadResponse.SerializeToString,
            ),
            'SetForwardingPipelineConfig': grpc.unary_unary_rpc_method_handler(
                    servicer.SetForwardingPipelineConfig,
                    request_deserializer=bfruntime__pb2.SetForwardingPipelineConfigRequest.FromString,
                    response_serializer=bfruntime__pb2.SetForwardingPipelineConfigResponse.SerializeToString,
            ),
            'GetForwardingPipelineConfig': grpc.unary_unary_rpc_method_handler(
                    servicer.GetForwardingPipelineConfig,
                    request_deserializer=bfruntime__pb2.GetForwardingPipelineConfigRequest.FromString,
                    response_serializer=bfruntime__pb2.GetForwardingPipelineConfigResponse.SerializeToString,
            ),
            'StreamChannel': grpc.stream_stream_rpc_method_handler(
                    servicer.StreamChannel,
                    request_deserializer=bfruntime__pb2.StreamMessageRequest.FromString,
                    response_serializer=bfruntime__pb2.StreamMessageResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'bfrt_proto.BfRuntime', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class BfRuntime(object):
    """Missing associated documentation comment in .proto file."""
    # NOTE(review): experimental convenience API emitted by grpcio-tools
    # (one static helper per RPC); edit the .proto, not this file.
    @staticmethod
    def Write(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/bfrt_proto.BfRuntime/Write',
            bfruntime__pb2.WriteRequest.SerializeToString,
            bfruntime__pb2.WriteResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def Read(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_stream(request, target, '/bfrt_proto.BfRuntime/Read',
            bfruntime__pb2.ReadRequest.SerializeToString,
            bfruntime__pb2.ReadResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def SetForwardingPipelineConfig(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/bfrt_proto.BfRuntime/SetForwardingPipelineConfig',
            bfruntime__pb2.SetForwardingPipelineConfigRequest.SerializeToString,
            bfruntime__pb2.SetForwardingPipelineConfigResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetForwardingPipelineConfig(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/bfrt_proto.BfRuntime/GetForwardingPipelineConfig',
            bfruntime__pb2.GetForwardingPipelineConfigRequest.SerializeToString,
            bfruntime__pb2.GetForwardingPipelineConfigResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def StreamChannel(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.stream_stream(request_iterator, target, '/bfrt_proto.BfRuntime/StreamChannel',
            bfruntime__pb2.StreamMessageRequest.SerializeToString,
            bfruntime__pb2.StreamMessageResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| [
"ralf.kundel@kom.tu-darmstadt.de"
] | ralf.kundel@kom.tu-darmstadt.de |
0c60aa2fb9f53122c19b8224e91ee6cccdc8ee08 | 41976ba2c94759b16f165236cbce8616bb2adeb8 | /eventex/core/tests/test_views_talk_list.py | 5e68a215dffaf3bb6a4734970a341233198ba44f | [] | no_license | AdeilsonF/eventex-adeilson | 6d194849573ca350c94e80ee2f013a7ee699ab72 | cbca9bfbeae19c65e28b7fb32f94f23f7a38f260 | refs/heads/master | 2020-12-24T14:56:54.911529 | 2014-06-13T20:40:01 | 2014-06-13T20:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | # coding: utf-8
from django.test import TestCase
from django.core.urlresolvers import reverse as r
from eventex.core.models import Speaker, Talk
class TalkListTest(TestCase):
    """Integration tests for the core:talk_list view (status, template, content)."""
    def setUp(self):
        # One speaker linked to two talks (morning 10:00, afternoon 13:00);
        # the list page is fetched once and shared by every assertion.
        s = Speaker.objects.create(name='Henrique Bastos',
                                   slug='henrique-bastos',
                                   url='http://henriquebastos.net',
                                   description='Passionate software developer!')
        t1 = Talk.objects.create(description=u'Descrição da palestra',
                                 title=u'Título da palestra',
                                 start_time='10:00')
        t2 = Talk.objects.create(description=u'Descrição da palestra',
                                 title=u'Título da palestra',
                                 start_time='13:00')
        t1.speakers.add(s)
        t2.speakers.add(s)
        self.resp = self.client.get(r('core:talk_list'))
    def test_get(self):
        self.assertEqual(200, self.resp.status_code)
    def test_template(self):
        'Template should be core/talk_list.html'
        self.assertTemplateUsed(self.resp, 'core/talk_list.html')
    def test_html(self):
        'Html should list talks.'
        # Shared title/description/speaker data must render once per talk (2x).
        self.assertContains(self.resp, u'Título da palestra', 2)
        self.assertContains(self.resp, u'10:00')
        self.assertContains(self.resp, u'13:00')
        self.assertContains(self.resp, u'/palestras/1/')
        self.assertContains(self.resp, u'/palestras/2/')
        self.assertContains(self.resp, u'/palestrantes/henrique-bastos/', 2)
        self.assertContains(self.resp, u'Passionate software developer', 2)
        self.assertContains(self.resp, u'Henrique Bastos', 2)
        self.assertContains(self.resp, u'Descrição da palestra', 2)
    def test_morning_talks_in_context(self):
        self.assertIn('morning_talks', self.resp.context)
    def test_afternoon_talks_in_context(self):
self.assertIn('afternoon_talks', self.resp.context) | [
"adeilsono.oinfo@gmail.com"
] | adeilsono.oinfo@gmail.com |
bf87b37a2e04bb39ba5a09c86b581bd34be15a03 | cde373aef58da4226bfadee3d1a7086d22f33414 | /Matplotlib/20-AddingMoreIndicatorData.py | 6deebcb3c6cd32cf086b49db0aff5da22174f70c | [] | no_license | ravi4all/ML_WeekEnd_Feb | 6c66c6e6845062928834986980e5c229a19da6cd | 43891ff36cfcd557861b4eebb99c44c68d24954e | refs/heads/master | 2021-01-09T06:10:34.007131 | 2017-06-12T03:57:54 | 2017-06-12T03:57:54 | 80,917,805 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,150 | py | import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
from matplotlib.finance import candlestick_ohlc
from matplotlib import style
import numpy as np
import urllib
import datetime as dt
#style.use('ggplot')
style.use('fivethirtyeight')
# Moving-average window lengths (in samples) for the two overlay curves.
MA1 = 10
MA2 = 30
def moving_average(values, window):
    """Simple moving average of *values* over *window* samples ('valid' mode)."""
    kernel = np.full(window, 1.0 / window)
    return np.convolve(values, kernel, 'valid')
def high_minus_low(highs, lows):
    """Elementwise daily range: high minus low (works on scalars or arrays)."""
    spread = highs - lows
    return spread
# fmt - format
def bytespdate2num(fmt, encoding='utf-8'):
    # Wrap matplotlib's strpdate2num so it accepts *bytes* (as handed over by
    # np.loadtxt converters) instead of str: decode first, then parse the date.
    # NOTE(review): mdates.strpdate2num was removed in matplotlib 3.x -- this
    # only runs on old matplotlib 2.x installs.
    strconverter = mdates.strpdate2num(fmt)
    def bytesconverter(b):
        a = b.decode(encoding)
        return strconverter(a)
    return bytesconverter
def graph_data(stock):
    """Fetch one year of daily quotes for *stock* and draw a 3-panel chart:
    high-low spread (top), candlesticks (middle), two moving averages (bottom).

    NOTE(review): the chartapi.finance.yahoo.com endpoint used below has been
    retired; the download will fail on a live network.
    """
    fig = plt.figure()
    # ax1 is a subplot
    # 6x1 grid: row 0 = spread, rows 1-4 = candles, row 5 = moving averages.
    ax1 = plt.subplot2grid((6,1),(0,0), rowspan=1, colspan=1)
    plt.title(stock)
    ax2 = plt.subplot2grid((6,1),(1,0), rowspan=4, colspan=1)
    plt.xlabel('Date')
    plt.ylabel('Price')
    ax3 = plt.subplot2grid((6,1),(5,0), rowspan=1, colspan=1)
    stock_price_url = 'http://chartapi.finance.yahoo.com/instrument/1.0/'+stock+'/chartdata;type=quote;range=1y/csv'
    source_code = urllib.request.urlopen(stock_price_url).read().decode()
    stock_data = []
    split_source = source_code.split('\n')
    # Keep only the 6-column data rows, skipping the header/metadata lines.
    for line in split_source:
        split_line = line.split(',')
        if len(split_line) == 6:
            if 'values' not in line and 'labels' not in line:
                stock_data.append(line)
    date, closep, highp, lowp, openp, volume = np.loadtxt(stock_data,
                                                          delimiter = ',',
                                                          unpack = True,
                                                          converters={0: bytespdate2num('%Y%m%d')})
    x = 0
    y = len(date)
    # OHLC : open high low close
    # candlestick_ohlc expects per-day tuples (date, open, high, low, close, volume).
    ohlc = []
    while x < y:
        append_me = date[x], openp[x], highp[x], lowp[x], closep[x], volume[x]
        ohlc.append(append_me)
        x += 1
    ma1 = moving_average(closep, MA1)
    ma2 = moving_average(closep, MA2)
    # Plot only the tail where both moving averages are defined.
    start = len(date[MA2-1:])
    h_l = list(map(high_minus_low, highp, lowp))
    ax1.plot_date(date, h_l, '-')
    candlestick_ohlc(ax2, ohlc, width=0.4, colorup='g', colordown='r')
    for label in ax2.xaxis.get_ticklabels():
        label.set_rotation(45)
    ax2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    ax2.xaxis.set_major_locator(mticker.MaxNLocator(10))
    ax2.grid(True)
    bbox_props = dict(boxstyle='larrow', fc='w', ec='k', lw=1)
    # to display the last price
    ax2.annotate(str(closep[-1]), (date[-1], closep[-1]),
                 xytext = (date[-1]+4, closep[-1]), bbox=bbox_props)
    ax3.plot(date[-start:], ma1[-start:])
    ax3.plot(date[-start:], ma2[-start:])
    #plt.legend()
    plt.subplots_adjust(left=0.11, bottom=0.24, right=0.90, top=0.90, wspace=0.2, hspace=0)
    plt.show()
# Module-level demo invocation (runs at import).
graph_data('ebay')
| [
"noreply@github.com"
] | noreply@github.com |
cee714270ac9da346b73a254f76e2c64e3ffa383 | 2f361629eff8a16a35a3b37ad8959e22077adfb4 | /lab3and4/ecommerce/serialize.py | d91ae279c636fd638a6d4e53bb1d7a27ebe7ce98 | [] | no_license | alexiscollado/alexiscollado.github.io | 5dfe82ed43a399e089f537b603ad77a6a8ff7a7c | 07bad41f92d24e31a9933991b02a9d560274de57 | refs/heads/master | 2021-05-02T10:16:38.117572 | 2018-04-30T15:50:39 | 2018-04-30T15:50:39 | 120,792,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | from .models import User, Product, Cart
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    # REST serializer exposing the public profile fields of User.
    class Meta:
        model = User
        fields = ('email', 'first_name', 'last_name', 'shipping_address')
class ProductSerializer(serializers.ModelSerializer):
    # REST serializer for catalogue Product entries.
    class Meta:
        model = Product
        fields = ('price', 'name', 'description')
class CartSerializer(serializers.ModelSerializer):
    # REST serializer for Cart rows, including payment flag and timestamps.
    class Meta:
        model = Cart
fields = ('cart_code', 'total_price', 'created_on', 'updated_on', 'has_paid') | [
"lex.collado@gmail.com"
] | lex.collado@gmail.com |
2676c8b70cc62e532379d2c46e363e54f2d94d14 | 97999ecca9e50972cc9e80df27d4768d83498dba | /credentials/migrations/0008_aboutme_resume.py | 0ca7767e117e9ddde7e97056617a2f2465605750 | [] | no_license | apatten001/portfolio | c79312d13f7a75f909e2d4d66ab6ef275b69543e | 4fdb503afccea83b849b62e3b12539e25a0b722f | refs/heads/master | 2020-04-25T05:45:20.946946 | 2019-03-07T16:53:00 | 2019-03-07T16:53:00 | 172,554,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # Generated by Django 2.1.5 on 2019-02-27 19:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema change: adds a 'resume' FileField to AboutMe with
    # a default upload ('Arnold_Resume.pdf') so existing rows stay valid.
    dependencies = [
        ('credentials', '0007_auto_20190128_1630'),
    ]
    operations = [
        migrations.AddField(
            model_name='aboutme',
            name='resume',
            field=models.FileField(default='Arnold_Resume.pdf', upload_to=''),
        ),
    ]
| [
"apatten001@yahoo.com"
] | apatten001@yahoo.com |
105d384348c813c95cee9ab8d72d6d04d7edae30 | e8d03c56048a02df607ea5e48660285175174ee7 | /bbs/start.py | a20be8d8021280f9766115058c0bb1a307843cc6 | [] | no_license | Alex-wwei/Tornado | acf3b4b996bcf2fabf299021ad9b96eb6d49d8b5 | 0448c075ce0cf1c6b2b3d1de3cc0d83bee6792cc | refs/heads/master | 2022-06-16T01:42:44.167203 | 2017-07-20T02:35:22 | 2017-07-20T02:35:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,220 | py | import tornado.web, tornado.ioloop, re
class BaseField(object):
    """Base class for form-field validators.

    Subclasses supply a REGULAR class attribute (regex pattern string); a
    'check_box' field is special-cased and accepted whenever it is non-empty.
    """
    def __init__(self, error_dict, required = True, ):
        # error_dict: per-field error messages; expected keys 'required'
        # (empty input) and 'valid' (pattern mismatch).
        self.error_dict = {}
        if error_dict:
            self.error_dict.update(error_dict)
        self.error = None          # last validation error message, or None
        self.required = required   # empty input is rejected when True
        self.is_valid = False      # result of the last validate() call
        self.value = None          # bug fix: was the typo 'self.vlaue' (never read)
    def validate(self, name, input_value):
        """Validate *input_value* for field *name*.

        Returns (is_valid, error): error is ``name + error_dict[...]`` on
        failure, None on success. Non-required fields always pass.
        """
        if not self.required:
            self.is_valid = True
        else:
            if name != 'check_box':
                # Checkbox values arrive as a list; only strings are stripped.
                input_value = input_value.strip()
            if len(input_value) == 0:
                self.error = name + self.error_dict['required']
            else:
                if name == 'check_box':
                    # Any non-empty checkbox selection is accepted as-is.
                    self.is_valid = True
                else:
                    if re.match(self.REGULAR, input_value):
                        self.is_valid = True
                    else:
                        self.error = name + self.error_dict['valid']
        return self.is_valid, self.error
class IPField(BaseField):
    # Dotted-quad IPv4 address; first octet 1-255 without leading zeros,
    # remaining octets 0-255.
    REGULAR = "^(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|[1-9])\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)\\."+"(1\\d{2}|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)$"
class PHONEField(BaseField):
    # Mainland-China mobile number: 11 digits starting 13/14/15/18.
    # NOTE(review): inside [], '|' is a literal character, so '1|234567890'
    # also matches -- '[3458]' was probably intended.
    REGULAR = '^1[3|4|5|8]\d{9}$'
class MAILField(BaseField):
    # Basic e-mail shape: local part @ dotted domain (letters/digits/_/-).
    REGULAR = "^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$"
class CHECK_BOX_Field(BaseField):
    # Checkboxes never go through the regex path (see BaseField.validate),
    # so no pattern is needed.
    REGULAR = None
class Base_check(object):
    # Runs every validator stored as an instance attribute of a subclass
    # against the matching request arguments of a tornado RequestHandler.
    def check_values(self, handle):
        """Validate all fields; return (all_valid, {field: value-or-error}).

        flag turns False on the first failing field and stays False.
        """
        flag = True
        content = {}
        for key, regular in self.__dict__.items():
            if key == 'check_box':
                # Checkboxes may carry multiple values -> fetch the full list.
                value = handle.get_arguments(key)
            else:
                value = handle.get_argument(key)
            info, error = regular.validate(key, value)
            if info:
                content[key] = value
            else:
                content[key] = error
                flag = info
        return flag, content
class Check_value(Base_check):
    # Concrete form definition: one validator per expected request argument;
    # attribute names must match the HTML form field names.
    def __init__(self):
        self.IP = IPField(required=True, error_dict={'required':'不能为空哦!','valid':'输入格式错误!'})
        self.PHONE = PHONEField(required=True, error_dict={'required':'不能为空哦!','valid':'输入格式错误!'})
        self.MAIL = MAILField(required=True, error_dict={'required':'不能为空哦!','valid':'输入格式错误!'})
        self.check_box = CHECK_BOX_Field(required=True, error_dict={'required':'不能为空哦!','valid':'输入格式错误!'})
class Indexhandle(tornado.web.RequestHandler):
    # GET renders the form page; POST validates the submitted fields and
    # answers with a plain 'success'/'fault' body.
    def get(self):
        self.render('index.html')
    def post(self, *args, **kwargs):
        check = Check_value()
        flag, content = check.check_values(self)
        print(content)
        if flag:
            self.write('success')
        else:
            self.write('fault')
# Tornado application settings: template and static-file directories.
settings = {
    'template_path':'views',
    'static_path':'statics',
}
if __name__ == '__main__':
    # Route /index to the form handler and serve on port 8080.
    app = tornado.web.Application([
        (r'/index', Indexhandle),
    ], **settings)
    app.listen(8080)
tornado.ioloop.IOLoop.instance().start() | [
"starhub2009@qq.com"
] | starhub2009@qq.com |
0a8d575b2b400d7251a2163fe1bd61be8ed51ca1 | 5dcb7913895f5892dd945c015920759e1dee7dc2 | /Solutions/day5.py | 78557e74dd6f7b3f83dfae8bdbfeaa749e972547 | [] | no_license | atg-abhishek/adventofcode2015 | a05d7e1f50533fbc03597eb736eb70799bfd42dd | 499c77f2bfa7e48ebae02e0bf94cb17bd5367cdd | refs/heads/master | 2021-08-19T12:09:08.855786 | 2017-11-26T05:04:46 | 2017-11-26T05:04:46 | 112,049,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,536 | py | from pprint import pprint
li = []
with open('../Data/day5.txt') as infile:
li = infile.readlines()
def part1(lines=None):
    """Count 'nice' strings per Advent of Code 2015 day 5, part 1.

    A string is nice when it has at least three vowels, at least one doubled
    letter, and contains none of the forbidden substrings ab/cd/pq/xy.

    lines: iterable of strings to check; defaults to the module-level input
    read from day5.txt (backward-compatible -- existing part1() calls work).
    Returns the count (bug fix: the original computed it but never reported it).
    """
    if lines is None:
        lines = li
    forbidden_strings = ('ab', 'cd', 'pq', 'xy')
    nice_strings = 0
    for line in lines:
        vowel_count = sum(1 for ch in line if ch in 'aeiou')
        # Doubled letter: any adjacent pair of equal characters.
        double_present = any(a == b for a, b in zip(line, line[1:]))
        if vowel_count >= 3 and double_present:
            if not any(fs in line for fs in forbidden_strings):
                nice_strings += 1
    return nice_strings
def part2(lines=None):
    """Count 'nice' strings per Advent of Code 2015 day 5, part 2.

    Nice = (a) some two-letter pair appears at least twice without overlapping,
    and (b) some letter repeats with exactly one letter between (xyx -- and the
    puzzle explicitly allows 'aaa').

    Bug fix: the original required the middle letter to differ
    (``char != line[idx+1]``), wrongly rejecting 'aaa'-style gap repeats.

    lines: iterable of strings; defaults to the module-level day5.txt input.
    Prints the count (as before) and also returns it.
    """
    if lines is None:
        lines = li
    nice_strings = 0
    for line in lines:
        n = len(line)
        duplicate_present = any(line[i:i + 2] in line[i + 2:] for i in range(n - 2))
        single_repeat = any(line[i] == line[i + 2] for i in range(n - 2))
        if single_repeat and duplicate_present:
            nice_strings += 1
    pprint(nice_strings)
    return nice_strings
part2() | [
"atg.abhishek@gmail.com"
] | atg.abhishek@gmail.com |
d3b5d2220dfd64a054fc44c58b941464e11c9a62 | bb2b6422476f5bd80171a31517465f9f62e15558 | /catkin_ws/build/scan_tools/laser_ortho_projector/catkin_generated/pkg.installspace.context.pc.py | a7beec3bd992862819cd8c913a124d3586a9795b | [] | no_license | Forrest-Z/MyKitAgv | ccd7b1c5fdb3a046bc5267d1827c4a08d89e74a4 | db9506ad8c8a9012fb49775e188932e28526337e | refs/heads/master | 2022-12-07T17:49:23.140713 | 2020-09-07T14:25:04 | 2020-09-07T14:25:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Machine-generated catkin pkg-config context for laser_ortho_projector;
# values are filled in by CMake at configure time -- regenerate, don't hand-edit.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nodelet;sensor_msgs;tf;pcl_ros;pcl_conversions;geometry_msgs;message_filters".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-llaser_ortho_projector".split(';') if "-llaser_ortho_projector" != "" else []
PROJECT_NAME = "laser_ortho_projector"
PROJECT_SPACE_DIR = "/home/nhamtung/TungNV/MyKitAgv/catkin_ws/install"
PROJECT_VERSION = "0.3.2"
| [
"nhamtung125@gmail.com"
] | nhamtung125@gmail.com |
1d49c638c84d9cfa20e25fd85489966f882c7123 | bfda3af75d94767a5cb265bd68c17cfbf94e3ee1 | /rosalind/qrt/rosalind_qrtd_tung.py | c6d0de60e3be4d40377362c4f3b26bdba3ad70ce | [] | no_license | orenlivne/euler | d0e5b956a46eacfe423fbd6c52918beb91eea140 | 2afdd8bccdc5789c233e955b1ca626cea618eb9b | refs/heads/master | 2020-12-29T02:24:36.479708 | 2016-12-15T21:27:33 | 2016-12-15T21:27:33 | 20,263,482 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,129 | py | '''
============================================================
http://rosalind.info/problems/qrtd
Given: A list containing n taxa (n<=2000) and two unrooted
binary trees T1 and T2 on the given taxa. Both T1 and T2 are
given in Newick format.
Return: The quartet distance dq(T1,T2).
============================================================
'''
# From http://rosalind.info/problems/qrtd/solutions/.
# Need to get the rest of his libraries
import time
from rosalind import rostree
def qrtd(fp):
taxa = next(fp).split()
t1_str = next(fp)
t2_str = next(fp)
taxa_id = dict((s,i) for i, s in enumerate(taxa))
all_taxa = set(xrange(len(taxa)))
start_time = time.time()
def build_tree(t_str):
T = rostree.read_newick_str(t_str)
#T = make_unrooted_binary(T)
for node in T.walk(order=T.POSTORDER):
if node.is_leaf:
node.id = taxa_id[node.val]
node.nodes = set([node.id])
node.rest = all_taxa - node.nodes
else:
node.nodes = reduce(set.union, map(attrgetter('nodes'), node.children), set())
node.rest = all_taxa - node.nodes
# special case to walk unroot tree; the first node is also a leaf node
T.id = taxa_id[T.val]
T.nodes = set([T.id])
T.rest = all_taxa - T.nodes
return T
T1 = build_tree(t1_str)
T2 = build_tree(t2_str)
# link T2 nodes to T1. Mind the special case for root node.
id_2_T1 = dict((node.id,node) for node in T1.walk(type=T1.LEAF))
id_2_T1[T1.id] = T1
for node in T2.walk(type=T1.LEAF):
node.t1_node = id_2_T1[node.id]
T2.t1_node = id_2_T1[T2.id]
N = len(taxa)
print 'N=',N
count = 0
for i, v1 in enumerate(T1.walk(type=T1.INODE)):
if v1 is T1:
continue
if i % 10 == 0:
print 'T1 %3d %s' % (time.time() - start_time, i)
for A_node in T1.walk(exclude_node=v1):
A_node.color = 1
for B_node in v1.left.walk():
B_node.color = 2
for C_node in v1.right.walk():
C_node.color = 3
A1 = v1.rest
B1 = v1.left.nodes
C1 = v1.right.nodes
for v2 in T2.walk(order=T2.POSTORDER):
if v2 is T2:
pass
elif v2.is_leaf:
v2.a1 = 0
v2.b1 = 0
v2.c1 = 0
c = v2.t1_node.color
if c == 1: v2.a1 = 1
elif c == 2: v2.b1 = 1
else: v2.c1 = 1
else:
B = v2.left
C = v2.right
a1b2 = B.a1
a1c2 = C.a1
a1a2 = len(A1) - a1b2 - a1c2
b1b2 = B.b1
b1c2 = C.b1
b1a2 = len(B1) - b1b2 - b1c2
c1b2 = B.c1
c1c2 = C.c1
c1a2 = len(C1) - c1b2 - c1c2
# rememeber under v2, how many of them intersect with A1, B1 and C1
v2.a1 = a1b2 + a1c2
v2.b1 = b1b2 + b1c2
v2.c1 = c1b2 + c1c2
# 3x3=9 different orientation for T12 and T2,
# times in each case two ways to pair B and C from each tree
count += a1a2 * (a1a2-1) / 2 * (b1b2 * c1c2 + b1c2 * c1b2)
count += a1b2 * (a1b2-1) / 2 * (b1a2 * c1c2 + b1c2 * c1a2)
count += a1c2 * (a1c2-1) / 2 * (b1a2 * c1b2 + b1b2 * c1a2)
count += b1a2 * (b1a2-1) / 2 * (a1b2 * c1c2 + a1c2 * c1b2)
count += b1b2 * (b1b2-1) / 2 * (a1a2 * c1c2 + a1c2 * c1a2)
count += b1c2 * (b1c2-1) / 2 * (a1a2 * c1b2 + a1b2 * c1a2)
count += c1a2 * (c1a2-1) / 2 * (a1b2 * b1c2 + a1c2 * b1b2)
count += c1b2 * (c1b2-1) / 2 * (a1a2 * b1c2 + a1c2 * b1a2)
count += c1c2 * (c1c2-1) / 2 * (a1a2 * b1b2 + a1b2 * b1a2)
print N * (N - 1) * (N- 2) * (N - 3) / 12 - count
if __name__ == "__main__":
    # NOTE(review): qrtd() calls next(fp) and so expects an open file-like
    # object, but a filename string is passed here -- this raises TypeError
    # as written; presumably open(...) was intended.
    print qrtd('rosalind_qrtd_sample.dat')
    #print qrtd('rosalind_qrtd.dat')
| [
"oren.livne@gmail.com"
] | oren.livne@gmail.com |
b76f179288fa582d387bf950eec30e00a1f63724 | ff44b59792d68783387f542ed76419182630573f | /mage_python/Ex/04/users.py | 17e31c040889b38f52ef8c431831af13addfec6a | [] | no_license | caichenyi/daily | e130a9ab24e170ca3c0167774e9aca739ff7f928 | 717fada9ff473d6d244e8aadfd2b96621fec4120 | refs/heads/master | 2021-09-01T10:08:25.548701 | 2017-12-26T11:13:34 | 2017-12-26T11:13:34 | 114,218,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,150 | py | # encoding: utf-8
# Author: Cai Chenyi
'''
1. 每一个用户存储信息修改为字典
{name : xxx, age : xxxx, tel: xxx}
2. 用户输入了三次,修改为一次,使用:分隔用户信息
3. 查找的时候使用包含关系,name包含查找的字符串
'''
# Load the user "database": one user per line in users.txt, fields separated
# by ':' -> users[name] = {'name': ..., 'age': ..., 'tel': ...}.
users = {}
user_info_tpl = '|{0:>20}|{1:>5}|{2:>20}|'
user_info_header = user_info_tpl.format('name', 'age', 'telephone')
fhandler = open('users.txt')
for line in fhandler:
    name, age, tel = line.strip().split(':')
    users[name] = {'name': name, 'age': age, 'tel': tel}
fhandler.close()
# User authentication: up to 3 attempts, matching user name + phone number.
is_valid = False
for i in range(3):
    name = input('请输入用户名:')
    tel = input('请输入手机号:')
    for user in users.values():
        if user['name'] == name and user['tel'] == tel:
            is_valid = True
            break
    if is_valid:
        break
    else:
        print('认证失败')
if not is_valid:
    print('已超过最大认证次数,程序退出')
else:
    # Interactive CRUD loop over the in-memory user table.
    while True:
        action = input('please input(find/list/add/delete/update/exit):')
        print(action)
        if action == 'add':
            # Add a user; duplicates (by name) are rejected.
            user_txt = input('请输入用户信息(用户名:年龄:电话):')
            name, age, tel = user_txt.split(':')
            if name in users:
                print('用户存在')
            else:
                users[name] = {'name' : name, 'age' : age, 'tel' : tel}
        elif action == 'delete':
            # Delete a user by exact name.
            name = input('请输入你要删除的用户名:')
            if users.pop(name, None):
                print('删除成功')
            else:
                print('删除用户失败, 失败原因: 用户名不存在')
        elif action == 'update':
            # Replace an existing user's record wholesale.
            user_txt = input('请输入用户信息(用户名:年龄:电话):')
            name, age, tel = user_txt.split(':')
            if name in users:
                users[name] = {'name' : name, 'age' : age, 'tel' : tel}
            else:
                print('更新用户失败, 错误原因: 用户名不存在')
        elif action == 'find':
            # Substring search on the user name (per spec item 3 above).
            name = input('请输入你要查询的用户名:')
            is_exists = False
            print(user_info_header)
            for user in users.values():
                if user['name'].find(name) != -1:
                    print(user_info_tpl.format(user['name'], user['age'], user['tel']))
                    is_exists = True
            if not is_exists:
                print('没有该用户信息')
        elif action == 'list':
            # List every user in the formatted table.
            print(user_info_header)
            for user in users.values():
                print(user_info_tpl.format(user['name'], user['age'], user['tel']))
        elif action == 'exit':
            # Persist the table back to users.txt, then quit.
            fhandler = open('users.txt', 'wt')
            for user in users.values():
                fhandler.write('{}:{}:{}\n'.format(user['name'], user['age'], user['tel']))
            fhandler.close()
            break
        else:
            print('命令错误')
| [
"chenyi.cai@wwwarehouse.com"
] | chenyi.cai@wwwarehouse.com |
bf71d6307ef41c2c55ddf45a5bf5ad3f1a2c1144 | 688362ebcf80619dacae5615fcf2879670ecbfe3 | /week2/soa_bgsubs.py | d934e7e56cd6d2e5920bf8cdfd10dad6610399be | [] | no_license | mcv-m6-video/mcv-m6-2021-team7 | 8255dd2606414f31707d26b55fd3cde6266daaea | b19d99cdb9185f6062586a9267a1d119798ce204 | refs/heads/main | 2023-04-14T06:27:07.639171 | 2021-04-29T17:02:50 | 2021-04-29T17:02:50 | 345,711,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,114 | py | import cv2
from tqdm import trange
from bboxdetection import *
import numpy
import imageio
import os
def get_opencv_bgsubs(alg):
    """Return an OpenCV background-subtractor instance for *alg*.

    alg: one of 'KNN', 'MOG2', 'CNT', 'GMG', 'GSOC', 'LSBP', 'MOG'.

    Raises:
        ValueError: if *alg* is not a known algorithm name. (The original
        fell through the elif chain and crashed with UnboundLocalError on
        the final return instead.)
    """
    # Inicialize bg segmentation class (from opencv)
    if alg == 'KNN':
        bgsubs = cv2.createBackgroundSubtractorKNN()
    elif alg == 'MOG2':
        bgsubs = cv2.createBackgroundSubtractorMOG2()
    elif alg == 'CNT':
        bgsubs = cv2.bgsegm.createBackgroundSubtractorCNT()
    elif alg == 'GMG':
        bgsubs = cv2.bgsegm.createBackgroundSubtractorGMG()
    elif alg == 'GSOC':
        bgsubs = cv2.bgsegm.createBackgroundSubtractorGSOC()
    elif alg == 'LSBP':
        bgsubs = cv2.bgsegm.createBackgroundSubtractorLSBP()
    elif alg == 'MOG':
        bgsubs = cv2.bgsegm.createBackgroundSubtractorMOG()
    else:
        raise ValueError('unknown background subtraction algorithm: %r' % alg)
    return bgsubs
class BgsModel:
def __init__(self,path,color_space, alg):
self.path = path
self.vCapture = cv2.VideoCapture(path)
self.width = int(self.vCapture.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.vCapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.length = int(self.vCapture.get(cv2.CAP_PROP_FRAME_COUNT))
self.color_space = color_space
self.vidLen = int(self.vCapture.get(cv2.CAP_PROP_FRAME_COUNT))
self.backSub = get_opencv_bgsubs(alg)
def retVidLen(self):
return self.vidLen
def _color_space_prep(self,addDim,i):
self.vCapture.set(cv2.CAP_PROP_POS_FRAMES, i)
im = self.vCapture.read()[1]
if self.color_space == 'gray':
imC = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
num_channels = 1
elif self.color_space == 'rgb':
imC = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
num_channels = 3
elif self.color_space == 'hsv':
imC = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
num_channels = 3
elif self.color_space == 'lab':
imC = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)
num_channels = 3
elif self.color_space == 'yuv':
imC = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
num_channels = 3
else:
print('Color space is not in the dictionary')
return
if addDim:
imC = imC.reshape(im.shape[0],im.shape[1],num_channels)
return imC,num_channels
def foreground_extraction(self,showVid, gt, use_postprocessing, outpath=None):
initFrame = int(self.vidLen*0.25)
endFrame = int(self.vidLen)
if outpath:
if not os.path.exists(outpath):
os.makedirs(outpath)
for i in trange(initFrame, desc='Background computation'):
imC, _ = self._color_space_prep(addDim=True, i=i)
self.backSub.apply(imC)
predictedBBOX = []
predictedFrames = []
count = 0
for i in trange(initFrame,endFrame, desc='Foreground extraction'):
imC,_ = self._color_space_prep(addDim=True,i=i)
mask = self.backSub.apply(imC)
mask = np.ceil(mask/255.0)
# Detect white patches (cars)
if use_postprocessing:
denoised_m = removeNoise(mask,'task3')
else:
denoised_m = mask
bboxFrame = findBBOX(denoised_m,'task3')
predictedBBOX.append(bboxFrame)
predictedFrames.append(i)
if showVid:
denoised_m = denoised_m*255
gtBoxes = gt[count]['bbox']
mRGB = np.zeros((denoised_m.shape[0],denoised_m.shape[1],3))
mRGB[:, :, 0] = denoised_m
mRGB[:, :, 1] = denoised_m
mRGB[:, :, 2] = denoised_m
for k in range(len(gtBoxes)):
gbox=gtBoxes[k]
if gbox != None:
cv2.rectangle(mRGB, (int(gbox[0]), int(gbox[1])), (int(gbox[2]), int(gbox[3])), (0,0,255), 3)
for b in bboxFrame:
cv2.rectangle(mRGB, (b[0], b[1]), (b[2], b[3]), (100, 255, 0), 3)
mRGB = mRGB.astype('uint8')
if outpath:
cv2.imwrite(os.path.join(outpath, str(i) + '.jpg'), mRGB)
count += 1
predictionsInfo,num_bboxes = prepareBBOXdata(predictedBBOX, predictedFrames)
return predictionsInfo,num_bboxes
if __name__ == '__main__':
backSub = get_opencv_bgsubs('KNN')
capture = cv2.VideoCapture("D:\MCV\M6\AICity_data\\train\S03\c010\\vdo.avi")
if not capture.isOpened():
print('Unable to open: D:\MCV\M6\AICity_data\\train\S03\c010\\vdo.avi')
exit(0)
while True:
ret, frame = capture.read()
if frame is None:
break
fgMask = backSub.apply(frame)
cv2.rectangle(frame, (10, 2), (100, 20), (255, 255, 255), -1)
cv2.putText(frame, str(capture.get(cv2.CAP_PROP_POS_FRAMES)), (15, 15),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
cv2.imshow('Frame', frame)
cv2.imshow('FG Mask', fgMask)
keyboard = cv2.waitKey(30)
if keyboard == 'q' or keyboard == 27:
break | [
"59796230+victorubieto@users.noreply.github.com"
] | 59796230+victorubieto@users.noreply.github.com |
43a0cab3c9c839ec46266c935ecdf82958e35ef6 | ba3c06f9ae89479fa4987fe841ac09b5b5d71383 | /python_for_kids/book/Projects/monster6.py | a10a99ac4030b0d5d0cfab3769dc4e6741f8afab | [] | no_license | mary-tano/python-programming | 6d806e25011e770a04a0922d0b71bf38c222d026 | 829654a3274be939fa529ed94ea568c12f7f1a27 | refs/heads/master | 2021-05-17T15:30:32.710838 | 2020-04-01T13:37:18 | 2020-04-01T13:37:18 | 250,846,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | # Лаборатория Франкенштейна
from monsterlab import *
# Main program: create one monster of each kind and show it.
# (Comments translated from Russian; the constructor arguments are a name
# and a mood/description string, kept verbatim.)
Frank = Monster("Фрэнки", "необычный")
Frank.show()
Albert = GMonster("Альберт", "задумчивый")
Albert.show()
Sigmund = SMonster("Зигмунд", "веселый")
Sigmund.show()
| [
"masha.mary.tano@gmail.com"
] | masha.mary.tano@gmail.com |
5593fcb9ec3b1417a331b3952f8d5f7cd229fa92 | 11bb0cbe6de2a0a4e94fc0ba610f61894d5593a1 | /VBS_Zgamma/Significance/Invert_detajj/data_cards/th2_to_txt.py | 40511394bf0a015827f544aacee6e14b17f68643 | [] | no_license | AnYpku/PKU-Cluster | 0dc4a88445aeb3ca239b2d7d7f796c6a67f3f69c | f9ffbcb7988053f4618fd015c1bb656d92ff51c6 | refs/heads/master | 2022-11-01T23:46:59.442037 | 2022-10-21T06:37:43 | 2022-10-21T06:37:43 | 188,202,345 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 10,414 | py | #!/usr/bin/env python
from ROOT import gROOT, THStack, TH1D, TList, TFile
import sys
from math import sqrt
from numpy import sum
def merge_bin(th1):
    # Fold the last two bins of a ROOT TH1 into bin nbins-2, so the final
    # bin of the plotted range also carries the tail contents.  Errors are
    # combined in quadrature; if the merged content is not positive the
    # error is zeroed.  Mutates th1 in place.
    nbins=th1.GetNbinsX()
    print 'nbins', nbins
    th1.SetBinContent(nbins-2,th1.GetBinContent(nbins-2)+th1.GetBinContent(nbins-1)+th1.GetBinContent(nbins))
    if th1.GetBinContent(nbins-2)>0:
        th1.SetBinError(nbins-2,sqrt(th1.GetBinError(nbins-2)*th1.GetBinError(nbins-2)+th1.GetBinError(nbins-1)*th1.GetBinError(nbins-1)+th1.GetBinError(nbins)*th1.GetBinError(nbins)))
    else:
        th1.SetBinError(nbins-2,0);
print '-----begin to transfer TH2D to txt for Higgs-combine tool----- \n'
fdir = '/home/pku/anying/cms/PKU-Cluster/Significance/Invert_detajj/root/'
fin = TFile.Open(fdir+'hist_'+sys.argv[1]+'.root')
th1_ZA_sig=fin.Get('hist_ZA-EWK'+sys.argv[1])
th1_ZA=fin.Get('hist_ZA'+sys.argv[1])
th1_non_prompt=fin.Get('hist_plj'+sys.argv[1])
th1_TTA=fin.Get('hist_TTA'+sys.argv[1])
th1_VV=fin.Get('hist_VV'+sys.argv[1])
th1_ST=fin.Get('hist_ST'+sys.argv[1])
# the bkg histo and signal histo have already contain the overflow bin in the last bin when creat the histograms
genbincontent=[]
genbinerror=[]
arr={}
f=open('/home/pku/anying/cms/PKU-Cluster/Significance/Invert_detajj/Uncer/summary_Mjj_'+sys.argv[1]+'.txt')
import re
import numpy as np
for line in f:
if not line.strip():
continue
print line
line = line.replace('[','')
line = line.replace(']','')
line = line.replace('\n','')
print line
arr_Temp = re.split(',|=',line)
print arr_Temp
name = arr_Temp[0]
arr_Temp = np.array(arr_Temp[1:])
#arr_Temp.astype(np.float)
arr_Temp = [float(x) for x in arr_Temp]
print name
arr[name]=arr_Temp
print arr
print '>>>>begin to read bin content to the txt file>>>>'
nbins=th1_ZA_sig.GetNbinsX()
print 'nbins', nbins
nbins=th1_ZA_sig.GetNbinsX()+1
print 'range in for loop 1 to', nbins
for i in range(1,nbins):
f = open('./txt/Mjj_%s_bin%i.txt'%(sys.argv[1],i),'w')
f.write('imax 1 number of channels\n')
f.write('jmax 5 number of processes-1\n')
if sys.argv[1].find("18") == -1 and sys.argv[1].find("17") == -1: #16
f.write('kmax * number of nuisance parameters (sources of systematical uncertainties)\n')
if sys.argv[1].find("16") == -1 and sys.argv[1].find("18") == -1: #17
f.write('kmax * number of nuisance parameters (sources of systematical uncertainties)\n')
if sys.argv[1].find("16") == -1 and sys.argv[1].find("17") == -1: #18
f.write('kmax * number of nuisance parameters (sources of systematical uncertainties)\n')
f.write('------------\n')
f.write('# we have just one channel, in which we observe 0 events\n')
f.write('bin bin%i\n'%(i))
# bincontent of each precess
ST_bincontent = th1_ST.GetBinContent(i) if th1_ST.GetBinContent(i)>0 else 0
TTA_bincontent = th1_TTA.GetBinContent(i) if th1_TTA.GetBinContent(i)>0 else 0
VV_bincontent = th1_VV.GetBinContent(i) if th1_VV.GetBinContent(i)>0 else 0
# WA_bincontent = th1_WA.GetBinContent(i) if th1_WA.GetBinContent(i)>0 else 0
non_prompt_bincontent = th1_non_prompt.GetBinContent(i) if th1_non_prompt.GetBinContent(i)>0 else 0
ZA_bincontent = th1_ZA.GetBinContent(i) if th1_ZA.GetBinContent(i)>0 else 0
ZA_sig_bincontent = th1_ZA_sig.GetBinContent(i) if th1_ZA_sig.GetBinContent(i)>0 else 0
# bin error
ST_binerror = th1_ST.GetBinError(i)/ST_bincontent if ST_bincontent>0 else 0
ST_binerror = ST_binerror if ST_binerror<1 else 1
ST_binerror = ST_binerror+1
TTA_binerror = th1_TTA.GetBinError(i)/TTA_bincontent if TTA_bincontent>0 else 0
TTA_binerror = TTA_binerror if TTA_binerror<1 else 1
TTA_binerror = TTA_binerror+1
VV_binerror = th1_VV.GetBinError(i)/VV_bincontent if VV_bincontent>0 else 0
VV_binerror = VV_binerror if VV_binerror<1 else 1
VV_binerror = VV_binerror+1
# WA_binerror = th1_WA.GetBinError(i)/WA_bincontent if WA_bincontent>0 else 0
# WA_binerror = WA_binerror if WA_binerror<1 else 1
# WA_binerror = WA_binerror+1
non_prompt_binerror = th1_non_prompt.GetBinError(i)/non_prompt_bincontent if non_prompt_bincontent>0 else 0
non_prompt_binerror = non_prompt_binerror if non_prompt_binerror<1 else 1
non_prompt_binerror =non_prompt_binerror+1
ZA_binerror = th1_ZA.GetBinError(i)/ZA_bincontent if ZA_bincontent>0 else 0
ZA_binerror = ZA_binerror if ZA_binerror<1 else 1
ZA_binerror = ZA_binerror+1
ZA_sig_binerror = th1_ZA_sig.GetBinError(i)/ZA_sig_bincontent if ZA_sig_bincontent>0 else 0
ZA_sig_binerror = ZA_sig_binerror if ZA_sig_binerror<1 else 1
ZA_sig_binerror = ZA_sig_binerror+1
data= ZA_sig_bincontent + ZA_bincontent+non_prompt_bincontent+TTA_bincontent+VV_bincontent+ST_bincontent
f.write('observation %0.2f\n'%(data))
f.write('------------\n')
f.write('# now we list the expected events for signal and all backgrounds in that bin\n')
f.write('# the second process line must have a positive number for backgrounds, and 0 for signal\n')
f.write('# then we list the independent sources of uncertainties, and give their effect (syst. error)\n')
f.write('# on each process and bin\n')
f.write('bin\t')
f.write('bin%i\tbin%i\tbin%i\tbin%i\tbin%i\tbin%i\n'%(i,i,i,i,i,i))
f.write('process\t')
f.write('Sig\tQCD\tnon_prompt\tTTA\tVV\tST\n')
f.write('process\t0\t1\t2\t3\t4\t5\n')
f.write('rate\t')
f.write('%0.2f\t%0.2f\t%0.2f\t%0.2f\t%0.2f\t%0.2f\n'%(ZA_sig_bincontent,ZA_bincontent, non_prompt_bincontent, TTA_bincontent, VV_bincontent, ST_bincontent))
f.write('------------\n')
f.write('lumi_%s\tlnN\t'%(sys.argv[1]))
if sys.argv[1].find("17")==-1 and sys.argv[1].find("18")==-1:
f.write('%0.3f\t%0.3f\t-\t%0.3f\t%0.3f\t%0.3f\n'%(1.022,1.022,1.022,1.022,1.022))
if sys.argv[1].find("16")==-1 and sys.argv[1].find("18")==-1:
f.write('%0.3f\t%0.3f\t-\t%0.3f\t%0.3f\t%0.3f\n'%(1.02,1.02,1.02,1.02,1.02))
if sys.argv[1].find("16")==-1 and sys.argv[1].find("17")==-1:
f.write('%0.3f\t%0.3f\t-\t%0.3f\t%0.3f\t%0.3f\n'%(1.015,1.015,1.015,1.015,1.015))
f.write('VBS_Stat_bin%d_%s\tlnN\t'%(i,sys.argv[1]))
f.write('%0.2f\t-\t-\t-\t-\t-\n'%(ZA_sig_binerror))
f.write('QCD_Stat_bin%d_%s\tlnN\t'%(i,sys.argv[1]))
f.write('-\t%0.2f\t-\t-\t-\t-\n'%(ZA_binerror))
f.write('non_prompt_Stat_bin%d_%s\tlnN\t'%(i,sys.argv[1]))
f.write('-\t-\t%0.2f\t-\t-\t-\n'%(non_prompt_binerror))
f.write('TTA_Stat_bin%d_%s\tlnN\t'%(i,sys.argv[1]))
f.write('-\t-\t-\t%0.2f\t-\t-\n'%(TTA_binerror))
f.write('VV_Stat_bin%d_%s\tlnN\t'%(i,sys.argv[1]))
f.write('-\t-\t-\t-\t%0.2f\t-\n'%(VV_binerror))
f.write('ST_Stat_bin%d_%s\tlnN\t'%(i,sys.argv[1]))
f.write('-\t-\t-\t-\t-\t%0.2f\n'%(ST_binerror))
#
f.write('fake_%s\tlnN\t'%(sys.argv[1]))
if non_prompt_bincontent==0:
f.write('-\t-\t-\t-\t-\t-\n')
else:
f.write('-\t-\t%0.2f\t-\t-\t-\n'%(arr['fake'+sys.argv[1]][i-1]))
#
f.write('JES_%s\tlnN\t'%(sys.argv[1]))
f.write('%0.2f\t%0.2f\t-\t%0.2f\t%0.2f\t%0.2f\n'%(arr['jes'+sys.argv[1]+'_ZA-EWK'][i-1],arr['jes'+sys.argv[1]+'_ZA'][i-1],arr['jes'+sys.argv[1]+'_TTA'][i-1],arr['jes'+sys.argv[1]+'_VV'][i-1],arr['jes'+sys.argv[1]+'_ST'][i-1]))
#
f.write('JER_%s\tlnN\t'%(sys.argv[1]))
f.write('%0.2f\t%0.2f\t-\t%0.2f\t%0.2f\t%0.2f\n'%(arr['jer'+sys.argv[1]+'_ZA-EWK'][i-1],arr['jer'+sys.argv[1]+'_ZA'][i-1],arr['jer'+sys.argv[1]+'_TTA'][i-1],arr['jer'+sys.argv[1]+'_VV'][i-1],arr['jer'+sys.argv[1]+'_ST'][i-1]))
#
f.write('pdf_EW\tlnN\t')
f.write('%0.3f\t-\t-\t-\t-\t-\n'%(arr['Sig_pdf'][i-1]))
#
f.write('pdf_QCD\tlnN\t')
f.write('-\t%0.2f\t-\t-\t-\t-\n'%(arr['QCD_pdf'][i-1]))
#
f.write('Scale_EW\tlnN\t')
f.write('%0.2f\t-\t-\t-\t-\t-\n'%(arr['Sig_scale'][i-1]))
#
f.write('Scale_muF1\tlnN\t')
f.write('-\t%0.3f\t-\t-\t-\t-\n'%(arr['scale_muF1'][i-1]))
#
f.write('Scale_muR1\tlnN\t')
f.write('-\t%0.3f\t-\t-\t-\t-\n'%(arr['scale_muR1'][i-1]))
#
f.write('Scale_muFmuR\tlnN\t')
f.write('-\t%0.3f\t-\t-\t-\t-\n'%(arr['scale_muFmuR'][i-1]))
#
f.write('interf\tlnN\t')
f.write('%0.2f\t-\t-\t-\t-\t-\n'%(arr['interf'][i-1]))
#
f.write('mu_trigger\tlnN\t')
f.write('%0.3f\t-\t%0.3f\t%0.3f\t%0.3f\t%0.3f\n'%(arr['muon_ZA_trigger'][i-1],arr['muon_TTA_trigger'][i-1],arr['muon_VV_trigger'][i-1],arr['muon_ST_trigger'][i-1],arr['muon_ZA-EWK_trigger'][i-1]))
#
f.write('mu_eff\tlnN\t')
f.write('%0.3f\t-\t%0.3f\t%0.3f\t%0.3f\t%0.3f\n'%(arr['muon_ZA_all'][i-1],arr['muon_TTA_all'][i-1],arr['muon_VV_all'][i-1],arr['muon_ST_all'][i-1],arr['muon_VV_all'][i-1]))
#
f.write('ele_reco\tlnN\t')
f.write('%0.3f\t-\t%0.3f\t%0.3f\t%0.3f\t%0.3f\n'%(arr['ele_ZA_reco'][i-1],arr['ele_TTA_reco'][i-1],arr['ele_VV_reco'][i-1],arr['ele_ST_reco'][i-1],arr['ele_ZA-EWK_reco'][i-1]))
#
f.write('ele_ID\tlnN\t')
f.write('%0.3f\t-\t%0.3f\t%0.3f\t%0.3f\t%0.3f\n'%(arr['ele_ZA_ID'][i-1],arr['ele_TTA_ID'][i-1],arr['ele_VV_ID'][i-1],arr['ele_ST_ID'][i-1],arr['ele_ZA-EWK_ID'][i-1]))
#
f.write('photon_id\tlnN\t')
f.write('%0.3f\t-\t%0.3f\t%0.3f\t%0.3f\t%0.3f\n'%(arr['photon_ZA_ID'][i-1],arr['photon_TTA_ID'][i-1],arr['photon_VV_ID'][i-1],arr['photon_ST_ID'][i-1],arr['photon_ZA-EWK_ID'][i-1]))
#
f.write('pileup\tlnN\t')
f.write('%0.3f\t-\t%0.3f\t%0.3f\t%0.3f\t%0.3f\n' %(arr['pileup_ZA'][i-1],arr['pileup_TTA'][i-1],arr['pileup_VV'][i-1],arr['pileup_ST'][i-1],arr['pileup_ZA-EWK'][i-1]))
#
f.write('ttgamma_xs\tlnN\t')
f.write('-\t-\t-\t1.1\t-\t-\n')
f.write('VV_xs\tlnN\t')
f.write('-\t-\t-\t-\t1.1\t-\n')
#
f.write('pileupId_eff_%s\tlnN\t'%(sys.argv[1]))
f.write('%0.2f\t%0.2f\t-\t%0.2f\t%0.2f\t%0.2f\n'%(arr['ZA-EWK_eff'][i-1],arr['ZA_eff'][i-1],arr['TTA_eff'][i-1],arr['VV_eff'][i-1],arr['ST_eff'][i-1]))
f.write('pileupId_mis_%s\tlnN\t'%(sys.argv[1]))
f.write('%0.2f\t%0.2f\t-\t%0.2f\t%0.2f\t%0.2f\n'%(arr['ZA-EWK_mis'][i-1],arr['ZA_mis'][i-1],arr['TTA_mis'][i-1],arr['VV_mis'][i-1],arr['ST_mis'][i-1]))
#
if sys.argv[1].find("18") == -1:
f.write('l1pref\tlnN\t')
f.write('%0.2f\t%0.2f\t-\t%0.2f\t%0.2f\t%0.2f\n'%(arr['l1pref_ZA'][i-1],arr['l1pref_ZA-EWK'][i-1],arr['l1pref_TTA'][i-1],arr['l1pref_VV'][i-1],arr['l1pref_ST'][i-1]))
# print 'bin ',i,' ',ZA_binerror,' ',non_prompt_binerror,' ',TTA_binerror,' ',VV_binerror,' ',ST_binerror,' ',WA_binerror,' ',ZA_sig_out_binerror
genbincontent[:]=[]
genbinerror[:]=[]
| [
"anying@pku.edu.cn"
] | anying@pku.edu.cn |
fab57c88638bc680f3b540d514636a2d2caeed5e | 9f0deecc85c256c3586ed5abf8e02809a6094092 | /JinriToutiao.py | a938676c76b6053a521b356c19291591cb95eb3b | [] | no_license | llx-answer/JinriToutiao | 834f24031ca012e10713808bcbe265a1d963d41a | 2edbf1378ab3b8e3d5cb274307c5bec8082b82b2 | refs/heads/master | 2020-08-15T07:17:05.444716 | 2019-10-15T12:56:43 | 2019-10-15T12:56:43 | 215,287,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,728 | py | import requests
from urllib.parse import urlencode
from requests import codes
import os
from hashlib import md5
from multiprocessing.pool import Pool
import re
def get_page(offset):
    """Request one page of Toutiao search results for '街拍' at *offset*.

    Returns the decoded JSON dict on HTTP 200, or None on a non-200 status
    or a connection error.
    """
    headers = {
        # NOTE(review): fill in a valid logged-in Toutiao cookie here.  The
        # original contained the unquoted placeholder `your cookie`, which is
        # a syntax error and made the whole module unimportable.
        'cookie': '',
        'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
        'x-requested-with':'XMLHttpRequest',
        'referer':'https://www.toutiao.com/search/?keyword=%E8%A1%97%E6%8B%8D'
    }
    params = {
        'aid':'24',
        'app_name':'web_search',
        'offset':offset,
        'format':'json',
        'keyword':'街拍',
        'autoload':'true',
        'count':'20',
        'en_qc':'1',
        'cur_tab':'1',
        'from':'search_tab',
        'pd': 'synthesis'
    }
    base_url = 'https://www.toutiao.com/api/search/content/?'
    url = base_url + urlencode(params)
    try:
        resp = requests.get(url, headers=headers)
        if resp.status_code == 200:
            return resp.json()
    except requests.ConnectionError:
        return None
def get_images(payload):
    """Yield {'image': url, 'title': title} for each image in one result page.

    *payload* is the JSON dict returned by get_page (may be None or empty,
    in which case nothing is yielded).  Entries without a title are skipped;
    tabs are stripped from titles, and thumbnail URLs ('list.../pgc-image')
    are rewritten to their full-size 'large/pgc-image' counterparts.

    (The parameter was renamed from `json`, which shadowed the stdlib module
    name; callers invoke this positionally, so the rename is compatible.)
    """
    if not payload or not payload.get('data'):
        return
    for item in payload.get('data'):
        if item.get('title') is None:
            continue
        title = re.sub('[\t]', '', item.get('title'))
        # `image_list` may be absent on non-gallery results; treat as empty.
        images = item.get('image_list') or []
        for image in images:
            origin_image = re.sub("list.*?pgc-image", "large/pgc-image", image.get('url'))
            yield {
                'image': origin_image,
                'title': title
            }
def save_image(item):
    """Download one image and save it as img0907777/<title>/<md5>.jpg.

    Skips the write when the (content-addressed) file already exists, so
    duplicate downloads are detected.  Any network or filesystem error is
    printed rather than raised, keeping the multiprocessing pool alive.
    """
    img_path = 'img0907777' + '/' + item.get('title')
    # Fall back to a short directory name when the title contains characters
    # that are invalid in Windows paths.  The original tested
    # `if '|' or '?' in img_path:`, which is always true ('|' is truthy),
    # so *every* directory got truncated to its last 5 characters.
    if '|' in img_path or '?' in img_path:
        img_path = img_path[-5:]
    if not os.path.exists(img_path):
        os.makedirs(img_path)
    try:
        resp = requests.get(item.get('image'))
        if 200 == resp.status_code:
            # Name the file by the MD5 of its content to de-duplicate images.
            file_path = img_path + '/' + '{file_name}.{file_suffix}'.format(
                file_name=md5(resp.content).hexdigest(),
                file_suffix='jpg')
            if not os.path.exists(file_path):
                with open(file_path, 'wb') as f:
                    f.write(resp.content)
                print('Downloaded image path is %s' % file_path)
            else:
                print('Already Downloaded', file_path)
    except Exception as e:
        print(e)
json = get_page(offset)
for item in get_images(json):
save_image(item)
GROUP_START = 0
GROUP_END = 20
if __name__ == '__main__':
pool = Pool()
groups = ([x * 20 for x in range(GROUP_START, GROUP_END)])
pool.map(main, groups)
pool.close()
pool.join()
| [
"363863343@qq.com"
] | 363863343@qq.com |
460268891e3255c5dbad126bb8d1011ba45d5bc1 | b2c873cb92b71500bcb8468dccc6da7704edb841 | /resumecamp/resume/migrations/0002_auto_20150209_0455.py | 5f2baae03c9053e9a8a6fc2661ee8bc386b149cd | [] | no_license | hawkaa/resumecamp | 019c77839dd2db009542b880498252824ff8dfa2 | 9aa07074ec1326104e73581ad9edc33546e40c73 | refs/heads/master | 2020-06-04T10:52:38.045221 | 2015-02-09T06:20:54 | 2015-02-09T06:20:54 | 30,518,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Renames the model 'CV' to 'Resume' in the 'resume' app.
    # Must run after the app's initial migration.
    dependencies = [
        ('resume', '0001_initial'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='CV',
            new_name='Resume',
        ),
    ]
| [
"hakon@aamdal.com"
] | hakon@aamdal.com |
34c9d63c64f37b6a17a2adfae7b3bb9d3677a416 | 0130c8b14927097663157846adc4b146d67d2fda | /tests/common/test_run/softplus_run.py | 72090ba2620e11675993ae68cec770d88f6b7703 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
] | permissive | Shigangli/akg | e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc | 3766c54e0b109541932d147a6b5643a334b82403 | refs/heads/master | 2023-09-06T05:13:40.571583 | 2021-11-23T03:44:54 | 2021-11-23T03:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""run function for softplus"""
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import softplus
from tests.common.gen_random import random_gaussian
from tests.common.base import get_rtol_atol
def softplus_run(shape, dtype, attrs):
    """Build the softplus kernel, run it on random data and compare against
    the numpy reference; returns (inputs, output, expect, passed)."""
    kernel = utils.op_build_test(softplus.softplus, [shape], [dtype],
                                 kernel_name="softplus", attrs=attrs)
    expect, inputs, output = gen_data(dtype, shape)
    output = utils.mod_launch(kernel, (inputs, output), expect=expect)
    rtol, atol = get_rtol_atol("softplus", dtype)
    passed = compare_tensor(output, expect,
                            rtol=rtol, atol=atol, equal_nan=False)
    return inputs, output, expect, passed
def gen_data(dtype, shape):
    """Generate random inputs, the numpy softplus reference (in the
    numerically stable max(x,0)+log1p(exp(-|x|)) form) and an
    uninitialised output buffer."""
    inputs = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
    expect = np.maximum(inputs, 0) + np.log1p(np.exp(-np.abs(inputs)))
    output = np.full(shape, np.nan, dtype)
    return expect, inputs, output
| [
"1027252281@qq.com"
] | 1027252281@qq.com |
11180e15b35f69a7900177782c6a8647996bb3db | 9efb7ed6ae39065837247c6fe66655e702d6120a | /static/solutions/labs/bfs/solution/vertex.py | db5d3c094bb69cf5c2f6b5fc5d20c6a21e515840 | [] | no_license | hardlyHacking/cs1 | 305b0db330ee13e4bbde71bdb53cb2a2378747a4 | 957f578b933fe01b208eae8e5c9577c056b565e5 | refs/heads/master | 2020-03-09T16:26:11.023918 | 2019-11-26T03:25:24 | 2019-11-26T03:25:24 | 128,884,594 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,695 | py | # vertex.py
# Emily Freebairn
# November 17, 2011
# Minor changes by THC.
# The Vertex class represents a vertex in a graph. It stores the vertex's name,
# location, and a list of the vertices that are adjacent to it.
from cs1lib import *
RADIUS = 8 # RADIUS of the drawn vertices
LINE_WIDTH = 3 # line width of the drawn edges
X_CORRECTION = 4 # horizontal correction amount to display names
Y_CORRECTION = 10 # vertical correction amount to display names
class Vertex:
    """A graph vertex: a name, a pixel location and a list of adjacent vertices."""

    # Initialize a Vertex, given its name and location.
    def __init__(self, name, x, y):
        self.name = name    # name of this vertex
        self.x = x          # x location of the vertex in pixels
        self.y = y          # y location of the vertex in pixels
        self.adjacent = []  # list of adjacent Vertex objects

    # Return the information about this vertex as a string, including
    # the name of the vertex, its location, and a list of names of its
    # adjacent vertices.
    def __str__(self):
        # Use str.join so a vertex with no neighbours is handled; the
        # original unconditionally read self.adjacent[-1] and raised
        # IndexError for an empty adjacency list.
        return (self.name + "; Location: " + str(self.x) + ", " + str(self.y) +
                "; Adjacent vertices: " +
                ", ".join(v.name for v in self.adjacent))

    # Draw the vertex with the color parameters given.
    def draw(self, r, g, b):
        set_fill_color(r, g, b)
        disable_stroke()
        draw_circle(self.x, self.y, RADIUS)

    # Draw the edge between self and the vertex given as a parameter,
    # in the color determined by the parameters.
    def draw_edge(self, vertex, r, g, b):
        enable_stroke()
        set_stroke_width(LINE_WIDTH)
        set_stroke_color(r, g, b)
        draw_line(self.x, self.y, vertex.x, vertex.y)

    # Draw all the edges between a vertex and the vertices in its adjacency
    # list, in the color determined by the parameters.
    def draw_neighbor_edges(self, r, g, b):
        for adjacent_vertex in self.adjacent:
            self.draw_edge(adjacent_vertex, r, g, b)

    # Display this vertex's name in the color determined by the parameters,
    # centered horizontally just above the vertex.
    def show_name(self, r, g, b):
        enable_stroke()
        set_stroke_color(r, g, b)
        set_font_size(18)
        set_font_bold()
        text_width = get_text_width(self.name)
        draw_text(self.name, self.x - text_width/2 - X_CORRECTION,
                  self.y - Y_CORRECTION)

    # Determine whether the point (x, y) is in the box inscribing the
    # vertex's circle.
    def is_point_near_vertex(self, x, y):
        return abs(self.x - x) <= RADIUS and abs(self.y - y) <= RADIUS
| [
"hardly.hacking@gmail.com"
] | hardly.hacking@gmail.com |
58e06984a80bfb1f133e9e4eee18958f1fe78fb2 | 0f5bdddfb93154d7f3cf097ede0a8c9ac403e027 | /tests/test_rtcrtpreceiver.py | cb6865f41e23a94d28ffbaf0933d26f08ec809e6 | [
"BSD-3-Clause"
] | permissive | pslq/aiortc | 13061885fc06eef42a05a0d8ad6eb3a3708873a3 | b27b27d3509c2a8335aadd949511b24b93530d86 | refs/heads/master | 2020-03-27T02:17:50.118454 | 2018-08-22T16:44:03 | 2018-08-22T17:58:11 | 145,780,077 | 1 | 0 | null | 2018-08-23T00:54:52 | 2018-08-23T00:54:52 | null | UTF-8 | Python | false | false | 9,343 | py | import asyncio
from unittest import TestCase
from unittest.mock import patch
from aiortc.codecs import PCMU_CODEC
from aiortc.exceptions import InvalidStateError
from aiortc.mediastreams import AudioFrame
from aiortc.rtcrtpparameters import RTCRtpCodecParameters, RTCRtpParameters
from aiortc.rtcrtpreceiver import (NackGenerator, RemoteStreamTrack,
RTCRtpReceiver, StreamStatistics)
from aiortc.rtp import RTP_SEQ_MODULO, RtcpPacket, RtpPacket
from aiortc.stats import RTCStatsReport
from .utils import dummy_dtls_transport_pair, load, run
def create_rtp_packets(count, seq=0):
    """Build *count* consecutive dummy RTP packets starting at sequence
    number *seq* (wrapping modulo RTP_SEQ_MODULO), all on SSRC 1234 with a
    160-tick timestamp increment per packet."""
    return [
        RtpPacket(
            payload_type=0,
            sequence_number=(seq + index) % RTP_SEQ_MODULO,
            ssrc=1234,
            timestamp=index * 160,
        )
        for index in range(count)
    ]
class ClosedDtlsTransport:
    # Minimal stand-in for a DTLS transport that is already closed; used to
    # verify that RTCRtpReceiver refuses to attach to a closed transport.
    state = 'closed'
class NackGeneratorTest(TestCase):
def create_generator(self):
class FakeReceiver:
def __init__(self):
self.nack = []
self.pli = []
async def _send_rtcp_nack(self, media_ssrc, lost):
self.nack.append((media_ssrc, lost))
async def _send_rtcp_pli(self, media_ssrc, lost):
self.pli.append(media_ssrc)
receiver = FakeReceiver()
return NackGenerator(receiver), receiver
def test_no_loss(self):
generator, receiver = self.create_generator()
for packet in create_rtp_packets(20, 0):
run(generator.add(packet))
self.assertEqual(receiver.nack, [])
self.assertEqual(receiver.pli, [])
self.assertEqual(generator.missing, set())
def test_with_loss(self):
generator, receiver = self.create_generator()
# receive packets: 0, <1 missing>, 2
packets = create_rtp_packets(3, 0)
missing = packets.pop(1)
for packet in packets:
run(generator.add(packet))
self.assertEqual(receiver.nack, [(1234, [1])])
self.assertEqual(receiver.pli, [])
self.assertEqual(generator.missing, set([1]))
receiver.nack.clear()
# late arrival
run(generator.add(missing))
self.assertEqual(receiver.nack, [])
self.assertEqual(receiver.pli, [])
self.assertEqual(generator.missing, set())
class StreamStatisticsTest(TestCase):
def create_counter(self):
return StreamStatistics(clockrate=8000, ssrc=0)
def test_no_loss(self):
counter = self.create_counter()
packets = create_rtp_packets(20, 0)
# receive 10 packets
for packet in packets[0:10]:
counter.add(packet)
self.assertEqual(counter.max_seq, 9)
self.assertEqual(counter.packets_received, 10)
self.assertEqual(counter.packets_lost, 0)
self.assertEqual(counter.fraction_lost, 0)
# receive 10 more packets
for packet in packets[10:20]:
counter.add(packet)
self.assertEqual(counter.max_seq, 19)
self.assertEqual(counter.packets_received, 20)
self.assertEqual(counter.packets_lost, 0)
self.assertEqual(counter.fraction_lost, 0)
def test_no_loss_cycle(self):
counter = self.create_counter()
# receive 10 packets (with sequence cycle)
for packet in create_rtp_packets(10, 65530):
counter.add(packet)
self.assertEqual(counter.max_seq, 3)
self.assertEqual(counter.packets_received, 10)
self.assertEqual(counter.packets_lost, 0)
self.assertEqual(counter.fraction_lost, 0)
def test_with_loss(self):
counter = self.create_counter()
packets = create_rtp_packets(20, 0)
packets.pop(1)
# receive 9 packets (one missing)
for packet in packets[0:9]:
counter.add(packet)
self.assertEqual(counter.max_seq, 9)
self.assertEqual(counter.packets_received, 9)
self.assertEqual(counter.packets_lost, 1)
self.assertEqual(counter.fraction_lost, 25)
# receive 10 more packets
for packet in packets[9:19]:
counter.add(packet)
self.assertEqual(counter.max_seq, 19)
self.assertEqual(counter.packets_received, 19)
self.assertEqual(counter.packets_lost, 1)
self.assertEqual(counter.fraction_lost, 0)
@patch('time.time')
def test_no_jitter(self, mock_time):
counter = self.create_counter()
packets = create_rtp_packets(3, 0)
mock_time.return_value = 1531562330.00
counter.add(packets[0])
self.assertEqual(counter._jitter_q4, 0)
self.assertEqual(counter.jitter, 0)
mock_time.return_value = 1531562330.02
counter.add(packets[1])
self.assertEqual(counter._jitter_q4, 0)
self.assertEqual(counter.jitter, 0)
mock_time.return_value = 1531562330.04
counter.add(packets[2])
self.assertEqual(counter._jitter_q4, 0)
self.assertEqual(counter.jitter, 0)
@patch('time.time')
def test_with_jitter(self, mock_time):
counter = self.create_counter()
packets = create_rtp_packets(3, 0)
mock_time.return_value = 1531562330.00
counter.add(packets[0])
self.assertEqual(counter._jitter_q4, 0)
self.assertEqual(counter.jitter, 0)
mock_time.return_value = 1531562330.03
counter.add(packets[1])
self.assertEqual(counter._jitter_q4, 80)
self.assertEqual(counter.jitter, 5)
mock_time.return_value = 1531562330.05
counter.add(packets[2])
self.assertEqual(counter._jitter_q4, 75)
self.assertEqual(counter.jitter, 4)
class RTCRtpReceiverTest(TestCase):
    """Behavioral tests for RTCRtpReceiver over a dummy DTLS transport."""

    def test_connection_error(self):
        """
        Close the underlying transport before the receiver.
        """
        dtls, _ = dummy_dtls_transport_pair()
        rx = RTCRtpReceiver('audio', dtls)
        self.assertEqual(rx.transport, dtls)

        rx._track = RemoteStreamTrack(kind='audio')
        rx._ssrc = 1234
        run(rx.receive(RTCRtpParameters(codecs=[PCMU_CODEC])))

        # prime RTCP state with one incoming packet
        run(rx._handle_rtp_packet(RtpPacket.parse(load('rtp.bin'))))

        # tear down the transport underneath the receiver
        run(dtls.close())

        # leave RTCP time to attempt sending a report
        run(asyncio.sleep(2))

        # clean up
        run(rx.stop())

    def test_rtp_and_rtcp(self):
        dtls, remote = dummy_dtls_transport_pair()
        rx = RTCRtpReceiver('audio', dtls)
        self.assertEqual(rx.transport, dtls)
        rx._track = RemoteStreamTrack(kind='audio')
        run(rx.receive(RTCRtpParameters(codecs=[PCMU_CODEC])))

        # deliver an RTP packet
        run(rx._handle_rtp_packet(RtpPacket.parse(load('rtp.bin'))))

        # deliver an RTCP sender report
        for rtcp in RtcpPacket.parse(load('rtcp_sr.bin')):
            run(rx._handle_rtcp_packet(rtcp))

        # stats should report both directions
        stats = run(rx.getStats())
        self.assertTrue(isinstance(stats, RTCStatsReport))
        self.assertEqual(sorted(stats.keys()), ['inbound-rtp', 'remote-outbound-rtp'])

        # the decoded frame surfaces on the remote track
        frame = run(rx._track.recv())
        self.assertTrue(isinstance(frame, AudioFrame))

        # clean up
        run(rx.stop())

    def test_rtp_empty_video_packet(self):
        dtls, remote = dummy_dtls_transport_pair()
        rx = RTCRtpReceiver('video', dtls)
        self.assertEqual(rx.transport, dtls)
        rx._track = RemoteStreamTrack(kind='video')
        vp8 = RTCRtpCodecParameters(name='VP8', clockRate=90000, payloadType=100)
        run(rx.receive(RTCRtpParameters(codecs=[vp8])))

        # an RTP packet with an empty payload must not blow up
        run(rx._handle_rtp_packet(RtpPacket(payload_type=100)))

        # clean up
        run(rx.stop())

    def test_send_rtcp_nack(self):
        dtls, remote = dummy_dtls_transport_pair()
        rx = RTCRtpReceiver('video', dtls)
        rx._ssrc = 1234
        rx._track = RemoteStreamTrack(kind='video')
        vp8 = RTCRtpCodecParameters(name='VP8', clockRate=90000, payloadType=100)
        run(rx.receive(RTCRtpParameters(codecs=[vp8])))

        # emit RTCP feedback: NACK for a lost sequence number
        run(rx._send_rtcp_nack(5678, [7654]))

        # clean up
        run(rx.stop())

    def test_send_rtcp_pli(self):
        dtls, remote = dummy_dtls_transport_pair()
        rx = RTCRtpReceiver('video', dtls)
        rx._ssrc = 1234
        rx._track = RemoteStreamTrack(kind='video')
        vp8 = RTCRtpCodecParameters(name='VP8', clockRate=90000, payloadType=100)
        run(rx.receive(RTCRtpParameters(codecs=[vp8])))

        # emit RTCP feedback: picture loss indication
        run(rx._send_rtcp_pli(5678))

        # clean up
        run(rx.stop())

    def test_invalid_dtls_transport_state(self):
        closed = ClosedDtlsTransport()
        with self.assertRaises(InvalidStateError):
            RTCRtpReceiver('audio', closed)
| [
"jeremy.laine@m4x.org"
] | jeremy.laine@m4x.org |
230a04e3f67dbd3427dd3f761e135062140e15f3 | c7573c4fdcc324c6863a94133177a1de06935df1 | /src/plot/label/radial.py | 4423c080081589fde629a55465308826b7a8599e | [] | no_license | WillCS/thesis | adbd8e1b0166851f9f0c3a30e522b9c4f3b80831 | 1aa6522f01a56755cd644a75e41e37ab62709c13 | refs/heads/master | 2023-09-01T23:27:50.090827 | 2021-11-02T10:09:41 | 2021-11-02T10:09:41 | 357,769,380 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | from __future__ import annotations
from typing import Dict, Any, Optional, List, Tuple
import math
import networkx as nx
from common import Clustering
from .label import LabelStrategy, Label
class RadialLabelStrategy(LabelStrategy):
    """Label placement strategy that pushes each node's label radially
    outward: the label sits at the node position scaled by *factor* and is
    rotated to the angle of the node's position vector."""

    def __init__(self, factor) -> None:
        # factor: radial multiplier applied to node coordinates; > 1 pushes
        # labels outward from the origin, < 1 pulls them inward.
        # (Fixed: __init__ previously declared a RadialLabelStrategy return
        # annotation; __init__ always returns None.)
        self.factor = factor

    def generate_labels(self,
            graph: nx.Graph,
            node_positions: Dict[Any, Tuple[float, float]],
            edges: Optional[List] = None,
            clusters: Optional[Clustering] = None
    ) -> Dict[Any, Label]:
        """Build a Label for every node of *graph*.

        node_positions maps each node to its (x, y) coordinates. *edges* is
        accepted for interface compatibility but unused here. When
        *clusters* is given, each label reads "node(cluster)"; otherwise it
        is just the node name.
        """
        labels = {}
        for n in graph.nodes:
            # Bug fix: clusters defaults to None but was dereferenced
            # unconditionally, crashing when no clustering was supplied.
            if clusters is not None:
                text = f"{n}({clusters.get_cluster_of(n)})"
            else:
                text = f"{n}"
            x, y = node_positions[n]
            pos = (x * self.factor, y * self.factor)
            # Angle of the node's position vector, converted to degrees.
            angle = math.atan2(y, x) * (180 / math.pi)
            labels[n] = Label(text, pos, angle)
        return labels
| [
"wcstibbards@gmail.com"
] | wcstibbards@gmail.com |
d3ad5c0fca6c1b070a3e8733bd4774edf53ddb72 | 6b1e395ce591f36b87656cc21174da8a46fdb943 | /individuals/triVecAdvMut.py | 522e875e7707b4bc30b229fb5a878122b2d05183 | [] | no_license | abernatskiy/evs | 867eb9fb68f1f7f066b12d30b05667db9228ec4b | 30c617c7d0d122c95caaadccaf481aa4d32992fd | refs/heads/master | 2021-01-10T14:45:31.282492 | 2018-03-08T06:04:12 | 2018-03-08T06:04:12 | 46,129,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,420 | py | import numpy as np
from trinaryVector import Individual as TriVecIndividual
class Individual(TriVecIndividual):
'''Class for evolutionary individuals described by a vector of
numbers taken from {-1,0,1} of constant length, with advanced
mutation operatot. Constructor takes a dictionary with the
following parameter fields:
length - length of the vector
mutExploration - fraction of density-preserving mutations
mutInsDelRatio - ratio of inserting mutations to deletions
'''
def __init__(self, params):
super(TriVecIndividual, self).__init__(params)
self.changeFrac = self.params['mutExploration']
self.deleteFrac = (1.0 - self.changeFrac)/(self.params['mutInsDelRatio']+1)
self.insertFrac = 1.0 - self.changeFrac - self.deleteFrac
self.values = np.random.random_integers(-1, 1, size=self.params['length'])
def requiredParametersTranslator(self):
t = super(Individual, self).requiredParametersTranslator()
t['toFloat'].update({'mutExploration', 'mutInsDelRatio'})
return t
def insert(self):
space = len(self.values) - np.count_nonzero(self.values)
if space < 1:
return False
pos = np.random.randint(space)
for i in xrange(len(self.values)):
if self.values[i] == 0:
if pos == 0:
self.values[i] = 1 if np.random.random() > 0.5 else -1
return True
else:
pos -= 1
print("Insert: One should not dwell here\n")
def delete(self):
space = np.count_nonzero(self.values)
if space < 1:
return False
pos = np.random.randint(space)
for i in xrange(len(self.values)):
if self.values[i] != 0:
if pos == 0:
self.values[i] = 0
return True
else:
pos -= 1
print("Delete: One should not dwell here\n")
def change(self):
space = np.count_nonzero(self.values)
if space < 1:
return False
pos = np.random.randint(space)
for i in xrange(len(self.values)):
if self.values[i] != 0:
if pos == 0:
self.values[i] = 1 if self.values[i] == -1 else -1
return True
else:
pos -= 1
print("Change: One should not dwell here\n")
def mutate(self):
mutated = False
while not mutated:
randVal = np.random.random()
if randVal < self.changeFrac:
mutated = self.change()
elif randVal < (self.changeFrac + self.insertFrac):
mutated = self.insert()
else:
mutated = self.delete()
if mutated:
self.renewID()
return mutated
| [
"abernats@uvm.edu"
] | abernats@uvm.edu |
db668ec99a3e918fab75689d177f3b571a030a86 | 8ef5a09d76a11c56963f18e6a08474a1a8bafe3c | /leet_code/7. Reverse Integer.py | 79b791271388c6874618159d647c255bde2e2e06 | [] | no_license | roiei/algo | 32c4677649c7666db148f6183fbfbf66c8b1969f | ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec | refs/heads/master | 2022-04-01T19:21:27.768675 | 2022-02-19T06:15:29 | 2022-02-19T06:15:29 | 169,021,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py |
class Solution:
    def reverse(self, x: 'int') -> 'int':
        """Reverse the decimal digits of a signed 32-bit integer.

        Returns 0 when the reversed value overflows the 32-bit signed
        range [-2**31, 2**31 - 1] (LeetCode 7's contract).

        Bug fix: the original guard compared x against float('inf'),
        which can never be true for an int, so overflowing results
        (e.g. reverse(1534236469)) were returned instead of 0.
        """
        sign = -1 if x < 0 else 1
        # Reversing the digit string and converting back through int()
        # also drops any leading zeros (e.g. 120 -> "021" -> 21).
        result = sign * int(str(abs(x))[::-1])
        # Check the 32-bit range on the *reversed* value, not the input.
        if result < -2**31 or result > 2**31 - 1:
            return 0
        return result
# Ad-hoc smoke test. Earlier sample inputs kept for reference:
#   123, -123, 120, 901000
x = 1534236469  # expected answer per LeetCode: 0
solver = Solution()
print(solver.reverse(x))
| [
"hyoukjea.son@hyundai.com"
] | hyoukjea.son@hyundai.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.