| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, 0-69 items) | license_type (string, 2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, len 2-10.3M) | authors (list, 1 item) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cae575ac11edcb3fd67e9ac513d89d54df1466d3
|
9b66589e00a519aef34e1dc1d5a6f9933a29350d
|
/web-app/myPdajProject/student/views.py
|
a7b6972abb60d4e9537bfc59997416249c75c6e2
|
[] |
no_license
|
ivanIndjic/PDAJ2020
|
50da7f9ddcae68c3d1c125e9d52076c3e0622349
|
d3d2274e13f43f4947adf92f223a50aa4343bbcd
|
refs/heads/main
| 2023-02-12T13:21:04.136564
| 2021-01-03T15:15:34
| 2021-01-03T15:15:34
| 316,608,490
| 0
| 0
| null | 2021-01-03T15:15:35
| 2020-11-27T22:03:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,033
|
py
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Student, Grade
from .serializers import *

# Create your views here.
class StudentAPI(APIView):
    def get(self, request):
        students = Student.objects.all()
        serializer = StudentSerializer(students, many=True)
        return Response(serializer.data, status=200)

    def post(self, request):
        serializer = StudentSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=200)
        return Response(serializer.errors, status=400)

    def put(self, request, id):
        student = Student.objects.get(id=id)
        serializer = StudentSerializer(student, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=200)
        return Response(serializer.errors, status=400)

    def delete(self, request, id):
        student = Student.objects.get(id=id)
        student.delete()
        return Response(status=200)


class GradeAPI(APIView):
    def get(self, request):
        grades = Grade.objects.all()
        serializer = GradeSerializer(grades, many=True)
        return Response(serializer.data, status=200)

    def post(self, request):
        serializer = GradeSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=200)
        return Response(serializer.errors, status=400)

    def put(self, request, id):
        grade = Grade.objects.get(id=id)
        serializer = GradeSerializer(grade, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=200)
        return Response(serializer.errors, status=400)

    def delete(self, request, id):
        grade = Grade.objects.get(id=id)
        grade.delete()
        return Response(status=200)
|
[
"ivan@kali.ivan"
] |
ivan@kali.ivan
|
c92425105870b039e770a6a229d77457f47a5146
|
a3ffed2f05cad8879c269f8943d1253e748f187c
|
/DjangoProject1/first_app/urls.py
|
28d2569c474b652f34875f0d5c23c7fd7ed4f41b
|
[
"MIT"
] |
permissive
|
gugabfigueiredo/DjangoBootcamp
|
0c7c1d72d228c2d7fc22a005ee54cfc519343915
|
cfd55a87cc3d8a8e70800e04e1414abdc9a98a07
|
refs/heads/master
| 2021-05-16T09:43:26.628640
| 2017-09-25T16:55:14
| 2017-09-25T16:55:14
| 104,571,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from first_app import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^help/', views.help, name='help')
]
|
[
"gugabfigueiredo@gmail.com"
] |
gugabfigueiredo@gmail.com
|
e51e1b2cb531dc47f108bf6ac05eec546e5a2d54
|
9243063862af8a7def0b2630cf57f909b3e02994
|
/CookieCompApi/users/api/viewsets.py
|
50f8c6c3472a86f196298d56cb1cae5195acfaf7
|
[] |
no_license
|
rodolfocbarroso/projeto-programacao-comercial
|
715d474dd0a47a0b26b6409d123b5cb5cd438861
|
57ac9d5501df58c65442e18392b86b21b96adcbd
|
refs/heads/main
| 2023-02-02T03:11:19.138746
| 2020-12-15T03:05:55
| 2020-12-15T03:05:55
| 313,403,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
from rest_framework.viewsets import ModelViewSet
from django.contrib.auth.models import Group
from .serializers import UserSerializer
class GroupViewSet(ModelViewSet):
queryset = Group.objects.all().order_by('name')
serializer_class = UserSerializer
|
[
"rodolfoc.barroso@hotmail.com"
] |
rodolfoc.barroso@hotmail.com
|
a38cc7faac158cb13452fdfdea3ff56459e19da8
|
5f833550140da00745528ac6b018b51737f5d904
|
/prod/gen-py/demo/dm_service.py
|
65c232db12902bab6be4e1a8bebdb486521ab32f
|
[] |
no_license
|
lajanugen/DM_feb16demo
|
b17eb023aa0c5a7644cf230d354346bb0d43d7b4
|
debad148f83411e1683a964918509112e4355042
|
refs/heads/master
| 2021-01-11T21:05:44.759245
| 2017-01-17T19:11:39
| 2017-01-17T19:11:39
| 79,242,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 6,289
|
py
|
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def act(self, s1):
"""
Parameters:
- s1
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def act(self, s1):
"""
Parameters:
- s1
"""
self.send_act(s1)
return self.recv_act()
def send_act(self, s1):
self._oprot.writeMessageBegin('act', TMessageType.CALL, self._seqid)
args = act_args()
args.s1 = s1
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_act(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = act_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "act failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["act"] = Processor.process_act
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_act(self, seqid, iprot, oprot):
args = act_args()
args.read(iprot)
iprot.readMessageEnd()
result = act_result()
result.success = self._handler.act(args.s1)
oprot.writeMessageBegin("act", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class act_args:
"""
Attributes:
- s1
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 's1', None, None, ), # 1
)
def __init__(self, s1=None,):
self.s1 = s1
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.s1 = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('act_args')
if self.s1 is not None:
oprot.writeFieldBegin('s1', TType.STRING, 1)
oprot.writeString(self.s1)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.s1)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class act_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('act_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
[
"llajan@diae1.eecs.umich.edu"
] |
llajan@diae1.eecs.umich.edu
|
b88349d1542353d788525c8182f69c7fc5de5058
|
5bd1f160308a7f692fec346610ddc0de8ae24550
|
/Tree/height.py
|
8f49f01f8df6abc3a87a26c93e002cc18cf5cf9e
|
[] |
no_license
|
sprataps/HackerRank-Data-Structures
|
dec6a2cc7c79733f0f9d92f363ef7e9b27368be1
|
52f586ecba0a1fc8e2bd6e22244a7738d39b2b27
|
refs/heads/master
| 2021-07-04T21:48:03.389995
| 2017-09-25T03:45:38
| 2017-09-25T03:45:38
| 103,877,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
class Node:
    def __init__(self, info):
        # A node of the tree: holds info as data plus left and right children.
        self.info = info
        self.left = None
        self.right = None

def height(root):
    # Height of an empty tree is -1; otherwise 1 + the height of the taller subtree.
    if not root:
        return -1
    return 1 + max(height(root.left), height(root.right))
|
[
"siddharthpratap92@gmail.com"
] |
siddharthpratap92@gmail.com
|
238cf01519fb73b7e7b01cd4d52adf1b0bae281c
|
356bcb1e1de65ebcb064aab50a4a245fcbda51b6
|
/pyworkflow/tests/em/programs/__init__.py
|
dfd18131ed2fab7fd29f27eb07df12b0f85a491b
|
[] |
no_license
|
EyeSeeTea/scipion-web
|
260d6339b1ccf400d208b395b7ed4db4f01759b4
|
18797bc15f6667adb42a307c411d32d45bab2b7d
|
refs/heads/web-master
| 2020-06-03T09:38:36.251023
| 2017-04-10T13:56:31
| 2017-04-10T13:56:31
| 94,125,511
| 0
| 0
| null | 2017-06-29T14:14:27
| 2017-06-12T18:02:05
|
C++
|
UTF-8
|
Python
| false
| false
| 1,101
|
py
|
# ***************************************************************************
# * Authors: J.M. De la Rosa Trevin (jmdelarosa@cnb.csic.es)
# *
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# * 02111-1307 USA
# *
# * All comments concerning this program package may be sent to the
# * e-mail address 'xmipp@cnb.csic.es'
# ***************************************************************************/
from base import ProgramTest
|
[
"delarosatrevin@gmail.com"
] |
delarosatrevin@gmail.com
|
b41b45264469af30ff43edafc563bf91b0dc79a0
|
1e6118d7cf3c1efe26186e5825093b3933c2aed3
|
/Projects/TipCalculatorForDean.py
|
c75d9d1c47fb5107790212d1126b29c48c530491
|
[] |
no_license
|
Mkurtz424/n00bc0d3r
|
6ce9120761175bafbd3c85923a310dd0609f2c39
|
066736c0580f38624239755e9650fdb16632c6f8
|
refs/heads/master
| 2021-01-10T19:26:43.164522
| 2013-05-05T16:43:24
| 2013-05-05T16:43:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
"""
YO BRO
LET'S COMMENT ON SOME SHIT UP IN HERE.
This is a simple tip calculator!
It asks the end-user for the following:
-Meal Cost
-Desired Tip Percentage
It then gives the user this information:
-Tip Amount
-Total Meal Cost
It also asks if the user would like to
run the program again. If the user says
'Q', then it will not.
"""
print 'Welcome to the EZ-TIP calculator!'
print 'Let us begin...'
raw_input('Press <<enter>> to begin.')
while 1:
    mealcost = input('Please enter the meal cost, including tax: $')
    percentage = input('Please enter the tip percentage, e.g. "20" or "15": ')
    # Convert the percentage to a multiplier and work out the tip.
    tipmultiplier = (percentage * .01)
    tipamount = (tipmultiplier * mealcost)
    tipamountround = round(tipamount, 2)
    # Add the rounded tip to the meal cost.
    totalmeal = mealcost + tipamountround
    print '...\n...\n...\nYour tip is %s, and your total meal cost is %s.' % (tipamountround, totalmeal)
    # Ask whether to run again.
    cont = raw_input('Calculate another meal cost? Hit <<enter>>. Quit? Type \'Q\' and hit <<enter>>')
    if cont == 'Q': break
|
[
"mkurtz424@gmail.com"
] |
mkurtz424@gmail.com
|
57457c0ab96d2597939e2439ed38bd8bd5a8c0ff
|
320fd1a99fec74d8813e87d0f9f9ba3bcdd1d8da
|
/balancer/exception.py
|
514095af4d66064862060e04c104b288813ae872
|
[] |
no_license
|
yinyangxu/openstack-lbaas
|
1ddaf0111a35e496ba6088018826220c399588da
|
12c26262c77125ce0ae75a15f25b2fcae52308bf
|
refs/heads/master
| 2020-04-07T17:33:29.789038
| 2012-07-18T10:24:20
| 2012-07-18T13:14:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,735
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Balancer base exception handling."""
import webob.exc as exception
class NotFound(exception.HTTPNotFound):
message = 'Resource not found.'
def __init__(self, message=None, **kwargs):
super(NotFound, self).__init__(message)
self.kwargs = kwargs
class DeviceNotFound(NotFound):
message = 'Device not found'
class LoadBalancerNotFound(NotFound):
message = 'LoadBalancer not found'
class ProbeNotFound(NotFound):
message = 'Probe not found'
class StickyNotFound(NotFound):
message = 'Sticky not found'
class ServerNotFound(NotFound):
message = 'Server not found'
class ServerFarmNotFound(NotFound):
message = 'Server Farm not found'
class PredictorNotFound(NotFound):
message = 'Predictor not found'
class VirtualServerNotFound(NotFound):
message = 'Virtual Server not found'
|
[
"yorik.sar@gmail.com"
] |
yorik.sar@gmail.com
|
a1f7239ed88d8a406b87852ca8acdb3d1be0a969
|
bf9305e4da7fc84ad677ccf6e819d2852d53f389
|
/django-testing/django_testing/tests/students/test_courses_api.py
|
b9af0f94da7b388ef4f0966b5f3e02e3d9852fbb
|
[] |
no_license
|
SergueiKozlenko/HomeworksDjango
|
601166c4cab4a5d921db34610907e7beec2d4d80
|
26e20eb52dfb901f4a60376627f10146c733856b
|
refs/heads/master
| 2023-06-01T13:41:27.937068
| 2021-06-05T22:53:40
| 2021-06-05T22:53:40
| 371,978,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,090
|
py
|
import pytest
from django.urls import reverse
from rest_framework.status import HTTP_200_OK, HTTP_201_CREATED, HTTP_204_NO_CONTENT
import random
@pytest.mark.django_db
def test_course(api_client, course_factory, student_factory):
"""Тест получения 1го курса (retrieve-логика)."""
student = student_factory(_quantity=2)
course = course_factory(students=student)
url = reverse('courses-detail', args=[course.id])
resp = api_client.get(url)
assert resp.status_code == HTTP_200_OK
assert resp.json()['id'] == course.id
@pytest.mark.django_db
def test_courses_list(api_client, course_factory, student_factory):
"""Тест получения списка курсов (list-логика)."""
students = student_factory(_quantity=3)
courses = course_factory(students=students, _quantity=5)
expected_ids_set = [course.id for course in courses]
url = reverse('courses-list')
resp = api_client.get(url)
results_ids_set = [result['id'] for result in resp.json()]
assert resp.status_code == HTTP_200_OK
assert len(resp.json()) == 5
assert results_ids_set == expected_ids_set
@pytest.mark.django_db
def test_courses_filter_by_id(api_client, course_factory, student_factory):
"""Тест фильтрации списка курсов по id."""
students = student_factory(_quantity=3)
courses = course_factory(students=students, _quantity=5)
random_id = random.choice(courses).id
url = reverse('courses-list')
resp = api_client.get(url, {'id': random_id})
assert resp.status_code == HTTP_200_OK
assert resp.json()[0]
assert resp.json()[0]['id'] == random_id
@pytest.mark.django_db
def test_courses_filter_by_name(api_client, course_factory, student_factory):
"""Тест фильтрации списка курсов по name."""
students = student_factory(_quantity=3)
courses = course_factory(students=students, _quantity=5)
random_name = random.choice(courses).name
url = reverse('courses-list')
resp = api_client.get(url, {'name': random_name})
assert resp.status_code == HTTP_200_OK
assert resp.json()[0]
assert resp.json()[0]['name'] == random_name
@pytest.mark.django_db
def test_course_create(api_client):
"""Тест успешного создания курса."""
course_payload = {'name': 'test_course_created'}
url = reverse('courses-list')
resp = api_client.post(url, course_payload, format='json')
resp_get = api_client.get(url, {'name': course_payload['name']})
assert resp.status_code == HTTP_201_CREATED
assert resp_get.json()[0]
assert resp_get.json()[0]['name'] == course_payload['name']
@pytest.mark.django_db
def test_course_update(api_client, course_factory, student_factory):
"""Тест успешного обновления курса."""
students = student_factory(_quantity=2)
course_old = course_factory(students=students)
course_new = course_factory(students=students)
url = reverse('courses-detail', args=[course_old.id])
resp = api_client.patch(url, {'name': course_new.name}, format='json')
resp_get = api_client.get(url, {'id': course_old.id})
assert resp.status_code == HTTP_200_OK
assert resp.json()['id'] == course_old.id and resp.json()['name'] == course_new.name
assert resp_get.json()['id'] == course_old.id and resp_get.json()['name'] == course_new.name
@pytest.mark.django_db
def test_course_delete(api_client, course_factory, student_factory):
"""Тест успешного удаления курса."""
students = student_factory(_quantity=3)
courses = course_factory(students=students, _quantity=5)
random_id = random.choice(courses).id
url = reverse('courses-detail', args=[random_id])
url_get = reverse('courses-list')
resp = api_client.delete(url)
resp_get = api_client.get(url_get)
existed_ids = [course['id'] for course in resp_get.json()]
assert resp.status_code == HTTP_204_NO_CONTENT
assert random_id not in existed_ids
|
[
"genesis.biconsulting@gmail.com"
] |
genesis.biconsulting@gmail.com
|
4504a6ef89e70bac99856812b32bedbf3a4c7ba1
|
bdb59e37af0382808c60bae510d046f8b48e3a4c
|
/Code_SocketProgramming/client_TCP.py
|
e05491f4a67465f53a100bf5d4e22aa2f09029b4
|
[] |
no_license
|
yiuj/CSCI353
|
b5045cbf817b40d70667f42f7a57190d077035b1
|
1998eb75ba703ff7b1459d2c0b612ac2280eab26
|
refs/heads/master
| 2020-08-03T05:46:20.209788
| 2019-09-30T01:23:18
| 2019-09-30T01:23:18
| 211,642,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
import socket # Import socket module
def getSocket():
s = socket.socket() # Create a socket object
port = 12345
s.connect(('localhost', port)) # Establish connection/handshake
return s
if __name__ == "__main__":
sock = getSocket()
try:
print " You will have your chance to send the data. Let me show you a demo first. "
sock.send("This is the message. It will be capitalized ! !");
print "Received message from server : ", sock.recv(1024)
input = True
while(input):
user_input=str(raw_input('>Please enter string you would like capitalize : '))
print " You have entered : ", user_input
sock.send(user_input)
print " Received message from server : ", sock.recv(1024)
input = str(raw_input('>Do you want to send more message ? (Y/y/N/n/any character) : '))
input = True if input in ['Y', 'y'] else False
except:
print "Something went wrong while connecting to server"
finally:
sock.close()
|
[
"yiuj@usc.edu"
] |
yiuj@usc.edu
|
1feb1388a5a8490f192460e55ed0d5d57a889e48
|
1747f184407824adc03186de9204742bf6d150ce
|
/faiss/__init__.py
|
fa1a6050d3bc10286af8a6a1b298a5f77c63f3f3
|
[
"BSD-2-Clause"
] |
permissive
|
humanfirewall-iot19/server
|
855a6a98f25d7fd13cd2c80f67b7c37df40f6913
|
ccd0834549b09834e8c24d49ca2d3ab761d26d8c
|
refs/heads/master
| 2020-05-20T13:00:30.836029
| 2019-06-29T10:05:12
| 2019-06-29T10:05:12
| 185,585,428
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,423
|
py
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD+Patents license found in the
# LICENSE file in the root directory of this source tree.
#@nolint
# not linting this file because it imports * from swigfaiss, which
# causes a ton of useless warnings.
import numpy as np
import sys
import inspect
import pdb
# we import * so that the symbol X can be accessed as faiss.X
from .swigfaiss import *
__version__ = "%d.%d.%d" % (FAISS_VERSION_MAJOR,
FAISS_VERSION_MINOR,
FAISS_VERSION_PATCH)
##################################################################
# The functions below add or replace some methods for classes
# this is to be able to pass in numpy arrays directly
# The C++ version of the classnames will be suffixed with _c
##################################################################
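# Illustrative usage sketch (not part of the original file): once the
# replacements below are installed, the numpy-aware wrappers forward to the
# raw SWIG methods, which remain available under the *_c names. Assuming a
# built faiss with swigfaiss importable, something like:
#
#   xb = np.random.rand(1000, 64).astype('float32')
#   index = IndexFlatL2(64)
#   index.add(xb)               # wrapper calls index.add_c(1000, swig_ptr(xb))
#   D, I = index.search(xb, 5)  # returns (1000, 5) float32 / int64 arrays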
def replace_method(the_class, name, replacement, ignore_missing=False):
try:
orig_method = getattr(the_class, name)
except AttributeError:
if ignore_missing:
return
raise
if orig_method.__name__ == 'replacement_' + name:
# replacement was done in parent class
return
setattr(the_class, name + '_c', orig_method)
setattr(the_class, name, replacement)
def handle_Clustering():
def replacement_train(self, x, index):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d
self.train_c(n, swig_ptr(x), index)
replace_method(Clustering, 'train', replacement_train)
handle_Clustering()
def handle_Quantizer(the_class):
def replacement_train(self, x):
n, d = x.shape
assert d == self.d
self.train_c(n, swig_ptr(x))
def replacement_compute_codes(self, x):
n, d = x.shape
assert d == self.d
codes = np.empty((n, self.code_size), dtype='uint8')
self.compute_codes_c(swig_ptr(x), swig_ptr(codes), n)
return codes
def replacement_decode(self, codes):
n, cs = codes.shape
assert cs == self.code_size
x = np.empty((n, self.d), dtype='float32')
self.decode_c(swig_ptr(codes), swig_ptr(x), n)
return x
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'compute_codes', replacement_compute_codes)
replace_method(the_class, 'decode', replacement_decode)
handle_Quantizer(ProductQuantizer)
handle_Quantizer(ScalarQuantizer)
def handle_Index(the_class):
def replacement_add(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d
self.add_c(n, swig_ptr(x))
def replacement_add_with_ids(self, x, ids):
n, d = x.shape
assert d == self.d
assert ids.shape == (n, ), 'not same nb of vectors as ids'
self.add_with_ids_c(n, swig_ptr(x), swig_ptr(ids))
def replacement_assign(self, x, k):
n, d = x.shape
assert d == self.d
labels = np.empty((n, k), dtype=np.int64)
self.assign_c(n, swig_ptr(x), swig_ptr(labels), k)
return labels
def replacement_train(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d
self.train_c(n, swig_ptr(x))
def replacement_search(self, x, k):
n, d = x.shape
assert d == self.d
distances = np.empty((n, k), dtype=np.float32)
labels = np.empty((n, k), dtype=np.int64)
self.search_c(n, swig_ptr(x),
k, swig_ptr(distances),
swig_ptr(labels))
return distances, labels
def replacement_search_and_reconstruct(self, x, k):
n, d = x.shape
assert d == self.d
distances = np.empty((n, k), dtype=np.float32)
labels = np.empty((n, k), dtype=np.int64)
recons = np.empty((n, k, d), dtype=np.float32)
self.search_and_reconstruct_c(n, swig_ptr(x),
k, swig_ptr(distances),
swig_ptr(labels),
swig_ptr(recons))
return distances, labels, recons
def replacement_remove_ids(self, x):
if isinstance(x, IDSelector):
sel = x
else:
assert x.ndim == 1
sel = IDSelectorBatch(x.size, swig_ptr(x))
return self.remove_ids_c(sel)
def replacement_reconstruct(self, key):
x = np.empty(self.d, dtype=np.float32)
self.reconstruct_c(key, swig_ptr(x))
return x
def replacement_reconstruct_n(self, n0, ni):
x = np.empty((ni, self.d), dtype=np.float32)
self.reconstruct_n_c(n0, ni, swig_ptr(x))
return x
def replacement_update_vectors(self, keys, x):
n = keys.size
assert keys.shape == (n, )
assert x.shape == (n, self.d)
self.update_vectors_c(n, swig_ptr(keys), swig_ptr(x))
def replacement_range_search(self, x, thresh):
n, d = x.shape
assert d == self.d
res = RangeSearchResult(n)
self.range_search_c(n, swig_ptr(x), thresh, res)
# get pointers and copy them
lims = rev_swig_ptr(res.lims, n + 1).copy()
nd = int(lims[-1])
D = rev_swig_ptr(res.distances, nd).copy()
I = rev_swig_ptr(res.labels, nd).copy()
return lims, D, I
replace_method(the_class, 'add', replacement_add)
replace_method(the_class, 'add_with_ids', replacement_add_with_ids)
replace_method(the_class, 'assign', replacement_assign)
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'search', replacement_search)
replace_method(the_class, 'remove_ids', replacement_remove_ids)
replace_method(the_class, 'reconstruct', replacement_reconstruct)
replace_method(the_class, 'reconstruct_n', replacement_reconstruct_n)
replace_method(the_class, 'range_search', replacement_range_search)
replace_method(the_class, 'update_vectors', replacement_update_vectors,
ignore_missing=True)
replace_method(the_class, 'search_and_reconstruct',
replacement_search_and_reconstruct, ignore_missing=True)
def handle_IndexBinary(the_class):
def replacement_add(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d * 8 == self.d
self.add_c(n, swig_ptr(x))
def replacement_add_with_ids(self, x, ids):
n, d = x.shape
assert d * 8 == self.d
assert ids.shape == (n, ), 'not same nb of vectors as ids'
self.add_with_ids_c(n, swig_ptr(x), swig_ptr(ids))
def replacement_train(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d * 8 == self.d
self.train_c(n, swig_ptr(x))
def replacement_reconstruct(self, key):
x = np.empty(self.d // 8, dtype=np.uint8)
self.reconstruct_c(key, swig_ptr(x))
return x
def replacement_search(self, x, k):
n, d = x.shape
assert d * 8 == self.d
distances = np.empty((n, k), dtype=np.int32)
labels = np.empty((n, k), dtype=np.int64)
self.search_c(n, swig_ptr(x),
k, swig_ptr(distances),
swig_ptr(labels))
return distances, labels
replace_method(the_class, 'add', replacement_add)
replace_method(the_class, 'add_with_ids', replacement_add_with_ids)
replace_method(the_class, 'train', replacement_train)
replace_method(the_class, 'search', replacement_search)
replace_method(the_class, 'reconstruct', replacement_reconstruct)
def handle_VectorTransform(the_class):
def apply_method(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d_in
y = np.empty((n, self.d_out), dtype=np.float32)
self.apply_noalloc(n, swig_ptr(x), swig_ptr(y))
return y
def replacement_reverse_transform(self, x):
n, d = x.shape
assert d == self.d_out
y = np.empty((n, self.d_in), dtype=np.float32)
self.reverse_transform_c(n, swig_ptr(x), swig_ptr(y))
return y
def replacement_vt_train(self, x):
assert x.flags.contiguous
n, d = x.shape
assert d == self.d_in
self.train_c(n, swig_ptr(x))
replace_method(the_class, 'train', replacement_vt_train)
# apply is reserved in Python...
the_class.apply_py = apply_method
replace_method(the_class, 'reverse_transform',
replacement_reverse_transform)
def handle_AutoTuneCriterion(the_class):
def replacement_set_groundtruth(self, D, I):
if D:
assert I.shape == D.shape
self.nq, self.gt_nnn = I.shape
self.set_groundtruth_c(
self.gt_nnn, swig_ptr(D) if D else None, swig_ptr(I))
def replacement_evaluate(self, D, I):
assert I.shape == D.shape
assert I.shape == (self.nq, self.nnn)
return self.evaluate_c(swig_ptr(D), swig_ptr(I))
replace_method(the_class, 'set_groundtruth', replacement_set_groundtruth)
replace_method(the_class, 'evaluate', replacement_evaluate)
def handle_ParameterSpace(the_class):
def replacement_explore(self, index, xq, crit):
assert xq.shape == (crit.nq, index.d)
ops = OperatingPoints()
self.explore_c(index, crit.nq, swig_ptr(xq),
crit, ops)
return ops
replace_method(the_class, 'explore', replacement_explore)
def handle_MatrixStats(the_class):
original_init = the_class.__init__
def replacement_init(self, m):
assert len(m.shape) == 2
original_init(self, m.shape[0], m.shape[1], swig_ptr(m))
the_class.__init__ = replacement_init
handle_MatrixStats(MatrixStats)
this_module = sys.modules[__name__]
for symbol in dir(this_module):
obj = getattr(this_module, symbol)
# print symbol, isinstance(obj, (type, types.ClassType))
if inspect.isclass(obj):
the_class = obj
if issubclass(the_class, Index):
handle_Index(the_class)
if issubclass(the_class, IndexBinary):
handle_IndexBinary(the_class)
if issubclass(the_class, VectorTransform):
handle_VectorTransform(the_class)
if issubclass(the_class, AutoTuneCriterion):
handle_AutoTuneCriterion(the_class)
if issubclass(the_class, ParameterSpace):
handle_ParameterSpace(the_class)
###########################################
# Add Python references to objects
# we do this at the Python class wrapper level.
###########################################
def add_ref_in_constructor(the_class, parameter_no):
# adds a reference to parameter parameter_no in self
# so that that parameter does not get deallocated before self
original_init = the_class.__init__
def replacement_init(self, *args):
original_init(self, *args)
self.referenced_objects = [args[parameter_no]]
def replacement_init_multiple(self, *args):
original_init(self, *args)
pset = parameter_no[len(args)]
self.referenced_objects = [args[no] for no in pset]
if type(parameter_no) == dict:
# a list of parameters to keep, depending on the number of arguments
the_class.__init__ = replacement_init_multiple
else:
the_class.__init__ = replacement_init
def add_ref_in_method(the_class, method_name, parameter_no):
original_method = getattr(the_class, method_name)
def replacement_method(self, *args):
ref = args[parameter_no]
if not hasattr(self, 'referenced_objects'):
self.referenced_objects = [ref]
else:
self.referenced_objects.append(ref)
return original_method(self, *args)
setattr(the_class, method_name, replacement_method)
def add_ref_in_function(function_name, parameter_no):
# assumes the function returns an object
original_function = getattr(this_module, function_name)
def replacement_function(*args):
result = original_function(*args)
ref = args[parameter_no]
result.referenced_objects = [ref]
return result
setattr(this_module, function_name, replacement_function)
add_ref_in_constructor(IndexIVFFlat, 0)
add_ref_in_constructor(IndexIVFFlatDedup, 0)
add_ref_in_constructor(IndexPreTransform, {2: [0, 1], 1: [0]})
add_ref_in_method(IndexPreTransform, 'prepend_transform', 0)
add_ref_in_constructor(IndexIVFPQ, 0)
add_ref_in_constructor(IndexIVFPQR, 0)
add_ref_in_constructor(Index2Layer, 0)
add_ref_in_constructor(Level1Quantizer, 0)
add_ref_in_constructor(IndexIVFScalarQuantizer, 0)
add_ref_in_constructor(IndexIDMap, 0)
add_ref_in_constructor(IndexIDMap2, 0)
add_ref_in_method(IndexShards, 'add_shard', 0)
add_ref_in_method(IndexBinaryShards, 'add_shard', 0)
add_ref_in_constructor(IndexRefineFlat, 0)
add_ref_in_constructor(IndexBinaryIVF, 0)
add_ref_in_constructor(IndexBinaryFromFloat, 0)
add_ref_in_method(IndexReplicas, 'addIndex', 0)
add_ref_in_method(IndexBinaryReplicas, 'addIndex', 0)
# seems really marginal...
# remove_ref_from_method(IndexReplicas, 'removeIndex', 0)
if hasattr(this_module, 'GpuIndexFlat'):
# handle all the GPUResources refs
add_ref_in_function('index_cpu_to_gpu', 0)
add_ref_in_constructor(GpuIndexFlat, 0)
add_ref_in_constructor(GpuIndexFlatIP, 0)
add_ref_in_constructor(GpuIndexFlatL2, 0)
add_ref_in_constructor(GpuIndexIVFFlat, 0)
add_ref_in_constructor(GpuIndexIVFPQ, 0)
add_ref_in_constructor(GpuIndexBinaryFlat, 0)
###########################################
# GPU functions
###########################################
def index_cpu_to_gpu_multiple_py(resources, index, co=None):
"""builds the C++ vectors for the GPU indices and the
resources. Handles the common case where the resources are assigned to
the first len(resources) GPUs"""
vres = GpuResourcesVector()
vdev = IntVector()
for i, res in enumerate(resources):
vdev.push_back(i)
vres.push_back(res)
index = index_cpu_to_gpu_multiple(vres, vdev, index, co)
index.referenced_objects = resources
return index
def index_cpu_to_all_gpus(index, co=None, ngpu=-1):
if ngpu == -1:
ngpu = get_num_gpus()
res = [StandardGpuResources() for i in range(ngpu)]
index2 = index_cpu_to_gpu_multiple_py(res, index, co)
return index2
###########################################
# numpy array / std::vector conversions
###########################################
# mapping from vector names in swigfaiss.swig and the numpy dtype names
vector_name_map = {
'Float': 'float32',
'Byte': 'uint8',
'Char': 'int8',
'Uint64': 'uint64',
'Long': 'int64',
'Int': 'int32',
'Double': 'float64'
}
def vector_to_array(v):
""" convert a C++ vector to a numpy array """
classname = v.__class__.__name__
assert classname.endswith('Vector')
dtype = np.dtype(vector_name_map[classname[:-6]])
a = np.empty(v.size(), dtype=dtype)
if v.size() > 0:
memcpy(swig_ptr(a), v.data(), a.nbytes)
return a
def vector_float_to_array(v):
return vector_to_array(v)
def copy_array_to_vector(a, v):
""" copy a numpy array to a vector """
n, = a.shape
classname = v.__class__.__name__
assert classname.endswith('Vector')
dtype = np.dtype(vector_name_map[classname[:-6]])
assert dtype == a.dtype, (
'cannot copy a %s array to a %s (should be %s)' % (
a.dtype, classname, dtype))
v.resize(n)
if n > 0:
memcpy(v.data(), swig_ptr(a), a.nbytes)
###########################################
# Wrapper for a few functions
###########################################
def kmin(array, k):
"""return k smallest values (and their indices) of the lines of a
float32 array"""
m, n = array.shape
I = np.zeros((m, k), dtype='int64')
D = np.zeros((m, k), dtype='float32')
ha = float_maxheap_array_t()
ha.ids = swig_ptr(I)
ha.val = swig_ptr(D)
ha.nh = m
ha.k = k
ha.heapify()
ha.addn(n, swig_ptr(array))
ha.reorder()
return D, I
def kmax(array, k):
"""return k largest values (and their indices) of the lines of a
float32 array"""
m, n = array.shape
I = np.zeros((m, k), dtype='int64')
D = np.zeros((m, k), dtype='float32')
ha = float_minheap_array_t()
ha.ids = swig_ptr(I)
ha.val = swig_ptr(D)
ha.nh = m
ha.k = k
ha.heapify()
ha.addn(n, swig_ptr(array))
ha.reorder()
return D, I
def rand(n, seed=12345):
res = np.empty(n, dtype='float32')
float_rand(swig_ptr(res), n, seed)
return res
def lrand(n, seed=12345):
res = np.empty(n, dtype='int64')
long_rand(swig_ptr(res), n, seed)
return res
def randn(n, seed=12345):
res = np.empty(n, dtype='float32')
float_randn(swig_ptr(res), n, seed)
return res
def eval_intersection(I1, I2):
""" size of intersection between each line of two result tables"""
n = I1.shape[0]
assert I2.shape[0] == n
k1, k2 = I1.shape[1], I2.shape[1]
ninter = 0
for i in range(n):
ninter += ranklist_intersection_size(
k1, swig_ptr(I1[i]), k2, swig_ptr(I2[i]))
return ninter
def normalize_L2(x):
fvec_renorm_L2(x.shape[1], x.shape[0], swig_ptr(x))
def replacement_map_add(self, keys, vals):
n, = keys.shape
assert (n,) == keys.shape
self.add_c(n, swig_ptr(keys), swig_ptr(vals))
def replacement_map_search_multiple(self, keys):
n, = keys.shape
vals = np.empty(n, dtype='int64')
self.search_multiple_c(n, swig_ptr(keys), swig_ptr(vals))
return vals
replace_method(MapLong2Long, 'add', replacement_map_add)
replace_method(MapLong2Long, 'search_multiple', replacement_map_search_multiple)
###########################################
# Kmeans object
###########################################
class Kmeans:
"""shallow wrapper around the Clustering object. The important method
is train()."""
def __init__(self, d, k, **kwargs):
"""d: input dimension, k: nb of centroids. Additional
parameters are passed on the ClusteringParameters object,
including niter=25, verbose=False, spherical = False
"""
self.d = d
self.k = k
self.cp = ClusteringParameters()
for k, v in kwargs.items():
# if this raises an exception, it means that it is a non-existent field
getattr(self.cp, k)
setattr(self.cp, k, v)
self.centroids = None
def train(self, x):
n, d = x.shape
assert d == self.d
clus = Clustering(d, self.k, self.cp)
if self.cp.spherical:
self.index = IndexFlatIP(d)
else:
self.index = IndexFlatL2(d)
clus.train(x, self.index)
centroids = vector_float_to_array(clus.centroids)
self.centroids = centroids.reshape(self.k, d)
self.obj = vector_float_to_array(clus.obj)
return self.obj[-1]
def assign(self, x):
assert self.centroids is not None, "should train before assigning"
index = IndexFlatL2(self.d)
index.add(self.centroids)
D, I = index.search(x, 1)
return D.ravel(), I.ravel()
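    # Brief usage sketch for the Kmeans wrapper above (illustrative only; the
    # array sizes are made up). Keyword arguments are forwarded to
    # ClusteringParameters, so fields such as niter or verbose can be set:
    #
    #   x = np.random.rand(10000, 64).astype('float32')
    #   km = Kmeans(64, 256, niter=20, verbose=True)
    #   final_obj = km.train(x)   # returns the last clustering objective value
    #   D, I = km.assign(x)       # squared distance and centroid id per row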
# IndexProxy was renamed to IndexReplicas, remap the old name for any old code
# people may have
IndexProxy = IndexReplicas
|
[
"andreafioraldi@gmail.com"
] |
andreafioraldi@gmail.com
|
6b9e4f5dc88728c885f456e770c902959ab99226
|
31d2fadeb43c4634c29b517741cfce60a96fac3c
|
/src/handlers/telegram_only/telegram_examples_dir/inlinekeyboard.py
|
8b4a0d32d2fcc13dcb10ae3dd690e2fb2ecc7773
|
[] |
no_license
|
Ar-Kareem/dynamic_telegram_bot
|
1b443bdfc7f6abdd7d85d30287ad2d07e97a4412
|
a3b0f8771a63f70e95786b528fb61304d93ecac3
|
refs/heads/master
| 2023-06-10T21:01:44.376021
| 2021-07-01T22:07:43
| 2021-07-01T22:07:43
| 329,928,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,001
|
py
|
"""
Basic example for a bot that uses inline keyboards.
"""
import logging
import os
from functools import partial
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, CallbackContext
from src.core.actions import TelegramBotInitiated
from src.core.pocket import Pocket
logger = logging.getLogger(__name__)
def start(update: Update, context: CallbackContext) -> None:
keyboard = [
[
InlineKeyboardButton("Option 1", callback_data='1'),
InlineKeyboardButton("Option 2", callback_data='2'),
],
[InlineKeyboardButton("Option 3", callback_data='3')],
]
reply_markup = InlineKeyboardMarkup(keyboard)
update.message.reply_text('Please choose:', reply_markup=reply_markup)
def button(update: Update, context: CallbackContext) -> None:
query = update.callback_query
# CallbackQueries need to be answered, even if no notification to the user is needed
# Some clients may have trouble otherwise. See https://core.telegram.org/bots/api#callbackquery
query.answer()
query.edit_message_text(text=f"Selected option: {query.data}")
def help_command(update: Update, context: CallbackContext) -> None:
update.message.reply_text("Use /start to test this bot.")
def main(updater: Updater):
updater.dispatcher.add_handler(CommandHandler('start', start))
updater.dispatcher.add_handler(CallbackQueryHandler(button))
updater.dispatcher.add_handler(CommandHandler('help', help_command))
def init_bot_handlers(action: TelegramBotInitiated, pocket: Pocket):
main(pocket.telegram_updater)
def init(pocket: Pocket):
filename = os.path.basename(__file__).rstrip('.py')
if pocket.config.getboolean('TELEGRAM EXAMPLES', filename, fallback=False):
pocket.reducer.register_handler(trigger=TelegramBotInitiated,
callback=partial(init_bot_handlers, pocket=pocket))
|
[
"abdulrahman_3000@hotmail.com"
] |
abdulrahman_3000@hotmail.com
|
cfec3790dfcf8355b5573d1f82ff37d5126137b6
|
f29de0f2c4839e266242e57900b1e97488d97a68
|
/has-more-vowels/has-more-vowels/hasmorevowels.py
|
3942f7bf58dc7916bd2f4696b79b3cb6f4255783
|
[] |
no_license
|
Supreethamg/code-challenges
|
5e951ef3826f164e0d97bf55d72a75a1dedf469a
|
b376bcd50a3f45fa30db5ae55e180a46f04d5454
|
refs/heads/master
| 2022-12-25T11:23:01.242408
| 2020-09-30T06:45:22
| 2020-09-30T06:45:22
| 297,492,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
"""Does word contain more vowels than non-vowels?
If the word is over half vowels, it should return True:
>>> has_more_vowels("moose")
True
If it's half vowels (or less), it's false:
>>> has_more_vowels("mice")
False
>>> has_more_vowels("graph")
False
Don't consider "y" as a vowel:
>>> has_more_vowels("yay")
False
Uppercase vowels are still vowels:
>>> has_more_vowels("Aal")
True
"""
def has_more_vowels(word):
"""Does word contain more vowels than non-vowels?"""
vowel= 0
non_vowel = 0
for i, v in enumerate(word):
if v.lower() in ['a','e','i','o','u']:
vowel = vowel + 1
else:
non_vowel = non_vowel + 1
return vowel > non_vowel
if __name__ == '__main__':
import doctest
if doctest.testmod().failed == 0:
print("\n*** ALL TESTS PASSED. HOORAY!\n")
|
[
"supreethamg@gmail.com"
] |
supreethamg@gmail.com
|
b0aefdcf2c2db6779f6677518c901550f3e59888
|
8079a6cfca1de1ddfb7d5ac3b215d81cfba36184
|
/count_tags_xml.py
|
f64485c9069f3b660ebf3b731e2d3fff0f6d3f61
|
[] |
no_license
|
tonogeneral/python
|
e764a366fbf126a3bd1a5734396c38084a1e7d3b
|
a4f3bc3848f6c19a26260b57452013ee028a2b42
|
refs/heads/master
| 2023-02-04T12:20:57.369914
| 2020-12-16T16:13:35
| 2020-12-16T16:13:35
| 281,011,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
import urllib.request, urllib.parse, urllib.error
import xml.etree.ElementTree as ET
import ssl
#api_key = False
# If you have a Google Places API key, enter it here
# api_key = 'AIzaSy___IDByT70'
# https://developers.google.com/maps/documentation/geocoding/intro
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
while True:
url = input('Enter location: ')
if len(url) < 1: break
print('Retrieving', url)
xml = urllib.request.urlopen(url, context=ctx)
data = xml.read()
arbol = ET.fromstring(data)
lst = arbol.findall('comments/comment')
suma=0
for count in lst:
valor=count.find('count').text
suma=int(valor)+suma
print('Retrieved:',len(data))
print('Count:',len(lst))
print('Sum:',suma)
#print("Count:",tree.findall('name').text)
|
[
"tonogeneral@gmail.com"
] |
tonogeneral@gmail.com
|
6933509e67333ddc4393c9e71a72720bdb5c74d8
|
d8bf68df624e5a50324527baf44ca610340b060d
|
/sniffersapp/projects/models.py
|
7b6880af9a6d233b3aac629f739f0adb8647d23d
|
[
"MIT"
] |
permissive
|
jamesokane/Oneworksite-Application
|
0aadd4661e09c7448b465c967377f8058df82f65
|
1749ffa89430be75394ae0d43905f3dd30a24fc6
|
refs/heads/master
| 2022-12-15T21:13:30.697889
| 2018-11-09T02:55:18
| 2018-11-09T02:55:18
| 157,323,318
| 0
| 0
|
MIT
| 2022-12-08T01:02:11
| 2018-11-13T05:06:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,935
|
py
|
import datetime
from django.db import models
from django.conf import settings
from django.urls import reverse
from ..connections.models import Company
from shortuuidfield import ShortUUIDField
class Project(models.Model):
status_options = (
('Open', 'Project Open'),
('Closed', 'Project Closed'),
)
created_user = models.ForeignKey(settings.AUTH_USER_MODEL,
blank=True, null=True, on_delete=models.PROTECT)
uuid = ShortUUIDField()
slug = models.CharField(max_length=80, unique=True)
# Company
company = models.ForeignKey(Company, models.SET_NULL, null=True)
# Project Name
project_name = models.CharField(max_length=80, blank=False,)
# Address Info
project_address = models.CharField(max_length=200, blank=True, null=True)
# Project Status
project_status = models.CharField(max_length=20, blank=False, default='Open', choices=status_options)
# Project Start/End Date
project_start_date = models.DateField(auto_now=False, blank=True, null=True)
project_end_date = models.DateField(auto_now=False, blank=True, null=True)
# Additional Info
additional_info = models.TextField(blank=True)
# Created/Updated
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('project_name',)
def __str__(self):
return self.project_name
def get_absolute_url(self):
return reverse('projects:project_new', kwargs={'slug': self.slug})
# Create slug using first 8 characters of uuid
def save(self, **kwargs):
super(Project, self).save()
slug = self.uuid
self.slug = slug[:8]
super(Project, self).save()
@property
def duration(self):
if self.project_end_date:
end = self.project_end_date
else:
end = datetime.date.today()
return (end - self.project_start_date).days
|
[
"james@oneworksite.com"
] |
james@oneworksite.com
|
db22f1613ecfb0ae9b171db68f83c9a8d0f77dca
|
10e8c2d72750386a89ee0cd53511ca6152acc0e2
|
/blog/migrations/0001_initial.py
|
e85eb079c4f8fe5f305a1e57ca409ee0d975959a
|
[] |
no_license
|
goldogob/my-first-blog
|
8fdfe22ee263d1090a783a3781d629e981e3eaca
|
640258fa894d8ec6a5da61f3b5fa54cacc97103b
|
refs/heads/master
| 2020-03-31T05:12:32.499310
| 2018-10-07T09:44:30
| 2018-10-07T09:44:30
| 151,937,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
# Generated by Django 2.1.2 on 2018-10-07 02:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"abdinasir_308@hotmail.com"
] |
abdinasir_308@hotmail.com
|
f34189295dea001181d92eeb0fca188de1f693c7
|
0faafb167fb778e7f3a094ade994e74e7b7a8cc4
|
/dataHandlers/accParser_append.py
|
c2f968fe6887cf0aef24ac1ebd28f8338845a7be
|
[
"MIT"
] |
permissive
|
aritraghsh09/morphML_reloaded
|
f5825d9a6124cc1c788ff10128afc10ca9416d45
|
85c3da4dcf43db1bd621bba64d1a5720937ad661
|
refs/heads/master
| 2021-05-02T10:44:55.177097
| 2019-09-03T15:34:45
| 2019-09-03T15:34:45
| 120,798,948
| 0
| 0
|
MIT
| 2019-04-08T20:54:56
| 2018-02-08T18:13:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
####################################
# accParser_append.py
#
# To use this, you must first have run accParser at least once for that particular run folder. This tries to open the old out file and append stuff to it.
# Used when there are separate epochs for a single run.
# !!BE CAREFUL!! -- THIS WORKS ONLY WHEN FILES ARE NAMED USING A FIXED SCHEME AND THIS SCRIPT IS APPLIED ON THOSE FILES IN A CERTAIN ORDER
####################################
import sys
if len(sys.argv) != 2:
    print "Exiting Program....\nUsage: python accParser_append.py /path/to/screen/output"
    sys.exit(1)
dataPath = sys.argv[1] #the first argument is the path to the screen grab of the TF Learn run
dataFile = open(dataPath, 'r')
outFile = open(dataPath[:-6] + 'out.txt', 'a') # !!HIGHLY UNSTABLE LINE!!
outFile_read = open(dataPath[:-6] + 'out.txt', 'r') # !!HIGHLY UNSTABLE LINE!!
#outFile.write("epoch loss acc val_acc\n")
resultLines = dataFile.readlines()
#Getting last epoch run from the previous outfile.
old_out_lines = outFile_read.readlines()
last_out_line = old_out_lines.pop()
ll_words = last_out_line.split()
last_epoch = int(ll_words[0])
for line in resultLines:
if 'val_acc' in line:
words = line.split()
#validation step
if words[-2:-1] != ['iter:']:
print "Something doesn't look right. Skipping an occurence of val_acc"
continue
outFile.write( str(int(words[words.index("epoch:")+1]) + last_epoch) + " ")
outFile.write(words[words.index("loss:")+1] + " ")
outFile.write(words[words.index("acc:")+1] + " ")
outFile.write(words[words.index("val_acc:")+1] + "\n")
dataFile.close()
outFile.close()
outFile_read.close()
|
[
"aritraghsh09@gmail.com"
] |
aritraghsh09@gmail.com
|
567c734c8c30c7425d8a8d2bc60d18af42bda97e
|
9c7e7e4a23e95b8de1869e7111cf25973d019742
|
/pretix_swap/migrations/0001_initial.py
|
3751ed4ab1c547be6734832f854df711ccc7f31a
|
[
"Apache-2.0"
] |
permissive
|
rixx/pretix-swap
|
f8ce05e2346de3cfa4de57316e4c9bbde013ad4f
|
8b796e5cf8372968d3f64ad0a34e954164edd60e
|
refs/heads/main
| 2023-06-21T06:55:04.395432
| 2021-06-28T22:04:26
| 2021-06-28T22:04:26
| 343,238,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,399
|
py
|
# Generated by Django 3.0.11 on 2021-03-02 13:57
import django.db.models.deletion
import i18nfield.fields
from django.db import migrations, models
import pretix_swap.models
class Migration(migrations.Migration):
initial = True
dependencies = [
("pretixbase", "0174_merge_20201222_1031"),
]
operations = [
migrations.CreateModel(
name="SwapRequest",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False
),
),
("state", models.CharField(default="r", max_length=1)),
("swap_type", models.CharField(max_length=1)),
("swap_method", models.CharField(default="f", max_length=1)),
("requested", models.DateTimeField(auto_now_add=True)),
("completed", models.DateTimeField(null=True)),
(
"swap_code",
models.CharField(
default=pretix_swap.models.generate_swap_code, max_length=40
),
),
(
"partner",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="pretix_swap.SwapRequest",
),
),
(
"position",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="swap_states",
to="pretixbase.OrderPosition",
),
),
(
"target_item",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="pretixbase.Item",
),
),
(
"target_order",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="cancelation_request",
to="pretixbase.Order",
),
),
],
),
migrations.CreateModel(
name="SwapGroup",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False
),
),
("name", i18nfield.fields.I18nCharField(max_length=255)),
("swap_type", models.CharField(max_length=1)),
(
"event",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="swap_groups",
to="pretixbase.Event",
),
),
(
"left",
models.ManyToManyField(
related_name="_swapgroup_left_+", to="pretixbase.Item"
),
),
(
"right",
models.ManyToManyField(
related_name="_swapgroup_right_+", to="pretixbase.Item"
),
),
],
),
migrations.CreateModel(
name="SwapApproval",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False
),
),
("approved_for_cancelation_request", models.BooleanField(default=True)),
(
"order",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="swap_approval",
to="pretixbase.Order",
),
),
],
),
]
|
[
"r@rixx.de"
] |
r@rixx.de
|
5e8be2330606ad2f82c90f8a283f6a92921edf3d
|
eff48acf9644e41b4b8739ad3f40eda8a2821a81
|
/qz_day14/qz_day14_1_内置函数.py
|
8f212ba7041a9490092b7893b7dfab021991cc2a
|
[] |
no_license
|
renhang0214/learn_python
|
11e082d127605173de4208b9f26b12f6bbbfa05d
|
fac6662c1ca1da1547963d7f75d6152e7a562775
|
refs/heads/master
| 2021-01-01T19:13:01.505427
| 2017-07-27T16:11:35
| 2017-07-27T16:11:35
| 98,537,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Author: Ren Hang
# Built-in functions
# __import__() is used to import a module
# getattr is used to look up a named object in a module
# a = __import__('b')   # b is the module name as a string ==>> import module b and bind it to the name a
# c = getattr(a, 'd')   # d names an object in the module ==>> find the object called d in module a
# d()                   # ==>> call d
# getattr(a, 'b', c)    # look up b in module a; if c is omitted a missing attribute raises, if c is given (e.g. None) it is returned instead
# hasattr(a, 'b')       # check whether attribute b exists on a
# setattr(a, 'b', c)    # create b = c on module a in memory
# delattr(a, 'b')       # delete attribute b from module a in memory
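# A minimal, runnable sketch of the calls described above (the module and
# attribute names here are invented for illustration; this block is not part
# of the original notes).
import types

demo = types.ModuleType('demo')         # stand-in for some module 'demo'
setattr(demo, 'greet', lambda: 'hi')    # create demo.greet = <lambda> in memory
print(hasattr(demo, 'greet'))           # True: the attribute now exists
fn = getattr(demo, 'greet')             # look up the object named 'greet'
print(fn())                             # 'hi' -- execute it
print(getattr(demo, 'missing', None))   # None: default returned instead of raising
delattr(demo, 'greet')                  # remove demo.greet again
json_mod = __import__('json')           # import module 'json' and bind it to json_mod
print(json_mod.dumps({'ok': True}))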
|
[
"297096995@qq.com"
] |
297096995@qq.com
|
e5e948b4178f1f925deb48c1204a54d6f6f323c3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03088/s550393525.py
|
d662712473fa4b56d2c4e3a870ac0937453f7bfe
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
N = int(input())
dp = [[0]*64 for i in range(N-2)]
# Track the last three characters as a 3-digit base-4 number (4**3 = 64 states)
mod = 10**9+7
d = dict()
d2 = dict()
d2[0] = 'A'
d2[1] = 'G'
d2[2] = 'C'
d2[3] = 'T'
for i in range(64):
k = i
ret = ''
for j in range(3):
ret += d2[k % 4]
k //= 4
d[i] = ret
for i in range(64):
if d[i] == 'AGC' or d[i] == 'GAC' or d[i] == "ACG":
continue
else:
dp[0][i] = 1
for i in range(N-3):
for j in range(64):
if dp[i][j] == 0:
continue
tmp = j // 4
for k in range(4):
idx = tmp + k * 16
if d[idx] == "AGC" or d[idx] == 'ACG' or d[idx] == "GAC" or (d[j][0] == 'A' and d[j][1] == 'G' and k == 2) or (d[j][0] == 'A' and d[j][2] == 'G' and k == 2):
#print(d[j] + d2[k])
continue
dp[i+1][idx] += dp[i][j]
print(sum(dp[-1]) % mod)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
299d57b3c36a34de2b7b36fa4962f0e792851130
|
2c0c50a3de7eecf4baabdcfd80589d31f99ee653
|
/third-party/meme_4.9.0/scripts/glam2scan2html
|
62110490e012d8a2fbec49b9b7268eb75af2d0ae
|
[] |
no_license
|
xfLee/denovochipseq
|
0f4669b730c8c009041c4055f6839b3b2bd01c3c
|
042988f447438eb725170b62d616f27396339795
|
refs/heads/master
| 2021-01-21T23:45:42.909409
| 2015-07-27T00:14:01
| 2015-07-27T00:14:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,040
|
#!/opt/python27/bin/python
# Read GLAM2SCAN output: write an HTML version of it
import fileinput
version = ''
commandline = ''
alignments = []
state = 0
for line in fileinput.input():
if state == 0:
if line.startswith('Version'):
version = line
elif line.find('glam2scan') != -1:
commandline = line
state += 1
elif state == 1:
fields = line.split()
if len(fields) == 6:
alignments.append(fields)
# print the HTML header:
print '<html>'
print '<head>'
print '<title>GLAM2SCAN</title>'
print '<style type="text/css">'
print 'body {background: #D5F0FF}'
print 'th {text-align: left}'
print '</style>'
print '</head>'
print '<body>'
print '<h1>GLAM2SCAN</h1>'
print '<p style="font-family: monospace">', version, '<br><pre>', commandline, '</pre></p>'
print '<p>If you use this program in your research, please cite:<b> \
MC Frith, NFW Saunders, B Kobe, TL Bailey, "Discovering sequence motifs with arbitrary insertions and deletions", PLoS Computational Biology, <b>4</b>(5):e1000071, 2008.\
</b></p>'
print '<table>'
print '<tr>'
print '<th style="padding-right: 1em">NAME</th>'
print '<th style="padding-right: 1em">START</th>'
print '<th style="text-align: center">SITE</th>'
print '<th style="padding-left: 1em">END</th>'
print '<th style="padding-left: 1em">STRAND</th>'
print '<th style="padding-left: 1em">SCORE</th>'
print '</tr>'
print '<tbody>'
for row in alignments:
print '<tr>'
print '<td style="padding-right: 1em">', row[0], '</td>'
print '<td style="padding-right: 1em;text-align: right">', row[1], '</td>'
print '<td style="text-align: center;font-family: monospace">', row[2], '</td>'
print '<td style="padding-left: 1em;text-align: right">', row[3], '</td>'
print '<td style="padding-left: 1em;text-align: center">', row[4], '</td>'
print '<td style="padding-left: 1em">', row[5], '</td>'
print '</tr>'
# close the HTML:
print '</tbody>'
print '</table>'
print '</body>'
print '</html>'
|
[
"yuhaow.thu@gmail.com"
] |
yuhaow.thu@gmail.com
|
|
5cfaab7eb46ff501a77a9d3785b4645591d3c17b
|
14a1e119756bc032b8d0d60a863823c3a4fa3b25
|
/CenterNet3D.py
|
9a7d478818d59361db5c4f28efca8870d9d8d4dd
|
[
"MIT"
] |
permissive
|
DylanHsu/CenterNet-tensorflow
|
093c2ef4c66bb3982fa4224faef1ea10fe144d38
|
106f6415c412dbab94fcfb72ed5d57b0aeaeb5e1
|
refs/heads/master
| 2020-09-24T03:56:06.804284
| 2020-04-06T15:28:25
| 2020-04-06T15:28:25
| 225,656,155
| 0
| 0
|
MIT
| 2019-12-03T15:41:14
| 2019-12-03T15:41:13
| null |
UTF-8
|
Python
| false
| false
| 19,692
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import sys
import os
class CenterNet3D:
def __init__(self, config, data_provider):
assert config['mode'] in ['train', 'test']
assert config['data_format'] in ['channels_first', 'channels_last']
self.config = config
self.data_provider = data_provider
self.input_size = config['input_size']
#if config['data_format'] == 'channels_last':
# self.data_shape = [self.input_size, self.input_size, 3]
#else:
# self.data_shape = [3, self.input_size, self.input_size]
self.data_shape = [self.input_size, self.input_size, self.input_size]
self.num_classes = config['num_classes']
self.weight_decay = config['weight_decay']
self.prob = 1. - config['keep_prob']
self.data_format = config['data_format']
self.mode = config['mode']
self.batch_size = config['batch_size'] if config['mode'] == 'train' else 1
if self.mode == 'train':
self.num_train = data_provider['num_train']
self.num_val = data_provider['num_val']
self.train_generator = data_provider['train_generator']
self.train_initializer, self.train_iterator = self.train_generator
if data_provider['val_generator'] is not None:
self.val_generator = data_provider['val_generator']
self.val_initializer, self.val_iterator = self.val_generator
else:
self.score_threshold = config['score_threshold']
self.top_k_results_output = config['top_k_results_output']
self.global_step = tf.get_variable(name='global_step', initializer=tf.constant(0), trainable=False)
self.is_training = True
self._define_inputs()
self._build_graph()
self._create_saver()
if self.mode == 'train':
self._create_summary()
self._init_session()
def _define_inputs(self):
shape = [self.batch_size]
shape.extend(self.data_shape)
mean = tf.convert_to_tensor([0.485, 0.456, 0.406], dtype=tf.float32)
std = tf.convert_to_tensor([0.229, 0.224, 0.225], dtype=tf.float32)
if self.data_format == 'channels_last':
mean = tf.reshape(mean, [1, 1, 1, 3])
std = tf.reshape(std, [1, 1, 1, 3])
else:
mean = tf.reshape(mean, [1, 3, 1, 1])
std = tf.reshape(std, [1, 3, 1, 1])
if self.mode == 'train':
self.images, self.ground_truth = self.train_iterator.get_next()
self.images.set_shape(shape)
self.images = (self.images / 255. - mean) / std
else:
self.images = tf.placeholder(tf.float32, shape, name='images')
self.images = (self.images / 255. - mean) / std
self.ground_truth = tf.placeholder(tf.float32, [self.batch_size, None, 5], name='labels')
self.lr = tf.placeholder(dtype=tf.float32, shape=[], name='lr')
def _build_graph(self):
with tf.variable_scope('backbone'):
conv = self._conv_bn_activation(
bottom=self.images,
filters=16,
kernel_size=7,
strides=1,
)
conv = self._conv_bn_activation(
bottom=conv,
filters=16,
kernel_size=3,
strides=1,
)
conv = self._conv_bn_activation(
bottom=conv,
filters=32,
kernel_size=3,
strides=2,
)
dla_stage3 = self._dla_generator(conv, 64, 1, self._basic_block)
dla_stage3 = self._max_pooling(dla_stage3, 2, 2)
dla_stage4 = self._dla_generator(dla_stage3, 128, 2, self._basic_block)
residual = self._conv_bn_activation(dla_stage3, 128, 1, 1)
residual = self._avg_pooling(residual, 2, 2)
dla_stage4 = self._max_pooling(dla_stage4, 2, 2)
dla_stage4 = dla_stage4 + residual
dla_stage5 = self._dla_generator(dla_stage4, 256, 2, self._basic_block)
residual = self._conv_bn_activation(dla_stage4, 256, 1, 1)
residual = self._avg_pooling(residual, 2, 2)
dla_stage5 = self._max_pooling(dla_stage5, 2, 2)
dla_stage5 = dla_stage5 + residual
dla_stage6 = self._dla_generator(dla_stage5, 512, 1, self._basic_block)
residual = self._conv_bn_activation(dla_stage5, 512, 1, 1)
residual = self._avg_pooling(residual, 2, 2)
dla_stage6 = self._max_pooling(dla_stage6, 2, 2)
dla_stage6 = dla_stage6 + residual
with tf.variable_scope('upsampling'):
dla_stage6 = self._conv_bn_activation(dla_stage6, 256, 1, 1)
dla_stage6_5 = self._dconv_bn_activation(dla_stage6, 256, 4, 2)
dla_stage6_4 = self._dconv_bn_activation(dla_stage6_5, 256, 4, 2)
dla_stage6_3 = self._dconv_bn_activation(dla_stage6_4, 256, 4, 2)
dla_stage5 = self._conv_bn_activation(dla_stage5, 256, 1, 1)
dla_stage5_4 = self._conv_bn_activation(dla_stage5+dla_stage6_5, 256, 3, 1)
dla_stage5_4 = self._dconv_bn_activation(dla_stage5_4, 256, 4, 2)
dla_stage5_3 = self._dconv_bn_activation(dla_stage5_4, 256, 4, 2)
dla_stage4 = self._conv_bn_activation(dla_stage4, 256, 1, 1)
dla_stage4_3 = self._conv_bn_activation(dla_stage4+dla_stage5_4+dla_stage6_4, 256, 3, 1)
dla_stage4_3 = self._dconv_bn_activation(dla_stage4_3, 256, 4, 2)
features = self._conv_bn_activation(dla_stage6_3+dla_stage5_3+dla_stage4_3, 256, 3, 1)
features = self._conv_bn_activation(features, 256, 1, 1)
stride = 4.0
with tf.variable_scope('center_detector'):
keypoints = self._conv_bn_activation(features, self.num_classes, 3, 1, None)
offset = self._conv_bn_activation(features, 2, 3, 1, None)
size = self._conv_bn_activation(features, 2, 3, 1, None)
if self.data_format == 'channels_first':
keypoints = tf.transpose(keypoints, [0, 2, 3, 1])
offset = tf.transpose(offset, [0, 2, 3, 1])
size = tf.transpose(size, [0, 2, 3, 1])
pshape = [tf.shape(offset)[1], tf.shape(offset)[2]]
h = tf.range(0., tf.cast(pshape[0], tf.float32), dtype=tf.float32)
w = tf.range(0., tf.cast(pshape[1], tf.float32), dtype=tf.float32)
[meshgrid_x, meshgrid_y] = tf.meshgrid(w, h)
if self.mode == 'train':
total_loss = []
for i in range(self.batch_size):
loss = self._compute_one_image_loss(keypoints[i, ...], offset[i, ...], size[i, ...],
self.ground_truth[i, ...], meshgrid_y, meshgrid_x,
stride, pshape)
total_loss.append(loss)
self.loss = tf.reduce_mean(total_loss) + self.weight_decay * tf.add_n(
[tf.nn.l2_loss(var) for var in tf.trainable_variables()])
optimizer = tf.train.AdamOptimizer(self.lr)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = optimizer.minimize(self.loss, global_step=self.global_step)
self.train_op = tf.group([update_ops, train_op])
else:
keypoints = tf.sigmoid(keypoints)
meshgrid_y = tf.expand_dims(meshgrid_y, axis=-1)
meshgrid_x = tf.expand_dims(meshgrid_x, axis=-1)
center = tf.concat([meshgrid_y, meshgrid_x], axis=-1)
category = tf.expand_dims(tf.squeeze(tf.argmax(keypoints, axis=-1, output_type=tf.int32)), axis=-1)
meshgrid_xyz = tf.concat([tf.zeros_like(category), tf.cast(center, tf.int32), category], axis=-1)
keypoints = tf.gather_nd(keypoints, meshgrid_xyz)
keypoints = tf.expand_dims(keypoints, axis=0)
keypoints = tf.expand_dims(keypoints, axis=-1)
keypoints_peak = self._max_pooling(keypoints, 3, 1)
keypoints_mask = tf.cast(tf.equal(keypoints, keypoints_peak), tf.float32)
keypoints = keypoints * keypoints_mask
scores = tf.reshape(keypoints, [-1])
class_id = tf.reshape(category, [-1])
bbox_yx = tf.reshape(center+offset, [-1, 2])
bbox_hw = tf.reshape(size, [-1, 2])
score_mask = scores > self.score_threshold
scores = tf.boolean_mask(scores, score_mask)
class_id = tf.boolean_mask(class_id, score_mask)
bbox_yx = tf.boolean_mask(bbox_yx, score_mask)
bbox_hw = tf.boolean_mask(bbox_hw, score_mask)
bbox = tf.concat([bbox_yx-bbox_hw/2., bbox_yx+bbox_hw/2.], axis=-1) * stride
num_select = tf.cond(tf.shape(scores)[0] > self.top_k_results_output, lambda: self.top_k_results_output, lambda: tf.shape(scores)[0])
select_scores, select_indices = tf.nn.top_k(scores, num_select)
select_class_id = tf.gather(class_id, select_indices)
select_bbox = tf.gather(bbox, select_indices)
self.detection_pred = [select_scores, select_bbox, select_class_id]
def _compute_one_image_loss(self, keypoints, offset, size, ground_truth, meshgrid_y, meshgrid_x,
stride, pshape):
slice_index = tf.argmin(ground_truth, axis=0)[0]
ground_truth = tf.gather(ground_truth, tf.range(0, slice_index, dtype=tf.int64))
ngbbox_y = ground_truth[..., 0] / stride
ngbbox_x = ground_truth[..., 1] / stride
ngbbox_h = ground_truth[..., 2] / stride
ngbbox_w = ground_truth[..., 3] / stride
class_id = tf.cast(ground_truth[..., 4], dtype=tf.int32)
ngbbox_yx = ground_truth[..., 0:2] / stride
ngbbox_yx_round = tf.floor(ngbbox_yx)
offset_gt = ngbbox_yx - ngbbox_yx_round
size_gt = ground_truth[..., 2:4] / stride
ngbbox_yx_round_int = tf.cast(ngbbox_yx_round, tf.int64)
keypoints_loss = self._keypoints_loss(keypoints, ngbbox_yx_round_int, ngbbox_y, ngbbox_x, ngbbox_h,
ngbbox_w, class_id, meshgrid_y, meshgrid_x, pshape)
offset = tf.gather_nd(offset, ngbbox_yx_round_int)
size = tf.gather_nd(size, ngbbox_yx_round_int)
offset_loss = tf.reduce_mean(tf.abs(offset_gt - offset))
size_loss = tf.reduce_mean(tf.abs(size_gt - size))
total_loss = keypoints_loss + 0.1*size_loss + offset_loss
return total_loss
def _keypoints_loss(self, keypoints, gbbox_yx, gbbox_y, gbbox_x, gbbox_h, gbbox_w,
classid, meshgrid_y, meshgrid_x, pshape):
sigma = self._gaussian_radius(gbbox_h, gbbox_w, 0.7)
gbbox_y = tf.reshape(gbbox_y, [-1, 1, 1])
gbbox_x = tf.reshape(gbbox_x, [-1, 1, 1])
sigma = tf.reshape(sigma, [-1, 1, 1])
num_g = tf.shape(gbbox_y)[0]
meshgrid_y = tf.expand_dims(meshgrid_y, 0)
meshgrid_y = tf.tile(meshgrid_y, [num_g, 1, 1])
meshgrid_x = tf.expand_dims(meshgrid_x, 0)
meshgrid_x = tf.tile(meshgrid_x, [num_g, 1, 1])
keyp_penalty_reduce = tf.exp(-((gbbox_y-meshgrid_y)**2 + (gbbox_x-meshgrid_x)**2)/(2*sigma**2))
zero_like_keyp = tf.expand_dims(tf.zeros(pshape, dtype=tf.float32), axis=-1)
reduction = []
gt_keypoints = []
for i in range(self.num_classes):
exist_i = tf.equal(classid, i)
reduce_i = tf.boolean_mask(keyp_penalty_reduce, exist_i, axis=0)
reduce_i = tf.cond(
tf.equal(tf.shape(reduce_i)[0], 0),
lambda: zero_like_keyp,
lambda: tf.expand_dims(tf.reduce_max(reduce_i, axis=0), axis=-1)
)
reduction.append(reduce_i)
gbbox_yx_i = tf.boolean_mask(gbbox_yx, exist_i)
gt_keypoints_i = tf.cond(
tf.equal(tf.shape(gbbox_yx_i)[0], 0),
lambda: zero_like_keyp,
lambda: tf.expand_dims(tf.sparse.to_dense(tf.sparse.SparseTensor(gbbox_yx_i, tf.ones_like(gbbox_yx_i[..., 0], tf.float32), dense_shape=pshape), validate_indices=False),
axis=-1)
)
gt_keypoints.append(gt_keypoints_i)
reduction = tf.concat(reduction, axis=-1)
gt_keypoints = tf.concat(gt_keypoints, axis=-1)
keypoints_pos_loss = -tf.pow(1.-tf.sigmoid(keypoints), 2.) * tf.log_sigmoid(keypoints) * gt_keypoints
keypoints_neg_loss = -tf.pow(1.-reduction, 4) * tf.pow(tf.sigmoid(keypoints), 2.) * (-keypoints+tf.log_sigmoid(keypoints)) * (1.-gt_keypoints)
keypoints_loss = tf.reduce_sum(keypoints_pos_loss) / tf.cast(num_g, tf.float32) + tf.reduce_sum(keypoints_neg_loss) / tf.cast(num_g, tf.float32)
return keypoints_loss
# from cornernet
def _gaussian_radius(self, height, width, min_overlap=0.7):
a1 = 1.
b1 = (height + width)
c1 = width * height * (1. - min_overlap) / (1. + min_overlap)
sq1 = tf.sqrt(b1 ** 2. - 4. * a1 * c1)
r1 = (b1 + sq1) / 2.
a2 = 4.
b2 = 2. * (height + width)
c2 = (1. - min_overlap) * width * height
sq2 = tf.sqrt(b2 ** 2. - 4. * a2 * c2)
r2 = (b2 + sq2) / 2.
a3 = 4. * min_overlap
b3 = -2. * min_overlap * (height + width)
c3 = (min_overlap - 1.) * width * height
sq3 = tf.sqrt(b3 ** 2. - 4. * a3 * c3)
r3 = (b3 + sq3) / 2.
return tf.reduce_min([r1, r2, r3])
def _init_session(self):
self.sess = tf.InteractiveSession()
self.sess.run(tf.global_variables_initializer())
if self.mode == 'train':
self.sess.run(self.train_initializer)
def _create_saver(self):
weights = tf.trainable_variables('backbone')  # scope name must match the 'backbone' variable scope above
self.pretrained_saver = tf.train.Saver(weights)
self.saver = tf.train.Saver()
self.best_saver = tf.train.Saver()
def _create_summary(self):
with tf.variable_scope('summaries'):
tf.summary.scalar('loss', self.loss)
self.summary_op = tf.summary.merge_all()
def train_one_epoch(self, lr):
self.is_training = True
self.sess.run(self.train_initializer)
mean_loss = []
num_iters = self.num_train // self.batch_size
for i in range(num_iters):
_, loss = self.sess.run([self.train_op, self.loss], feed_dict={self.lr: lr})
sys.stdout.write('\r>> ' + 'iters '+str(i+1)+str('/')+str(num_iters)+' loss '+str(loss))
sys.stdout.flush()
mean_loss.append(loss)
sys.stdout.write('\n')
mean_loss = np.mean(mean_loss)
return mean_loss
def test_one_image(self, images):
self.is_training = False
pred = self.sess.run(self.detection_pred, feed_dict={self.images: images})
return pred
def save_weight(self, mode, path):
assert (mode in ['latest', 'best'])
if mode == 'latest':
saver = self.saver
else:
saver = self.best_saver
if not tf.gfile.Exists(os.path.dirname(path)):
tf.gfile.MakeDirs(os.path.dirname(path))
print(os.path.dirname(path), 'did not exist, created it')
saver.save(self.sess, path, global_step=self.global_step)
print('save', mode, 'model in', path, 'successfully')
def load_weight(self, path):
self.saver.restore(self.sess, path)
print('load weight', path, 'successfully')
def load_pretrained_weight(self, path):
self.pretrained_saver.restore(self.sess, path)
print('load pretrained weight', path, 'successfully')
def _bn(self, bottom):
bn = tf.layers.batch_normalization(
inputs=bottom,
axis=3 if self.data_format == 'channels_last' else 1,
training=self.is_training
)
return bn
def _conv_bn_activation(self, bottom, filters, kernel_size, strides, activation=tf.nn.relu):
conv = tf.layers.conv3d(
inputs=bottom,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
data_format=self.data_format
)
bn = self._bn(conv)
if activation is not None:
return activation(bn)
else:
return bn
def _dconv_bn_activation(self, bottom, filters, kernel_size, strides, activation=tf.nn.relu):
conv = tf.layers.conv3d_transpose(
inputs=bottom,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
data_format=self.data_format,
)
bn = self._bn(conv)
if activation is not None:
bn = activation(bn)
return bn
def _separable_conv_layer(self, bottom, filters, kernel_size, strides, activation=tf.nn.relu):
conv = tf.layers.separable_conv2d(
inputs=bottom,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
data_format=self.data_format,
use_bias=False,
)
bn = self._bn(conv)
if activation is not None:
bn = activation(bn)
return bn
def _basic_block(self, bottom, filters):
conv = self._conv_bn_activation(bottom, filters, 3, 1)
conv = self._conv_bn_activation(conv, filters, 3, 1)
axis = 3 if self.data_format == 'channels_last' else 1
input_channels = tf.shape(bottom)[axis]
shutcut = tf.cond(
tf.equal(input_channels, filters),
lambda: bottom,
lambda: self._conv_bn_activation(bottom, filters, 1, 1)
)
return conv + shutcut
def _dla_generator(self, bottom, filters, levels, stack_block_fn):
if levels == 1:
block1 = stack_block_fn(bottom, filters)
block2 = stack_block_fn(block1, filters)
aggregation = block1 + block2
aggregation = self._conv_bn_activation(aggregation, filters, 3, 1)
else:
block1 = self._dla_generator(bottom, filters, levels-1, stack_block_fn)
block2 = self._dla_generator(block1, filters, levels-1, stack_block_fn)
aggregation = block1 + block2
aggregation = self._conv_bn_activation(aggregation, filters, 3, 1)
return aggregation
def _max_pooling(self, bottom, pool_size, strides, name=None):
return tf.layers.max_pooling3d(
inputs=bottom,
pool_size=pool_size,
strides=strides,
padding='same',
data_format=self.data_format,
name=name
)
def _avg_pooling(self, bottom, pool_size, strides, name=None):
return tf.layers.average_pooling3d(
inputs=bottom,
pool_size=pool_size,
strides=strides,
padding='same',
data_format=self.data_format,
name=name
)
def _dropout(self, bottom, name):
return tf.layers.dropout(
inputs=bottom,
rate=self.prob,
training=self.is_training,
name=name
)
|
[
"hsud3@mskcc.org"
] |
hsud3@mskcc.org
|
85163a8a79493073b5ada17fabb8ef264d431e27
|
06f52df1993d2a856663863aa7bd0fc63e25b10e
|
/bot.py
|
0d1f05504ca357c2cd8d55e7a39731e8199e226b
|
[
"MIT"
] |
permissive
|
nullhandler/Android-Bot
|
2695d27b01fbecd1204deb576fd7e8fcb37bfc78
|
1864c57d551f5d7629857d9d4b3491e6b13339aa
|
refs/heads/master
| 2023-03-09T19:39:20.886029
| 2018-11-12T19:48:07
| 2018-11-12T19:48:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,863
|
py
|
# -*- coding: utf-8 -*-
import os
import telegram
from telegram.error import NetworkError, Unauthorized
from time import sleep
from telegram.ext import CommandHandler, MessageHandler, Filters
from telegram.ext import Updater
myToken = os.environ['TELEGRAM_TOKEN']
# Add the commands and reply commands in the commandDict with the following format
# "command":"reply message"
commandDict={
"docs":"Android Developer Docs: https://developer.android.com"
}
warnDict={
"rom":"This is an APP Development Group. Not ROM Development Group!"
}
def main():
updater = Updater(token=myToken)
dispatcher = updater.dispatcher
warn_handler = CommandHandler('warn', warn, pass_args=True)
res_handler = CommandHandler('res', res, pass_args=True)
msg_handler = MessageHandler(Filters.status_update.new_chat_members,msg)
dispatcher.add_handler(msg_handler)
dispatcher.add_handler(res_handler)
dispatcher.add_handler(warn_handler)
updater.start_polling()
print("Started...")
def res(bot, update, args):
arg = args[0].lower()
print(arg)
if(arg in commandDict):
if(update.message.reply_to_message != None):
update.message.reply_to_message.reply_text(commandDict[arg])
else:
update.message.reply_text(commandDict[arg])
def msg(bot, update):
# Welcome the member with the greet message
update.message.reply_text("Welcome to the Android App Development Group! Ask any questions *about App Development* in the group , and one of our members will try to help you :)")
def warn(bot, update, args):
arg = args[0].lower()
if(arg in warnDict):
if(update.message.reply_to_message != None):
update.message.reply_to_message.reply_text(warnDict[arg])
else:
update.message.reply_text(warnDict[arg])
if __name__ == '__main__':
main()
|
[
"peratchiselvank@gmail.com"
] |
peratchiselvank@gmail.com
|
45b40277437dd06c433bf68ccd3ed8437144d618
|
4fb10fd28bf10e6931252b519c09af6ff257f6f0
|
/tests/test_rotate_array.py
|
55d982714d44da198c7a4037868e8fa33ed3d022
|
[] |
no_license
|
bliaw/algorithms_python
|
42b00ac668e4050c015cc1c9e34fff628c776b7a
|
ad27231c2c30a35d55f75e60f745288230abf3de
|
refs/heads/master
| 2020-08-17T06:48:42.605995
| 2019-10-16T21:59:07
| 2019-10-16T21:59:07
| 215,627,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
"""Test rotate_array."""
from hypothesis import assume
from hypothesis import given
from hypothesis import strategies as st
import pytest
from algorithms.rotate_array import RotateArray
@pytest.mark.parametrize('input, input_k, expected',
[([1, 2], 11, [2, 1]),
([-1, -100, 3, 99], 0, [-1, -100, 3, 99]),
([-1, -100, 3, 99], 2, [3, 99, -1, -100]),
([1, 2, 3, 4, 5, 6, 7], 3, [5, 6, 7, 1, 2, 3, 4]),
([1, 0], 1, [0, 1])
])
def test_rotate_array(input, input_k, expected):
nums = input
RotateArray.rotate(nums, input_k)
assert nums == expected
@given(gen_input=st.lists(st.integers()), gen_input_k=st.integers(max_value=10000))
def test_rotate_array_gen(gen_input, gen_input_k):
assume (gen_input_k < len(gen_input))
# Calculate expected results.
gen_input_copy = gen_input.copy() # ops are in-place
length = len(gen_input)
# Avoid mod by 0
if length > 0:
expected = [gen_input_copy[(idx - gen_input_k) % length] for idx in range(length)]
else:
expected = gen_input_copy
nums = gen_input
RotateArray.rotate(nums, gen_input_k)
assert nums == expected
|
[
"aiden@aidenlaw.com"
] |
aiden@aidenlaw.com
|
2a5d0a8638430e8dd2add02f4ca10ea6642afd69
|
33be4ac2c05477530b326decf6883a6f8392db32
|
/__tests__/test_table.py
|
2fbdacd91e1da9b2adbc76311c09444f29c79ab3
|
[] |
no_license
|
leo-ware/logic
|
bbffd30ec7c75642dd8e21ca67bcce9c48201e99
|
a99c427fa386d48c7a34b0a0de7fffb57ee84ba3
|
refs/heads/main
| 2023-04-13T06:55:45.507489
| 2021-04-24T05:03:12
| 2021-04-24T05:03:12
| 356,954,833
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
from src import *
from anything import Anything
x, y = variables("xy")
Leo = Term("Leo")
Declan = Term("Declan")
Milo = Term("Milo")
Henry = Term("Henry")
sibling = functor("sibling")
father = functor("father")
foo = LinearTable([
sibling(Leo, x) <= sibling(Declan, x),
sibling(Leo, Milo) <= language.YES
])
def howto_test(tb):
def test():
assert tuple(tb.rules()) in [((sibling(Leo, Milo) <= language.YES,
sibling(Leo, Anything) <= sibling(Declan, Anything))),
((sibling(Leo, Anything) <= sibling(Declan, Anything)),
sibling(Leo, Milo) <= language.YES)]
assert len(list(tb.fetch(sibling(Leo, Milo)))) == 2
assert list(tb.fetch(sibling(Leo, Declan))) == [sibling(Leo, Anything) <= sibling(Declan, Anything)]
return test
test_linear = howto_test(foo)
test_predicate = howto_test(PredicateIndex(foo.rules()))
test_heuristic = howto_test(HeuristicIndex(foo))
test_trie = howto_test(TrieTable(foo.rules()))
t = TrieTable
print(list(
TrieTable(foo.rules()).rules()
))
|
[
"leoware@minerva.kgi.edu"
] |
leoware@minerva.kgi.edu
|
e5a91252c372b130711f02d13ad05728d33ce7d1
|
49c8131fe6e1b37f3d21483cbb5bf4cd6449611b
|
/habitat_baselines/utils/visualizations/utils.py
|
27ae0209f1e0cb455b8da6b3868a7f58a5f59c08
|
[] |
no_license
|
jyjang0421/VisualNavigation
|
63c5d5082f51dc7085f3ac587ca8fa87b193c05e
|
ac681c30cfd6e034400d1229f1235bc22c62fa7a
|
refs/heads/main
| 2023-09-05T01:11:51.526280
| 2021-11-07T17:22:10
| 2021-11-07T17:22:10
| 425,567,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,100
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import cv2
import torch
from habitat_baselines.utils.common import (
tensor_to_bgr_images,
tensor_to_depth_images,
)
from habitat_sim.utils.common import d3_40_colors_rgb
def save_rgb_results(
gt_rgb: torch.Tensor, pred_rgb: torch.Tensor, path: str
) -> None:
r"""For saving RGB reconstruction results during EQA-CNN-Pretrain eval.
Args:
gt_rgb: RGB ground truth tensor
pred_rgb: RGB reconstruction tensor
path: to save images
"""
path = path.format(split="val", type="rgb")
gt_bgr, pred_bgr = tensor_to_bgr_images([gt_rgb, pred_rgb])
cv2.imwrite(path + "_gt.jpg", gt_bgr)
cv2.imwrite(path + "_pred.jpg", pred_bgr)
def save_seg_results(
gt_seg: torch.Tensor, pred_seg: torch.Tensor, path: str
) -> None:
r"""For saving predicted and ground truth seg maps during
EQA-CNN-Pretrain eval.
Args:
gt_seg: ground truth segmentation tensor
pred_seg: output segmentation tensor
path: to save images
"""
path = path.format(split="val", type="seg")
gt_seg = gt_seg.cpu().numpy() % 40
pred_seg = torch.argmax(pred_seg, 0).cpu().numpy() % 40
gt_seg_colored = d3_40_colors_rgb[gt_seg]
pred_seg_colored = d3_40_colors_rgb[pred_seg]
cv2.imwrite(path + "_gt.jpg", gt_seg_colored)
cv2.imwrite(path + "_pred.jpg", pred_seg_colored)
def save_depth_results(
gt_depth: torch.Tensor, pred_depth: torch.Tensor, path: str
) -> None:
r"""For saving predicted and ground truth depth maps during
EQA-CNN-Pretrain eval.
Args:
gt_depth: ground truth depth tensor
pred_depth: output depth tensor
path: to save images
"""
path = path.format(split="val", type="depth")
gt_depth, pred_depth = tensor_to_depth_images([gt_depth, pred_depth])
cv2.imwrite(path + "_gt.jpg", gt_depth)
cv2.imwrite(path + "_pred.jpg", pred_depth)
|
[
"jyjang0421@naver.com"
] |
jyjang0421@naver.com
|
c61e2d038be8353a38749035d85ca92ac2d8a449
|
1a3362c92082ac8c65ebfcd0cbc548b7aab14514
|
/tests/test_events/announcement_published/test_announcement_published.py
|
bf898177735cb91ad6cd240679e729c92dff1964
|
[
"Apache-2.0"
] |
permissive
|
henning-roos/eiffel-graphql-api
|
a8829d010380fc13ea130b375f610e11997fa27e
|
9bffe27478a088e5438762e0104d1901c7baab01
|
refs/heads/master
| 2022-10-10T05:19:00.377557
| 2020-02-05T13:23:22
| 2020-02-05T13:23:22
| 264,891,498
| 0
| 0
|
Apache-2.0
| 2020-05-18T09:29:07
| 2020-05-18T09:29:06
| null |
UTF-8
|
Python
| false
| false
| 5,424
|
py
|
# Copyright 2019 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import pytest
import logging
from unittest import TestCase
from .event import *
from .queries import *
from tests.lib.query_handler import GraphQLQueryHandler
logging.basicConfig(
level=logging.DEBUG
)
class TestAnnouncementPublished(TestCase):
@classmethod
def setUpClass(cls):
cls.query_handler = GraphQLQueryHandler("http://127.0.0.1:12345/graphql")
cls.events = [
eiffel_announcement_published_event()
]
cls.logger = logging.getLogger("TestAnnouncementPublished")
def setUp(self):
self.logger.info("\n")
for event in self.events:
insert(event)
def tearDown(self):
for event in self.events:
remove(event)
def test_announcement_published_data(self):
"""Test that it is possible to query 'data' from announcement published.
Approval criteria:
- It shall be possible to query 'data' from graphql.
- Data shall be:
- heading : "This is a heading"
- body : "This is a body"
- uri : "http://uri.se"
- severity: "MINOR"
Test steps:
1. Query 'data' from AnnouncementPublished in Graphql.
2. Verify that the response is correct.
"""
self.logger.info("STEP: Query 'data.activityOutcome' from AnnouncementPublished in Graphql.")
self.logger.debug(DATA_ONLY)
response = self.query_handler.execute(DATA_ONLY)
self.logger.debug(pretty(response))
self.logger.info("STEP: Verify that the response is correct.")
data = self.query_handler.get_node(response, "data")
self.assertIsInstance(data, dict)
self.assertGreater(len(data), 0)
self.assertDictEqual(
data,
{
"heading": "This is a heading",
"body": "This is a body",
"uri": "http://uri.se",
"severity": "MINOR"
}
)
def test_announcement_published_link(self):
"""Test that it is possible to query a valid activity execution link on announcement published.
Approval criteria:
- Graphql shall return an AnnouncementPublished event when requesting ModifiedAnnouncement.
Test steps:
1. Query 'links.ModifiedAnnouncement' from AnnouncementPublished in Graphql.
2. Verify that the returned event is an AnnouncementPublished.
"""
event = eiffel_announcement_published_event_link()
try:
insert(event)
self.logger.info("STEP: Query 'links.ActivityExecution' from AnnouncementPublished in Graphql.")
self.logger.debug(LINKS_ONLY)
response = self.query_handler.execute(LINKS_ONLY)
self.logger.debug(pretty(response))
self.logger.info("STEP: Verify that the returned event is an AnnouncementPublished.")
link_meta = self.query_handler.get_node(response, "meta")
self.assertDictEqual(link_meta, {"id": "4baf56e6-404a-4132-a28b-9ed782f26293",
"type": "EiffelAnnouncementPublishedEvent"})
finally:
remove(event)
def test_announcement_published_meta(self):
"""Test that it is possible to query 'meta' from announcement published.
Approval criteria:
- It shall be possible to query 'meta' from graphql.
- Data returned shall be correct:
- version: "3.0.0"
- type : "EiffelAnnouncementPublishedEvent"
- id : "4baf56e6-404a-4132-a28b-9ed782f26293"
- time : 1575966865708
Test steps:
1. Query 'meta' from AnnouncementPublished in Graphql.
2. Verify that the response is not empty.
3. Verify that meta data returned correctly.
"""
self.logger.info("STEP: Query 'meta' from AnnouncementPublished in Graphql.")
self.logger.debug(META_ONLY)
response = self.query_handler.execute(META_ONLY)
self.logger.debug(pretty(response))
self.logger.info("STEP: Verify that response is not empty.")
meta = self.query_handler.get_node(response, "meta")
self.assertIsInstance(meta, dict)
self.assertGreater(len(meta), 0)
self.logger.info("STEP: Verify that meta data returned correctly.")
self.assertEqual(meta.get("version"), "3.0.0")
self.assertEqual(meta.get("type"), "EiffelAnnouncementPublishedEvent")
self.assertEqual(meta.get("id"), "4baf56e6-404a-4132-a28b-9ed782f26293")
self.assertEqual(meta.get("time"), "2019-12-10T09:34:25.708000")
|
[
"tobiaspn@axis.com"
] |
tobiaspn@axis.com
|
cb2060b44ea1196495278c47516362886016ec01
|
9d291ff8ad0a455b8b38065546e1c9a6617d36cf
|
/python/cqtest/hw.py
|
a09cf8f585e0fa274a6a9599c48d971cc7db1593
|
[
"BSD-2-Clause"
] |
permissive
|
seekindark/helloworld
|
7128b8d8b6ebdc79c2ec3aa17137fb2a8e18885e
|
3f36e040399a998e43c63bca0586fe517b7fef69
|
refs/heads/master
| 2023-05-26T22:04:08.714917
| 2023-05-13T01:31:06
| 2023-05-13T01:31:06
| 193,409,142
| 0
| 0
|
BSD-2-Clause
| 2019-09-27T08:07:02
| 2019-06-24T00:31:59
|
C
|
UTF-8
|
Python
| false
| false
| 28
|
py
|
print("Hello World,",2022)
|
[
"fei_n_chen@163.com"
] |
fei_n_chen@163.com
|
983fc36a0c5cf2655d4920fc4370fc7da8ed6fb1
|
e838ed306684b103b3d6ac6aa72fda321caf3632
|
/flappybirds.py
|
ac1a9a1805016954673047e21eefd81e6ae7ebc7
|
[] |
no_license
|
harika1101/Retro-fun-house
|
102d1e1b922212870d5c649161c2c72b84439d93
|
e4931d1159d4553856256696160ed6d865322170
|
refs/heads/main
| 2022-12-27T19:36:21.419550
| 2020-10-05T05:37:46
| 2020-10-05T05:37:46
| 301,194,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,640
|
py
|
import pygame, sys, random
def draw_floor():
screen.blit(floor_surface, (floor_x_pos,450))
screen.blit(floor_surface, (floor_x_pos + 288,450))
def create_pipe():
random_pipe_pos = random.choice(pipe_height)
bottom_pipe = pipe_surface.get_rect(midtop = (350,random_pipe_pos))
top_pipe = pipe_surface.get_rect(midbottom = (350,random_pipe_pos - 150))
return bottom_pipe, top_pipe
def move_pipes(pipes):
for pipe in pipes:
pipe.centerx -= 5
return pipes
def draw_pipes(pipes):
for pipe in pipes:
if pipe.bottom >= 512:
screen.blit(pipe_surface, pipe)
else:
flip_pipe = pygame.transform.flip(pipe_surface, False, True)
screen.blit(flip_pipe, pipe)
def check_collision(pipes):
for pipe in pipes:
if bird_rect.colliderect(pipe):
death_sound.play()
return False
if bird_rect.top <= -50 or bird_rect.bottom >= 450:
return False
return True
def rotate_bird(bird):
new_bird = pygame.transform.rotozoom(bird, -bird_movement *3, 1)
return new_bird
def bird_animation():
new_bird = bird_frames[bird_index]
new_bird_rect = new_bird.get_rect(center = (100, bird_rect.centery))
return new_bird, new_bird_rect
def score_display(game_state):
if game_state == 'main_game':
score_surface = game_font.render(str(int(score)), True, (255, 255, 255))
score_rect = score_surface.get_rect(center = (144, 50))
screen.blit(score_surface, score_rect)
if game_state == 'game_over':
score_surface = game_font.render(f'Score: {int(score)}', True, (255, 255, 255))
score_rect = score_surface.get_rect(center = (144, 50))
screen.blit(score_surface, score_rect)
high_score_surface = game_font.render(f'High Score: {int(high_score)}', True, (255, 255, 255))
high_score_rect = high_score_surface.get_rect(center = (144, 425))
screen.blit(high_score_surface, high_score_rect)
def update_score(score, high_score):
if score > high_score:
high_score = score
return high_score
pygame.mixer.pre_init(frequency = 44100, size = 16, channels = 1, buffer = 512)
pygame.init()
screen = pygame.display.set_mode((288, 512))
pygame.display.set_caption("Flappy Birds")
icon = pygame.image.load('assets/bird.png')
pygame.display.set_icon(icon)
clock = pygame.time.Clock()
game_font = pygame.font.Font(None,40)
# game variables
gravity = 0.25
bird_movement = 0
game_active = True
score = 0
high_score = 0
bg_surface = pygame.image.load('assets/background-day.png').convert()
floor_surface = pygame.image.load('assets/base.png').convert()
floor_x_pos = 0
bird_downflap = pygame.image.load('assets/bluebird-downflap.png').convert_alpha()
bird_midflap = pygame.image.load('assets/bluebird-midflap.png').convert_alpha()
bird_upflap = pygame.image.load('assets/bluebird-upflap.png').convert_alpha()
bird_frames = [bird_downflap,bird_midflap,bird_upflap]
bird_index = 0
bird_surface = bird_frames[bird_index]
bird_rect = bird_surface.get_rect(center = (100, 256))
BIRDFLAP = pygame.USEREVENT + 1
pygame.time.set_timer(BIRDFLAP, 200)
pipe_surface = pygame.image.load('assets/pipe-green.png')
pipe_list =[]
SPAWNPIPE = pygame.USEREVENT
pygame.time.set_timer(SPAWNPIPE, 1200)
pipe_height = [200, 300, 400]
game_over_surface = pygame.image.load('assets/message.png').convert_alpha()
game_over_rect = game_over_surface.get_rect(center = (144, 256))
flap_sound = pygame.mixer.Sound('assets/audio/wing.wav')
death_sound = pygame.mixer.Sound('assets/audio/hit.wav')
score_sound = pygame.mixer.Sound('assets/audio/point.wav')
score_sound_countdown = 100
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE and game_active:
bird_movement = 0
bird_movement = -6
flap_sound.play()
if event.key == pygame.K_SPACE and game_active == False:
game_active = True
pipe_list.clear()
bird_rect.center = (100, 256)
bird_movement = 0
score = 0
if event.type == SPAWNPIPE:
pipe_list.extend(create_pipe())
if event.type == BIRDFLAP:
if bird_index < 2:
bird_index += 1
else:
bird_index = 0
bird_surface, bird_rect = bird_animation()
screen.blit(bg_surface, (0,0))
if game_active:
# bird
bird_movement += gravity
rotated_bird = rotate_bird(bird_surface)
bird_rect.centery += bird_movement
screen.blit(rotated_bird, bird_rect)
game_active = check_collision(pipe_list)
#Pipes
pipe_list = move_pipes(pipe_list)
draw_pipes(pipe_list)
score += 0.01
score_display('main_game')
score_sound_countdown -= 1
if score_sound_countdown <= 0:
score_sound.play()
score_sound_countdown = 100
else:
screen.blit(game_over_surface, game_over_rect)
high_score = update_score(score, high_score)
score_display('game_over')
#Floor
floor_x_pos -= 1
draw_floor()
if floor_x_pos <= -288:
floor_x_pos = 0
pygame.display.update()
clock.tick(120)
|
[
"noreply@github.com"
] |
harika1101.noreply@github.com
|
c38cf65e1e52694035fbdf163d9836a8c32b5204
|
74db18a388236f83a46a36a6bb32b99983323823
|
/flight_bot/src/model/user_model.py
|
0efa2b6193040d8544365a9cce209bc01ab73bf6
|
[] |
no_license
|
guilevieiram/100_days
|
e0a5e9f931b92d0b23e6b12109a85d3d055a9470
|
e259a55bfdad4800f0754a1ac12172c20c9377b0
|
refs/heads/main
| 2023-09-03T21:21:17.795417
| 2021-11-01T00:01:09
| 2021-11-01T00:01:09
| 383,873,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,464
|
py
|
from abc import ABC, abstractmethod
from dataclasses import dataclass, asdict
from typing import Union, Optional, Any
from src.decorators import log
from src.model.db import DataBase
@dataclass
class User:
"""Dataclass to encode a user."""
first_name: str = ""
last_name: str = ""
e_mail: str = ""
phone: str = ""
city: str = ""
def set(self, data: dict):
"""
Method for setting a user if the data is in a dictionary format.
Use as 'user = User().set(data)'.
"""
self.__dict__.update(**data)
return self
def print(self):
"""Method for printing user info in a user-friendly way"""
return "\n".join([f"{key}: {value}" for key, value in self.__dict__.items()])
class UserModel(ABC):
"""
User model responsible for dealing with all the operations related to the users of the aplication.
It lacks a verification and a login method as those are not yet necessary
"""
@abstractmethod
def __init__(self, data_base: DataBase) -> None:
"""
Initializes the object giving it the user data base.
This data base must have the same atributes as the User dataclass.
"""
pass
@abstractmethod
def add_user(self, user: User) -> None:
"""Adds a user to the data base"""
pass
@abstractmethod
def delete_user(self, user: User) -> None:
"""Deletes a given user (via user object) to the data base"""
pass
@abstractmethod
def find_user(self, attribute: dict[str, Any]) -> User:
"""
Finds a user given one of its attributes as a dict (e.g.: {'first_name': 'user1'})
If many users share this attribute return the first/oldest one.
"""
pass
@abstractmethod
def get_all_users(self) -> list[User]:
"""Returns all the users in the data base in a list."""
pass
@abstractmethod
def edit_user(self, user: User, attribute: dict[str, Any]) -> None:
"""
Edits a user, setting a new attribute, given as a dictionary.
The dictionary key must be an atribute of the User dataclass
"""
pass
class TerminalUserModel(UserModel):
"""Implementation of the User Model, loging executions on the terminal."""
def __init__(self, data_base: DataBase) -> None:
self.data_base: DataBase = data_base
def add_user(self, user: User) -> None:
all_emails = [u.e_mail for u in self.get_all_users()]
if user.e_mail not in all_emails:
self.data_base.add_data(table="users", data=[user.__dict__])
else:
print("User already exists.")
def delete_user(self, user: User) -> None:
user_id: int = self.data_base.get_data(table="users", key_value={"e_mail": user.e_mail})[0]["id"]
self.data_base.delete_data(table="users", key=user_id)
def find_user(self, attribute: dict[str, Any]) -> User:
user_list = self.data_base.get_data(table="users", key_value=attribute)
if not user_list:
raise KeyError("User not found on the data base.")
return self.convert_attributes_user(attributes=user_list[0])
def get_all_users(self) -> list[User]:
return [self.convert_attributes_user(attributes=attributes)
for attributes in self.data_base.get_data(table="users")]
def edit_user(self, user: User, attribute: dict[str, Any]) -> None:
user_id: int = self.data_base.get_data(table="users", key_value={"e_mail": user.e_mail})[0]["id"]
self.data_base.update_data(table="users", key=user_id, key_values=[attribute])
@staticmethod
def convert_attributes_user(attributes: dict[str, Any]) -> User:
"""Converts a dict of attributes to a user object"""
attributes.pop("id")
return User().set(attributes)
|
[
"guilhermevmanhaes@gmail.com"
] |
guilhermevmanhaes@gmail.com
|
c2c3d509ac459bd3bb36a4e114ea7c7800b0df1e
|
8edf0fe4ef80faeb40ddcf3486988864a631dd61
|
/text_gmail_api.py
|
98ae693c894d3a38f0a70475b4965aafea569dcf
|
[] |
no_license
|
pritul2/Alert_APIs
|
54ce72705f252bf63d3e73d6bb2adc2b198eb4ca
|
607ea7076dceaf89ee0a793beda9acebff172ab6
|
refs/heads/master
| 2020-11-29T11:25:12.507975
| 2019-12-26T04:37:59
| 2019-12-26T04:37:59
| 230,103,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
'''
1) enable 2 step verification
2) go to enable app password
3) Generate the APP password
'''
import smtplib
import os
#setting the env. variables#
SENDER_EMAIL_ADDRESS = "sender address"
EMAIL_PASSWORD = "app password"
RECEIVER_EMAIL_ADDRESS= "receiver address"
#setting up the mail server and smtp port number#
with smtplib.SMTP_SSL("smtp.gmail.com",465) as smtp:
#login to account#
smtp.login(SENDER_EMAIL_ADDRESS, EMAIL_PASSWORD)
#defining subject#
subject = '---subject----'
#defining body#
body = '---body part -----'
#wrapping up#
msg = f'Subject: {subject}\n\n{body}'  # SMTP expects a "Subject:" header, a blank line, then the body
#sending message sender id,receiver id#
smtp.sendmail(SENDER_EMAIL_ADDRESS,RECEIVER_EMAIL_ADDRESS,msg)
|
[
"noreply@github.com"
] |
pritul2.noreply@github.com
|
819b491b627b3288f3b1beef011973ec50ecc21b
|
60ce73bf2f86940438e5b7fecaaccad086888dc5
|
/working_scrapers/Kentucky_perry.py
|
e8b7877b0405e86c4b2fbbf7255b77f7b2ad7a96
|
[] |
no_license
|
matthewgomies/jailcrawl
|
22baf5f0e6dc66fec1b1b362c26c8cd2469dcb0d
|
9a9ca7e1328ae549860ebeea9b149a785f152f39
|
refs/heads/master
| 2023-02-16T06:39:42.107493
| 2021-01-15T16:37:57
| 2021-01-15T16:37:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,260
|
py
|
#!/usr/bin/python
'''
This is a template script
'''
from urllib.request import urlopen, Request
import pandas as pd
import os
import time
import numpy as np
from datetime import datetime
import datetime as dt
import sys
from io import StringIO
from joblib import Parallel, delayed
import requests
from jailscrape.common import save_to_s3, get_browser, get_logger, record_error, save_pages_array
from jailscrape import crawlers
# jailscrape.common is a file that is part of the project which keeps
# most common boilerplate code out of this file
from selenium.webdriver.common.keys import Keys
import watchtower
from bs4 import BeautifulSoup
import re
import math
# NOTE: These are imports. They ideally don't change very often. It's OK
# to have a large, maximal set here and to bulk-edit files to add to
# these.
ROW_INDEX = 345 # Change this for each scraper. This references the row
# of the main jailcrawl spreadsheet. This index will be used to look up
# the URL as well as state/county info
THIS_STATE = 'kentucky' # Change the current state/county information.
THIS_COUNTY = 'perry'
def main(roster_row):
try:
logger = get_logger(roster_row) # Get a standard logger
# Here are standard variable values/how to initialize them.
# These aren't initialized here since in the save_single_page
# case, they can be done in the called function
browser = get_browser() # Get a standard browser
urlAddress = roster_row['Working Link'] # Set the main URL from the spreadsheet
page_index = 0 # Set an initial value of "page_index", which we will use to separate output pages
logger.info('Set working link to _%s_', urlAddress) # Log the chosen URL
##########
# Begin core specific scraping code
if roster_row['State'].lower() != THIS_STATE or roster_row['County'].lower() != THIS_COUNTY:
raise Exception("Expected county definition info from _%s, %s_, but found info: _%s_" % (THIS_COUNTY, THIS_STATE, roster_row))
# Open Browser
browser.get(urlAddress)
time.sleep(np.random.uniform(5,7,1))
#Extract the HTML#
store_source = browser.page_source
## Code to save the first page and log appropriately
save_to_s3(store_source, page_index, roster_row)
logger.info('Saved page _%s_', page_index)
# End core specific scraping code
##########
#Close the browser
logger.info('complete!')
except Exception as errorMessage:
try:
browser.close()
record_error(message=str(errorMessage), roster_row=roster_row, browser=browser)
except:
record_error(message=str(errorMessage), roster_row=roster_row)
# Record error in S3 for a general error
logger.error('Error: %s', errorMessage)
# Log error
sys.exit(1)
if __name__ == "__main__":
#This will load in the current jail roster list
#Select the index of the roster this script is for:
#Write the name of the county and state
roster = pd.read_csv('/opt/jail_roster_final_rmDuplicates.csv',encoding = "utf-8")
main(roster[roster['index'] == ROW_INDEX].iloc[0])
|
[
"matthewgomies@Matthews-MacBook-Pro.local"
] |
matthewgomies@Matthews-MacBook-Pro.local
|
dc6c425b31fb7d5e373fcca16b4cca613081850b
|
5f181e84305f507264945459924f0b40aa30a271
|
/locus_level_score/classification/feature_extraction.py
|
35c4dfeb573f50c0cb51ef11c10b8025e9aa22bc
|
[
"MIT"
] |
permissive
|
HealthVivo/ABE_NonCoding_functional_score
|
6e3d56b92b21946518b6fd7c8050e8fd66d9784b
|
87db4000d7ee030e3ed813774e03f4d902ced587
|
refs/heads/master
| 2023-03-01T22:05:19.929521
| 2021-02-04T21:01:08
| 2021-02-04T21:01:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,283
|
py
|
import os
import glob
import uuid
import argparse
import warnings
import numpy as np
import scipy
import pandas as pd
from copy import deepcopy as dp
from Bio.Seq import Seq
from Bio import SeqIO
try:
    from StringIO import StringIO ## for Python 2
except ImportError:
    from io import StringIO ## for Python 3
from joblib import Parallel, delayed
from pkg_resources import resource_filename
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.colors as clr
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import seaborn as sns
from janggu.data import Bioseq, ReduceDim, Cover
from janggu import inputlayer, outputconv, DnaConv2D
from sklearn.model_selection import (KFold, StratifiedKFold, cross_val_score, GridSearchCV,
                                     train_test_split, RandomizedSearchCV, LeaveOneOut)
from sklearn.metrics import roc_curve, roc_auc_score, average_precision_score, accuracy_score, mean_absolute_error
from sklearn.metrics.scorer import make_scorer
from sklearn.ensemble import (RandomForestRegressor, GradientBoostingRegressor,
                              RandomForestClassifier, GradientBoostingClassifier)
from sklearn.base import TransformerMixin
from sklearn.datasets import make_regression
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler, PolynomialFeatures, MinMaxScaler
from sklearn import linear_model
from sklearn.linear_model import LinearRegression, Ridge, Lasso, Lars, BayesianRidge
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
warnings.filterwarnings('ignore')
# warnings.simplefilter(action='ignore', category=FutureWarning)
"""
Feature extraction (Top motif scores)
1. using janggu get DNA one-hot
3. read meme get motif PWMs in both strands
4. scan motifs get score_list, max(pos_strand,neg_strand)
with tree-based methods, we don't need to do normalization here
5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
Dependency
----------
meme (to get motif revcomp)
bedtools (to get fasta sequences for gkm_svm)
python library
--------------
janggu (tensorflow + keras)
biopython
sklearn
joblib
"""
def read_fasta(f):
my_dict = {}
for r in SeqIO.parse(f, "fasta"):
my_dict[r.id] = str(r.seq).upper()
return my_dict
def read_motif(meme_file):
revcomp_file = "/tmp/"+str(uuid.uuid4())
os.system("meme-get-motif -rc -all %s > %s"%(meme_file,revcomp_file))
original_motif_label = "++original++"
revcomp_motif_label = "--revcomp--"
dict1 = parse_meme(meme_file,label=original_motif_label)
dict2 = parse_meme(revcomp_file,label=revcomp_motif_label)
myDict = {}
for k in dict1:
motif_name = k.replace(original_motif_label,"")
myDict[motif_name]=[dict1[k].T.values,dict2[k.replace(original_motif_label,revcomp_motif_label)].T.values]
return myDict
def parse_meme(file,label=""):
"""function to read meme file to pd.DataFrame"""
lines = open(file).readlines()
i = 0
myDict = {}
while i < len(lines):
myList = lines[i].strip().split()
if len(myList) < 1:
i = i + 1
continue
if myList[0] == "MOTIF":
if lines[i+1].strip() == "":
desc = lines[i+2].strip().split()
flag = True
else:
desc = lines[i+1].strip().split()
flag = False
try:
motifLength = int(desc[5])
except:
print (desc)
i = i+1
continue
if flag:
myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+3:i+3+motifLength])).replace(" "," ")
df = pd.read_csv(StringIO(myString), sep="\t",header=None)
df.columns=['A','C','G','T']
myDict[myList[1]+label] = df
if df.shape[0] != motifLength or df.shape[1] !=4:
print ("something is wrong")
i = i+3+motifLength
continue
else:
myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+2:i+2+motifLength])).replace(" "," ")
df = pd.read_csv(StringIO(myString), sep="\t",header=None)
df.columns=['A','C','G','T']
myDict[myList[1]+label] = df
i = i+2+motifLength
if df.shape[0] != motifLength or df.shape[1] !=4:
print ("something is wrong")
continue
i = i+1
return myDict
def motif_scan(s,m):
## s, m are numpy array
## s.shape = L*4
## m.shape = 4*W
L = s.shape[0]
W = m.shape[1]
score_list = []
for i in range(L-W):
sub = np.matmul(s[i:i+W,:],m)
# if i < 3:
# print ("DNA seq",s[i:i+W,:])
# print ("motif",m)
# print ("mapping score: ",np.trace(sub))
score_list.append(np.trace(sub))
return score_list
def DNA_motif_scan(DNA_array,m1,m2):
score_list = []
# print (m1)
# print (m2)
for i in range(DNA_array.shape[0]):
score_list_1 = motif_scan(DNA_array[i,:,:],m1)
# print ("score_list_1",score_list_1)
score_list_2 = motif_scan(DNA_array[i,:,:],m2)
# print ("score_list_2",score_list_2)
for j in range(len(score_list_1)):
if score_list_2[j] > score_list_1[j]:
score_list_1[j] = score_list_2[j]
score_list.append(score_list_1)
# print (score_list)
out = np.array(score_list)
print ("DNA scanning out shape",out.shape)
return out
def get_roi(myList):
## roi is region of interest, term used by janggu
# chr19:13180899-13180900+
# strand = [list(x)[-1] for x in myList]
strand = [x[-1] for x in myList]
# print (strand)
chr = [x[:-1].split(":")[0] for x in myList]
start = [int(x[:-1].split(":")[-1].split("-")[0]) for x in myList]
end = [int(x[:-1].split(":")[-1].split("-")[1]) for x in myList]
roi_A = []
roi = []
for i in range(len(chr)):
roi_A.append([chr[i],start[i],end[i],myList[i],".",strand[i]])
roi.append([chr[i],start[i],end[i]])
return roi_A,roi
def get_high_low_data(input,pos_cutoff,neg_cutoff):
df = pd.read_csv(input,index_col=0)
pos = df[df['HbFBase']>=pos_cutoff].index.tolist()
neg = df[df['HbFBase']<=neg_cutoff].index.tolist()
print ("Pos size %s. Neg size %s"%(len(pos),len(neg)))
return df.loc[pos+neg],pos,neg
def roi2fasta(roi,genome_fa,flank):
df = pd.DataFrame(roi)
df[1] = df[1]-flank
df[2] = df[2]+flank
df.to_csv("tmp.bed",sep="\t",header=False,index=False)
os.system("bedtools getfasta -fi %s -fo tmp.fa -bed tmp.bed -s -name"%(genome_fa))
seq = read_fasta("tmp.fa")
os.system("rm tmp.fa tmp.bed")
return seq
## Define parameters
high_hbf = 50
low_hbf = 0
input = "Editable_A_scores.combined.scores.csv"
flank = 100
refgenome="/home/yli11/Data/Human/hg19/fasta/hg19.fa"
bw_file="/home/yli11/Projects/Li_gRNA/footprint/H1_H2_GM12878_Tn5_bw/Hudep2.bw"
meme_file = "selected_motifs.meme"
top_n=5 # number of features for each motif
## read data
data,high,low = get_high_low_data(input,high_hbf,low_hbf)
roi_A,roi = get_roi(high+low)
seq = roi2fasta(roi_A,refgenome,flank)
test = pd.DataFrame.from_dict(seq,orient='index')
data['seq'] = test[0]
# 1. using janggu get DNA one-hot
## get one-hot data and ATAC feature matrix
dna_A = Bioseq.create_from_refgenome(name='dna',refgenome=refgenome,roi=roi_A,flank=flank)
Tn5 = Cover.create_from_bigwig('bigwig_coverage',bigwigfiles=bw_file,roi=roi,binsize=1,stepsize=1,flank=flank)
## ReShape
dna_A=np.reshape(dna_A,(len(high+low),flank*2+1,4))
bw_values=np.reshape(Tn5,(len(high+low),flank*2+1))
## get motif PWM, 3. read meme get motif PWMs in both strands
motifs = read_motif(meme_file)
# 4. scan motifs get score_list, max(pos_strand,neg_strand)
score_list_A = Parallel(n_jobs=-1)(delayed(DNA_motif_scan)(dna_A,motifs[m][0],motifs[m][1]) for m in motifs)
def get_footprint_score(s,l,footprint_score):
flanking=2
# print (s,l)
left_start = s-flanking
# print ("left_start:",left_start)
if left_start >= 0:
left = list(footprint_score[left_start:s])
else:
left = [np.nan]
right_end = s+l+flanking
# print ("right_end:",right_end)
# print ("len(footprint_score):",len(footprint_score))
if right_end <= len(footprint_score):
right = list(footprint_score[s+l:right_end])
else:
right = [np.nan]
flanking = np.nanmean(left+right)
# print ("left",left,"right",right)
# print ("flanking",flanking,"left+right",left+right)
occ = np.nanmean(footprint_score[s:s+l])
# print ("all:",footprint_score[s:s+l],"occ:",occ)
return flanking - occ
def get_top_n_motif_scores(score_list,top_n):
"""score_list.shape = L * 1
return
------
pos, value list
"""
return score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]
# 5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
def get_adjusted_motif_score(motif_score,footprint_score,n):
"""motif_score and footprint_score are same shape, N * L"""
out = []
# print ("motif_score",motif_score)
motif_length = footprint_score.shape[1] - motif_score.shape[1]
for i in range(motif_score.shape[0]):
pos,value = get_top_n_motif_scores(motif_score[i],n)
# print ("pos,:",pos)
# print ("value,:",value)
FOS_list = [get_footprint_score(s,motif_length,footprint_score[i]) for s in pos]
# print ("FOS_list:",FOS_list)
value = [value[i]*FOS_list[i] for i in range(len(value))]
out.append(value)
return out
adjusted_scores = Parallel(n_jobs=-1)(delayed(get_adjusted_motif_score)(motif_score,bw_values,top_n) for motif_score in score_list_A)
def set_col_names(motifs,top_n,label):
out = []
for i in motifs:
for j in range(top_n):
out.append("%s_%s_%s"%(label,i,j))
return out
## get feature table
adjusted_scores = np.array(adjusted_scores)
adjusted_scores = np.swapaxes(adjusted_scores,0,1)
adjusted_scores = adjusted_scores.reshape((len(high+low),top_n*len(motifs)))
adjusted_scores = pd.DataFrame(adjusted_scores)
adjusted_scores.columns = set_col_names(motifs,top_n,"motif_footprint_score")
adjusted_scores.index = high+low
df = pd.concat([adjusted_scores,data],axis=1)
df.to_csv("ML_data.csv")
|
[
"yli11@nodegpu132.cm.cluster"
] |
yli11@nodegpu132.cm.cluster
|
93c54ba4b5d31db14bb0a47e1fb2f880b943e7e8
|
937ed32710aba405bcd9ec7f8f5c78ddeaf72bc9
|
/projects/ingsoft/biblioteca/reservas/urls.py
|
990f849c5dc8cef9e3612691ed7a11a726c05e10
|
[] |
no_license
|
arcegk/biblioteca
|
51104f98169ff5c786aa32a85e76cd8a9c650fad
|
741e3273e86f4e97148e2320291a0a0587fead96
|
refs/heads/master
| 2021-01-15T23:46:13.019594
| 2015-11-13T18:18:06
| 2015-11-13T18:18:06
| 46,137,007
| 0
| 0
| null | 2015-11-13T17:30:14
| 2015-11-13T17:30:14
| null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from django.conf.urls import patterns, url
from .views import *
urlpatterns=patterns('',
url(r'reserva_registro/$',reserva_registro, name = 'reserva_registro'),
url(r'reserva_consulta/$',reserva_consulta, name = 'reserva_consulta'),
)
|
[
"stevenbetancurt@hotmail.com"
] |
stevenbetancurt@hotmail.com
|
bbac79a283a0e0e2460705d1117a4a86d695565a
|
b42bdf7e439ce959c851b37ba1bb764a92779163
|
/venv/Scripts/easy_install-3.6-script.py
|
6dd7402ed579e8ca2d9347664802d8c03c3bc6e8
|
[] |
no_license
|
paulduval30/GrapheProject
|
069df240936bb6205e840fd5d9ef52f2db7b686e
|
428ab99dc6ac5ca749b2f553f82603d689fb64b7
|
refs/heads/master
| 2020-04-08T23:28:03.162684
| 2018-12-28T22:47:18
| 2018-12-28T22:47:18
| 159,825,711
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
#!C:\Users\pauld\PycharmProjects\GrapheProject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
|
[
"carotte@outlook.com"
] |
carotte@outlook.com
|
0534afcacc176683783f7b7e7eafe90f61bd904b
|
57ee0d71639215c5a3736939381c713596dd0ef1
|
/lesson4/lesson4_4.py
|
a842006f2d8e7588f014aadfe0a47a4078bfb413
|
[] |
no_license
|
dilya123/pythoMonth1
|
e90a7758f8f1615489d3cced3834baeb12d189ad
|
5336ea3f5dcc3410ed696645cc0196eda7113c7c
|
refs/heads/master
| 2023-06-27T00:56:17.464053
| 2021-07-29T14:00:09
| 2021-07-29T14:00:09
| 381,046,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
names = ["Maruf", "Temirlan", "Seyto"]
print(names)
names.append("Sultan")
print(names)
names.remove("Sultan")
print(names)
for i in range(5):
name = input()
names.append(name)
for i in names:
print(i)
|
[
"daniyarflash.m01@gmail.com"
] |
daniyarflash.m01@gmail.com
|
1e55966be921e9a8d0857f8429f9b089099ccaba
|
ae1c0b745dd759d741569558e754646bc7fa1c86
|
/Pythonprog/Python1.py
|
0afe49cc65de9bc1ce1a394c2d0c17fc0892fa9f
|
[] |
no_license
|
NAGA6699/Nagarjuna_i_Transform_DevOps_Assignments
|
a284be1230052ca7bdf9e80f4695725c5b1c135d
|
cdeded676b2edd06833419be0a29c4171e86bfa0
|
refs/heads/master
| 2023-08-23T16:42:06.664608
| 2021-07-24T13:39:21
| 2021-07-24T13:39:21
| 389,105,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
import re
f=open("Actualdata.txt")
num=list()
#c=0
l=f.read()
print(l)
n=re.findall('[0-9]+',l)
for i in n:
num.append(int(i))
print(sum(num),len(num))
##
#print(sum(num),len(num),num)
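# Hedged illustration (not part of the original script):
# re.findall('[0-9]+', 'order 12 of 345 items') -> ['12', '345'], so the loop
# above sums every run of digits found in Actualdata.txt.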
|
[
"VENNAPUSA-NAGARJUNA.REDDY@CAPGEMINI.COM"
] |
VENNAPUSA-NAGARJUNA.REDDY@CAPGEMINI.COM
|
0e0275cf428a67a90944af9e04ea11d7cd635d90
|
b21c3e8621e0f20d5f8a86e17e11aac221cb59a7
|
/studentsdb/urls.py
|
c31ded6d3b232eff7c36b598ff8ff630077706e8
|
[] |
no_license
|
SviatStrakhov/st_db
|
64e852523e354fee40faa2312f29f787b48267c5
|
3c6ace28228233ba681c74d86c62f2cb687f6783
|
refs/heads/master
| 2020-12-30T15:09:23.106154
| 2017-05-19T15:13:01
| 2017-05-19T15:13:01
| 91,117,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
"""studentsdb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from .settings import MEDIA_ROOT, DEBUG
from students.views.students_views import *
from students.views.groups_views import *
from students.views.journal_views import *
urlpatterns = [
# Students urls
url(r'^$', students_list, name='home'),
url(r'^students/add/$', students_add, name='students_add'),
url(r'^students/(?P<sid>\d+)/edit/$', students_edit, name='students_edit'),
url(r'^students/(?P<sid>\d+)/delete/$', students_delete, name='students_delete'),
#Groups urls
url(r'^groups/$', groups_list, name='groups'),
url(r'^groups/add/$', groups_add, name='groups_add'),
url(r'^groups/(?P<gid>\d+)/edit/$', groups_edit, name='groups_edit'),
url(r'^groups/(?P<sid>\d+)/delete/$', groups_delete, name='groups_delete'),
url(r'^journal/$', journal, name='journal'),
#url(r'/journsl/125'),
#url(r'/journal/update'),
url(r'^admin/', admin.site.urls),
]
|
[
"svoharts@gmail.com"
] |
svoharts@gmail.com
|
4966a3f5d0449b77c881c78bcc2137881aa21ae3
|
bc233c24523f05708dd1e091dca817f9095e6bb5
|
/bitmovin_api_sdk/models/prewarmed_encoder_pool.py
|
8da7630b67d41bfa0dd3425c382138e1ce51c897
|
[
"MIT"
] |
permissive
|
bitmovin/bitmovin-api-sdk-python
|
e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd
|
b0860c0b1be7747cf22ad060985504da625255eb
|
refs/heads/main
| 2023-09-01T15:41:03.628720
| 2023-08-30T10:52:13
| 2023-08-30T10:52:13
| 175,209,828
| 13
| 14
|
MIT
| 2021-04-29T12:30:31
| 2019-03-12T12:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 12,366
|
py
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.bitmovin_resource import BitmovinResource
from bitmovin_api_sdk.models.cloud_region import CloudRegion
from bitmovin_api_sdk.models.prewarmed_encoder_disk_size import PrewarmedEncoderDiskSize
from bitmovin_api_sdk.models.prewarmed_encoder_pool_status import PrewarmedEncoderPoolStatus
import pprint
import six
class PrewarmedEncoderPool(BitmovinResource):
@poscheck_model
def __init__(self,
id_=None,
name=None,
description=None,
created_at=None,
modified_at=None,
custom_data=None,
encoder_version=None,
cloud_region=None,
infrastructure_id=None,
disk_size=None,
target_pool_size=None,
gpu_enabled=None,
status=None):
# type: (string_types, string_types, string_types, datetime, datetime, dict, string_types, CloudRegion, string_types, PrewarmedEncoderDiskSize, int, bool, PrewarmedEncoderPoolStatus) -> None
super(PrewarmedEncoderPool, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data)
self._encoder_version = None
self._cloud_region = None
self._infrastructure_id = None
self._disk_size = None
self._target_pool_size = None
self._gpu_enabled = None
self._status = None
self.discriminator = None
if encoder_version is not None:
self.encoder_version = encoder_version
if cloud_region is not None:
self.cloud_region = cloud_region
if infrastructure_id is not None:
self.infrastructure_id = infrastructure_id
if disk_size is not None:
self.disk_size = disk_size
if target_pool_size is not None:
self.target_pool_size = target_pool_size
if gpu_enabled is not None:
self.gpu_enabled = gpu_enabled
if status is not None:
self.status = status
@property
def openapi_types(self):
types = {}
if hasattr(super(PrewarmedEncoderPool, self), 'openapi_types'):
types = getattr(super(PrewarmedEncoderPool, self), 'openapi_types')
types.update({
'encoder_version': 'string_types',
'cloud_region': 'CloudRegion',
'infrastructure_id': 'string_types',
'disk_size': 'PrewarmedEncoderDiskSize',
'target_pool_size': 'int',
'gpu_enabled': 'bool',
'status': 'PrewarmedEncoderPoolStatus'
})
return types
@property
def attribute_map(self):
attributes = {}
if hasattr(super(PrewarmedEncoderPool, self), 'attribute_map'):
attributes = getattr(super(PrewarmedEncoderPool, self), 'attribute_map')
attributes.update({
'encoder_version': 'encoderVersion',
'cloud_region': 'cloudRegion',
'infrastructure_id': 'infrastructureId',
'disk_size': 'diskSize',
'target_pool_size': 'targetPoolSize',
'gpu_enabled': 'gpuEnabled',
'status': 'status'
})
return attributes
@property
def encoder_version(self):
# type: () -> string_types
"""Gets the encoder_version of this PrewarmedEncoderPool.
The encoder version which the pool's instances will be running (required)
:return: The encoder_version of this PrewarmedEncoderPool.
:rtype: string_types
"""
return self._encoder_version
@encoder_version.setter
def encoder_version(self, encoder_version):
# type: (string_types) -> None
"""Sets the encoder_version of this PrewarmedEncoderPool.
The encoder version which the pool's instances will be running (required)
:param encoder_version: The encoder_version of this PrewarmedEncoderPool.
:type: string_types
"""
if encoder_version is not None:
if not isinstance(encoder_version, string_types):
raise TypeError("Invalid type for `encoder_version`, type has to be `string_types`")
self._encoder_version = encoder_version
@property
def cloud_region(self):
# type: () -> CloudRegion
"""Gets the cloud_region of this PrewarmedEncoderPool.
The cloud region in which the pool's instances will be running. Must be a specific region (e.g. not 'AUTO', 'GOOGLE' or 'EUROPE') (required)
:return: The cloud_region of this PrewarmedEncoderPool.
:rtype: CloudRegion
"""
return self._cloud_region
@cloud_region.setter
def cloud_region(self, cloud_region):
# type: (CloudRegion) -> None
"""Sets the cloud_region of this PrewarmedEncoderPool.
The cloud region in which the pool's instances will be running. Must be a specific region (e.g. not 'AUTO', 'GOOGLE' or 'EUROPE') (required)
:param cloud_region: The cloud_region of this PrewarmedEncoderPool.
:type: CloudRegion
"""
if cloud_region is not None:
if not isinstance(cloud_region, CloudRegion):
raise TypeError("Invalid type for `cloud_region`, type has to be `CloudRegion`")
self._cloud_region = cloud_region
@property
def infrastructure_id(self):
# type: () -> string_types
"""Gets the infrastructure_id of this PrewarmedEncoderPool.
Define an external infrastructure to run the pool on.
:return: The infrastructure_id of this PrewarmedEncoderPool.
:rtype: string_types
"""
return self._infrastructure_id
@infrastructure_id.setter
def infrastructure_id(self, infrastructure_id):
# type: (string_types) -> None
"""Sets the infrastructure_id of this PrewarmedEncoderPool.
Define an external infrastructure to run the pool on.
:param infrastructure_id: The infrastructure_id of this PrewarmedEncoderPool.
:type: string_types
"""
if infrastructure_id is not None:
if not isinstance(infrastructure_id, string_types):
raise TypeError("Invalid type for `infrastructure_id`, type has to be `string_types`")
self._infrastructure_id = infrastructure_id
@property
def disk_size(self):
# type: () -> PrewarmedEncoderDiskSize
"""Gets the disk_size of this PrewarmedEncoderPool.
Disk size of the prewarmed instances in GB. Needs to be chosen depending on input file sizes and encoding features used. (required)
:return: The disk_size of this PrewarmedEncoderPool.
:rtype: PrewarmedEncoderDiskSize
"""
return self._disk_size
@disk_size.setter
def disk_size(self, disk_size):
# type: (PrewarmedEncoderDiskSize) -> None
"""Sets the disk_size of this PrewarmedEncoderPool.
Disk size of the prewarmed instances in GB. Needs to be chosen depending on input file sizes and encoding features used. (required)
:param disk_size: The disk_size of this PrewarmedEncoderPool.
:type: PrewarmedEncoderDiskSize
"""
if disk_size is not None:
if not isinstance(disk_size, PrewarmedEncoderDiskSize):
raise TypeError("Invalid type for `disk_size`, type has to be `PrewarmedEncoderDiskSize`")
self._disk_size = disk_size
@property
def target_pool_size(self):
# type: () -> int
"""Gets the target_pool_size of this PrewarmedEncoderPool.
Number of instances to keep prewarmed while the pool is running (required)
:return: The target_pool_size of this PrewarmedEncoderPool.
:rtype: int
"""
return self._target_pool_size
@target_pool_size.setter
def target_pool_size(self, target_pool_size):
# type: (int) -> None
"""Sets the target_pool_size of this PrewarmedEncoderPool.
Number of instances to keep prewarmed while the pool is running (required)
:param target_pool_size: The target_pool_size of this PrewarmedEncoderPool.
:type: int
"""
if target_pool_size is not None:
if target_pool_size is not None and target_pool_size < 1:
raise ValueError("Invalid value for `target_pool_size`, must be a value greater than or equal to `1`")
if not isinstance(target_pool_size, int):
raise TypeError("Invalid type for `target_pool_size`, type has to be `int`")
self._target_pool_size = target_pool_size
@property
def gpu_enabled(self):
# type: () -> bool
"""Gets the gpu_enabled of this PrewarmedEncoderPool.
Create pool with GPU instances for hardware encoding presets (e.g., VOD_HARDWARE_SHORTFORM).
:return: The gpu_enabled of this PrewarmedEncoderPool.
:rtype: bool
"""
return self._gpu_enabled
@gpu_enabled.setter
def gpu_enabled(self, gpu_enabled):
# type: (bool) -> None
"""Sets the gpu_enabled of this PrewarmedEncoderPool.
Create pool with GPU instances for hardware encoding presets (e.g., VOD_HARDWARE_SHORTFORM).
:param gpu_enabled: The gpu_enabled of this PrewarmedEncoderPool.
:type: bool
"""
if gpu_enabled is not None:
if not isinstance(gpu_enabled, bool):
raise TypeError("Invalid type for `gpu_enabled`, type has to be `bool`")
self._gpu_enabled = gpu_enabled
@property
def status(self):
# type: () -> PrewarmedEncoderPoolStatus
"""Gets the status of this PrewarmedEncoderPool.
Current status of the pool.
:return: The status of this PrewarmedEncoderPool.
:rtype: PrewarmedEncoderPoolStatus
"""
return self._status
@status.setter
def status(self, status):
# type: (PrewarmedEncoderPoolStatus) -> None
"""Sets the status of this PrewarmedEncoderPool.
Current status of the pool.
:param status: The status of this PrewarmedEncoderPool.
:type: PrewarmedEncoderPoolStatus
"""
if status is not None:
if not isinstance(status, PrewarmedEncoderPoolStatus):
raise TypeError("Invalid type for `status`, type has to be `PrewarmedEncoderPoolStatus`")
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
if hasattr(super(PrewarmedEncoderPool, self), "to_dict"):
result = super(PrewarmedEncoderPool, self).to_dict()
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PrewarmedEncoderPool):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
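# Hedged usage sketch (not part of the generated model file); the version
# string and enum members below are placeholders, not verified values:
# pool = PrewarmedEncoderPool(
#     encoder_version='STABLE',                     # assumed version string
#     cloud_region=CloudRegion.<specific region>,   # must not be AUTO/GOOGLE/EUROPE
#     disk_size=PrewarmedEncoderDiskSize.<size>,
#     target_pool_size=1,
# )
# print(pool.to_dict())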
|
[
"openapi@bitmovin.com"
] |
openapi@bitmovin.com
|
10633cacba3a9240db613dca72ff6eb3a8f19adc
|
29fc11636bf5053feb113ad837ec42ffe3f09bfd
|
/Terra/Bug_25/_mapping.py
|
cceb0075f51f4610781ef1def67a81168a521f6d
|
[] |
no_license
|
deyh2020/Bugs4Q
|
8446db4a8efcf3541ba740b7d658b6812d56fe3e
|
b6e306e12bd1c5fdec126655ad008c386340ab57
|
refs/heads/master
| 2023-05-31T03:41:28.576877
| 2021-06-17T15:08:03
| 2021-06-17T15:08:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36,622
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name, inconsistent-return-statements
"""
Layout module to assist with mapping circuit qubits onto physical qubits.
"""
import logging
import pprint
import sys
import networkx as nx
import numpy as np
import sympy
from sympy import Number as N
from qiskit.qasm import _node as node
from qiskit.mapper import MapperError
from qiskit.dagcircuit import DAGCircuit, DAGCircuitError
from qiskit.unroll import DagUnroller, DAGBackend
from qiskit.mapper._quaternion import quaternion_from_euler
logger = logging.getLogger(__name__)
# Notes:
# Measurements may occur and be followed by swaps that result in repeated
# measurement of the same qubit. Near-term experiments cannot implement
# these circuits, so we may need to modify the algorithm.
# It can happen that a swap in a deeper layer can be removed by permuting
# qubits in the layout. We don't do this.
# It can happen that initial swaps can be removed or partly simplified
# because the initial state is zero. We don't do this.
cx_data = {
"opaque": False,
"n_args": 0,
"n_bits": 2,
"args": [],
"bits": ["c", "t"],
# gate cx c,t { CX c,t; }
"body": node.GateBody([
node.Cnot([
node.Id("c", 0, ""),
node.Id("t", 0, "")
])
])
}
swap_data = {
"opaque": False,
"n_args": 0,
"n_bits": 2,
"args": [],
"bits": ["a", "b"],
# gate swap a,b { cx a,b; cx b,a; cx a,b; }
"body": node.GateBody([
node.CustomUnitary([
node.Id("cx", 0, ""),
node.PrimaryList([
node.Id("a", 0, ""),
node.Id("b", 0, "")
])
]),
node.CustomUnitary([
node.Id("cx", 0, ""),
node.PrimaryList([
node.Id("b", 0, ""),
node.Id("a", 0, "")
])
]),
node.CustomUnitary([
node.Id("cx", 0, ""),
node.PrimaryList([
node.Id("a", 0, ""),
node.Id("b", 0, "")
])
])
])
}
u2_data = {
"opaque": False,
"n_args": 2,
"n_bits": 1,
"args": ["phi", "lambda"],
"bits": ["q"],
# gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }
"body": node.GateBody([
node.UniversalUnitary([
node.ExpressionList([
node.BinaryOp([
node.BinaryOperator('/'),
node.Real(sympy.pi),
node.Int(2)
]),
node.Id("phi", 0, ""),
node.Id("lambda", 0, "")
]),
node.Id("q", 0, "")
])
])
}
h_data = {
"opaque": False,
"n_args": 0,
"n_bits": 1,
"args": [],
"bits": ["a"],
# gate h a { u2(0,pi) a; }
"body": node.GateBody([
node.CustomUnitary([
node.Id("u2", 0, ""),
node.ExpressionList([
node.Int(0),
node.Real(sympy.pi)
]),
node.PrimaryList([
node.Id("a", 0, "")
])
])
])
}
def layer_permutation(layer_partition, layout, qubit_subset, coupling, trials,
seed=None):
"""Find a swap circuit that implements a permutation for this layer.
The goal is to swap qubits such that qubits in the same two-qubit gates
are adjacent.
Based on Sergey Bravyi's algorithm.
The layer_partition is a list of (qu)bit lists and each qubit is a
tuple (qreg, index).
The layout is a dict mapping qubits in the circuit to qubits in the
coupling graph and represents the current positions of the data.
The qubit_subset is the subset of qubits in the coupling graph that
we have chosen to map into.
The coupling is a CouplingGraph.
TRIALS is the number of attempts the randomized algorithm makes.
Returns: success_flag, best_circ, best_d, best_layout, trivial_flag
If success_flag is True, then best_circ contains a DAGCircuit with
the swap circuit, best_d contains the depth of the swap circuit, and
best_layout contains the new positions of the data qubits after the
swap circuit has been applied. The trivial_flag is set if the layer
has no multi-qubit gates.
"""
if seed is not None:
np.random.seed(seed)
logger.debug("layer_permutation: ----- enter -----")
logger.debug("layer_permutation: layer_partition = %s",
pprint.pformat(layer_partition))
logger.debug("layer_permutation: layout = %s",
pprint.pformat(layout))
logger.debug("layer_permutation: qubit_subset = %s",
pprint.pformat(qubit_subset))
logger.debug("layer_permutation: trials = %s", trials)
rev_layout = {b: a for a, b in layout.items()}
gates = []
for layer in layer_partition:
if len(layer) > 2:
raise MapperError("Layer contains >2 qubit gates")
elif len(layer) == 2:
gates.append(tuple(layer))
logger.debug("layer_permutation: gates = %s", pprint.pformat(gates))
# Find layout maximum index
layout_max_index = max(map(lambda x: x[1]+1, layout.values()))
# Can we already apply the gates?
dist = sum([coupling.distance(layout[g[0]],
layout[g[1]]) for g in gates])
logger.debug("layer_permutation: dist = %s", dist)
if dist == len(gates):
logger.debug("layer_permutation: done already")
logger.debug("layer_permutation: ----- exit -----")
circ = DAGCircuit()
circ.add_qreg('q', layout_max_index)
circ.add_basis_element("CX", 2)
circ.add_basis_element("cx", 2)
circ.add_basis_element("swap", 2)
circ.add_gate_data("cx", cx_data)
circ.add_gate_data("swap", swap_data)
return True, circ, 0, layout, bool(gates)
# Begin loop over trials of randomized algorithm
n = coupling.size()
best_d = sys.maxsize # initialize best depth
best_circ = None # initialize best swap circuit
best_layout = None # initialize best final layout
for trial in range(trials):
logger.debug("layer_permutation: trial %s", trial)
trial_layout = layout.copy()
rev_trial_layout = rev_layout.copy()
# SWAP circuit constructed this trial
trial_circ = DAGCircuit()
trial_circ.add_qreg('q', layout_max_index)
# Compute Sergey's randomized distance
xi = {}
for i in coupling.get_qubits():
xi[i] = {}
for i in coupling.get_qubits():
for j in coupling.get_qubits():
scale = 1 + np.random.normal(0, 1 / n)
xi[i][j] = scale * coupling.distance(i, j) ** 2
xi[j][i] = xi[i][j]
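        # xi[i][j] is a symmetric, randomly perturbed squared coupling
        # distance; each trial greedily picks swaps that reduce the sum of
        # xi over this layer's two-qubit gates.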
# Loop over depths d up to a max depth of 2n+1
d = 1
# Circuit for this swap slice
circ = DAGCircuit()
circ.add_qreg('q', layout_max_index)
circ.add_basis_element("CX", 2)
circ.add_basis_element("cx", 2)
circ.add_basis_element("swap", 2)
circ.add_gate_data("cx", cx_data)
circ.add_gate_data("swap", swap_data)
# Identity wire-map for composing the circuits
identity_wire_map = {('q', j): ('q', j) for j in range(layout_max_index)}
while d < 2 * n + 1:
# Set of available qubits
qubit_set = set(qubit_subset)
# While there are still qubits available
while qubit_set:
# Compute the objective function
min_cost = sum([xi[trial_layout[g[0]]][trial_layout[g[1]]]
for g in gates])
# Try to decrease objective function
progress_made = False
# Loop over edges of coupling graph
for e in coupling.get_edges():
# Are the qubits available?
if e[0] in qubit_set and e[1] in qubit_set:
# Try this edge to reduce the cost
new_layout = trial_layout.copy()
new_layout[rev_trial_layout[e[0]]] = e[1]
new_layout[rev_trial_layout[e[1]]] = e[0]
rev_new_layout = rev_trial_layout.copy()
rev_new_layout[e[0]] = rev_trial_layout[e[1]]
rev_new_layout[e[1]] = rev_trial_layout[e[0]]
# Compute the objective function
new_cost = sum([xi[new_layout[g[0]]][new_layout[g[1]]]
for g in gates])
                        # Record progress if we succeed
if new_cost < min_cost:
logger.debug("layer_permutation: progress! "
"min_cost = %s", min_cost)
progress_made = True
min_cost = new_cost
opt_layout = new_layout
rev_opt_layout = rev_new_layout
opt_edge = e
# Were there any good choices?
if progress_made:
qubit_set.remove(opt_edge[0])
qubit_set.remove(opt_edge[1])
trial_layout = opt_layout
rev_trial_layout = rev_opt_layout
circ.apply_operation_back("swap", [(opt_edge[0][0],
opt_edge[0][1]),
(opt_edge[1][0],
opt_edge[1][1])])
logger.debug("layer_permutation: chose pair %s",
pprint.pformat(opt_edge))
else:
break
# We have either run out of qubits or failed to improve
# Compute the coupling graph distance
dist = sum([coupling.distance(trial_layout[g[0]],
trial_layout[g[1]]) for g in gates])
logger.debug("layer_permutation: dist = %s", dist)
# If all gates can be applied now, we are finished
# Otherwise we need to consider a deeper swap circuit
if dist == len(gates):
logger.debug("layer_permutation: all can be applied now")
trial_circ.compose_back(circ, identity_wire_map)
break
# Increment the depth
d += 1
logger.debug("layer_permutation: increment depth to %s", d)
# Either we have succeeded at some depth d < dmax or failed
dist = sum([coupling.distance(trial_layout[g[0]],
trial_layout[g[1]]) for g in gates])
logger.debug("layer_permutation: dist = %s", dist)
if dist == len(gates):
if d < best_d:
logger.debug("layer_permutation: got circuit with depth %s", d)
best_circ = trial_circ
best_layout = trial_layout
best_d = min(best_d, d)
if best_circ is None:
logger.debug("layer_permutation: failed!")
logger.debug("layer_permutation: ----- exit -----")
return False, None, None, None, False
logger.debug("layer_permutation: done")
logger.debug("layer_permutation: ----- exit -----")
return True, best_circ, best_d, best_layout, False
def direction_mapper(circuit_graph, coupling_graph):
"""Change the direction of CNOT gates to conform to CouplingGraph.
circuit_graph = input DAGCircuit
coupling_graph = corresponding CouplingGraph
Adds "h" to the circuit basis.
Returns a DAGCircuit object containing a circuit equivalent to
circuit_graph but with CNOT gate directions matching the edges
of coupling_graph. Raises an exception if the circuit_graph
does not conform to the coupling_graph.
"""
if "cx" not in circuit_graph.basis:
return circuit_graph
if circuit_graph.basis["cx"] != (2, 0, 0):
raise MapperError("cx gate has unexpected signature %s" %
circuit_graph.basis["cx"])
flipped_cx_circuit = DAGCircuit()
flipped_cx_circuit.add_qreg('q', 2)
flipped_cx_circuit.add_basis_element("CX", 2)
flipped_cx_circuit.add_basis_element("U", 1, 0, 3)
flipped_cx_circuit.add_basis_element("cx", 2)
flipped_cx_circuit.add_basis_element("u2", 1, 0, 2)
flipped_cx_circuit.add_basis_element("h", 1)
flipped_cx_circuit.add_gate_data("cx", cx_data)
flipped_cx_circuit.add_gate_data("u2", u2_data)
flipped_cx_circuit.add_gate_data("h", h_data)
flipped_cx_circuit.apply_operation_back("h", [("q", 0)])
flipped_cx_circuit.apply_operation_back("h", [("q", 1)])
flipped_cx_circuit.apply_operation_back("cx", [("q", 1), ("q", 0)])
flipped_cx_circuit.apply_operation_back("h", [("q", 0)])
flipped_cx_circuit.apply_operation_back("h", [("q", 1)])
cg_edges = coupling_graph.get_edges()
for cx_node in circuit_graph.get_named_nodes("cx"):
nd = circuit_graph.multi_graph.node[cx_node]
cxedge = tuple(nd["qargs"])
if cxedge in cg_edges:
logger.debug("cx %s[%d], %s[%d] -- OK",
cxedge[0][0], cxedge[0][1],
cxedge[1][0], cxedge[1][1])
continue
elif (cxedge[1], cxedge[0]) in cg_edges:
circuit_graph.substitute_circuit_one(cx_node,
flipped_cx_circuit,
wires=[("q", 0), ("q", 1)])
logger.debug("cx %s[%d], %s[%d] -FLIP",
cxedge[0][0], cxedge[0][1],
cxedge[1][0], cxedge[1][1])
else:
raise MapperError("circuit incompatible with CouplingGraph: "
"cx on %s" % pprint.pformat(cxedge))
return circuit_graph
def swap_mapper_layer_update(i, first_layer, best_layout, best_d,
best_circ, layer_list):
"""Update the QASM string for an iteration of swap_mapper.
i = layer number
first_layer = True if this is the first layer with multi-qubit gates
best_layout = layout returned from swap algorithm
best_d = depth returned from swap algorithm
best_circ = swap circuit returned from swap algorithm
layer_list = list of circuit objects for each layer
Return DAGCircuit object to append to the output DAGCircuit.
"""
layout = best_layout
layout_max_index = max(map(lambda x: x[1]+1, layout.values()))
dagcircuit_output = DAGCircuit()
dagcircuit_output.add_qreg("q", layout_max_index)
# Identity wire-map for composing the circuits
identity_wire_map = {('q', j): ('q', j) for j in range(layout_max_index)}
# If this is the first layer with multi-qubit gates,
# output all layers up to this point and ignore any
# swap gates. Set the initial layout.
if first_layer:
logger.debug("update_qasm_and_layout: first multi-qubit gate layer")
# Output all layers up to this point
for j in range(i + 1):
dagcircuit_output.compose_back(layer_list[j]["graph"], layout)
# Otherwise, we output the current layer and the associated swap gates.
else:
# Output any swaps
if best_d > 0:
logger.debug("update_qasm_and_layout: swaps in this layer, "
"depth %d", best_d)
dagcircuit_output.compose_back(best_circ, identity_wire_map)
else:
logger.debug("update_qasm_and_layout: no swaps in this layer")
# Output this layer
dagcircuit_output.compose_back(layer_list[i]["graph"], layout)
return dagcircuit_output
def swap_mapper(circuit_graph, coupling_graph,
initial_layout=None,
basis="cx,u1,u2,u3,id", trials=20, seed=None):
"""Map a DAGCircuit onto a CouplingGraph using swap gates.
Args:
circuit_graph (DAGCircuit): input DAG circuit
coupling_graph (CouplingGraph): coupling graph to map onto
initial_layout (dict): dict from qubits of circuit_graph to qubits
of coupling_graph (optional)
basis (str): basis string specifying basis of output DAGCircuit
trials (int): number of trials.
seed (int): initial seed.
Returns:
DAGCircuit: object containing a circuit equivalent to
circuit_graph that respects couplings in coupling_graph, and
a layout dict mapping qubits of circuit_graph into qubits
of coupling_graph. The layout may differ from the initial_layout
if the first layer of gates cannot be executed on the
initial_layout.
Raises:
MapperError: if there was any error during the mapping or with the
parameters.
"""
if circuit_graph.width() > coupling_graph.size():
raise MapperError("Not enough qubits in CouplingGraph")
# Schedule the input circuit
layerlist = list(circuit_graph.layers())
logger.debug("schedule:")
for i, v in enumerate(layerlist):
logger.debug(" %d: %s", i, v["partition"])
if initial_layout is not None:
# Check the input layout
circ_qubits = circuit_graph.get_qubits()
coup_qubits = coupling_graph.get_qubits()
qubit_subset = []
for k, v in initial_layout.items():
qubit_subset.append(v)
if k not in circ_qubits:
raise MapperError("initial_layout qubit %s[%d] not in input "
"DAGCircuit" % (k[0], k[1]))
if v not in coup_qubits:
raise MapperError("initial_layout qubit %s[%d] not in input "
"CouplingGraph" % (v[0], v[1]))
else:
# Supply a default layout
qubit_subset = coupling_graph.get_qubits()
qubit_subset = qubit_subset[0:circuit_graph.width()]
initial_layout = {a: b for a, b in
zip(circuit_graph.get_qubits(), qubit_subset)}
    # Find a swap circuit to precede each layer of the input circuit
layout = initial_layout.copy()
layout_max_index = max(map(lambda x: x[1]+1, layout.values()))
# Construct an empty DAGCircuit with one qreg "q"
# and the same set of cregs as the input circuit
dagcircuit_output = DAGCircuit()
dagcircuit_output.add_qreg("q", layout_max_index)
for name, size in circuit_graph.cregs.items():
dagcircuit_output.add_creg(name, size)
# Make a trivial wire mapping between the subcircuits
# returned by swap_mapper_layer_update and the circuit
# we are building
identity_wire_map = {}
for j in range(layout_max_index):
identity_wire_map[("q", j)] = ("q", j)
for name, size in circuit_graph.cregs.items():
for j in range(size):
identity_wire_map[(name, j)] = (name, j)
first_layer = True # True until first layer is output
logger.debug("initial_layout = %s", layout)
# Iterate over layers
for i, layer in enumerate(layerlist):
# Attempt to find a permutation for this layer
success_flag, best_circ, best_d, best_layout, trivial_flag \
= layer_permutation(layer["partition"], layout,
qubit_subset, coupling_graph, trials, seed)
logger.debug("swap_mapper: layer %d", i)
logger.debug("swap_mapper: success_flag=%s,best_d=%s,trivial_flag=%s",
success_flag, str(best_d), trivial_flag)
# If this fails, try one gate at a time in this layer
if not success_flag:
logger.debug("swap_mapper: failed, layer %d, "
"retrying sequentially", i)
serial_layerlist = list(layer["graph"].serial_layers())
# Go through each gate in the layer
for j, serial_layer in enumerate(serial_layerlist):
success_flag, best_circ, best_d, best_layout, trivial_flag \
= layer_permutation(serial_layer["partition"],
layout, qubit_subset, coupling_graph,
trials, seed)
logger.debug("swap_mapper: layer %d, sublayer %d", i, j)
logger.debug("swap_mapper: success_flag=%s,best_d=%s,"
"trivial_flag=%s",
success_flag, str(best_d), trivial_flag)
# Give up if we fail again
if not success_flag:
raise MapperError("swap_mapper failed: " +
"layer %d, sublayer %d" % (i, j) +
", \"%s\"" %
serial_layer["graph"].qasm(
no_decls=True,
aliases=layout))
# If this layer is only single-qubit gates,
# and we have yet to see multi-qubit gates,
# continue to the next inner iteration
if trivial_flag and first_layer:
logger.debug("swap_mapper: skip to next sublayer")
continue
# Update the record of qubit positions for each inner iteration
layout = best_layout
# Update the QASM
dagcircuit_output.compose_back(
swap_mapper_layer_update(j,
first_layer,
best_layout,
best_d,
best_circ,
serial_layerlist),
identity_wire_map)
# Update initial layout
if first_layer:
initial_layout = layout
first_layer = False
else:
# Update the record of qubit positions for each iteration
layout = best_layout
# Update the QASM
dagcircuit_output.compose_back(
swap_mapper_layer_update(i,
first_layer,
best_layout,
best_d,
best_circ,
layerlist),
identity_wire_map)
# Update initial layout
if first_layer:
initial_layout = layout
first_layer = False
# If first_layer is still set, the circuit only has single-qubit gates
# so we can use the initial layout to output the entire circuit
if first_layer:
layout = initial_layout
for i, layer in enumerate(layerlist):
dagcircuit_output.compose_back(layer["graph"], layout)
# Parse openqasm_output into DAGCircuit object
    dag_unrolled = DagUnroller(dagcircuit_output,
                               DAGBackend(basis.split(",")))
    dagcircuit_output = dag_unrolled.expand_gates()
return dagcircuit_output, initial_layout
def yzy_to_zyz(xi, theta1, theta2, eps=1e-9):
"""Express a Y.Z.Y single qubit gate as a Z.Y.Z gate.
Solve the equation
.. math::
Ry(theta1).Rz(xi).Ry(theta2) = Rz(phi).Ry(theta).Rz(lambda)
for theta, phi, and lambda.
Return a solution theta, phi, and lambda.
"""
Q = quaternion_from_euler([theta1, xi, theta2], 'yzy')
euler = Q.to_zyz()
P = quaternion_from_euler(euler, 'zyz')
# output order different than rotation order
out_angles = (euler[1], euler[0], euler[2])
abs_inner = abs(P.data.dot(Q.data))
if not np.allclose(abs_inner, 1, eps):
logger.debug("xi=%s", xi)
logger.debug("theta1=%s", theta1)
logger.debug("theta2=%s", theta2)
logger.debug("solutions=%s", out_angles)
logger.debug("abs_inner=%s", abs_inner)
raise MapperError('YZY and ZYZ angles do not give same rotation matrix.')
return out_angles
def compose_u3(theta1, phi1, lambda1, theta2, phi2, lambda2):
"""Return a triple theta, phi, lambda for the product.
u3(theta, phi, lambda)
= u3(theta1, phi1, lambda1).u3(theta2, phi2, lambda2)
= Rz(phi1).Ry(theta1).Rz(lambda1+phi2).Ry(theta2).Rz(lambda2)
= Rz(phi1).Rz(phi').Ry(theta').Rz(lambda').Rz(lambda2)
= u3(theta', phi1 + phi', lambda2 + lambda')
Return theta, phi, lambda.
"""
# Careful with the factor of two in yzy_to_zyz
thetap, phip, lambdap = yzy_to_zyz((lambda1 + phi2),
theta1, theta2)
(theta, phi, lamb) = (thetap, phi1 + phip, lambda2 + lambdap)
return (theta, phi, lamb)
def cx_cancellation(circuit):
"""Cancel back-to-back "cx" gates in circuit."""
runs = circuit.collect_runs(["cx"])
for run in runs:
# Partition the run into chunks with equal gate arguments
partition = []
chunk = []
for i in range(len(run) - 1):
chunk.append(run[i])
qargs0 = circuit.multi_graph.node[run[i]]["qargs"]
qargs1 = circuit.multi_graph.node[run[i + 1]]["qargs"]
if qargs0 != qargs1:
partition.append(chunk)
chunk = []
chunk.append(run[-1])
partition.append(chunk)
# Simplify each chunk in the partition
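        # An even-length chunk of identical CX gates cancels completely; an
        # odd-length chunk reduces to a single CX (the first gate is kept).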
for chunk in partition:
if len(chunk) % 2 == 0:
for n in chunk:
circuit._remove_op_node(n)
else:
for n in chunk[1:]:
circuit._remove_op_node(n)
def optimize_1q_gates(circuit):
"""Simplify runs of single qubit gates in the QX basis.
Return a new circuit that has been optimized.
"""
qx_basis = ["u1", "u2", "u3", "cx", "id"]
dag_unroller = DagUnroller(circuit, DAGBackend(qx_basis))
unrolled = dag_unroller.expand_gates()
runs = unrolled.collect_runs(["u1", "u2", "u3", "id"])
for run in runs:
qname = unrolled.multi_graph.node[run[0]]["qargs"][0]
right_name = "u1"
right_parameters = (N(0), N(0), N(0)) # (theta, phi, lambda)
for current_node in run:
nd = unrolled.multi_graph.node[current_node]
assert nd["condition"] is None, "internal error"
assert len(nd["qargs"]) == 1, "internal error"
assert nd["qargs"][0] == qname, "internal error"
left_name = nd["name"]
assert left_name in ["u1", "u2", "u3", "id"], "internal error"
if left_name == "u1":
left_parameters = (N(0), N(0), nd["params"][0])
elif left_name == "u2":
left_parameters = (sympy.pi / 2, nd["params"][0], nd["params"][1])
elif left_name == "u3":
left_parameters = tuple(nd["params"])
else:
left_name = "u1" # replace id with u1
left_parameters = (N(0), N(0), N(0))
# Compose gates
name_tuple = (left_name, right_name)
if name_tuple == ("u1", "u1"):
# u1(lambda1) * u1(lambda2) = u1(lambda1 + lambda2)
right_parameters = (N(0), N(0), right_parameters[2] +
left_parameters[2])
elif name_tuple == ("u1", "u2"):
# u1(lambda1) * u2(phi2, lambda2) = u2(phi2 + lambda1, lambda2)
right_parameters = (sympy.pi / 2, right_parameters[1] +
left_parameters[2], right_parameters[2])
elif name_tuple == ("u2", "u1"):
# u2(phi1, lambda1) * u1(lambda2) = u2(phi1, lambda1 + lambda2)
right_name = "u2"
right_parameters = (sympy.pi / 2, left_parameters[1],
right_parameters[2] + left_parameters[2])
elif name_tuple == ("u1", "u3"):
# u1(lambda1) * u3(theta2, phi2, lambda2) =
# u3(theta2, phi2 + lambda1, lambda2)
right_parameters = (right_parameters[0], right_parameters[1] +
left_parameters[2], right_parameters[2])
elif name_tuple == ("u3", "u1"):
# u3(theta1, phi1, lambda1) * u1(lambda2) =
# u3(theta1, phi1, lambda1 + lambda2)
right_name = "u3"
right_parameters = (left_parameters[0], left_parameters[1],
right_parameters[2] + left_parameters[2])
elif name_tuple == ("u2", "u2"):
# Using Ry(pi/2).Rz(2*lambda).Ry(pi/2) =
# Rz(pi/2).Ry(pi-2*lambda).Rz(pi/2),
# u2(phi1, lambda1) * u2(phi2, lambda2) =
# u3(pi - lambda1 - phi2, phi1 + pi/2, lambda2 + pi/2)
right_name = "u3"
right_parameters = (sympy.pi - left_parameters[2] -
right_parameters[1], left_parameters[1] +
sympy.pi / 2, right_parameters[2] +
sympy.pi / 2)
elif name_tuple[1] == "nop":
right_name = left_name
right_parameters = left_parameters
else:
# For composing u3's or u2's with u3's, use
# u2(phi, lambda) = u3(pi/2, phi, lambda)
# together with the qiskit.mapper.compose_u3 method.
right_name = "u3"
# Evaluate the symbolic expressions for efficiency
left_parameters = tuple(map(lambda x: x.evalf(), list(left_parameters)))
right_parameters = tuple(map(lambda x: x.evalf(), list(right_parameters)))
right_parameters = compose_u3(left_parameters[0],
left_parameters[1],
left_parameters[2],
right_parameters[0],
right_parameters[1],
right_parameters[2])
# Why evalf()? This program:
# OPENQASM 2.0;
# include "qelib1.inc";
# qreg q[2];
# creg c[2];
# u3(0.518016983430947*pi,1.37051598592907*pi,1.36816383603222*pi) q[0];
# u3(1.69867232277986*pi,0.371448347747471*pi,0.461117217930936*pi) q[0];
# u3(0.294319836336836*pi,0.450325871124225*pi,1.46804720442555*pi) q[0];
# measure q -> c;
# took >630 seconds (did not complete) to optimize without
# calling evalf() at all, 19 seconds to optimize calling
# evalf() AFTER compose_u3, and 1 second to optimize
# calling evalf() BEFORE compose_u3.
# 1. Here down, when we simplify, we add f(theta) to lambda to
# correct the global phase when f(theta) is 2*pi. This isn't
# necessary but the other steps preserve the global phase, so
# we continue in that manner.
# 2. The final step will remove Z rotations by 2*pi.
# 3. Note that is_zero is true only if the expression is exactly
# zero. If the input expressions have already been evaluated
# then these final simplifications will not occur.
# TODO After we refactor, we should have separate passes for
# exact and approximate rewriting.
# Y rotation is 0 mod 2*pi, so the gate is a u1
if (right_parameters[0] % (2 * sympy.pi)).is_zero \
and right_name != "u1":
right_name = "u1"
right_parameters = (0, 0, right_parameters[1] +
right_parameters[2] +
right_parameters[0])
# Y rotation is pi/2 or -pi/2 mod 2*pi, so the gate is a u2
if right_name == "u3":
# theta = pi/2 + 2*k*pi
if ((right_parameters[0] - sympy.pi / 2) % (2 * sympy.pi)).is_zero:
right_name = "u2"
right_parameters = (sympy.pi / 2, right_parameters[1],
right_parameters[2] +
(right_parameters[0] - sympy.pi / 2))
# theta = -pi/2 + 2*k*pi
if ((right_parameters[0] + sympy.pi / 2) % (2 * sympy.pi)).is_zero:
right_name = "u2"
right_parameters = (sympy.pi / 2, right_parameters[1] +
sympy.pi, right_parameters[2] -
sympy.pi + (right_parameters[0] +
sympy.pi / 2))
# u1 and lambda is 0 mod 2*pi so gate is nop (up to a global phase)
if right_name == "u1" and (right_parameters[2] % (2 * sympy.pi)).is_zero:
right_name = "nop"
# Simplify the symbolic parameters
right_parameters = tuple(map(sympy.simplify, list(right_parameters)))
# Replace the data of the first node in the run
new_params = []
if right_name == "u1":
new_params = [right_parameters[2]]
if right_name == "u2":
new_params = [right_parameters[1], right_parameters[2]]
if right_name == "u3":
new_params = list(right_parameters)
nx.set_node_attributes(unrolled.multi_graph, name='name',
values={run[0]: right_name})
# params is a list of sympy symbols
nx.set_node_attributes(unrolled.multi_graph, name='params',
values={run[0]: new_params})
# Delete the other nodes in the run
for current_node in run[1:]:
unrolled._remove_op_node(current_node)
if right_name == "nop":
unrolled._remove_op_node(run[0])
return unrolled
def remove_last_measurements(dag_circuit, perform_remove=True):
"""Removes all measurements that occur as the last operation
on a given qubit for a DAG circuit. Measurements that are followed by
additional gates are untouched.
    This operation is done in-place on the input DAG circuit if perform_remove=True.
Parameters:
dag_circuit (qiskit.dagcircuit._dagcircuit.DAGCircuit): DAG circuit.
perform_remove (bool): Whether to perform removal, or just return node list.
Returns:
list: List of all measurements that were removed.
"""
removed_meas = []
try:
meas_nodes = dag_circuit.get_named_nodes('measure')
except DAGCircuitError:
return removed_meas
for idx in meas_nodes:
_, succ_map = dag_circuit._make_pred_succ_maps(idx)
if len(succ_map) == 2:
            # All successors of the measurement are outputs, one for the qubit and one for the cbit
# (As opposed to more gates being applied), and it is safe to remove the
# measurement node and add it back after the swap mapper is done.
removed_meas.append(dag_circuit.multi_graph.node[idx])
if perform_remove:
dag_circuit._remove_op_node(idx)
return removed_meas
def return_last_measurements(dag_circuit, removed_meas, final_layout):
"""Returns the measurements to a quantum circuit, removed by
`remove_last_measurements` after the swap mapper is finished.
This operation is done in-place on the input DAG circuit.
Parameters:
dag_circuit (qiskit.dagcircuit._dagcircuit.DAGCircuit): DAG circuit.
removed_meas (list): List of measurements previously removed.
final_layout (dict): Qubit layout after swap mapping.
"""
if any(removed_meas) and 'measure' not in dag_circuit.basis.keys():
dag_circuit.add_basis_element("measure", 1, 1, 0)
for meas in removed_meas:
new_q_label = final_layout[meas['qargs'][0]]
dag_circuit.apply_operation_back(name='measure', qargs=[new_q_label],
cargs=meas['cargs'])
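# Hedged usage sketch (not part of the original module); `dag`, `coupling` and
# `final_layout` are assumed to be provided by the caller, with `final_layout`
# being the qubit layout after swap mapping:
# removed = remove_last_measurements(dag)
# ... run the swap mapper on dag ...
# return_last_measurements(dag, removed, final_layout)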
|
[
"zpz2393247079@gmail.com"
] |
zpz2393247079@gmail.com
|
4c3ae5d9175b6d3dfd6c1d550c0293f6c0646ba1
|
678999c17480f902023c1c08db21b23dd9dd66fd
|
/user/migrations/0002_auto_20190121_1206.py
|
7b31a26157fba00065137b53fc9726e9e44a8ccd
|
[] |
no_license
|
leocristino/django-vuejs-api
|
423efb5859b6b63d8b806749bb646c9b3ba51361
|
72a342664ced409f28aec7d3d30a75017b9093fa
|
refs/heads/master
| 2020-04-18T07:57:22.938058
| 2019-01-24T15:51:30
| 2019-01-24T15:51:30
| 167,378,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
# Generated by Django 2.1.5 on 2019-01-21 14:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='email',
),
migrations.RemoveField(
model_name='user',
name='id',
),
migrations.RemoveField(
model_name='user',
name='sex',
),
migrations.AddField(
model_name='user',
name='user_id',
field=models.AutoField(default=1, primary_key=True, serialize=False),
preserve_default=False,
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(max_length=250),
),
migrations.AlterField(
model_name='user',
name='password',
field=models.CharField(max_length=250),
),
]
|
[
"leo_cristino@yahoo.com.br"
] |
leo_cristino@yahoo.com.br
|
3e45d3ca7391b768fff91c4ebbdcfba94405a1b2
|
99fbb577cb5edb60cf31401d5127d739f571d334
|
/src/storfox_framework/grpc.py
|
56a7dcbd6683ac2fdffa04221d062212c043d6c7
|
[
"Apache-2.0"
] |
permissive
|
storfox/storfox-framework
|
0c93df7058b00e645524b702cd23ba00cc6fbf99
|
ffe8a89cb9abb203d696c6b24467f97834bd92a0
|
refs/heads/main
| 2023-03-07T16:57:29.632454
| 2021-02-19T14:01:33
| 2021-02-19T14:01:33
| 339,478,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
import typing
import asyncio
from grpclib.utils import graceful_exit
from grpclib.server import Server
def run_forever(handlers: typing.List[typing.Any], host, port):
async def start():
server = Server(handlers)
with graceful_exit([server]):
await server.start(host, port)
print(f"Serving on {host}:{port}")
await server.wait_closed()
loop = asyncio.get_event_loop()
loop.run_until_complete(start())
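# Hedged usage sketch (not part of the original module); MyService is a
# hypothetical grpclib handler class:
# run_forever([MyService()], host='127.0.0.1', port=50051)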
|
[
"bekhzod.tillakhanov@gmail.com"
] |
bekhzod.tillakhanov@gmail.com
|
8347b63b3084d8d3c89dadf0d3338a8b97aa8f3b
|
b192fa5cca5faaace36b3cac52d67bd4b50d881c
|
/libs/myflick/db/models.py
|
4b965ff92c534dc1947e81f2a3edc52aba6a424e
|
[] |
no_license
|
mitemitreski/myflicks
|
f9ae3501607e3d321f7812731766c73f6fa6df3e
|
c95addd564d71c2267fbf029060d14848c225c7e
|
refs/heads/master
| 2021-01-18T02:11:51.298371
| 2013-11-09T16:35:20
| 2013-11-09T16:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,844
|
py
|
from datetime import datetime
from sys import stderr
from json import loads as json_loads
from json import dumps as json_dumps
from random import shuffle
from werkzeug.urls import url_quote, url_quote_plus
from sqlalchemy.sql.expression import func, and_
from sqlalchemy.orm.exc import NoResultFound
import requests
from myflick.db import BaseModel
forbidden_domains = ('collider', 'impawards', 'imdb', 'dbcovers', 'turkcealtyazi', 'ebayimg',
'iceposter', 'beyondhollywood', 'examiner', 'bigcommerce', 'thisdistractedglobe',
'bdbphotos', 'mposter', 'images-amazon', 'audiorushes')
class User(BaseModel):
@staticmethod
def save_g_data(session, profile_id, fullname, email):
try:
u = User.load(session, nickname = profile_id, service = 'gmail')
except NoResultFound:
u = User(nickname = profile_id, service = 'gmail',
fullname = fullname, email = email)
session.add(u)
session.flush()
else:
u.fullname = fullname
u.email = email
return u
@staticmethod
def save_twitter_data(session, nickname, fullname, email):
try:
u = User.load(session, nickname = nickname, service = 'twitter')
except NoResultFound:
u = User(nickname = nickname, service = 'twitter',
fullname = fullname, email = email)
session.add(u)
session.flush()
else:
u.fullname = fullname
u.email = email
return u
@staticmethod
def save_fb_data(session, nickname, fullname, email):
try:
u = User.load(session, nickname = nickname, service = 'fb')
except NoResultFound:
u = User(nickname = nickname, service = 'fb',
fullname = fullname, email = email)
session.add(u)
session.flush()
else:
u.fullname = fullname
u.email = email
return u
@property
def alias(self):
if self.service in ('gmail', 'twitter', 'fb'):
return self.fullname
raise NotImplementedError, ' for service ' + self.service
@property
def home_url(self):
if self.service=='fb':
return "https://www.facebook.com/profile.php?id=%s" % self.nickname
if self.service=='twitter':
return "http://twitter.com/%s" % self.fullname
raise NotImplementedError, ' for service ' + self.service
@property
def alias_repr(self):
return url_quote(self.alias.replace(' ', '_'), safe='')
def set_rating(self, movie, rating):
try:
r = Rating.load(self.session, user=self, movie=movie)
except NoResultFound:
self.session.add(Rating(user=self, movie=movie,
rating=rating, rated=datetime.utcnow()))
self.session.flush()
else:
r.rating = rating
r.rated = datetime.utcnow()
def drop_rating(self, movie):
try:
r = Rating.load(self.session, user=self, movie=movie)
except NoResultFound:
pass
else:
r.drop()
self.session.flush()
@staticmethod
def recent(session, limit=5):
sq = session.query(Rating.user_id, func.max(Rating.rated).label('max_rated'))\
.group_by(Rating.user_id).subquery()
res = session.query(User)\
.join((sq, sq.c.user_id==User.id))\
.order_by(sq.c.max_rated.desc()).limit(limit).all()
return res
class Movie(BaseModel):
def __repr__(self):
return "<Movie: %s (%d)>" % (self.title, self.year)
def imdb_title_fetch(self):
if self.imdbid is None:
qtitle = self.title.lower()
if '(' in qtitle:
qtitle = qtitle.split('(')[0].strip()
if qtitle.endswith(' the'):
qtitle = qtitle[:-4]
if qtitle.endswith(' a'):
qtitle = qtitle[:-2]
# http://www.omdbapi.com/?s=Star%20Wars&y=1977&r=JSON
q = requests.get("http://www.omdbapi.com/?s=%s&y=%d&r=JSON" % (qtitle, self.year))
try:
q = json_loads(q.content)['Search']
except KeyError:
stderr.write("Error fetching: http://www.omdbapi.com/?s=%s&y=%d&r=JSON \n" % (qtitle, self.year))
return
self.imdbid = q[0]['imdbID']
q = requests.get("http://www.omdbapi.com/?i=%s&r=JSON" % self.imdbid)
self.meta = q.content
self.session.flush()
def get_meta(self):
if self.meta is None:
self.imdb_title_fetch()
if self.meta is None:
return {}
if type(self.meta) is dict:
return self.meta
return json_loads(self.meta)
def image_fetch(self):
q = '"%s" %d poster' % (self.title, self.year)
q = 'http://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=' + url_quote_plus(q)
q = requests.get(q)
q = json_loads(q.content)
try:
q = q['responseData']['results']
except (TypeError, KeyError):
return
for item in q:
img = item['url']
img_lower = img.lower()
cond = [forbidden not in img_lower
for forbidden in forbidden_domains]
if all(cond):
self.img = img
return
def get_image(self):
if self.img is None:
self.image_fetch()
if self.img is None:
return ''
return self.img
class Rating(BaseModel):
@staticmethod
def last_rated(session, limit=10):
sq = session.query(Rating.movie_id.label('movie_id'),
func.min(Rating.rated).label('min_rated'))\
.group_by(Rating.movie_id).subquery()
res = session.query(User, Movie.id, Movie.title, Rating.rating)\
.join((Rating, Rating.user_id==User.id))\
.join((Movie, Movie.id==Rating.movie_id))\
.join((sq, and_(sq.c.movie_id==Movie.id,
sq.c.min_rated==Rating.rated)))\
.order_by(sq.c.min_rated.desc()).limit(limit).all()
return res
@staticmethod
def _top_rated(session, limit, offset=9.90, appendto=tuple()):
# select movie_id, avg(rating) from rating group by movie_id
# having (avg(rating) > 9.90 and avg(rating) <=10.0);
top_offset = min([offset + .1, 10.0])
avg_ = func.avg(Rating.rating)
cnt_ = func.count(Rating.user_id)
res = session.query(Rating.movie_id, avg_)\
.group_by(Rating.movie_id)\
.having(and_(cnt_ > 1, avg_ > offset, avg_ <= top_offset))\
.all()
if len(res) > 0 or offset < 1.0:
res = tuple(res)
appendto = appendto + res
if len(appendto) >= limit or offset < 1.0:
appendto = list(appendto)
shuffle(appendto)
if len(appendto) > limit:
appendto[limit:] = []
return appendto
return Rating._top_rated(session, limit=limit, offset=offset-0.1, appendto=appendto)
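    # _top_rated widens the rating band by 0.1 per recursive call (starting at
    # 9.9-10.0) until enough multiply-rated movies are collected or the band
    # reaches the bottom of the scale, then shuffles and truncates to `limit`.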
@staticmethod
def top_rated(session, limit=5):
res = Rating._top_rated(session, limit)
ids = [r.movie_id for r in res]
movies = session.query(Movie).filter(Movie.id.in_(ids)).all()
movies = dict((m.id, m)
for m in movies)
return tuple((movies[id_], avg_)
for id_, avg_ in res)
|
[
"b.petrushev@gmail.com"
] |
b.petrushev@gmail.com
|
132af75dcb792056a588ce1774127ea575113a40
|
1ce535cff909700ba348f71294a211c4726b9470
|
/adapters/rabbitmq.py
|
1346c10d4fd277bd33b416407faf3258e7e9609f
|
[] |
no_license
|
WilliamMolina/python_testing
|
dfa66036e7802f7a19669bec23e5ae838346166d
|
7325c35c9531b3f7d6a4de7177213431715de667
|
refs/heads/master
| 2020-07-29T03:43:58.757020
| 2019-09-19T22:06:52
| 2019-09-19T22:06:52
| 209,656,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
from util.util import my_print
class RabbitMQ(object):
@classmethod
def connect(cls, user, password):
my_print(user, password)
|
[
"wmolina98@gmail.com"
] |
wmolina98@gmail.com
|
61b677b51b7bebc1c8cc8e05f097dc085aeec9df
|
cc7b4e71b3c27240ec650a75cc6f6bbab5e11387
|
/crdb/urls.py
|
1c725c9a22b9ce806c2359100497cf8be837a680
|
[
"MIT"
] |
permissive
|
jsayles/CoworkingDB
|
0cdada869d950a28cfef20d1b9c1eb3eb4d7b1c2
|
78776910eba0354a7fd96b2e2c53a78e934d8673
|
refs/heads/master
| 2023-02-22T23:11:19.040799
| 2021-12-28T19:13:39
| 2021-12-28T19:13:39
| 883,951
| 3
| 0
|
MIT
| 2023-02-15T17:59:10
| 2010-09-02T18:36:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
from django.conf import settings
from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import RedirectView
from django.contrib.auth import views as auth_views
from django.http import HttpResponse, HttpResponseRedirect
from crdb import views, api
app_name = 'crdb'
urlpatterns = [
path('favicon.ico', RedirectView.as_view(url='/static/img/favicon.ico', permanent=True)),
path('robots.txt', lambda r: HttpResponse("User-agent: *\nDisallow: /", content_type="text/plain")),
path('account/', include('django.contrib.auth.urls')),
path('login/', auth_views.LoginView.as_view(), name='login'),
path('logout/', auth_views.LogoutView.as_view(), name='logout'),
path('invite/', views.request_invite, name='request_invite'),
path('', views.home, name='home'),
path('search/', views.search, name='search'),
path('profile/', views.profile_redirect, name='profile_redirect'),
path('profile/<str:username>', views.profile_edit, name='profile_edit'),
path('people/', views.people_list, name='people_list'),
path('person/<str:username>', views.person_view, name='person_view'),
path('projects/', views.project_list, name='project_list'),
path('project/edit/', views.project_edit, name='project_edit'),
path('project/edit/<str:code>', views.project_edit, name='project_edit'),
path('project/view/<str:code>', views.project_view, name='project_view'),
path('email/add/', views.email_add, name='email_add'),
path('email/manage/<email_pk>/<action>/', views.email_manage, name='email_manage'),
path('email/verify/<email_pk>/', views.email_verify, name='email_verify'),
path('admin/', admin.site.urls),
path('api/', include('crdb.api')),
]
# if settings.DEBUG:
# import debug_toolbar
# urlpatterns.append(path('__debug__/', include(debug_toolbar.urls)))
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"jsayles@gmail.com"
] |
jsayles@gmail.com
|
a98c4f8f8a17d49a9ab1ab91e53471e81ff2909b
|
bda804b2922af42b25fd066ba5fb172b7acf8477
|
/doigetabonus.py
|
f5e62c745b3d3cd3000d9fcf43dcd907e928ea5b
|
[] |
no_license
|
kammitama5/Distracting_KATAS
|
c8fb813afee3a8527320c794dff5274c61551bbc
|
d10984bef26fada09a6ba0a8493ef640f22b2acc
|
refs/heads/master
| 2020-06-27T19:09:55.518148
| 2018-05-02T04:14:44
| 2018-05-02T04:14:44
| 74,521,719
| 0
| 1
| null | 2017-08-10T04:15:40
| 2016-11-22T23:17:00
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
def bonus_time(salary, bonus):
if bonus == True:
s1 = salary * 10
return '$' + str(s1)
else:
return '$' + str(salary)
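# Hedged illustration (not part of the original kata solution):
# bonus_time(10000, True)  -> '$100000'
# bonus_time(10000, False) -> '$10000'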
|
[
"noreply@github.com"
] |
kammitama5.noreply@github.com
|
a12d20d29e3840bb48e7f2d3bc435d0bc15ed2ff
|
ceadb36233359cee0fcaa0f849e59c0039d5a87f
|
/modules/kkpandas/kkio.py
|
31ab79af8d63371cbc2dcdffa16594d805ce4037
|
[] |
no_license
|
Mia9469/Rodgers2014
|
df1a6150f2fa0e3470293d868a2d46f0bd8d6065
|
390b6089c429877dfea9c56de153bbea0380c20f
|
refs/heads/master
| 2021-06-04T08:02:06.087375
| 2016-07-10T00:17:44
| 2016-07-10T00:17:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,956
|
py
|
"""Methods to read KlustaKwik-formatted spike times into dataframe.
Low-level
---------
These are just simple (but efficient) wrappers around pandas reading
methods that add on column names, etc.
read_resfile
read_fetfile
read_clufile
load_spiketimes
Medium-level
------------
from_KK : auto finds all relevant files in directory, methods to choose
groups, memoization ...
High-level
----------
KK_Server : object for integrating across multiple sessions / directories
You train it on the locations of data and it deals with calling from_KK.
"""
import numpy as np
import pandas
import os
import glob
from KKFileSchema import KKFileSchema
import utility
SPIKE_TIME_COLUMN_NAME = 'time'
# Bare-bones input/output functions for each filetype
# We use pandas loading functions because they are much faster for
# this type of data than equivalent functions in matplotlib
def read_resfile(filename):
"""Returns spiketimes in samples as Series"""
return pandas.read_table(
filename, names=[SPIKE_TIME_COLUMN_NAME])[SPIKE_TIME_COLUMN_NAME]
def write_resfile(df, filename):
"""Returns spiketimes in samples as Series"""
with file(filename, 'w') as fi:
df.tofile(fi, sep="\n")
fi.write('\n')
def read_fetfile(filename, guess_time_column=True, return_nfeatures=False):
"""Reads features from fet file.
If guess_time_column, will look at the last column and if it contains
only positive values, then we assume it is the spike time.
"""
with file(filename) as fi:
n_features = int(fi.readline().strip())
table_sz = len(fi.readline().strip().split(' '))
# Have to specify the names of the columns or it uses the first row
# as the titles
data = pandas.read_table(filename, sep=' ', skiprows=1,
names=['feature%d' % n for n in range(table_sz)])
# Auto-guess whether the last column is a time (it probably is)
if guess_time_column and np.all(data[data.columns[-1]] > 0):
data = data.rename(columns={data.columns[-1]: SPIKE_TIME_COLUMN_NAME},
copy=False)
# Here is where code to drop unwanted features would go, based on
# n_features
if return_nfeatures:
return data, n_features
else:
return data
def write_fetfile(df, filename, also_write_times=True,
count_time_as_feature=True):
"""Write out features to fetfile.
also_write_times: Write spike times as another row of feature file
count_time_as_feature: Include the spike time in the feature count
Notes
-----
To view the files in Klusters, you must set both also_write_times
and count_time_as_feature to True. This is a bug in Klusters though,
    because you wouldn't actually want to use the spike time as a feature
for clustering.
"""
if SPIKE_TIME_COLUMN_NAME not in df.columns and also_write_times:
print "warning: no spike times provided to write"
also_write_times = False
cols = df.columns
if also_write_times:
if count_time_as_feature:
n_features = df.shape[1]
else:
n_features = df.shape[1] - 1
elif SPIKE_TIME_COLUMN_NAME in df.columns:
# Drop the spike times before writing
cols = cols.drop([SPIKE_TIME_COLUMN_NAME])
n_features = df.shape[1] - 1
else:
n_features = df.shape[1]
with file(filename, 'w') as fi:
fi.write("%d\n" % n_features)
df.to_csv(fi, sep=' ', cols=cols, header=False, index=False)
def read_clufile(filename):
"""Returns cluster ids as Series"""
return pandas.read_table(filename, skiprows=1, names=['unit'])['unit']
def write_clufile(df, filename):
"""Write cluster DataFrame as a *.clu file"""
nclusters = len(df.unique())
with file(filename, 'w') as fi:
fi.write("%d\n" % nclusters)
df.tofile(fi, sep="\n")
fi.write("\n")
def read_spkfile(filename, n_spikes=-1, n_samples=-1, n_channels=-1):
"""Returns waveforms as 3d array (n_spk, n_samp, n_chan)
You can leave at most one shape parameter as -1
"""
waveforms = np.fromfile(filename, dtype=np.int16)
return waveforms.reshape((n_spikes, n_samples, n_channels))
def write_spkfile(waveforms, filename):
"""Writes waveforms to binary file
waveforms : 3d array (n_spk, n_samp, n_chan)
It will be converted to int16 before writing.
"""
waveforms.astype(np.int16).tofile(filename)
def load_spiketimes(kfs_or_path, group, fs=None):
"""Given KKFileSchema or path to one, load spike times from group
Returns Series
"""
kfs = KKFileSchema.coerce(kfs_or_path)
# check if res-files exist, which are faster to load
if 'res' in kfs.available_filetypes:
spiketimes = read_resfile(kfs.resfiles[group])
elif 'fet' in kfs.available_filetypes:
spiketimes = read_fetfile(kfs.fetfiles[group])[SPIKE_TIME_COLUMN_NAME]
else:
raise ValueError("no available method to grab spike times")
# optionally convert to seconds
if fs:
spiketimes = spiketimes / float(fs)
return spiketimes
def read_all_from_group(basename='.', group=1, n_samples=-1, n_spikes=-1,
n_channels=-1):
d = {}
kfs = KKFileSchema.coerce(basename)
res = read_resfile(kfs.resfiles[group])
d['res'] = res
clu = read_clufile(kfs.clufiles[group])
d['clu'] = clu
fet = read_fetfile(kfs.fetfiles[group])
d['fet'] = fet
if n_spikes == -1:
n_spikes = len(res)
spk = read_spkfile(kfs.spkfiles[group], n_spikes=n_spikes,
n_channels=n_channels, n_samples=n_samples)
d['spk'] = spk
return d
# This is the main function to intelligently load data from KK files
def from_KK(basename='.', groups_to_get=None, group_multiplier=None, fs=None,
verify_unique_clusters=True, add_group_as_column=True,
load_memoized=False, save_memoized=False,
also_get_features=False, also_get_waveforms=False, n_samples=-1, n_channels=-1):
"""Main function for loading KlustaKwik data.
basename : path to, or basename of, files
    groups_to_get : int or list of groups to get, otherwise get all groups
group_multiplier : if None, the cluster ids are used as-is
if int, then the group number times this multiplier is added to
the cluster id.
This is useful if groups contain the same cluster ids but you
want them to have unique labels.
fs : if None, the times are returned as integer number of samples
otherwise, they are divided by this number
verify_unique_clusters : if True, check that there are no overlapping
cluster ids across groups
add_group_as_column : if True, then the returned value has a column
for the group from which the spike came.
also_get_features, also_get_waveforms : if True, then the returned
value has columns for these as well.
n_samples, n_channels : Only necessary if also_get_waveforms. Only
one of these two parameters is necessary in that case.
Memoization
---
Loading is faster if it is done using the binary pandas save and load
functions than it is with the ASCII KlustaKwik format. For this reason
you can specify that the data be saved as a pandas file, or loaded from
a pandas file.
These options now default to False because of the potential for accidental
mis-use. The reason is that no checking is done whether the current
parameters are the same as the previous ones, when the memoization was
done.
load_memoized : If a file like basename.kkp exists, load this DataFrame
and return. Note all other parameters (except basename) are ignored.
save_memoized : the data will be written to a file like
basename.kkp after loading.
Returns:
DataFrame with columns 'unit', 'time', and optionally 'group'
"""
memoized_filename = None # to be determined later, if necessary
# load files like basename
try:
kfs = KKFileSchema.coerce(basename)
except ValueError:
# This occurs when no spike files are found, but there might still
# be kkp files.
load_memoized = True
memoized_filename = glob.glob(os.path.join(basename, '*.kkp'))[0]
# try to load memoized
if load_memoized:
if memoized_filename is None:
memoized_filename = kfs.basename + '.kkp'
try:
data = pandas.load(memoized_filename)
return_early = True
except IOError:
return_early = False
if return_early:
return data
# which groups to get
if groups_to_get:
if not hasattr(groups_to_get, '__len__'):
groups_to_get = [groups_to_get]
else:
groups_to_get = kfs.groups
# get each group
group_d = {}
for group in groups_to_get:
spiketimes = load_spiketimes(kfs, group, fs)
if 'clu' in kfs.available_filetypes:
unit_ids = read_clufile(kfs.clufiles[group])
else:
            # no clu file for this group: put all of its spikes in a single unit
            unit_ids = pandas.Series(
                np.ones(len(spiketimes), dtype=np.int) * group, name='unit')
if group_multiplier:
unit_ids += group_multiplier * group
# concatenate into data frame and add to dict
if add_group_as_column:
group_d[group] = pandas.DataFrame(
{spiketimes.name: spiketimes, unit_ids.name: unit_ids,
'group': np.ones(len(spiketimes), dtype=np.int) * group})
else:
group_d[group] = pandas.DataFrame(
{spiketimes.name: spiketimes, unit_ids.name: unit_ids})
# optionally get features too
if also_get_features:
assert 'fet' in kfs.available_filetypes
# Read the feature file
fetfile = kfs.fetfiles[group]
features = read_fetfile(
fetfile, guess_time_column=True, return_nfeatures=False)
# Pop off the time column since we don't need it
features.pop('time')
# Concatenate to df for this group
assert len(features) == len(group_d[group])
group_d[group] = pandas.concat([group_d[group], features], axis=1)
# optionally get waveforms too
if also_get_waveforms:
assert 'spk' in kfs.available_filetypes
# Read the spike file
# We know the number of spikes, but we need either the number
# of samples or the number of channels
spkfile = kfs.spkfiles[group]
waveforms = read_spkfile(spkfile, n_spikes=len(group_d[group]),
n_samples=n_samples, n_channels=n_channels)
# Flatten, convert to dataframe, and concatenate to result
nsamptot = waveforms.shape[1] * waveforms.shape[2]
waveforms_df = pandas.DataFrame(
waveforms.swapaxes(1, 2).reshape(waveforms.shape[0], nsamptot),
columns=['wf%d' % n for n in range(nsamptot)])
group_d[group] = pandas.concat(
[group_d[group], waveforms_df], axis=1)
# optionally check if groups contain same cluster
if verify_unique_clusters:
clusters_by_group = [
set(np.unique(np.asarray(groupdata.unit)))
for groupdata in group_d.values()]
if len(clusters_by_group) > 0:
# find number of unique clusters
# will error here if no clusters found
n_unique_clusters = len(set.union(*clusters_by_group))
n_total_clusters = sum([len(g) for g in clusters_by_group])
if n_unique_clusters != n_total_clusters:
raise ValueError("got %d overlapping clusters" %
(n_total_clusters - n_unique_clusters))
# turn list into one giant dataframe for everybody
sorted_keys = sorted(group_d.keys())
data = pandas.concat([group_d[key] for key in sorted_keys],
ignore_index=True)
if save_memoized:
data.save(memoized_filename)
return data
def flush(kfs_or_path, verbose=False):
"""Remove any memoized file (basename.kkp) from the directory."""
# Coerce to file schema
kfs = KKFileSchema.coerce(kfs_or_path)
# Find the memoized file
to_delete = kfs.basename + '.kkp'
# Delete it if it exists
if os.path.exists(to_delete):
if verbose: print "deleting", to_delete
os.remove(to_delete)
else:
if verbose: print "no memoized files to delete"
class KK_Server:
"""Object to load spike data from multiple sessions (directories)
The from_KK class method works great for a single session or a small
amount of data. Eventually you want to load from many different sessions
easily.
The purpose of this object is to encapsulate the finding and I/O of
KK files across sessions. Once it is initialized, you just specify the
session name and the unit that you want and it returns it.
You can also save it to disk and then load it later, without reinitializing
all of the file locations.
It also takes care of memoization, sampling rates, etc.
"""
def __init__(self, session_d=None, session_list=None, parent_dir=None,
group_multiplier=100, fs=30e3, **kk_kwargs):
"""Initialize a new server from scratch
session_d : dict {session_name: full_path_to_KK_dir}
session_list : list of session names (keys to session_d)
parent_dir : If session_d is None, looks for subdirectories like
[os.path.join(parent_dir, session_name)
for session_name in session_list]
group_multiplier, fs, **kk_kwargs : used in call to from_KK
"""
# Set up dict of sessions
if session_d is None:
session_d = {}
for session in session_list:
session_d[session] = os.path.join(parent_dir, session)
self.session_d = session_d
self.session_list = sorted(self.session_d.keys())
# Set up calling kwargs
self.kk_kwargs = kk_kwargs
self.kk_kwargs['group_multiplier'] = group_multiplier
self.kk_kwargs['fs'] = fs
self.kk_kwargs['load_memoized'] = True
self.kk_kwargs['save_memoized'] = True
def get(self, session=None, group=None, unit=None, **kwargs):
"""Returns spike times for specified session * unit
Extra keywords override object defaults (eg group_multiplier, fs,
memoization...)
Current behavior is to always load and save memoized versions for
best speed. This might change ...
"""
# Where the files are
dirname = self.session_d[session]
# Update the usual calling kwargs with any additional ones
call_kwargs = self.kk_kwargs.copy()
call_kwargs.update(kwargs)
# Do the loading
spikes = from_KK(dirname, **call_kwargs)
# Make this panda pick
#sub = spikes[spikes.unit == unit]
sub = utility.panda_pick_data(spikes, group=group, unit=unit)
return sub
def load(self, filename):
"""Renamed to get to avoid confusion with "save" """
raise DeprecationWarning("Use 'get' instead")
def save(self, filename):
"""Saves information for later use
All that is necessary to reconstitute this object is session_d
and kk_kwargs
"""
import cPickle
to_pickle = {
'session_d': self.session_d,
'kk_kwargs': self.kk_kwargs}
with file(filename, 'w') as fi:
cPickle.dump(to_pickle, fi)
def flush(self, verbose=False):
"""Delete all memoized data in my session dict"""
# Flush all sessions in the object
for session, path in self.session_d.items():
if verbose:
print "flushing", session
# Call the flush method from this module (not this object)
flush(path, verbose)
@classmethod
def from_saved(self, filename):
"""Load server from saved information"""
import cPickle
with file(filename) as fi:
res = cPickle.load(fi)
session_d = res['session_d']
kk_kwargs = res['kk_kwargs']
res = KK_Server(session_d=session_d)
res.kk_kwargs = kk_kwargs
return res
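# A sketch of saving a KK_Server and restoring it later; the session names and
# paths are placeholders.
#
#   server = KK_Server(session_list=['s1', 's2'], parent_dir='/data/sessions')
#   server.save('kk_server.pkl')
#   ...
#   server = KK_Server.from_saved('kk_server.pkl')
#   spikes = server.get(session='s1', group=1, unit=3)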
# Utility function for testing something, also demonstrates the usage
# of the reading and writing methods.
def append_duplicated_spikes(data_dir, output_dir, groupnum, idxs, n_samples=24):
"""Appends a fake neuron of duplicated spikes.
This is useful for testing whether some of the spikes are all in one
part of the cluster, which might suggest drift or bad clustering.
data_dir : klusters directory of original data (will not be modified)
output_dir : klusters directory containing copy of original data
(THIS ONE WILL BE MODIFIED!)
Copy over all clu, fet, res, etc files to the new directory.
groupnum : tetrode number, ie extension of klusters files to modify
idxs : indexes of spikes to duplicate as a new cluster
This functions doesn't know which unit you are trying to clone (if
any), so the indexes should be indexes into ALL of the spikes from
the group.
It will extract the times, features, and waveforms of the indexed spikes,
then append them to the end of the same files in output_dir.
The new cluster has an ID one greater than previous max.
"""
# find files
kfs1 = KKFileSchema.coerce(data_dir)
kfs2 = KKFileSchema.coerce(output_dir)
    # Duplicate clu (the read/write helpers are defined in this module, so call
    # them directly rather than through a kkpandas.kkio prefix)
    clu = read_clufile(kfs1.clufiles[groupnum])
    newclunum = clu.max() + 1
    newclu = pandas.concat([clu,
        pandas.Series(newclunum * np.ones(len(idxs)), dtype=np.int)],
        ignore_index=True)
    write_clufile(newclu, kfs2.clufiles[groupnum])
    # Duplicate res
    res = read_resfile(kfs1.resfiles[groupnum])
    newres = pandas.concat([res, res.ix[idxs]], ignore_index=True)
    write_resfile(newres, kfs2.resfiles[groupnum])
    # Duplicate fet
    fet = read_fetfile(kfs1.fetfiles[groupnum])
    newfet = pandas.concat([fet, fet.ix[idxs]], ignore_index=True)
    write_fetfile(newfet, kfs2.fetfiles[groupnum])
    # Duplicate spk
    spk = read_spkfile(kfs1.spkfiles[groupnum], n_samples=n_samples,
        n_spikes=fet.shape[0])
    newspk = np.concatenate([spk, spk[idxs, :]], axis=0)
    write_spkfile(newspk, kfs2.spkfiles[groupnum])
|
[
"xrodgers@gmail.com"
] |
xrodgers@gmail.com
|
d20720bc0a10e59361208c0e07e618e0d84769f0
|
c6c142f46daba7ab31f7a9a233fefbf56f5e1473
|
/scripts/colormap.py
|
55402b2b68bdc24354fd0c174d33240488840ff6
|
[
"Artistic-2.0"
] |
permissive
|
fluffyfreak/demoscene
|
c111c5de615f57db69b32c7706dcb930caecaaa8
|
af5c06ab2b8ebf42994a4903c8c3c05cc288b4b7
|
refs/heads/master
| 2022-02-03T23:50:27.515797
| 2019-07-20T11:40:57
| 2019-07-20T11:49:11
| 198,051,701
| 1
| 0
| null | 2019-07-21T12:08:59
| 2019-07-21T12:08:58
| null |
UTF-8
|
Python
| false
| false
| 4,122
|
py
|
#!/usr/bin/env python -B
import Image
import argparse
import os
import json
from quantize import Quantize
IMG_GRAY = 0
IMG_CLUT = 1
IMG_RGB24 = 2
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate color map required by some pixel effects.')
parser.add_argument(
'-c', '--colors', type=int, default=None,
help=('Generate new palette with given number of colors. '
'Otherwise use original palette.'))
parser.add_argument(
'-f', '--force', action='store_true',
help='If output files exist, the tool will overwrite them.')
parser.add_argument(
'-m', '--map', type=str, default='shades',
choices=['lighten', 'darken', 'shades', 'transparency'],
help='Type of color map.')
parser.add_argument(
'input', metavar='INPUT', type=str,
help='Path to input image or JSON (with array of colors) file.')
parser.add_argument(
'output', metavar='OUTPUT', type=str,
help='Output files basename (without extension).')
args = parser.parse_args()
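    # A typical invocation might look like the following (file names are
    # illustrative only; palette.json holds an array of [r, g, b] triples):
    #   python colormap.py -m shades -c 32 palette.json shades_map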
inputPath = os.path.abspath(args.input)
outputPath = os.path.abspath(args.output)
if not os.path.isfile(inputPath):
raise SystemExit('Input file does not exists!')
if inputPath.endswith('.json'):
rawPal = []
pal = []
with open(inputPath) as f:
for color in json.load(f):
assert len(color) == 3
color = int(color[0]), int(color[1]), int(color[2])
assert all(comp >= 0 and comp <= 255 for comp in color)
pal.append(color)
rawPal.extend(color)
colors = len(pal)
else:
try:
image = Image.open(inputPath)
except IOError as ex:
raise SystemExit('Error: %s.' % ex)
if image.mode != 'P':
raise SystemExit('Image has to be of CLUT type.')
rawPal = image.getpalette()
colors = max(set(image.getdata())) + 1
pal = [tuple(rawPal[3 * i:3 * (i + 1)]) for i in range(colors)]
colorMapData = []
if args.map == 'lighten':
size = (256, colors)
for y in range(size[1]):
for x in range(size[0]):
r, g, b = pal[y][0], pal[y][1], pal[y][2]
a = float(x) / 255.0
r += (255 - r) * a
g += (255 - g) * a
b += (255 - b) * a
colorMapData.append((int(r), int(g), int(b)))
elif args.map == 'darken':
size = (256, colors)
for y in range(size[1]):
for x in range(size[0]):
r, g, b = pal[y][0], pal[y][1], pal[y][2]
a = 1.0 - float(x) / 255.0
r -= r * a
g -= g * a
b -= b * a
colorMapData.append((int(r), int(g), int(b)))
elif args.map == 'shades':
size = (256, colors)
for y in range(size[1]):
for x in range(size[0]):
r, g, b = pal[y][0], pal[y][1], pal[y][2]
a = 2.0 * float(x) / 255.0 - 1.0
if a >= 0.0:
r += (255 - r) * a
g += (255 - g) * a
b += (255 - b) * a
else:
r += r * a
g += g * a
b += b * a
colorMapData.append((int(r), int(g), int(b)))
elif args.map == 'transparency':
size = (colors, colors)
for y in range(colors):
for x in range(colors):
r, g, b = pal[y][0], pal[y][1], pal[y][2]
r = (r + pal[x][0]) / 2
g = (g + pal[x][1]) / 2
b = (b + pal[x][2]) / 2
colorMapData.append((int(r), int(g), int(b)))
colorMap = Image.new('RGB', size)
colorMap.putdata(colorMapData)
if not args.colors:
data = []
for color in colorMapData:
pixel = 0
dist = 3 * 255 * 255
r, g, b = color
for i, neighbour in enumerate(pal):
nr, ng, nb = neighbour
nr = r - nr
ng = g - ng
nb = b - nb
d = nr * nr + ng * ng + nb * nb
if d < dist:
dist = d
pixel = i
data.append(pixel)
output = Image.new('L', size)
output.putdata(data)
output.putpalette(rawPal)
else:
output = Quantize(colorMap, colors=args.colors, dithering=False)
if os.path.isfile(outputPath) and not args.force:
raise SystemExit('Will not overwrite output file!')
output.save(outputPath)
|
[
"krystian.baclawski@gmail.com"
] |
krystian.baclawski@gmail.com
|
9ddab87be24b61ccdd222e1ab7cdb6aca5d98451
|
6996f7af93a4a48e2db880c906acd4ceaabe8fcb
|
/20200127.py
|
ebc66eb3e2f03fee18e6649c76f4b7d3b5f68c07
|
[] |
no_license
|
MantaCoeno/MantaCoeno
|
0de472d2cd43ec52c7d56a08604d280850af3d48
|
510f63dc6d554b6c01d19f749ec7bb017525d861
|
refs/heads/master
| 2020-12-19T07:23:33.325742
| 2020-01-28T20:01:40
| 2020-01-28T20:01:40
| 235,662,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,249
|
py
|
"""Пейзаж из как минимум 5 статических фигур"""
import graphics as gr
def draw_land(window, color_land, weight, height):
my_land = gr.Rectangle(gr.Point(0, height) , gr.Point(weight, 0))
my_land.draw(window)
my_land.setFill(color_land)
return 0
def draw_sky(window, horizon_height , weight, color_sky):
my_sky = gr.Rectangle(gr.Point(0, horizon_height) , gr.Point(weight, 0))
my_sky.draw(window)
my_sky.setFill(color_sky)
return 0
def draw_moon(window, moon_center_x, moon_center_y, moon_r, color_moon):
my_moon = gr.Circle(gr.Point(moon_center_x, moon_center_y), moon_r)
my_moon.draw(window)
my_moon.setFill(color_moon)
return 0
def draw_clouds(window, cloud_centers_x, cloud_centers_y,
cloud_part_r, color_cloud, number_of_clouds):
for j in range(number_of_clouds):
for i in range (3):
my_cloud_part = gr.Circle(gr.Point(cloud_centers_x[j]+50*i, cloud_centers_y[j]),
cloud_part_r)
my_cloud_part.draw(window)
my_cloud_part.setFill(color_cloud)
return 0
def draw_forest(window, horizon_height, number_of_trees):
for i in range (number_of_trees):
my_tree = gr.Rectangle(gr.Point(20*i, horizon_height-20) ,
gr.Point(20+20*i, horizon_height+20) )
my_tree.draw(window)
my_tree.setFill("green")
return 0
#test commit
def draw_Night(window,
weight, height,
color_land, color_sky, color_moon, color_cloud,
horizon_height,
moon_center_x, moon_center_y, moon_r,
cloud_centers_x, cloud_centers_y, cloud_part_r, number_of_clouds,
number_of_trees):
draw_land(window, color_land, weight, height)
draw_sky(window, horizon_height , weight, color_sky)
draw_moon(window, moon_center_x, moon_center_y, moon_r, color_moon)
draw_clouds(window, cloud_centers_x, cloud_centers_y,
cloud_part_r, color_cloud, number_of_clouds)
draw_forest(window, horizon_height, number_of_trees)
return
def main():
print("1")
draw = True
weight = 600
height = 600
color_land="grey"
color_sky="blue"
color_moon="yellow"
color_cloud="white"
horizon_height=240
moon_center_x=50
moon_center_y=50
moon_r = 20
cloud_centers_x = [70, 480]
cloud_centers_y = [70, 80]
cloud_part_r = 40
number_of_trees = 10
if len(cloud_centers_x) == len(cloud_centers_y):
number_of_clouds = len(cloud_centers_x)
else:
print("error in list of cloud senters")
draw = False
if draw == True:
window = gr.GraphWin("Night", weight, height)
draw_Night(window,
weight, height,
color_land, color_sky, color_moon, color_cloud,
horizon_height,
moon_center_x, moon_center_y, moon_r,
cloud_centers_x, cloud_centers_y, cloud_part_r, number_of_clouds,
number_of_trees)
window.getMouse()
window.close()
main()
|
[
"lexamal@inbox.ru"
] |
lexamal@inbox.ru
|
9b3d1fe21bedcf96752c64d8ee91c9feb8a95628
|
3504c73d6b5cd49d18d9ea91da77b558f41e45bc
|
/client/app.py
|
5361d4de3a75492716aceee54c9e4d4a5b998ee0
|
[] |
no_license
|
WokoLiu/python_oauth2_example
|
351eec5634d34285fed19a7810ca821b6e1ddbc5
|
e0bc86b30a9e4e12a9e275aa3a7316eaca402b4e
|
refs/heads/master
| 2020-05-25T14:39:02.878603
| 2019-06-04T07:58:36
| 2019-06-04T07:58:44
| 187,850,043
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,743
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019-05-20 12:21
# @Author : Woko
# @File : app.py
import os
from functools import wraps
import requests
from authlib.flask.client import OAuth
from flask import Flask
from flask import jsonify
from flask import redirect
from flask import render_template
from flask import session
from flask import url_for
from six.moves.urllib.parse import urlencode
from werkzeug.exceptions import HTTPException
app = Flask(__name__, template_folder='./template')
app.debug = True
app.secret_key = 'secret'
oauth = OAuth(app)
os.environ.setdefault('AUTHLIB_INSECURE_TRANSPORT', '1') # use http
CLIENT_ID = 'YOUR-CLIENT-ID'
CLIENT_SECRET = 'YOUR-CLIENT-SECRET'
REDIRECT_URI = 'http://127.0.0.1:3000/callback'
auth0 = oauth.register(
'woko_test',
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
api_base_url='http://127.0.0.1:5000',
access_token_url='http://127.0.0.1:5000/oauth/token',
authorize_url='http://127.0.0.1:5000/oauth/authorize',
client_kwargs={
'scope': 'profile',
},
)
# Here we're using the /callback route.
@app.route('/callback')
def callback_handling():
# Handles response from token endpoint
token = auth0.authorize_access_token()
# If it's a jwt, we can decode from it,
# otherwise get from userinfo api like this
res = oauth.woko_test.get('/api/me') # type: requests.Response
userinfo = res.json()
# Store the user information in flask session.
session['profile'] = {
'user_id': userinfo['id'],
'name': userinfo['username'],
}
return redirect('/dashboard')
@app.route('/login')
def login():
return auth0.authorize_redirect(redirect_uri=REDIRECT_URI)
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if 'profile' not in session:
# Redirect to Login page here
return redirect('/')
return f(*args, **kwargs)
return decorated
@app.route('/dashboard')
@requires_auth
def dashboard():
return render_template('dashboard.html', userinfo=session['profile'])
@app.route('/logout')
def logout():
# Clear session stored data
session.clear()
# Redirect user to logout endpoint
params = {'returnTo': url_for('home', _external=True),
'client_id': CLIENT_ID}
return redirect(auth0.api_base_url + '/logout?' + urlencode(params))
@app.errorhandler(Exception)
def handle_auth_error(ex):
response = jsonify(message=str(ex))
response.status_code = (ex.code if isinstance(ex, HTTPException) else 500)
return response
# Controllers API
@app.route('/')
def home():
return render_template('home.html')
if __name__ == "__main__":
app.run(host='127.0.0.1', port=3000)
|
[
"ylliu@hillinsight.com"
] |
ylliu@hillinsight.com
|
4c3908350c034494b0b10684dfc7e6b248e20a57
|
850b3c8fb4544b1185304f47cdbe66a6ea0b3f91
|
/pegLoanerApi/serializers.py
|
cef6ccec7082ee0590ed9929f884339e954b8ca4
|
[] |
no_license
|
ehmd96/LoanManager
|
7f29366490cd6172819a1470343354af52365d01
|
e5dd17a8266c1c19589131e43e7cf23db276b018
|
refs/heads/master
| 2023-01-11T22:48:19.253401
| 2020-11-22T09:02:18
| 2020-11-22T09:02:18
| 314,994,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
from rest_framework import serializers
from .models import Agent, Customer
class AgentSerializer(serializers.ModelSerializer):
class Meta:
model = Agent
fields = "__all__"
class CustomerSerializer(serializers.ModelSerializer):
class Meta:
model = Customer
fields = "__all__"
|
[
"ehmd96@gmail.com"
] |
ehmd96@gmail.com
|
5c8a3b8efe3588a8ad92057563d18b3d7bc47a1d
|
285d479edc1931276b5afb553e607bd07b085bcb
|
/cloth_manage/clothes/migrations/0005_auto_20210103_2100.py
|
8decb540c4859498a1ecacd6c5a0d372f6d6e7f1
|
[] |
no_license
|
kimuson13/close-cloth
|
d0a794156f0c874a3273b6a2ae9dc6bf3591cce7
|
72a6158153ce2f37007fe50658359b8f5ded3a2d
|
refs/heads/main
| 2023-05-28T22:48:10.892121
| 2021-06-12T09:45:39
| 2021-06-12T09:45:39
| 324,504,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
# Generated by Django 3.0.4 on 2021-01-03 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clothes', '0004_auto_20210103_1006'),
]
operations = [
migrations.AlterField(
model_name='wanted',
name='priority',
field=models.IntegerField(choices=[(1, '優先度1'), (2, '優先度2'), (3, '優先度3'), (4, '優先度4'), (5, '優先度5')]),
),
]
|
[
"kimuson13@basket.com"
] |
kimuson13@basket.com
|
a4785e18190f295e7c02fc7cb26245184e76a00e
|
868cd4895a8da17a7e3e2c8da0ec9e139f8d0c30
|
/project/AI경진대회/submission.py
|
710eb1881db844187d1d994c164ade2c2a9c836c
|
[] |
no_license
|
inJAJA/Study
|
35d4e410df7b476a4c298664bb99ce9b09bf6296
|
c2fd9a1e1f3a31cb3737cbb4891d848cc802f1d4
|
refs/heads/master
| 2022-12-21T11:41:15.396610
| 2020-09-20T23:51:45
| 2020-09-20T23:51:45
| 263,212,524
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,831
|
py
|
import cv2
import glob
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten, Input
from sklearn.model_selection import KFold, cross_val_score, cross_val_predict, train_test_split
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.metrics import accuracy_score, f1_score
from keras.models import load_model
from keras.layers import LeakyReLU
leaky = LeakyReLU(alpha = 0.2)
leaky.__name__ = 'leaky'
'''
#1. data
x_train = np.load('/tf/notebooks/Ja/data/x_train.npy').reshape(-1, 56, 56, 1)
x_pred = np.load('/tf/notebooks/Ja/data/x_test.npy').reshape(-1, 56, 56, 1)
x_val = np.load('/tf/notebooks/Ja/data/x_val.npy').reshape(-1, 56, 56, 1)
y_train = np.load('/tf/notebooks/Ja/data/y_train.npy')
y_val = np.load('/tf/notebooks/Ja/data/y_test.npy')
# load_model
model = load_model('/tf/notebooks/Ja/save_model/model02_3.h5')
model.summary()
# y_pred = model.predict(x_test)
# f1_score = f1_score(y_test, y_pred)
# print('f1_score : ', f1_score)
# submit_data
y_predict = model.predict(x_pred)
y_predict = np.where(y_predict >= 0.5, 1, 0)
y_predict = y_predict.reshape(-1,)
np.save('/tf/notebooks/Ja/sub/y_predict.npy', arr = y_predict)
print('save_complete')
'''
# y_predict_load
y_predict = np.load('/tf/notebooks/Ja/sub/y_predict.npy')
y_predict = y_predict.reshape(-1,)
print(y_predict.shape)
# submission
def submission(y_sub):
    # read the test file names once, then append one "<name> <label>" line per prediction
    path = '/tf/notebooks/Ja/data/test/test_label.txt'
    with open(path, 'r') as f1:
        title = f1.read().splitlines()
    with open('/tf/notebooks/Ja/sub/submission1.txt', 'a', encoding='utf-8') as f:
        for i in range(len(y_sub)):
            f.write(title[i] + ' ' + str(y_sub[i]) + '\n')
    print('complete')
submission(y_predict)
|
[
"zaiin4050@gmail.com"
] |
zaiin4050@gmail.com
|
70421ab0f7256c9fc8a3926b8edd9733b99a6420
|
4b4b2ca38d277b9d773e0a8464de8b3401d74f10
|
/trans/wsgi.py
|
3b681dbb884e77d5ac01d2f75c4e97dbd07168e7
|
[] |
no_license
|
ChanSeaThink/trans
|
98b997993da380c921a83cb3a47acf6fafa52fe3
|
386bb27f633570133a272bda1bc77ba352b348ff
|
refs/heads/master
| 2021-01-10T18:16:14.375155
| 2016-03-15T10:08:00
| 2016-03-15T10:08:00
| 47,160,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
WSGI config for trans project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "trans.settings")
application = get_wsgi_application()
|
[
"42406671@qq.com"
] |
42406671@qq.com
|
5b6ddb51b3f2e37fbb6faaf3b35a7ab9bcad071d
|
ae177eca9ff64ba6471ec49d5dc41949615d3b17
|
/flask/flaskexample/a_Model.py
|
dd07e02060f7f610d425cebb01a23c8b043a09b9
|
[] |
no_license
|
filipkos/HobbyHopper
|
cb716e16a4a4ad79889470cc34ff0a9cd99b98d1
|
d0f9082c38f64a46dc56313c190cd0551683a2f2
|
refs/heads/master
| 2020-08-07T12:01:16.300668
| 2019-11-06T21:50:07
| 2019-11-06T21:50:07
| 213,443,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
import numpy as np
from recommender import CosSimilarityRecommender
def ModelIt(fromUser = 'Default', topics=[], user = {}, csr = None, mn = 0, sig = 1):
lst = []
for k in user:
if user[k] == 1:
lst.append(k)
vec = np.array([t in lst for t in topics]).astype(int)
pred = csr.predict((vec - mn) / sig) * sig + mn
ranks = np.argsort(pred[0])[::-1]
result = {}
i = 0
for r in ranks:
t = topics[r]
if t in user.keys():
continue
result[topics[r]] = pred[0, r]
i += 1
if i > 2:
            break
print('Predictions:')
print(result)
if fromUser != 'Default':
return result
else:
return 'check your input'
|
[
"filipkos@Filips-MacBook-Air.local"
] |
filipkos@Filips-MacBook-Air.local
|
bda7a198ec463810a64819f9a7c778d0665ab503
|
62bc7601dd5b436261fb81320247f1760c0ecdcb
|
/sql_queries.py
|
6943875894e8fdc3544dbf0fe74783c5341b6657
|
[] |
no_license
|
augustovictor/sparkify-dwh
|
5f793e036804755cbcb3c1139dfdc38972852814
|
bf80e2c1d803ff3007515c498e55c3bf438ef825
|
refs/heads/master
| 2022-09-18T12:08:19.665988
| 2020-06-02T22:18:45
| 2020-06-02T22:18:53
| 266,589,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,579
|
py
|
import configparser
# CONFIG
config = configparser.ConfigParser()
config.read('dwh.cfg')
# DROP TABLES
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events_table"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs_table"
songplay_table_drop = "DROP TABLE IF EXISTS songplay_table"
user_table_drop = "DROP TABLE IF EXISTS user_table"
song_table_drop = "DROP TABLE IF EXISTS song_table"
artist_table_drop = "DROP TABLE IF EXISTS artist_table"
time_table_drop = "DROP TABLE IF EXISTS time_table"
# CREATE TABLES
staging_events_table_create= ("""
CREATE TABLE IF NOT EXISTS staging_events_table (
artist VARCHAR(255),
auth VARCHAR(255),
first_name VARCHAR(255),
gender CHAR,
item_in_session INTEGER,
last_name VARCHAR(255),
length DECIMAL(11, 5),
level VARCHAR(10),
location VARCHAR(255),
method VARCHAR(20),
page VARCHAR(100),
registration DECIMAL(18, 3),
session_id INTEGER,
song VARCHAR,
status INTEGER,
ts BIGINT,
user_agent VARCHAR,
user_id VARCHAR(255)
)
BACKUP NO
""")
staging_songs_table_create = ("""
CREATE TABLE IF NOT EXISTS staging_songs_table (
artist_id VARCHAR(100),
artist_latitude VARCHAR(100),
artist_location VARCHAR(100),
artist_longitude VARCHAR(100),
artist_name VARCHAR(255),
duration DECIMAL(11, 6),
num_songs VARCHAR(100),
song_id VARCHAR(100),
title VARCHAR(255),
year SMALLINT
)
BACKUP NO
""")
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplay_table (
songplay_id BIGINT IDENTITY(0, 1) PRIMARY KEY,
start_time DATE SORTKEY,
user_id VARCHAR(80) NOT NULL REFERENCES user_table(user_id),
level VARCHAR(100),
song_id VARCHAR(90) NOT NULL REFERENCES song_table(song_id),
artist_id VARCHAR(80) NOT NULL REFERENCES artist_table(artist_id),
session_id VARCHAR(70),
location VARCHAR(255),
user_agent VARCHAR
)
BACKUP NO
DISTSTYLE EVEN
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS user_table (
user_id VARCHAR(80) PRIMARY KEY,
first_name VARCHAR(255),
last_name VARCHAR(255),
gender CHAR,
level VARCHAR(100)
) BACKUP NO DISTSTYLE ALL COMPOUND SORTKEY(user_id, first_name)
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS song_table (
song_id VARCHAR(80) PRIMARY KEY,
title VARCHAR(255),
artist_id VARCHAR(100) REFERENCES artist_table(artist_id),
year SMALLINT,
duration DECIMAL(11, 5)
) BACKUP NO DISTSTYLE ALL COMPOUND SORTKEY(song_id, title)
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artist_table (
artist_id VARCHAR(80) PRIMARY KEY,
name VARCHAR(255),
location VARCHAR(100),
lattitude VARCHAR(100),
longitude VARCHAR(100)
) BACKUP NO DISTSTYLE ALL COMPOUND SORTKEY(artist_id, name)
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time_table (
time_key BIGINT IDENTITY(0, 1),
start_time DATE SORTKEY,
hour SMALLINT,
day SMALLINT,
week SMALLINT,
month SMALLINT,
year SMALLINT,
weekday SMALLINT) BACKUP NO DISTSTYLE ALL
""")
# STAGING TABLES
staging_events_copy = ("""
COPY staging_events_table FROM {} CREDENTIALS {} json {} REGION {}
""").format(config['S3']['LOG_DATA'], config['IAM_ROLE']['ARN'], config['S3']['LOG_JSONPATH'], config['AWS']['REGION'])
staging_songs_copy = ("""
COPY staging_songs_table FROM {} CREDENTIALS {} json 'auto' REGION {}
""").format(config['S3']['SONG_DATA'], config['IAM_ROLE']['ARN'], config['AWS']['REGION'])
# FINAL TABLES
# length, song, artist
songplay_table_insert = ("""
INSERT INTO songplay_table (
start_time,
user_id,
level,
song_id,
artist_id,
session_id,
location,
user_agent
)
(
SELECT DISTINCT
TIMESTAMP 'epoch' + se.ts/1000 * INTERVAL '1 second' AS start_time,
se.user_id as user_id,
se.level as level,
s.song_id as song_id,
a.artist_id as artist_id,
se.session_id as session_id,
a.location as location,
se.user_agent as user_agent
FROM staging_events_table se
JOIN artist_table a ON (a.name = se.artist)
JOIN song_table s ON (s.title = se.song AND s.duration = se.length)
WHERE se.page = 'NextSong'
)
""")
user_table_insert = ("""
INSERT INTO user_table (
user_id,
first_name,
last_name,
gender,
level
)
(
SELECT DISTINCT
se.user_id AS user_id,
se.first_name AS first_name,
se.last_name AS last_name,
se.gender AS gender,
se.level AS level
FROM staging_events_table se
WHERE se.user_id IS NOT NULL
AND se.page = 'NextSong'
)
""")
song_table_insert = ("""
INSERT INTO song_table (
song_id,
title,
artist_id,
year,
duration
)
(
SELECT DISTINCT
ss.song_id AS song_id,
ss.title AS title,
ss.artist_id AS artist_id,
ss.year AS year,
ss.duration AS duration
FROM staging_songs_table ss
WHERE song_id IS NOT NULL
AND title IS NOT NULL
AND duration IS NOT NULL
)
""")
artist_table_insert = ("""
INSERT INTO artist_table (
artist_id,
name,
location,
lattitude,
longitude
)
(
SELECT DISTINCT
ss.artist_id AS artist_id,
ss.artist_name AS name,
ss.artist_location AS location,
ss.artist_latitude AS latitude,
ss.artist_longitude AS longitude
FROM staging_songs_table ss
WHERE artist_id IS NOT NULL
)
""")
time_table_insert = ("""
INSERT INTO time_table (
start_time,
hour,
day,
week,
month,
year,
weekday
)
(
SELECT DISTINCT
TIMESTAMP 'epoch' + se.ts/1000 * INTERVAL '1 second' AS start_time,
EXTRACT(hour FROM start_time) AS hour,
EXTRACT(day FROM start_time) AS day,
EXTRACT(week FROM start_time) AS week,
EXTRACT(month FROM start_time) AS month,
EXTRACT(year FROM start_time) AS year,
EXTRACT(weekday FROM start_time) AS weekday
FROM staging_events_table se
WHERE se.ts IS NOT NULL
)
""")
# QUERY LISTS
create_table_queries = [staging_events_table_create, staging_songs_table_create, user_table_create, artist_table_create, song_table_create, time_table_create, songplay_table_create]
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [user_table_insert, song_table_insert, artist_table_insert, time_table_insert, songplay_table_insert]
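# Sketch of how these query lists are typically consumed by a driver script;
# the connection string below is a placeholder.
#
#   import psycopg2
#   conn = psycopg2.connect("host=<cluster> dbname=<db> user=<user> password=<pw> port=5439")
#   cur = conn.cursor()
#   for query in create_table_queries:
#       cur.execute(query)
#       conn.commit()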
|
[
"victor.costa@loft.com.br"
] |
victor.costa@loft.com.br
|
973ffbb7fa840003e341762399925dc4b527ad1d
|
ad2704933de4502ae9de91e6d915f9dbe010b446
|
/take/chapter03/knock22.py
|
b5d6c37c700c0c28eba88eb6502335a499fb7d04
|
[] |
no_license
|
tmu-nlp/100knock2017
|
266e68917d8d5a7f5d0c064f1bc2da5fa402a253
|
629bd1155d0fe78cd9302ae9a7cdf0922b778fe7
|
refs/heads/master
| 2021-01-19T17:36:53.328997
| 2017-07-24T07:09:54
| 2017-07-24T07:09:54
| 88,334,932
| 8
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
import re
from knock20 import gen_uk_info
catName = re.findall(r'\[\[Category:(.*)\]\]', gen_uk_info())
for c in catName:
print(c)
|
[
"take"
] |
take
|
35f83edd99698a38e1d32d107366d68bea80c37d
|
9360d1a9cef857f373785776651d22eb2bf4595a
|
/python/paddle/v2/framework/tests/test_image_classification_layer.py
|
b4eda13552e60f009ec910e3d21e9d77107133a1
|
[
"Apache-2.0"
] |
permissive
|
hyqskevin/Paddle
|
a2f34c1f1e2a99c9b49419bb73c783f60b919132
|
e745bcfc5e611bac7fa0a550651d13768871bfb6
|
refs/heads/develop
| 2021-09-16T01:39:52.060357
| 2017-11-04T05:40:04
| 2017-11-04T05:40:04
| 109,517,080
| 0
| 0
|
Apache-2.0
| 2018-06-14T12:13:27
| 2017-11-04T17:50:58
|
C++
|
UTF-8
|
Python
| false
| false
| 2,708
|
py
|
import unittest
import paddle.v2.framework.layers as layers
import paddle.v2.framework.nets as nets
from paddle.v2.framework.framework import Program
def conv_block(input,
num_filter,
groups,
dropouts,
program=None,
init_program=None):
return nets.img_conv_group(
input=input,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act='relu',
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type='max',
program=program,
init_program=init_program)
class TestLayer(unittest.TestCase):
def test_batch_norm_layer(self):
program = Program()
init_program = Program()
images = layers.data(
name='pixel',
shape=[3, 48, 48],
data_type='float32',
program=program)
layers.batch_norm(
input=images, program=program, init_program=init_program)
# print str(program)
def test_dropout_layer(self):
program = Program()
init_program = Program()
images = layers.data(
name='pixel',
shape=[3, 48, 48],
data_type='float32',
program=program)
layers.dropout(
x=images,
dropout_prob=0.5,
program=program,
init_program=init_program)
# print str(program)
def test_img_conv_group(self):
program = Program()
init_program = Program()
images = layers.data(
name='pixel',
shape=[3, 48, 48],
data_type='float32',
program=program,
init_program=init_program)
conv1 = conv_block(images, 64, 2, [0.3, 0], program, init_program)
conv2 = conv_block(conv1, 256, 3, [0.4, 0.4, 0], program, init_program)
# print str(program)
def test_elementwise_add_with_act(self):
program = Program()
init_program = Program()
image1 = layers.data(
name='pixel1',
shape=[3, 48, 48],
data_type='float32',
program=program,
init_program=init_program)
image2 = layers.data(
name='pixel2',
shape=[3, 48, 48],
data_type='float32',
program=program,
init_program=init_program)
out = layers.elementwise_add(
x=image1,
y=image2,
act='relu',
program=program,
init_program=init_program)
# print(program)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
hyqskevin.noreply@github.com
|
c8652d17bcf61bbe3816e64b8c426634b51edab6
|
595c28386d827b3f6828cf2c17bf462fb4c3e56c
|
/app/main.py
|
240a702eb5cc578a33f51a927cef373aa5902722
|
[] |
no_license
|
keyapi/fastapi-demo
|
5107ffc65ad74d6602125c9115eaeb9a221858f0
|
9e3cab16197b002c92a8a7bc78200b2a0ea45bf4
|
refs/heads/master
| 2023-05-31T06:06:43.446548
| 2020-06-10T06:52:16
| 2020-06-10T06:52:16
| 265,177,805
| 0
| 0
| null | 2021-06-11T18:07:31
| 2020-05-19T07:34:16
|
Python
|
UTF-8
|
Python
| false
| false
| 107
|
py
|
from fastapi import FastAPI
from app.api.movies import movies
app = FastAPI()
app.include_router(movies)
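# With this layout the app would typically be served with uvicorn, e.g.
#   uvicorn app.main:app --reload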
|
[
"kaepyz@gmail.com"
] |
kaepyz@gmail.com
|
b474a110e0e2f5a297c632049c67fe7440b9f102
|
cf574da1855619c5ee9b5b39e95df96e042f092b
|
/mastermind.v1.py
|
f5746a0b2fe1dcf32da356be31240bf77c041def
|
[] |
no_license
|
ashroff12/Mastermind
|
2399af7116caf0c56eee0e86df17753d3ed86adc
|
9ada8d57261c3f8e83dce830d50862ba2bc56e11
|
refs/heads/master
| 2020-07-31T18:58:01.433104
| 2019-10-02T00:26:13
| 2019-10-02T00:26:13
| 210,719,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,468
|
py
|
import random
import tkinter
root = tkinter.Tk()
COLORS = ['red', 'yellow', 'blue', 'lime', 'aqua', 'gold']
code = []
white = 0
black = 0
guess_count=0
current_guess = [''] * 4
color_picker = ''
def new_code():
global code
while len(code) <4:
r = random.randint(0,5)
if r not in code:
code.append(r)
def score_guess(guess):
global black
global white
global code
global guess_count
white=0
black=0
#first check for white
#print (guess)
#print (code)
for i in range(4):
if guess[i] == str(code[i]):
white += 1
else:
for j in range(4):
if guess[i] == str(code[j]):
black += 1
print("whites: " + str(white) + " blacks: " + str(black))
if white == 4:
#do some win stuff
print("You win:(")
elif guess_count == 10:
#you ran out pf guesses
print('You suck at your only job')
else:
#no win no lose keep going
player_guess()
#then check for black
def player_guess():
print('Make a guess')
guess = input()
score_guess(guess)
def opencolorpicker(pos, btn):
global color_picker
color_picker = tkinter.Toplevel(root)
color1 = tkinter.Button(color_picker, width=4, height=2, bg=COLORS[0], command=lambda:closecolorpicker(pos, btn, COLORS[0]))
color1.grid(row=0, column=0)
color2 = tkinter.Button(color_picker, width=4, height=2, bg=COLORS[1], command=lambda:closecolorpicker(pos, btn, COLORS[1]))
color2.grid(row=0, column=1)
color3 = tkinter.Button(color_picker, width=4, height=2, bg=COLORS[2], command=lambda:closecolorpicker(pos, btn, COLORS[2]))
color3.grid(row=0, column=2)
color4 = tkinter.Button(color_picker, width=4, height=2, bg=COLORS[3], command=lambda:closecolorpicker(pos, btn, COLORS[3]))
color4.grid(row=0, column=3)
color5 = tkinter.Button(color_picker, width=4, height=2, bg=COLORS[4], command=lambda:closecolorpicker(pos, btn, COLORS[4]))
color5.grid(row=0, column=4)
color6 = tkinter.Button(color_picker, width=4, height=2, bg=COLORS[5], command=lambda:closecolorpicker(pos, btn, COLORS[5]))
color6.grid(row=0, column=5)
def closecolorpicker(pos, btn, newcolor):
global color_picker
color_picker.destroy()
btn.config(bg=newcolor)
current_guess[pos] = newcolor
def draw():
global canvas
canvas.delete(tkinter.ALL)
canvas.create_rectangle(0,0,cwidth,cheight, fill='orange')
#buttons for tkinter
b1 = tkinter.Button(root, width=4, height=2, bg="#00ffff", command=lambda:opencolorpicker(0,b1))
b1.grid(row=1, column=0)
b2 = tkinter.Button(root, width=4, height=2, bg="#000fff000", command=lambda:opencolorpicker(1,b2))
b2.grid(row=1, column=1)
b3 = tkinter.Button(root, width=4, height=2, bg="#129034", command=lambda:opencolorpicker(2,b3))
b3.grid(row=1, column=2)
b4 = tkinter.Button(root, width=4, height=2, bg="#519022", command=lambda:opencolorpicker(3,b4))
b4.grid(row=1, column=3)
#submit button
submit = tkinter.Button(root, text="Submit Guess", font="Arial 16 bold", padx=10)
submit.grid(row=1, column=4, padx=20)
#canvas
cwidth=300
cheight=400
canvas=tkinter.Canvas(root, width=cwidth, height=cheight)
canvas.grid(row=2, column=0, columnspan=5)
#runthegame
new_code()
draw()
root.mainloop()
|
[
"noreply@github.com"
] |
ashroff12.noreply@github.com
|
9552c2393b09a5a75f719a8b09b833e868deb70b
|
fd31572c183304f1385a8e89782d44976106ead1
|
/Prueba/backend/migrations/0010_auto_20181014_2126.py
|
07cc2616ddb3c6945448c40197c17ff3e65df270
|
[] |
no_license
|
ThexCrack10/PruebaCargaMasiva
|
ddfac6d39532f3e13a09c0b34609642c5a7ea3ac
|
c996bcd8c5f5c81c5bce1f64143af83c02200c1f
|
refs/heads/master
| 2020-03-31T15:47:55.583833
| 2018-10-16T02:33:03
| 2018-10-16T02:33:03
| 152,351,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 789
|
py
|
# Generated by Django 2.1.1 on 2018-10-15 00:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0009_remove_categoria_flag'),
]
operations = [
migrations.AlterField(
model_name='codigoproducto',
name='valor',
field=models.CharField(max_length=128),
),
migrations.AlterField(
model_name='producto',
name='codigos',
field=models.ManyToManyField(through='backend.CodigoProducto', to='backend.TipoCodigo'),
),
migrations.AlterField(
model_name='tipocodigo',
name='nombre',
field=models.CharField(max_length=128),
),
]
|
[
"noreply@github.com"
] |
ThexCrack10.noreply@github.com
|
57dd2c9ed0a23098dd5744524a6738b64b207994
|
5e53250c6f72341d052ca3592301dedaa4aacfe8
|
/Backend/FoodTruck/env/bin/pyrsa-decrypt-bigfile
|
e9e1679977210bff0a61d6fd92b9514a463873ee
|
[] |
no_license
|
coder46/Uber-Food-Trucks
|
3132a5b7d38a2531792618ac8dd4198e0929d8d0
|
6cd65629c451df20104c1ae43626302cb6c1f5e6
|
refs/heads/master
| 2020-12-14T09:51:02.438960
| 2017-06-21T18:58:29
| 2017-06-21T18:58:29
| 59,014,518
| 1
| 1
| null | 2016-05-22T20:20:11
| 2016-05-17T10:53:51
|
Python
|
UTF-8
|
Python
| false
| false
| 246
|
#!/root/GameServer/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import decrypt_bigfile
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(decrypt_bigfile())
|
[
"faisal.iiit@gmail.com"
] |
faisal.iiit@gmail.com
|
|
a30fe77305034b6938183425ed12f89ba43e1684
|
096be495ae3e995d884f6154c0e5a00348e49d1d
|
/companies-leetcode/google/arrays-and-strings/plus-one.py
|
05ac6dcbe019121c0b43cf48075e1495e4eec2d0
|
[] |
no_license
|
JackMGrundy/coding-challenges
|
b81828efd580934226931735949c1fba1151dfac
|
0ef3ef71d75ea20cd3079ad6aa3211f61efb7b7a
|
refs/heads/master
| 2022-12-04T11:28:27.247546
| 2022-11-28T01:57:21
| 2022-11-28T01:57:21
| 163,903,589
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
"""
Given a non-empty array of digits representing a non-negative integer, plus one to the integer.
The digits are stored such that the most significant digit is at the head of the list, and each element in the array contain a single digit.
You may assume the integer does not contain any leading zero, except the number 0 itself.
Example 1:
Input: [1,2,3]
Output: [1,2,4]
Explanation: The array represents the integer 123.
Example 2:
Input: [4,3,2,1]
Output: [4,3,2,2]
Explanation: The array represents the integer 4321.
"""
from typing import List  # the List[int] annotations below need this import

# 99th percentile
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
carry = 0
digits[-1] += 1
for i in range(len(digits)-1, -1, -1):
digits[i] += carry
if digits[i] < 10:
return digits
else:
carry = digits[i] - 9
digits[i] = 0
return [1] + digits
# 99th percentile. Obnoxious 1-liner
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
return [ int(x) for x in list(str((1+sum([ x*(10**(len(digits)-1-i)) for i,x in enumerate(digits) ]))))]
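# Quick sanity checks for either Solution class above (a sketch, runnable only
# outside the LeetCode harness):
#
#   assert Solution().plusOne([1, 2, 3]) == [1, 2, 4]
#   assert Solution().plusOne([4, 3, 2, 1]) == [4, 3, 2, 2]
#   assert Solution().plusOne([9, 9]) == [1, 0, 0]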
|
[
"jackmgrundy@gmail.com"
] |
jackmgrundy@gmail.com
|
08bea8286ae5e59331c5743477c15c69f9b9d292
|
55b877071f1dc660dae32bcb7eee6933327318f4
|
/accounts/migrations/0003_auto_20211119_2108.py
|
6e9e35e6e001cf6dac5cff000d4a0fd91e20c549
|
[] |
no_license
|
dnd-hack/backend
|
61a18c3f35e8b447b6dfe65e1611b513250a7926
|
0a908b125141a217217dfe160a1367e30aed2c76
|
refs/heads/master
| 2023-09-02T18:26:41.110359
| 2021-11-20T07:25:36
| 2021-11-20T07:25:36
| 429,919,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
# Generated by Django 3.1 on 2021-11-19 21:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20211119_2102'),
]
operations = [
migrations.AlterField(
model_name='user',
name='nickname',
field=models.CharField(blank=True, max_length=15, null=True, unique=True),
),
]
|
[
"julysein723@gmail.com"
] |
julysein723@gmail.com
|
c4c972137d674256036caab4316d51ab6eed3370
|
6bd5c04d9234233ab16a3cbd871f92b33522c817
|
/project/SciProgHF/test/exacorr_exatensor_lambda/test
|
296525b51fb1a92ba52cbbdf9899a39f16eb8bbe
|
[] |
no_license
|
YHordijk/scientificprogramming
|
61d897c0a45c267e4b66c786254b47e6da09b56d
|
c3545807781c5f83424d52ee0163fd2b61f82239
|
refs/heads/main
| 2023-04-01T01:32:46.561152
| 2021-03-28T17:53:20
| 2021-03-28T17:53:20
| 346,670,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
#!/usr/bin/env python
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from runtest_dirac import Filter, TestRun
test = TestRun(__file__, sys.argv)
f = Filter()
f.add(from_string = 'Final L1amp',
num_lines = 2,
rel_tolerance = 1.0e-9)
f.add(from_string = 'Individual (non-zero) components',
to_string = 'Total contribution to principal components',
rel_tolerance = 1.0e-6)
test.run(['ccsd_x2c.inp'], ['H2O_nosym.mol'], f)
#The next run works (LV, Feb 12 2021) but takes too long to be useful
#test.run(['ccsd_energy_large_cvalue_exacorr.inp'], ['h2o.xyz'], f, args=' --incmo')
sys.exit(test.return_code)
|
[
"42876712+YHordijk@users.noreply.github.com"
] |
42876712+YHordijk@users.noreply.github.com
|
|
26574308c84e26256aab26abae95b62c2f9142a3
|
83ba5377cb46faac367919af70d5e7f9634aad54
|
/pylesson9/lab9(2).py
|
b7e7f96e97919e1e65c4dd6606c5fccd0b9f0252
|
[] |
no_license
|
josephj02233/joseph0426
|
2547c885c6c1162d5bfa4aa7a2c61c82035f0176
|
202bf5a778738d5120464b8376a9ecde596731bf
|
refs/heads/master
| 2020-05-23T10:08:15.497034
| 2017-01-30T03:01:46
| 2017-01-30T03:01:46
| 80,389,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
mylist = ["joseph","ben","hanna","nicole","lala"]
def first(mylist):
for i in mylist:
print(i[0])
first(mylist)
|
[
"josephkuo@Josephde-MacBook-Pro.local"
] |
josephkuo@Josephde-MacBook-Pro.local
|
484dfaad65bf0e7d37f06d6b654e76afa6b3f4ca
|
679884a99e45dada95d5206707c625bce7c56535
|
/migrations/versions/f580e944c99f_add_security_fields_to_member_table.py
|
b35b2c6eac0ab740c32c23d3b36a1f4c099b2fc9
|
[] |
no_license
|
cgil/trie
|
87eaadd43a0348aa5f2ef3b045dd32f4e0170dce
|
71a2532855c70302d9dbcb58cecc0ccb62b5ecd2
|
refs/heads/master
| 2021-12-14T04:08:39.588530
| 2017-08-06T21:56:38
| 2017-08-06T21:56:38
| 57,602,358
| 0
| 0
| null | 2021-12-03T03:17:40
| 2016-05-01T14:45:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
"""add security fields to member table
Revision ID: f580e944c99f
Revises: c5b44fdcd4c5
Create Date: 2016-06-08 17:22:12.961146
"""
# revision identifiers, used by Alembic.
revision = 'f580e944c99f'
down_revision = 'c5b44fdcd4c5'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('member', sa.Column('active', sa.Boolean()))
op.add_column('member', sa.Column('confirmed_at', sa.DateTime()))
op.add_column('member', sa.Column('current_login_at', sa.DateTime()))
op.add_column('member', sa.Column('current_login_ip', sa.String()))
op.add_column('member', sa.Column('last_login_at', sa.DateTime()))
op.add_column('member', sa.Column('last_login_ip', sa.String()))
op.add_column('member', sa.Column('login_count', sa.Integer()))
def downgrade():
op.drop_column('member', 'active')
op.drop_column('member', 'confirmed_at')
op.drop_column('member', 'current_login_at')
op.drop_column('member', 'current_login_ip')
op.drop_column('member', 'last_login_at')
op.drop_column('member', 'last_login_ip')
op.drop_column('member', 'login_count')
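# This revision is applied or rolled back with the usual Alembic commands, e.g.
#   alembic upgrade head
#   alembic downgrade c5b44fdcd4c5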
|
[
"cgil1210@gmail.com"
] |
cgil1210@gmail.com
|
8f5e234e6a0017632d62fde22f59b64148ec8075
|
3516632c2279da5cf14464c702b580c049cb6494
|
/lecture_3_inheritance/__init__.py
|
2f3853f1e605eb0dadd05a2efb29dfc091906248
|
[] |
no_license
|
ivn-svn/May2021PythonOOP
|
401a0f858946b3df8b93dd74b27d6f0b7f42c7d3
|
9d084dcbc8ff6851a751d537bfdbb5fd940904a0
|
refs/heads/main
| 2023-07-01T09:13:23.459677
| 2021-08-10T18:10:29
| 2021-08-10T18:10:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
class Person:
def __init__(self, name, age):
self.name = name
self.age = self.set_age(age)
def set_age(self, age):
if age <= 0:
raise ValueError("Age can not be negative")
return age
def sleep(self):
return "I am sleeping"
def __repr__(self):
return "This is a person"
class Employee(Person):
def __init__(self, name, age, date):
super().__init__(name, age)
self.date = date
def work(self):
return "working"
def __repr__(self):
        return super().__repr__() + " who is also an Employee"
e = Employee("test", 5, "...")
print(e.sleep())
class Manager(Person):
def __init__(self, name, age, people_managing):
super().__init__(name, age)
self.people_managing = people_managing
class Contractor(Person):
def __init__(self, name, age, date_of_expiry):
super().__init__(name, age)
self.date_of_expiry = date_of_expiry
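# A short sketch exercising the subclasses defined above; names and values are
# illustrative.
#
#   m = Manager("Ana", 40, people_managing=5)
#   c = Contractor("Bob", 30, date_of_expiry="2022-01-01")
#   print(m.sleep())   # inherited from Person -> "I am sleeping"
#   print(repr(m))     # -> "This is a person"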
|
[
"ines.iv.ivanova@gmail.com"
] |
ines.iv.ivanova@gmail.com
|
73a4f67632962c643879b6829a97c06adc540872
|
5e3ebc83bc3fe2f85c34563689b82b1fc8b93a04
|
/google/ads/googleads/v5/services/types/customer_service.py
|
5af3161dc3f4f93dd24c34b5537d8fbf46d71867
|
[
"Apache-2.0"
] |
permissive
|
pdsing/google-ads-python
|
0ce70227cd6bb13a25cd13de0ca05c2636279ecd
|
ee2c059498d5679a0d1d9011f3795324439fad7c
|
refs/heads/master
| 2023-05-04T18:39:57.412453
| 2021-05-21T16:38:17
| 2021-05-21T16:38:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,157
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v5.enums.types import access_role as gage_access_role
from google.ads.googleads.v5.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v5.resources.types import customer as gagr_customer
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v5.services",
marshal="google.ads.googleads.v5",
manifest={
"GetCustomerRequest",
"MutateCustomerRequest",
"CreateCustomerClientRequest",
"CustomerOperation",
"CreateCustomerClientResponse",
"MutateCustomerResponse",
"MutateCustomerResult",
"ListAccessibleCustomersRequest",
"ListAccessibleCustomersResponse",
},
)
class GetCustomerRequest(proto.Message):
r"""Request message for
[CustomerService.GetCustomer][google.ads.googleads.v5.services.CustomerService.GetCustomer].
Attributes:
resource_name (str):
Required. The resource name of the customer
to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1)
class MutateCustomerRequest(proto.Message):
r"""Request message for
[CustomerService.MutateCustomer][google.ads.googleads.v5.services.CustomerService.MutateCustomer].
Attributes:
customer_id (str):
Required. The ID of the customer being
modified.
operation (google.ads.googleads.v5.services.types.CustomerOperation):
Required. The operation to perform on the
customer
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v5.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(proto.STRING, number=1)
operation = proto.Field(
proto.MESSAGE, number=4, message="CustomerOperation",
)
validate_only = proto.Field(proto.BOOL, number=5)
response_content_type = proto.Field(
proto.ENUM,
number=6,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class CreateCustomerClientRequest(proto.Message):
r"""Request message for
[CustomerService.CreateCustomerClient][google.ads.googleads.v5.services.CustomerService.CreateCustomerClient].
Attributes:
customer_id (str):
Required. The ID of the Manager under whom
client customer is being created.
customer_client (google.ads.googleads.v5.resources.types.Customer):
Required. The new client customer to create.
The resource name on this customer will be
ignored.
email_address (str):
Email address of the user who should be
invited on the created client customer.
Accessible only to customers on the allow-list.
access_role (google.ads.googleads.v5.enums.types.AccessRoleEnum.AccessRole):
The proposed role of user on the created
client customer. Accessible only to customers on
the allow-list.
"""
customer_id = proto.Field(proto.STRING, number=1)
customer_client = proto.Field(
proto.MESSAGE, number=2, message=gagr_customer.Customer,
)
email_address = proto.Field(proto.STRING, number=5, optional=True)
access_role = proto.Field(
proto.ENUM, number=4, enum=gage_access_role.AccessRoleEnum.AccessRole,
)
class CustomerOperation(proto.Message):
r"""A single update on a customer.
Attributes:
update (google.ads.googleads.v5.resources.types.Customer):
Mutate operation. Only updates are supported
for customer.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
"""
update = proto.Field(
proto.MESSAGE, number=1, message=gagr_customer.Customer,
)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask.FieldMask,
)
class CreateCustomerClientResponse(proto.Message):
r"""Response message for CreateCustomerClient mutate.
Attributes:
resource_name (str):
The resource name of the newly created
customer client.
invitation_link (str):
Link for inviting user to access the created
customer. Accessible to allowlisted customers
only.
"""
resource_name = proto.Field(proto.STRING, number=2)
invitation_link = proto.Field(proto.STRING, number=3)
class MutateCustomerResponse(proto.Message):
r"""Response message for customer mutate.
Attributes:
result (google.ads.googleads.v5.services.types.MutateCustomerResult):
Result for the mutate.
"""
result = proto.Field(
proto.MESSAGE, number=2, message="MutateCustomerResult",
)
class MutateCustomerResult(proto.Message):
r"""The result for the customer mutate.
Attributes:
resource_name (str):
Returned for successful operations.
customer (google.ads.googleads.v5.resources.types.Customer):
The mutated customer with only mutable fields after mutate.
The fields will only be returned when response_content_type
is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(proto.STRING, number=1)
customer = proto.Field(
proto.MESSAGE, number=2, message=gagr_customer.Customer,
)
class ListAccessibleCustomersRequest(proto.Message):
r"""Request message for
[CustomerService.ListAccessibleCustomers][google.ads.googleads.v5.services.CustomerService.ListAccessibleCustomers].
"""
class ListAccessibleCustomersResponse(proto.Message):
r"""Response message for
[CustomerService.ListAccessibleCustomers][google.ads.googleads.v5.services.CustomerService.ListAccessibleCustomers].
Attributes:
resource_names (Sequence[str]):
Resource name of customers directly
accessible by the user authenticating the call.
"""
resource_names = proto.RepeatedField(proto.STRING, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
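# A minimal usage sketch for these message types (the IDs and the field-mask path below are
# hypothetical; proto-plus messages accept their fields as keyword arguments):
#
#   op = CustomerOperation(
#       update=gagr_customer.Customer(resource_name="customers/1234567890"),
#       update_mask=field_mask.FieldMask(paths=["tracking_url_template"]),
#   )
#   request = MutateCustomerRequest(customer_id="1234567890", operation=op)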
|
[
"noreply@github.com"
] |
pdsing.noreply@github.com
|
70a13b86cb33399e492395b061b0bff4685da789
|
c5dfee744a946d09e889ba2ab76ceeb388bb270e
|
/lecopain/controllers/__init__.py
|
d392a0b76ef751f47bf8e3be4dc22e033f1e704f
|
[] |
no_license
|
pigalon/lecopainPi
|
665cc3ac17b7b6af5cc26b7bdbc055b15f3484d1
|
6fddf002290883524523598392935370465b0314
|
refs/heads/master
| 2023-02-21T23:11:30.960798
| 2021-02-13T19:55:45
| 2021-02-13T19:55:45
| 171,766,010
| 0
| 0
| null | 2023-02-15T23:45:10
| 2019-02-20T23:35:43
|
Python
|
UTF-8
|
Python
| false
| false
| 79
|
py
|
from flask import Blueprint
controllers = Blueprint('controllers', __name__)
|
[
"pigalon@gmail.com"
] |
pigalon@gmail.com
|
d35e86abe3e8aef5654aeb497b85f223d6392106
|
360b6e66bf512b1ad4edbdf30adf0563622b8630
|
/declensions.py
|
6bca8456646cc793b1fc73a7a7402d960b31d17c
|
[] |
no_license
|
wilson-ben/Latin
|
07f4f85b994c1ef906bcb4ee6302aa20b9ebafd5
|
8599f5f81775bea681d594822cfc86212a553cb9
|
refs/heads/master
| 2020-03-31T05:20:42.482920
| 2018-10-07T13:13:32
| 2018-10-07T13:13:32
| 151,942,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
import inflect
import requests
from bs4 import BeautifulSoup
# Pull from www.thelatinlibrary.com/decl.html
# https://en.wiktionary.org/wiki/Category:Latin_appendices
# Not currently working.
def declenFunc():
    dec_choice = input("Declension Number [1-5] : ")
    # Check for '/back' before converting to int, otherwise int('/back') raises ValueError
    if dec_choice == '/back':
        return 0
    elif dec_choice.isdigit() and 1 <= int(dec_choice) <= 5:
        inflect_tool = inflect.engine()
        webAddr = inflect_tool.number_to_words(int(dec_choice))
        page = requests.get("https://en.wiktionary.org/wiki/Appendix:Latin_{}_declension".format(webAddr))
        entire_page = BeautifulSoup(page.text, 'html.parser')
        print(entire_page)
    else:
        print("invalid choice...")
|
[
"bjwilson@protonmail.com"
] |
bjwilson@protonmail.com
|
ef4c0bc53bd6c1439cc2eaefeb4ca438cf482474
|
bd178784528af48f377d0cbfba6cdb73438a4bb6
|
/wscript
|
91440deff0a8674d2ca9f391e72abf5615d036de
|
[
"MIT"
] |
permissive
|
seem-sky/htmlscanner
|
f8c7146d259a3e2498ea9fc49ab40eca6b4e60d5
|
f7cbb4d1d3ff9e8729cd864b8746dcc4a065629c
|
refs/heads/master
| 2021-01-12T22:21:01.390314
| 2012-06-30T22:14:23
| 2012-06-30T22:14:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
from shutil import copy
srcdir = '.'
blddir = 'build'
VERSION = '0.0.1'
def set_options(opt):
opt.tool_options('compiler_cxx')
def configure(conf):
conf.check_tool('compiler_cxx')
conf.check_tool('node_addon')
def build(bld):
obj = bld.new_task_gen('cxx', 'shlib', 'node_addon')
obj.target = 'htmlscanner'
obj.source = 'src/xh_scanner.cc src/htmlscanner.cc'
def install(inst):
copy('build/Release/htmlscanner.node', 'lib/htmlscanner.node')
|
[
"peter@jbaron.com"
] |
peter@jbaron.com
|
|
8721ab24b3f8aa47415bbcf03d6be6c57135b38c
|
4e144702ff35467136e6a0167c98b49c409d2c98
|
/opensfm/dataset.py
|
c0056ae99279f91cca094ac2ebb34454df949851
|
[
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"GPL-3.0-only",
"Python-2.0"
] |
permissive
|
YangJae96/KMU_Visual-SLAM
|
50f894a86431e5257670f7a01905eceb4222e607
|
8fbbcdda58a5b349a344b580d22d13ab69ff53cc
|
refs/heads/master
| 2022-12-08T15:51:57.200744
| 2020-03-18T07:05:30
| 2020-03-18T07:05:30
| 246,765,583
| 0
| 0
|
BSD-2-Clause
| 2022-11-22T04:40:03
| 2020-03-12T07:02:19
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 14,897
|
py
|
import cv2 as cv
import os
from os.path import abspath, join, dirname
import copy
import logging
import argparse
import time
import sys
import numpy as np
import errno
import io
import json
import gzip
import pickle
import six
import networkx as nx
from opensfm import extract_metadata
from opensfm import detect_features
from opensfm import match_features
from opensfm import create_tracks
from opensfm import reconstruct
from opensfm import mesh_data
from opensfm import undistort
from opensfm import undistorted_dataset
from opensfm import compute_depthmaps
from opensfm import dataset
from opensfm import log
from opensfm import io
from opensfm import exif
from opensfm import types
from opensfm import config
from opensfm import features
from opensfm import tracking
from PIL import Image
logger = logging.getLogger(__name__)
logging.getLogger("Starting Webcam!!").setLevel(logging.WARNING)
class DataSet(object):
"""Accessors to the main input and output data.
Data include input images, masks, and segmentation as well
temporary data such as features and matches and the final
reconstructions.
All data should be stored inside a variable to use RAM.
Not in the folder.
It is possible to store data remotely or in different formats
by subclassing this class and overloading its methods.
"""
def __init__(self, data_path, image_list):
self.data_path=data_path
self.image_list=image_list
self._load_config()
self._load_mask_list()
self.meta_data_d={}
self.camera_models={}
self.feature_of_images={}
self.feature_report={}
self.match_of_images={}
self.track_graph_of_images=nx.Graph()
self.reconstructions=[]
self.reconstructions_as_json={}
#self.undistorted_data
self.udata_image={}
self.udata_reconstruction=[]
self.udata_track_graph=nx.Graph()
self.raw_depthmap={}
self.raw_ply={}
self.clean_depthmap={}
self.clean_ply={}
self.pruned_depthmap={}
self.pruned_ply={}
def _load_config(self):
config_file = os.path.join(self.data_path, 'config.yaml')
self.config = config.load_config(config_file)
def show(self, image):
cv.imshow('test',image)
cv.waitKey(0)
cv.destroyAllWindows()
def images(self):
"""List of all images in the dataset"""
return self.image_list.keys()
def _image_file(self, image):
"""Path to the image file."""
return self.image_list[image]
def image_size(self,image):
return image.shape[0], image.shape[1]
def _exif_file(self, image):
"""
Return path of exif information for given image
:param image: Image name, with extension (i.e. 123.jpg)
"""
return os.path.join(self._exif_path(), image + '.exif')
def _exif_path(self):
"""Return path of extracted exif directory"""
return os.path.join(self.data_path, 'exif')
def save_exif(self, image, data):
try:
os.makedirs(self._exif_path())
except os.error as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(self._exif_path()):
raise
with io.open_wt(self._exif_file(image)) as fout:
io.json_dump(data, fout)
def load_exif(self, image):
#image='1.jpg'
return self.meta_data_d
def _camera_models_file(self):
"""Return path of camera model file"""
return os.path.join(self.data_path, 'camera_models.json')
def load_camera_models(self):
# """Return camera models data"""
with io.open_rt(self._camera_models_file()) as fin:
obj = json.load(fin)
return io.cameras_from_json(obj)
#"""Load pre-extracted image exif metadata."""
def save_camera_models(self, camera_models):
"""Save camera models data"""
with io.open_wt(self._camera_models_file()) as fout:
obj = io.cameras_to_json(camera_models)
io.json_dump(obj, fout)
def _camera_models_file(self):
"""Return path of camera model file"""
return os.path.join(self.data_path, 'camera_models.json')
def _load_mask_list(self):
"""Load mask list from mask_list.txt or list masks/ folder."""
mask_list_file = os.path.join(self.data_path, 'mask_list.txt')
if os.path.isfile(mask_list_file):
with io.open_rt(mask_list_file) as fin:
lines = fin.read().splitlines()
self._set_mask_list(lines)
else:
self._set_mask_path(os.path.join(self.data_path, 'masks'))
def _set_mask_path(self, path):
"""Set mask path and find all masks in there"""
self.mask_files = {}
for image in self.images():
filepath = os.path.join(path, image + '.png')
if os.path.isfile(filepath):
self.mask_files[image] = filepath
def features_exist(self, image):
return os.path.isfile(self._feature_file(image)) or\
os.path.isfile(self._feature_file_legacy(image))
def feature_type(self):
"""Return the type of local features (e.g. AKAZE, SURF, SIFT)"""
feature_name = self.config['feature_type'].lower()
if self.config['feature_root']:
feature_name = 'root_' + feature_name
return feature_name
def load_image(self, image, unchanged=False, anydepth=False):
"""Load image pixels as numpy array.
The array is 3D, indexed by y-coord, x-coord, channel.
The channels are in RGB order.
"""
a=io.imread(self._image_file(image), unchanged=unchanged, anydepth=anydepth)
return a
def load_mask(self, image):
"""Load image mask if it exists, otherwise return None."""
        if image in self.mask_files:  # this branch is not entered here
mask_path = self.mask_files[image]
mask = io.imread(mask_path, grayscale=True)
if mask is None:
raise IOError("Unable to load mask for image {} "
"from file {}".format(image, mask_path))
else:
mask = None
return mask
def load_segmentation_mask(self, image):
"""Build a mask from segmentation ignore values.
The mask is non-zero only for pixels with segmentation
labels not in segmentation_ignore_values.
"""
ignore_values = self.segmentation_ignore_values(image)
if not ignore_values:
return None
segmentation = self.load_segmentation(image)
if segmentation is None:
return None
return self._mask_from_segmentation(segmentation, ignore_values)
def _combine_masks(self, mask, smask):
if mask is None:
if smask is None:
return None
else:
return smask
else:
if smask is None:
return mask
else:
return mask & smask
def segmentation_ignore_values(self, image):
"""List of label values to ignore.
Pixels with this labels values will be masked out and won't be
processed when extracting features or computing depthmaps.
"""
return self.config.get('segmentation_ignore_values', [])
def load_combined_mask(self, image):
"""Combine binary mask with segmentation mask.
Return a mask that is non-zero only where the binary
mask and the segmentation mask are non-zero.
"""
        mask = self.load_mask(image)  # not needed here; this returns None
        smask = self.load_segmentation_mask(image)  # not needed here; this returns None
return self._combine_masks(mask, smask)
def load_features_mask(self, image, points):
"""Load a feature-wise mask.
This is a binary array true for features that lie inside the
combined mask.
The array is all true when there's no mask.
"""
if points is None or len(points) == 0:
return np.array([], dtype=bool)
mask_image = self.load_combined_mask(image)
if mask_image is None:
logger.debug('No segmentation for {}, no features masked.'.format(image))
return np.ones((points.shape[0],), dtype=bool)
        # the detect_features command stops here **********
exif = self.load_exif(image)
width = exif["width"]
height = exif["height"]
orientation = exif["orientation"]
new_height, new_width = mask_image.shape
ps = upright.opensfm_to_upright(
points[:, :2], width, height, orientation,
new_width=new_width, new_height=new_height).astype(int)
mask = mask_image[ps[:, 1], ps[:, 0]]
n_removed = np.sum(mask == 0)
logger.debug('Masking {} / {} ({:.2f}) features for {}'.format(
n_removed, len(mask), n_removed / len(mask), image))
return np.array(mask, dtype=bool)
def _feature_file(self, image):
"""
Return path of feature file for specified image
:param image: Image name, with extension (i.e. 123.jpg)
"""
return os.path.join(self._feature_path(), image + '.features.npz')
def _feature_path(self):
"""Return path of feature descriptors and FLANN indices directory"""
return os.path.join(self.data_path, "features")
def _save_features(self, filepath, points, descriptors, colors=None):
io.mkdir_p(self._feature_path())
features.save_features(filepath, points, descriptors, colors, self.config)
def save_features(self, image, points, descriptors, colors):
self._save_features(self._feature_file(image), points, descriptors, colors)
def save_total_features(self,features):
self.feature_of_images=features
def save_report_of_features(self,image,report):
self.feature_report.update({image:report})
def _matches_path(self):
"""Return path of matches directory"""
return os.path.join(self.data_path, 'matches')
def _matches_file(self, image):
#"""File for matches for an image"""
return os.path.join(self._matches_path(), '{}_matches.pkl.gz'.format(image))
def save_matches(self, image, matches):
io.mkdir_p(self._matches_path())
with gzip.open(self._matches_file(image), 'wb') as fout:
pickle.dump(matches, fout)
def load_matches(self, image):
return self.match_of_images[image]
def save_total_matches(self,image,matches):
self.match_of_images.update({image:matches})
def load_features(self, image):
return features.load_features(self._feature_file(image), self.config)
def _tracks_graph_file(self, filename=None):
"""Return path of tracks file"""
return os.path.join(self.data_path, filename or 'tracks.csv')
def save_tracks_graph(self, graph, filename=None):
with io.open_wt(self._tracks_graph_file(filename)) as fout:
tracking.save_tracks_graph(fout, graph)
def save_total_tracks_graph(self,graph,filename=None):
self.track_graph_of_images=graph
def invent_reference_lla(self, images=None):
#lat, lon, alt = 0.0, 0.0, 0.0
reference = {'latitude': 0.0, 'longitude': 0.0, 'altitude': 0} # Set altitude manually.
#self.save_reference_lla(reference)
return reference
def save_reference_lla(self, reference):
with io.open_wt(self._reference_lla_path()) as fout:
io.json_dump(reference, fout)
def _reference_lla_path(self):
return os.path.join(self.data_path, 'reference_lla.json')
def reference_lla_exists(self):
return os.path.isfile(self._reference_lla_path())
def _reconstruction_file(self, filename):
"""Return path of reconstruction file"""
return os.path.join(self.data_path, filename or 'reconstruction.json')
def save_reconstruction(self, reconstruction, filename=None, minify=False):
with io.open_wt(self._reconstruction_file(filename)) as fout:
io.json_dump(io.reconstructions_to_json(reconstruction), fout, minify)
def save_reconstruction_to_json(self, reconstruction):
self.reconstructions_as_json.update({'cameras':reconstruction.cameras})
self.reconstructions_as_json.update({'shots':reconstruction.shots})
self.reconstructions_as_json.update({'points':reconstruction.points})
def save_undistorted_reconstruction(self, reconstruction):
self.udata_reconstruction=reconstruction
def save_undistorted_tracks_graph(self,graph):
self.udata_track_graph=graph
def save_udata(self,udata):
self.undistorted_data=udata
def save_raw_depthmap(self, image, depthmap):
self.raw_depthmap.update({image:depthmap})
def save_raw_ply(self, image, ply):
self.raw_ply.update({image:ply})
def save_clean_depthmap(self, image, depthmap):
self.clean_depthmap.update({image:depthmap})
def save_clean_ply(self, image, ply):
self.clean_ply.update({image:ply})
def save_pruned_depthmap(self, image, depthmap):
self.pruned_depthmap.update({image:depthmap})
def save_ply_line(self, image, ply):
self.pruned_ply.update({image:ply})
def load_raw_depthmap(self, image):
o = self.raw_depthmap[image]
return o['depth'], o['plane'], o['score'], o['nghbr'], o['nghbrs']
def load_clean_depthmap(self, image):
o = self.clean_depthmap[image]
return o['depth'], o['plane'], o['score']
def load_pruned_depthmap(self, image):
o = self.pruned_depthmap[image]
if 'detections' not in o:
return o['points'], o['normals'], o['colors'], o['labels'], np.zeros(o['labels'].shape)
else:
return o['points'], o['normals'], o['colors'], o['labels'], o['detections']
def _report_path(self):
return os.path.join(self.data_path, 'reports')
def save_report(self, report_str, path):
"""Save report string to a file."""
filepath = os.path.join(self._report_path(), path)
io.mkdir_p(os.path.dirname(filepath))
with io.open_wt(filepath) as fout:
return fout.write(report_str)
def load_report(self, path):
"""Load a report file as a string."""
with io.open_rt(os.path.join(self._report_path(), path)) as fin:
return fin.read()
def profile_log(self):
#"Filename where to write timings."
return os.path.join(self.data_path, 'profile.log')
def _depthmap_path(self):
#self.depthmap_path = os.path.join(self.data_path, 'undistorted')
return os.path.join(self.data_path)
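# A usage sketch (the paths and image-list mapping are hypothetical; image_list maps
# image names to file paths on disk):
#   data = DataSet('/path/to/project', {'1.jpg': '/path/to/project/images/1.jpg'})
#   pixels = data.load_image('1.jpg')
#   mask = data.load_combined_mask('1.jpg')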
|
[
"savejwy@gmail.com"
] |
savejwy@gmail.com
|
3094fe984a24bf450f9bc8dcd44759ae5508af34
|
b96d89f0c96cca14dee4746435cfb43cf307a05e
|
/library/api/db.py
|
d4a1162a1225bb026bc69df156e3b6b28dd7064a
|
[
"MIT"
] |
permissive
|
SuperTucano/TcloudServer
|
60b4b20ba1f67d3717dea7343744d18df7c3aa73
|
3b1edab9b16b21e106650b2c433df19ce123e2cc
|
refs/heads/master
| 2023-03-19T07:54:07.688416
| 2021-03-04T02:47:33
| 2021-03-04T02:47:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
from contextlib import contextmanager
from datetime import datetime
# from flask_caching import Cache
from flask_sqlalchemy import SQLAlchemy as BaseSQLAlchemy
# from redis import StrictRedis
from library.api.exceptions import SaveObjectException
# from public_config import REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_PASSWORD
class SQLAlchemy(BaseSQLAlchemy):
@contextmanager
def auto_commit(self):
try:
yield
self.session.commit()
except Exception:
self.session.rollback()
raise SaveObjectException()
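    # Usage sketch (the model instance is hypothetical); a failed commit rolls back
    # the session and raises SaveObjectException:
    #   with db.auto_commit():
    #       db.session.add(some_model_instance)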
# MySQL database
db = SQLAlchemy()
# redis
# t_redis = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, password=REDIS_PASSWORD, decode_responses=True)
# Cache, backed by Redis
# cache = Cache(
# config={
# 'CACHE_TYPE': 'redis',
# 'CACHE_REDIS_HOST': REDIS_HOST,
# 'CACHE_REDIS_PORT': REDIS_PORT,
# 'CACHE_REDIS_PASSWORD': REDIS_PASSWORD,
# 'CACHE_REDIS_DB': REDIS_DB
# })
class EntityModel(db.Model):
__abstract__ = True
__table_args__ = (
dict(
mysql_engine='InnoDB',
mysql_charset='utf8',
)
)
id = db.Column(db.Integer, primary_key=True)
creation_time = db.Column(db.DateTime, default=datetime.now)
modified_time = db.Column(db.TIMESTAMP,
nullable=False,
default=db.func.current_timestamp())
@classmethod
def gets(cls, ids):
        return cls.query.filter(cls.id.in_(ids)).all()
class EntityWithNameModel(EntityModel):
__abstract__ = True
name = db.Column(db.String(100), nullable=False)
    def __repr__(self):
        return self.name
def __unicode__(self):
return self.name
|
[
"mobile#3"
] |
mobile#3
|
c9d7b717d2f910039ac2b9cd61e8e8c56ccd5d80
|
7cabf218f6fe777ed608c4db3597a5f1fe2c81db
|
/libs/boxes/blob.py
|
b3f165c7e6a9219264a5d82ae3b452e275029f3e
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
choheemun/FastMaskRCNN
|
6a71fde7cf46c3a2648ada3c82e6c6bd7fdba8b6
|
bd97f02bb949dba8a61040c57e1a548351478716
|
refs/heads/master
| 2022-12-02T13:19:53.093607
| 2020-08-21T17:03:07
| 2020-08-21T17:03:07
| 288,670,057
| 0
| 0
|
Apache-2.0
| 2020-08-19T07:58:57
| 2020-08-19T07:58:57
| null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Blob helper functions."""
import numpy as np
import cv2
from ..fast_rcnn.config import cfg
def im_list_to_blob(ims):
"""Convert a list of images into a network input.
Assumes images are already prepared (means subtracted, BGR order, ...).
"""
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
dtype=np.float32)
    for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
"""Mean subtract and scale an image for use in a blob."""
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
if cfg.TRAIN.RANDOM_DOWNSAMPLE:
r = 0.6 + np.random.rand() * 0.4
im_scale *= r
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
return im, im_scale
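# A usage sketch (assumes `img` is a BGR uint8 image and that cfg.PIXEL_MEANS comes from
# the Fast R-CNN config):
#   im, im_scale = prep_im_for_blob(img, cfg.PIXEL_MEANS, target_size=600, max_size=1000)
#   blob = im_list_to_blob([im])   # shape: (1, H, W, 3), float32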
|
[
"shangchong90@gmail.com"
] |
shangchong90@gmail.com
|
42c7916877fd0a005e89532588c3b3dc34e60e58
|
548ee01beaed5f8da3433c33e21d6b362394fb97
|
/Work/sadovod_7.py
|
e1ea032c9d77ec17d850ee96e39163177db2b8cc
|
[] |
no_license
|
avto727/Parser_Python
|
fb5a50f685747f24ec940be68bb99d3aa9ea6e60
|
e9a015ffda35f6bb47cf63aa792243064ec01cb1
|
refs/heads/master
| 2021-01-23T20:21:54.376540
| 2017-10-25T19:59:14
| 2017-10-25T19:59:14
| 102,853,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,860
|
py
|
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import ElementNotVisibleException
import requests
from bs4 import BeautifulSoup
import urllib.request
import csv
import json
import urllib.parse
def init_driver():
driver = webdriver.Chrome()
# driver.wait = WebDriverWait(driver, 1)
return driver
def get_html(url, page):
if page == 1:
r = requests.get(url).text
soup = BeautifulSoup(r, 'lxml')
else:
driver = init_driver()
driver.get(url)
try:
page = str(page)
button = driver.find_element_by_link_text(page)
button.click()
except TimeoutException:
print("Box or Button not found in sadovod.city")
time.sleep(5)
r = requests.get(url).text
soup = BeautifulSoup(r, 'lxml')
return soup
# Added: computing the last page number
def write_csv(data):
with open('sadovod.csv', 'a') as f:
writer = csv.writer(f, delimiter=';', lineterminator='\n')
writer.writerow( (data['number'], data['article'], data['title'], data['price'], data['sizes'], data['color'], data['facture'], data['date'], data['url']) )
def get_total_pages(soup):
t_pages = soup.find('ul', class_='pagination').find_all('a')[-1].get('data-ci-pagination-page')
return int(t_pages)
def parse(soup):
table = soup.find('div', class_='product-list clearfix')
items = table.find_all('div', class_='product-item')
i = 1
for item in items:
number = i
try:
title = item.find('div', class_='description-container text-center').find('a').text.strip()
except:
title = ''
try:
url_g = 'https://sadovod.city' + item.find('div', class_='image-container').find('a').get('href')
except:
url_g = ''
try:
article = url_g.split('/')[4]
except:
            article = ''
try:
price = item.find('div', class_='price-container text-right').text.strip()
except:
price = ''
        # Crawler: visit each product's page to get its sizes and colors.
soup = get_html(url_g, 1)
table_2 = soup.find('div', class_='combinations')
try:
sizes = table_2.find('div', class_='select col-md-6 col-sm-9').find('select').text.strip()
except:
sizes = ''
try:
color = table_2.find('div', class_='product-colors form-group').find('select').text.strip()
except:
color = ''
try:
facture = table_2.find('div', class_='product-tkan form-group').find('p').text.strip()
except:
facture = ''
try:
pdate = table_2.find('div', class_='product-price form-group').text.strip()
date = pdate.split(':')[2].split(' ')[1]
except:
date = ''
data = {'number':number,
'article':article,
'title':title,
'price':price,
'sizes':sizes,
'color':color,
'facture':facture,
'date':date,
'url':url_g}
write_csv(data)
i += 1
def main():
url = 'https://sadovod.city/category/66'
total_pages = get_total_pages(get_html(url, 1))
    for i in range(1, total_pages + 1):  # include the last page
print(i)
parse(get_html(url, i))
if __name__ == '__main__':
main()
|
[
"avto727@bk.ru"
] |
avto727@bk.ru
|
5570e3a8fbc46a067126b5eb098b3395a917270a
|
72f55c6dd922f36caa22fd14df4ba081f0c647bd
|
/onnxruntime/python/tools/quantization/registry.py
|
26c7daf299e046762ce87789bcc781e3dda29862
|
[
"MIT"
] |
permissive
|
mrshu/onnxruntime
|
dd78a19d878ca2307190267bef6a8dc612f03b11
|
335edaa2c485ba0dec877bf4cdbd652e2d5d105c
|
refs/heads/master
| 2023-03-29T18:00:02.136409
| 2021-03-17T17:30:06
| 2021-03-17T17:30:06
| 348,849,243
| 0
| 0
|
MIT
| 2021-03-17T20:54:03
| 2021-03-17T20:54:02
| null |
UTF-8
|
Python
| false
| false
| 2,300
|
py
|
from .quant_utils import QuantizationMode
from .operators.base_operator import QuantOperatorBase
from .operators.qdq_base_operator import QDQOperatorBase
from .operators.matmul import MatMulInteger, QLinearMatMul
from .operators.attention import AttentionQuant
from .operators.embed_layernorm import EmbedLayerNormalizationQuant
from .operators.gather import GatherQuant
from .operators.conv import QLinearConv, ConvInteger, QDQConv
from .operators.activation import QLinearActivation, QDQRemovableActivation
from .operators.binary_op import QLinearBinaryOp
from .operators.maxpool import QMaxPool
from .operators.gavgpool import QGlobalAveragePool
from .operators.lstm import LSTMQuant
from .operators.split import QSplit
from .operators.pad import QPad
from .operators.reshape import ReshapeQuant
CommonOpsRegistry = {"Gather": GatherQuant,
"EmbedLayerNormalization": EmbedLayerNormalizationQuant,
"Reshape": ReshapeQuant}
IntegerOpsRegistry = {
"Conv": ConvInteger,
"MatMul": MatMulInteger,
"Attention": AttentionQuant,
"LSTM": LSTMQuant,
}
IntegerOpsRegistry.update(CommonOpsRegistry)
QLinearOpsRegistry = {
"Conv": QLinearConv,
"MatMul": QLinearMatMul,
"Add": QLinearBinaryOp,
"Mul": QLinearBinaryOp,
"Relu": QLinearActivation,
"Clip": QLinearActivation,
"LeakyRelu": QLinearActivation,
"Sigmoid": QLinearActivation,
"MaxPool": QMaxPool,
"GlobalAveragePool": QGlobalAveragePool,
"Split": QSplit,
"Pad": QPad,
}
QLinearOpsRegistry.update(CommonOpsRegistry)
QDQRegistry = {
"Conv": QDQConv,
"Clip": QDQRemovableActivation,
"Relu": QDQRemovableActivation,
}
def CreateDefaultOpQuantizer(onnx_quantizer, node):
return QuantOperatorBase(onnx_quantizer, node)
def CreateOpQuantizer(onnx_quantizer, node):
registry = IntegerOpsRegistry if onnx_quantizer.mode == QuantizationMode.IntegerOps else QLinearOpsRegistry
if node.op_type in registry.keys():
return registry[node.op_type](onnx_quantizer, node)
return QuantOperatorBase(onnx_quantizer, node)
def CreateQDQQuantizer(onnx_quantizer, node):
if node.op_type in QDQRegistry.keys():
return QDQRegistry[node.op_type](onnx_quantizer, node)
return QDQOperatorBase(onnx_quantizer, node)
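# Dispatch sketch (the quantizer and node objects are hypothetical; QuantOperatorBase is
# the fallback when an op type has no registered quantizer):
#   op_quantizer = CreateOpQuantizer(onnx_quantizer, node)  # e.g. ConvInteger or QLinearConv for "Conv"
#   op_quantizer.quantize()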
|
[
"noreply@github.com"
] |
mrshu.noreply@github.com
|
1c60b60e915d1cbcec983ace5b6bf441fb4fd2da
|
2dbb33661981c6dd0395f94b72640373afaaec9e
|
/test_vgg16.py
|
629786b1f633a04c349e389c04082bb038f4287a
|
[] |
no_license
|
drewlinsley/causal_clicktionary
|
7dfe31e118665a2bcd6ed60559c581f2ddcafa56
|
09b1eb2439d6ad2ae32e4b121a9909f562c34423
|
refs/heads/master
| 2020-12-30T15:54:45.032415
| 2017-06-12T18:41:22
| 2017-06-12T18:41:22
| 90,798,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,628
|
py
|
import os
import time
import re
import sys
import tensorflow as tf
import numpy as np
from argparse import ArgumentParser
from ops.data_loader import inputs
from settings import config
from models import baseline_vgg16 as vgg16
from glob import glob
from ops.tf_loss import class_accuracy
from tqdm import tqdm
# Train or finetune a vgg16 while cuing to clickme
def test_vgg16(simulate_subjects=0):
dbc = config.config()
validation_pointer = os.path.join(
dbc.packaged_data_path, '%s_%s.%s' % (
'validation',
dbc.packaged_data_file,
dbc.output_format))
# Prepare data on CPU
with tf.device('/cpu:0'):
val_images, val_labels, val_files = inputs(
tfrecord_file=validation_pointer,
batch_size=dbc.validation_batch,
im_size=dbc.validation_image_size,
model_input_shape=dbc.model_image_size[:2],
num_epochs=1,
data_augmentations=dbc.validation_augmentations,
shuffle_batch=True)
# Prepare pretrained model on GPU
with tf.device('/gpu:0'):
with tf.variable_scope('cnn'):
cnn = vgg16.Vgg16()
validation_mode = tf.Variable(False, name='training')
cnn.build(
val_images, output_shape=1000,
train_mode=validation_mode)
sample_layer = cnn['fc7']
accs = class_accuracy(cnn.prob, val_labels)
saver = tf.train.Saver(
tf.all_variables(), max_to_keep=10)
# Initialize the graph
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.group(tf.initialize_all_variables(),
tf.initialize_local_variables()))
# Set up exemplar threading
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
saver.restore(sess, dbc.model_types['vgg16'][0])
# Start training loop
results = {
'accs': [],
'preds': [],
'labs': [],
'files': []
}
np_path = os.path.join(
dbc.checkpoint_directory, 'validation_results')
step = 0
scores, labels = [], []
    try:
        print 'Testing model'
        while not coord.should_stop():
            start_time = time.time()
            score, lab, f, probs, acc = sess.run(
                [sample_layer, val_labels, val_files, cnn['prob'], accs])
            scores += [score]
            results['accs'] += [acc]
            results['preds'] += [probs]
            results['labs'] += [lab]
            results['files'] += [f]
            print acc
except tf.errors.OutOfRangeError:
print 'Done testing.'
finally:
np.savez(np_path, **results)
print 'Saved to: %s' % np_path
coord.request_stop()
coord.join(threads)
sess.close()
print '%.4f%% correct' % np.mean(results['accs'])
if simulate_subjects:
sim_subs = []
print 'Simulating subjects'
scores = np.concatenate(scores)
labels = np.concatenate(results['labs'])
for sub in tqdm(range(simulate_subjects)):
it_results = {
'accs': [],
'preds': [],
'labs': [],
'files': []
}
neuron_drop = np.random.rand(scores.shape[1]) > .95
it_scores = np.copy(scores)
it_scores[:, neuron_drop] = 0
            pred = svc.predict(it_scores)  # svc: a classifier fit on the fc7 features (not defined in this script)
acc = np.mean(pred == labels)
it_results['accs'] += [acc]
it_results['preds'] += [pred]
it_results['labs'] += [labels]
it_results['files'] += [np.concatenate(results['files'])]
sim_subs += [it_results]
np.save(np_path + '_sim_subs', sim_subs)
if __name__ == '__main__':
test_vgg16()
|
[
"drewlinsley@gmail.com"
] |
drewlinsley@gmail.com
|
0e8e463058c35e6a3e9ce2433aee64d9bacc0aaa
|
431ade33986e3294f1bf793ef1aa590b15721a84
|
/apps/members/views.py
|
079457b0f6fe3db44991925d7e773c3d004fb95f
|
[] |
no_license
|
seun-otosho/DjangoProject
|
f7332386e664058ff40df351ee08730f8d13cce2
|
6c82a0b0d4bad5a3bbfc1cb4daf222c5373427dd
|
refs/heads/master
| 2023-07-16T06:18:23.082721
| 2021-09-05T03:09:42
| 2021-09-05T03:09:42
| 402,141,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
"""Members app views"""
import json
from typing import Dict, Any
from django.forms.models import BaseModelForm
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
from django.views.decorators.http import require_http_methods
from django.views.generic import ListView, CreateView, DetailView
from django_filters.views import FilterView
from apps.members.filters import InstitutionFilter
from apps.members.forms import InstitutionCreateForm
from apps.members.models import Institution
class InstitutionListView(ListView):
model = Institution
def get_context_data(self, **kwargs: Dict[str, Any]) -> Dict[str, Any]:
return super().get_context_data(
form=InstitutionCreateForm(), filterset=InstitutionFilter, **kwargs
)
class InstitutionFilterView(FilterView):
filterset_class = InstitutionFilter
class InstitutionCreateView(CreateView):
model = Institution
form_class = InstitutionCreateForm
template_name = "members/institution_create_form.html"
def form_valid(self, form: BaseModelForm) -> HttpResponse:
institution = form.save()
response = HttpResponse()
response["HX-Trigger"] = json.dumps(
{"redirect": {"url": institution.get_absolute_url()}}
)
return response
class InstitutionDetailView(DetailView):
model = Institution
def display_institutions(request):
institutions = Institution.objects.all()
return render(request, 'display_institutions.html', {'institutions': institutions})
@require_http_methods(['DELETE'])
def delete_institution(request, id):
Institution.objects.filter(id=id).delete()
institutions = Institution.objects.all()
return render(request, 'members_list.html', {'institutions': institutions})
|
[
"toloruntotosho@gmail.com"
] |
toloruntotosho@gmail.com
|
800168913aa297a0b123c6eb142f54bc47d5751d
|
bc263109d4ef4b5e6ff54f406c97156dea155558
|
/hash-tables/missing-number-two.py
|
198ebe45baad93079deb53910fbdb517b652817f
|
[] |
no_license
|
ipeksargin/data-structures-algorithms
|
0be048780f60174675d477b421d7cb2f8c376a56
|
73f4b519d849c44de2886e21776507d0e32349c3
|
refs/heads/master
| 2021-01-15T01:56:07.448018
| 2020-05-09T21:12:48
| 2020-05-09T21:12:48
| 242,839,908
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
# Time complexity is O(n), ignoring the final sort.
arrayOne = [7,2,5,4,6,3,5,3]
arrayTwo = [7,2,5,3,5,3]
def missingNumber(arrayOne, arrayTwo):
table = {}
#table is a dictionary and is created to store values of arrayOne.
for i in range(len(arrayOne)):
# if value already in the dictionary.
if(arrayOne[i] in table):
table[arrayOne[i]] = table[arrayOne[i]] + 1 # increase its value by one.
else:
table[arrayOne[i]] = 1 # else add it to dict and have value of one.
#for each value in arrayTwo decrease dict value count by 1
for j in range(len(arrayTwo)):
if(arrayTwo[j] in table):
table[arrayTwo[j]] = table[arrayTwo[j]] - 1
print(table)
# Check dict by keys to get missing numbers
keys = table.keys()
final = []
for x in keys:
if(table[x] > 0):
final.append(x)
#sort missing values
final.sort()
    # Keys whose count is still greater than zero are the missing values.
print("Missing elements are",final)
missingNumber(arrayOne, arrayTwo)
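# An equivalent sketch using collections.Counter (same O(n) counting idea):
#   from collections import Counter
#   diff = Counter(arrayOne) - Counter(arrayTwo)
#   print(sorted(diff.elements()))  # [4, 6] for the arrays above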
|
[
"noreply@github.com"
] |
ipeksargin.noreply@github.com
|
fbf27b9ff9f266361e7be470de1a3b47aba8c336
|
e925d35110a2a7ff8579f97eb673f24f98bf63c7
|
/misc_tools.py
|
1595c4b310d93894b52d65a73e4ee5e2c0175c12
|
[] |
no_license
|
SiweiGong/mobile_robot_framework
|
b0918bba16cb2898a0f69f4e854c6a4fe3c6e0d6
|
aca7e56152069a970a07288178d6be998b1f7938
|
refs/heads/master
| 2020-12-02T17:27:45.146574
| 2020-02-01T08:37:20
| 2020-02-01T08:37:20
| 231,074,491
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,468
|
py
|
import math
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
def angle_diff(angle1, angle2):
return np.arctan2(np.sin(angle1-angle2), np.cos(angle1-angle2))
def error_ellipse(position, sigma):
covariance = sigma[0:2,0:2]
eigenvals, eigenvecs = np.linalg.eig(covariance)
#get largest eigenvalue and eigenvector
max_ind = np.argmax(eigenvals)
max_eigvec = eigenvecs[:,max_ind]
max_eigval = eigenvals[max_ind]
#get smallest eigenvalue and eigenvector
min_ind = 0
if max_ind == 0:
min_ind = 1
min_eigvec = eigenvecs[:,min_ind]
min_eigval = eigenvals[min_ind]
#chi-square value for sigma confidence interval
chisquare_scale = 2.2789
#calculate width and height of confidence ellipse
width = 2 * np.sqrt(chisquare_scale*max_eigval)
height = 2 * np.sqrt(chisquare_scale*min_eigval)
angle = np.arctan2(max_eigvec[1],max_eigvec[0])
#generate covariance ellipse
error_ellipse = Ellipse(xy=[position[0],position[1]], width=width, height=height, angle=angle/np.pi*180)
error_ellipse.set_alpha(0.25)
return error_ellipse
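# Usage sketch (a hypothetical 3x3 pose covariance; only the x-y block is used):
#   sigma = np.diag([0.04, 0.01, 0.1])
#   ellipse = error_ellipse([2.0, 3.0], sigma)
#   plt.gca().add_artist(ellipse)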
def plot_state(particles, landmarks):
# Visualizes the state of the particle filter.
#
# Displays the particle cloud, mean position and
# estimated mean landmark positions and covariances.
draw_mean_landmark_poses = False
map_limits = [-1, 12, 0, 10]
#particle positions
xs = []
ys = []
#landmark mean positions
lxs = []
lys = []
for particle in particles:
xs.append(particle['x'])
ys.append(particle['y'])
for i in range(len(landmarks)):
landmark = particle['landmarks'][i+1]
lxs.append(landmark['mu'][0])
lys.append(landmark['mu'][1])
# ground truth landmark positions
lx=[]
ly=[]
for i in range (len(landmarks)):
lx.append(landmarks[i+1][0])
ly.append(landmarks[i+1][1])
# best particle
estimated = best_particle(particles)
robot_x = estimated['x']
robot_y = estimated['y']
robot_theta = estimated['theta']
# estimated traveled path of best particle
hist = estimated['history']
hx = []
hy = []
for pos in hist:
hx.append(pos[0])
hy.append(pos[1])
# plot filter state
plt.clf()
#particles
plt.plot(xs, ys, 'r.')
if draw_mean_landmark_poses:
# estimated mean landmark positions of each particle
plt.plot(lxs, lys, 'b.')
# estimated traveled path of best particle
plt.plot(hx, hy, 'r-')
# true landmark positions
plt.plot(lx, ly, 'b+',markersize=10)
# draw error ellipse of estimated landmark positions of best particle
for i in range(len(landmarks)):
landmark = estimated['landmarks'][i+1]
ellipse = error_ellipse(landmark['mu'], landmark['sigma'])
plt.gca().add_artist(ellipse)
# draw pose of best particle
plt.quiver(robot_x, robot_y, np.cos(robot_theta), np.sin(robot_theta), angles='xy',scale_units='xy')
plt.axis(map_limits)
plt.pause(0.01)
def best_particle(particles):
#find particle with highest weight
highest_weight = 0
best_particle = None
for particle in particles:
if particle['weight'] > highest_weight:
best_particle = particle
highest_weight = particle['weight']
return best_particle
|
[
"gongsiwei1993@gmail.com"
] |
gongsiwei1993@gmail.com
|
91c7da877e877547098bf3d946bea2e2c5591e98
|
76f456fd719363695384347e88e482f3c3e6b756
|
/site_scons/site_tools/compilation_db.py
|
be040a84d0e71cdfab2d3b8ae157f773f5bb6751
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
Silentd00m/lz4_stream
|
11425866735d97cd259f3fbd19cde04b8c93dcb1
|
dd7eaf580338bb15502b34987672eec87bbfd6a6
|
refs/heads/master
| 2021-09-01T12:52:28.308433
| 2017-12-27T03:31:32
| 2017-12-27T03:31:32
| 115,476,783
| 0
| 0
| null | 2017-12-27T03:20:48
| 2017-12-27T03:20:47
| null |
UTF-8
|
Python
| false
| false
| 5,702
|
py
|
# Copyright 2015 MongoDB Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import SCons
import itertools
# Implements the ability for SCons to emit a compilation database for the MongoDB project. See
# http://clang.llvm.org/docs/JSONCompilationDatabase.html for details on what a compilation
# database is, and why you might want one. The only user visible entry point here is
# 'env.CompilationDatabase'. This method takes an optional 'target' to name the file that
# should hold the compilation database, otherwise, the file defaults to compile_commands.json,
# which is the name that most clang tools search for by default.
# TODO: Is there a better way to do this than this global? Right now this exists so that the
# emitter we add can record all of the things it emits, so that the scanner for the top level
# compilation database can access the complete list, and also so that the writer has easy
# access to write all of the files. But it seems clunky. How can the emitter and the scanner
# communicate more gracefully?
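# Example usage from an SConstruct (a sketch; assumes this file is discoverable as the
# 'compilation_db' tool under site_scons/site_tools):
#
#   env = Environment(tools=['default', 'compilation_db'])
#   compile_db = env.CompilationDatabase('compile_commands.json')
#   env.Alias('compiledb', compile_db)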
__COMPILATION_DB_ENTRIES=[]
# We make no effort to avoid rebuilding the entries. Someday, perhaps we could and even
# integrate with the cache, but there doesn't seem to be much call for it.
class __CompilationDbNode(SCons.Node.Node):
def __init__(self):
SCons.Node.Node.__init__(self)
self.Decider(changed_since_last_build_node)
def changed_since_last_build_node(node, target, prev_ni):
return True
def makeEmitCompilationDbEntry(comstr):
user_action = SCons.Action.Action(comstr)
def EmitCompilationDbEntry(target, source, env):
dbtarget = __CompilationDbNode()
entry = env.__COMPILATIONDB_Entry(
target=dbtarget,
source=[],
__COMPILATIONDB_UTARGET=target,
__COMPILATIONDB_USOURCE=source,
__COMPILATIONDB_UACTION=user_action,
__COMPILATIONDB_ENV=env)
# TODO: Technically, these next two lines should not be required: it should be fine to
# cache the entries. However, they don't seem to update properly. Since they are quick
# to re-generate disable caching and sidestep this problem.
env.AlwaysBuild(entry)
env.NoCache(entry)
__COMPILATION_DB_ENTRIES.append(dbtarget)
return target, source
return EmitCompilationDbEntry
def CompilationDbEntryAction(target, source, env, **kw):
command = env['__COMPILATIONDB_UACTION'].strfunction(
target=env['__COMPILATIONDB_UTARGET'],
source=env['__COMPILATIONDB_USOURCE'],
env=env['__COMPILATIONDB_ENV'],)
entry = {
"directory": env.Dir('#').abspath,
"command": command,
"file": str(env['__COMPILATIONDB_USOURCE'][0])
}
setattr(target[0], '__COMPILATION_DB_ENTRY', entry)
def WriteCompilationDb(target, source, env):
entries = []
for s in __COMPILATION_DB_ENTRIES:
entries.append(getattr(s, '__COMPILATION_DB_ENTRY'))
with open(str(target[0]), 'w') as target_file:
json.dump(entries, target_file,
sort_keys=True,
indent=4,
separators=(',', ': '))
def ScanCompilationDb(node, env, path):
return __COMPILATION_DB_ENTRIES
def generate(env, **kwargs):
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
# TODO: Is there a way to obtain the configured suffixes for C and C++
# from the existing obj builders? Seems unfortunate to re-iterate them.
CSuffixes = ['.c']
CXXSuffixes = ['.cc', '.cxx', '.cpp']
env['COMPILATIONDB_COMSTR'] = kwargs.get(
'COMPILATIONDB_COMSTR', 'Building compilation database $TARGET')
components_by_suffix = itertools.chain(
itertools.product(CSuffixes, [
(static_obj, SCons.Defaults.StaticObjectEmitter, '$CCCOM'),
(shared_obj, SCons.Defaults.SharedObjectEmitter, '$SHCCCOM'),
]),
itertools.product(CXXSuffixes, [
(static_obj, SCons.Defaults.StaticObjectEmitter, '$CXXCOM'),
(shared_obj, SCons.Defaults.SharedObjectEmitter, '$SHCXXCOM'),
]),
)
for entry in components_by_suffix:
suffix = entry[0]
builder, base_emitter, command = entry[1]
builder.add_emitter(
suffix, SCons.Builder.ListEmitter(
[
makeEmitCompilationDbEntry(command),
base_emitter,
]
))
env['BUILDERS']['__COMPILATIONDB_Entry'] = SCons.Builder.Builder(
action=SCons.Action.Action(CompilationDbEntryAction, None),
)
env['BUILDERS']['__COMPILATIONDB_Database'] = SCons.Builder.Builder(
action=SCons.Action.Action(WriteCompilationDb, "$COMPILATIONDB_COMSTR"),
target_scanner=SCons.Scanner.Scanner(
function=ScanCompilationDb,
node_class=None)
)
def CompilationDatabase(env, target):
result = env.__COMPILATIONDB_Database(
target=target,
source=[])
env.AlwaysBuild(result)
env.NoCache(result)
return result
env.AddMethod(CompilationDatabase, 'CompilationDatabase')
def exists(env):
return True
|
[
"laudrup@stacktrace.dk"
] |
laudrup@stacktrace.dk
|
28c7ad7cfbee7406630c4df8f7f5f21f5dc1b433
|
e3a2b6499a8f094f18366b1183448925728a206f
|
/test/integration/simio_tables.py
|
bc0aac20c23eb6999b98017749cdb60f02922194
|
[
"MIT"
] |
permissive
|
mielgosez/simio-lisa
|
01b394ab7ce333ad3b1d72a5ea9b1a518a253c2f
|
c0eb8d4e2a44e9cf4ed7bd7af4ab5cbff17daebc
|
refs/heads/main
| 2023-08-16T01:47:15.605192
| 2021-09-30T16:22:13
| 2021-09-30T16:22:13
| 400,244,116
| 0
| 0
|
MIT
| 2021-09-30T16:22:35
| 2021-08-26T16:58:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,199
|
py
|
import os
from simio_lisa.simio_tables import SimioTables
def test_smoke_output_tables():
env_project_path = os.environ['SIMIOPROJECTPATH']
env_project_file = os.environ['SIMIOPROJECTNAME']
env_model_name = os.environ['MODELNAME']
env_export_dir = os.environ['EXPORTDIR']
if not os.path.exists(env_export_dir):
os.mkdir(env_export_dir)
simio_tables = SimioTables(path_to_project=env_project_path,
model_file_name=env_project_file,
model_name=env_model_name)
simio_tables.load_output_tables()
for table_name, table_df in simio_tables.output_tables.items():
print(os.path.join(env_export_dir, f'{table_name}.csv'))
try:
for col_name, col_type in table_df.dtypes.items():
if col_type.name == 'datetime64[ns]':
table_df[col_name] = table_df[col_name].dt.strftime('%d-%m-%Y %X')
table_df.to_csv(os.path.join(env_export_dir, f'{table_name}.csv'), index=False, decimal='.')
except AttributeError:
print("This was empty")
assert True
def test_smoke_input_tables():
env_project_path = os.environ['SIMIOPROJECTPATH']
env_project_file = os.environ['SIMIOPROJECTNAME']
env_model_name = os.environ['MODELNAME']
env_export_dir = os.environ['EXPORTDIR']
if not os.path.exists(env_export_dir):
os.mkdir(env_export_dir)
simio_tables = SimioTables(path_to_project=env_project_path,
model_file_name=env_project_file,
model_name=env_model_name)
simio_tables.load_input_tables()
for table_name, table_df in simio_tables.input_tables.items():
print(os.path.join(env_export_dir, f'{table_name}.csv'))
try:
for col_name, col_type in table_df.dtypes.items():
if col_type.name == 'datetime64[ns]':
table_df[col_name] = table_df[col_name].dt.strftime('%d-%m-%Y %X')
table_df.to_csv(os.path.join(env_export_dir, f'{table_name}.csv'), index=False, decimal='.')
except AttributeError:
print("This was empty")
assert True
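# Environment variables these smoke tests expect (the values below are hypothetical):
#   os.environ['SIMIOPROJECTPATH'] = r'C:\projects\simio'
#   os.environ['SIMIOPROJECTNAME'] = 'MyProject.spfx'
#   os.environ['MODELNAME'] = 'Model'
#   os.environ['EXPORTDIR'] = r'C:\projects\simio\export'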
|
[
"mielgosez@gmail.com"
] |
mielgosez@gmail.com
|
0cdbf24a9061d6ee5c34d89b548326daf6223a2b
|
1446727878165dbce3fa447922f5ba7de93ca700
|
/rewrite_handler/parseFiles.py
|
83289ed600942a5dbf77088a80541fc9231f5f56
|
[] |
no_license
|
ssrg-vt/sail2pvs
|
3df218d17ad8e7a0043ade0f37be0642bcf780cf
|
7d0c19440f375fd7bffee651254e5e20370f1bd6
|
refs/heads/master
| 2020-05-09T08:30:02.595080
| 2019-04-12T11:34:04
| 2019-04-12T11:34:04
| 180,994,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,687
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import copy
from . import util
from . import parseDatatype
from . import parseFunction
from typing import List, Tuple
S_STATE, S_DATATYPE = range(2)
function_dict = {}
type_dict = {}
type_dict['list'] = (['T'], {'cons': '[T,list[T]]', 'null': ''})
file_dict = {}
def gen_deps(dep_dict):
mod_list = list(dep_dict.keys())
mod_num = len(mod_list)
module_list = []
while len(module_list) < mod_num:
for mod in mod_list:
dep_list = list(dep_dict[mod])
if not dep_list and mod not in module_list:
module_list.append(mod)
dep_dict[mod] = util.diff_list(dep_list, module_list)
return module_list
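# Sketch of the expected ordering (assuming util.diff_list returns the items of its first
# argument that are not in its second):
#   gen_deps({'a': [], 'b': ['a'], 'c': ['a', 'b']})  ->  ['a', 'b', 'c']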
def gen_dep_for_src_files(module_dict, src_data, lib_name):
modules = list(module_dict.keys())
new_mod_dict = copy.deepcopy(module_dict)
file_list = []
dep_dict = {}
for module in modules:
dep_dict[module] = []
src = src_data[module]
modules = re.findall(util.importPat, src)
for line in modules:
module_name = line.strip()
if module_name in list(new_mod_dict.keys()):
dep_dict[module].append(module_name)
module_list = gen_deps(dep_dict)
for mod in module_list:
file_list.append(new_mod_dict[mod])
file_dict[lib_name] = file_list
# Check whether the library path is valid
# If so, generate dependency relations between different files under the library
def gen_lib_deps(lib_path: str):
lib_name = lib_path.split('/')[-1]
if not os.path.isdir(lib_path):
        print('There is no library named ' + lib_name)
return
src_data = {}
module_dict = {}
files = [f for f in os.listdir(lib_path) if f.endswith('.pvs') and not f.endswith('_adt.pvs')]
for file_name in files:
theory_name = file_name.split('.')[0]
module_dict[theory_name] = file_name
with open(os.path.join(lib_path, file_name)) as f:
src_data[theory_name] = f.read()
gen_dep_for_src_files(module_dict, src_data, lib_name)
def parse_file(file_path):
updated_content = ''
with open(file_path) as f:
lines = f.readlines()
var_dict = {}
type_dec = []
state = S_STATE
for line in lines:
if line.startswith('%') or 'LEMMA' in line:
pass
elif state == S_STATE:
if util.varPat.search(line.replace(' ', '')):
updated_content += line
decs = line.split(':')
variables = decs[0].replace(' ', '').split(',')
util.logger.debug('declarations:')
util.logger.debug(line)
vtype = decs[1].split("VAR")[1]
for v in variables:
var_dict[v] = vtype.replace(' ', '')
elif 'DATATYPE' in line:
updated_content += line
type_dec.clear()
type_dec.append(line)
state = S_DATATYPE
elif ':TYPE=' in line.replace(' ', '') or ':TYPE+=' in line.replace(' ', '') or ':TYPE\n' in line.replace(' ', '') or (': THEORY' in line and ']' in line) or (util.constPat.search(line) and '->' not in line and 'THEORY' not in line and 'LAMBDA' not in line and not line.endswith(',\n') and '(#' not in line):
pass
elif (util.pvsDefPat.search(line) or util.constPat.search(line)) and ':' in line and '=' in line:
func_name, arg_list = parseFunction.process_line(var_dict, line)
function_dict[func_name] = arg_list
elif state == S_DATATYPE:
to_continue = True
if 'END ' in line:
name, (tvs, contents) = parseDatatype.parse_datatype(var_dict, type_dec)
type_dict[name] = (tvs, contents)
state = S_STATE
to_continue = False
if to_continue:
type_dec.append(line)
def infer_exp_type(exp, arg_list):
exp = exp.strip()
for arg in arg_list:
# If the expression is already in the argument type list
if exp == arg[util.u_name]:
return arg[util.u_annotation]
if util.functionAppPat.search(exp):
func_name = exp.split('(')[0].split('[')[0]
if func_name in function_dict.keys():
func_arg_list = function_dict[func_name]
ret_type = func_arg_list[-1][1]
return ret_type
elif exp.startswith('(') and exp.endswith(')'):
return '[' + exp.split('(', 1)[1].rsplit(')', 1)[0].strip() + ']'
return None
# Simple type inference
# exp: the expression that requires type information
# arg_list: current mapping from variables to their types
# output: type (tuple, function, datatype, or other), the annotation of type, datatype name (if it is a datatype)
def exp_type_string(exp, arg_list):
exp_type = infer_exp_type(exp, arg_list)
if not exp_type:
return None, None, None
exp_type_split = util.split_sep_bks(exp_type, '->')
if len(exp_type_split) > 1:
return util.t_function, exp_type, None
elif exp_type.startswith('['):
return util.t_tuple, exp_type, None
else:
e_name = exp_type.split('[')[0].strip()
if e_name in type_dict.keys():
return util.t_type, exp_type, e_name
return None, None, None
# Return the type variable and fields information of a datatype
def lookup_fields(dt_name: str) -> List[Tuple[str, str]]:
return type_dict[dt_name]
|
[
"xxan15@vt.edu"
] |
xxan15@vt.edu
|
4d6e1be9f6135ef80a446ec3e15a01ac3270fa2e
|
639130874cfa5ea1a696c8561b35e65f8d8c7778
|
/yeti/core/entities/incident.py
|
ff49cd1205433fab39e0d914fb4a063875568080
|
[
"Apache-2.0"
] |
permissive
|
yeti-platform/TibetanBrownBear
|
1e40ff7de97141f31f022cf4542a17fb64a0814c
|
902db77e0f9dce7b8870ed653a8f7670864d146d
|
refs/heads/master
| 2023-02-22T14:52:37.030298
| 2022-10-28T15:52:30
| 2022-10-28T15:52:30
| 114,285,124
| 11
| 11
|
Apache-2.0
| 2023-02-16T06:10:46
| 2017-12-14T18:49:32
|
Python
|
UTF-8
|
Python
| false
| false
| 832
|
py
|
"""Detail Yeti's incident object structure."""
from stix2 import CustomObject, properties
from .entity import Entity
@CustomObject('x-incident', [
('x_internal_references', properties.ListProperty(properties.StringProperty)),
('name', properties.StringProperty()),
('description', properties.StringProperty()),
])
class StixIncident():
_collection_name = 'entities'
type = 'x-incident'
@property
def internal_references(self):
return self._stix_object.internal_references
class Incident(Entity):
"""Incident Yeti object."""
_collection_name = 'entities'
type = 'x-incident'
@property
def name(self):
return self._stix_object.name
@property
def description(self):
return self._stix_object.description
Entity.datatypes[Incident.type] = Incident
|
[
"tomchop@gmail.com"
] |
tomchop@gmail.com
|
62f1abdae4762392bf5b391e0b0155daf648a940
|
d181c2d529d0e01359c9a7f4d3d2de97bcb9ab42
|
/tutorial/spiders/nyu_spider.py
|
1266efb77a5e9d7bf72a53ab028a88141f60faf3
|
[] |
no_license
|
gerardoxia/crawMyprof
|
6c2ce98aecbf41c22e2c041a078a00f7080ab841
|
979e8dea99c16e25c656e503ef0228211c3a5f9a
|
refs/heads/master
| 2021-01-10T11:20:13.137789
| 2016-04-02T19:18:36
| 2016-04-02T19:18:36
| 53,879,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 907
|
py
|
import scrapy
import re
from tutorial.items import ProfessorItem
class nyu(scrapy.Spider):
name = "nyu"
start_urls = [
"http://www.cs.nyu.edu/dynamic/people/faculty/",
]
def parse(self, response):
for index in range(1,50):
str1=".//*[@id='wrap']/div/div/div[2]/div/div/div[3]/ul/li[%d]" % index
sel=response.xpath(str1)
item = ProfessorItem()
if(sel.css(".name a::text")):
item['name']=sel.css(".name a::text").extract()[0]
item['url']=sel.css(".name a::attr('href')").extract()[0]
else:
item['name']=sel.css(".name::text").extract()[0]
item['title']=sel.css(".title::text").extract()[0]
item['img']="http://www.cs.nyu.edu"+sel.css("img::attr('src')").extract()[0]
item['email']=sel.css(".info").re(r"Email:(.*)<br>")[0]
item['area']=sel.css(".info").re(r"<br> \n(.*?)\n")[1].strip()
yield item
|
[
"gerardo.xia@outlook.com"
] |
gerardo.xia@outlook.com
|
b3d228c100561deccceefc1d80a27d2f3d80c0f2
|
5362ab432a03da3951d77d5c829ae952790f7f23
|
/algm_682.py
|
bd3519ad5fa295d690f0400691e40c3c930d3c83
|
[] |
no_license
|
Xiweiy/leet
|
5db77f9d8684476160a5f6940f211735745f1007
|
77e2693275bda9a206671ec1d2679910a9fb0fda
|
refs/heads/master
| 2021-01-23T14:04:56.812917
| 2018-04-30T00:42:26
| 2018-04-30T00:42:26
| 58,281,993
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
class Solution(object):
def calPoints(self, ops):
history = []
total = 0
for i in ops:
try:
history.append(int(i))
except:
if i=="C":
history.pop()
elif i=="+":
history.append(sum(history[-2:]))
else:
history.append(history[-1]*2)
return sum(history)
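# A quick check of the scoring rules (example taken from LeetCode 682):
if __name__ == '__main__':
    print(Solution().calPoints(["5", "2", "C", "D", "+"]))  # expected: 30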
|
[
"xiweiyan@Siddharths-MacBook-Pro.local"
] |
xiweiyan@Siddharths-MacBook-Pro.local
|
bc11f57f9885605837ee9220756d23631744cd5f
|
877e698447f4371b5d43420fd281ecf60520f660
|
/pyproject/student_grade.py
|
792907e34abb269980b097c19ac239a5e93ae262
|
[] |
no_license
|
ankitm4/Contribute-
|
20e1069ae272adc387ba5601f534b4b95ea3c93c
|
4cfb5de30bfa1803ca156b139a359339f980ecb3
|
refs/heads/main
| 2023-08-16T05:34:17.718113
| 2021-10-07T12:58:39
| 2021-10-07T12:58:39
| 412,424,071
| 0
| 3
| null | 2021-10-05T16:12:05
| 2021-10-01T10:33:19
|
Python
|
UTF-8
|
Python
| false
| false
| 703
|
py
|
english = float(input(" Please enter English Marks: "))
math = float(input(" Please enter Math score: "))
computers = float(input(" Please enter Computer Marks: "))
physics = float(input(" Please enter Physics Marks: "))
chemistry = float(input(" Please enter Chemistry Marks: "))
total = english + math + computers + physics + chemistry
percentage = (total / 500) * 100
print("Total Marks = %.2f" %total)
print("Marks Percentage = %.2f" %percentage)
if(percentage >= 90):
print("A Grade")
elif(percentage >= 80):
print("B Grade")
elif(percentage >= 70):
print("C Grade")
elif(percentage >= 60):
print("D Grade")
elif(percentage >= 40):
print("E Grade")
else:
print("Fail")
|
[
"sabbugiri50@gmail.com"
] |
sabbugiri50@gmail.com
|
d30464daed017260d2c1af1c80a769545555f9dd
|
66cff6c4ad4c5fd6ecdfb723614f0475e27a5b38
|
/akshare/obor/cons.py
|
7193fe79b7a18b2fb8aed9523a6bb5d9526bab10
|
[
"MIT"
] |
permissive
|
ifzz/akshare
|
a862501b314f2b5aeab22af86771dbeee34cfdb8
|
70cf20680b580c8bacab55a0b7d792d06e299628
|
refs/heads/master
| 2022-12-02T18:36:33.754645
| 2020-08-24T05:16:42
| 2020-08-24T05:16:42
| 289,834,570
| 1
| 0
|
MIT
| 2020-08-24T05:17:09
| 2020-08-24T05:17:09
| null |
UTF-8
|
Python
| false
| false
| 8,220
|
py
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2019/9/30 13:58
Desc: Belt and Road (OBOR) configuration file
"""
countries_dict = {
'obor_report_all': [
'中国',
'蒙古国',
'韩国',
'新西兰',
'东帝汶',
'新加坡',
'马来西亚',
'泰国',
'印度尼西亚',
'菲律宾',
'文莱',
'柬埔寨',
'缅甸',
'老挝',
'越南',
'哈萨克斯坦',
'乌兹别克斯坦',
'土库曼斯坦',
'塔吉克斯坦',
'吉尔吉斯斯坦',
'格鲁吉亚',
'阿塞拜疆',
'亚美尼亚',
'伊朗',
'伊拉克',
'土耳其',
'叙利亚',
'约旦',
'黎巴嫩',
'以色列',
'巴勒斯坦',
'沙特阿拉伯',
'也门',
'阿曼',
'阿联酋',
'卡塔尔',
'科威特',
'巴林',
'印度',
'巴基斯坦',
'孟加拉',
'阿富汗',
'斯里兰卡',
'马尔代夫',
'尼泊尔',
'不丹',
'俄罗斯',
'乌克兰',
'白俄罗斯',
'摩尔多瓦',
'波兰',
'立陶宛',
'爱沙尼亚',
'拉脱维亚',
'捷克',
'斯洛伐克',
'匈牙利',
'斯洛文尼亚',
'克罗地亚',
'波黑',
'黑山',
'塞尔维亚',
'阿尔巴尼亚',
'罗马尼亚',
'保加利亚',
'马其顿',
'南非',
'摩洛哥',
'埃塞俄比亚',
'马达加斯加',
'巴拿马',
'埃及'],
'obor_report_plates_countries': {
'亚洲大洋洲地区': [
'中国',
'蒙古国',
'韩国',
'新西兰',
'东帝汶',
'新加坡',
'马来西亚',
'泰国',
'印度尼西亚',
'菲律宾',
'文莱',
'柬埔寨',
'缅甸',
'老挝',
'越南'],
'中亚地区': [
'哈萨克斯坦',
'乌兹别克斯坦',
'土库曼斯坦',
'塔吉克斯坦',
'吉尔吉斯斯坦'],
'西亚地区': [
'格鲁吉亚',
'阿塞拜疆',
'亚美尼亚',
'伊朗',
'伊拉克',
'土耳其',
'叙利亚',
'约旦',
'黎巴嫩',
'以色列',
'巴勒斯坦',
'沙特阿拉伯',
'也门',
'阿曼',
'阿联酋',
'卡塔尔',
'科威特',
'巴林'],
'南亚地区': [
'印度',
'巴基斯坦',
'孟加拉',
'阿富汗',
'斯里兰卡',
'马尔代夫',
'尼泊尔',
'不丹'],
'东欧地区': [
'俄罗斯',
'乌克兰',
'白俄罗斯',
'摩尔多瓦',
'波兰',
'立陶宛',
'爱沙尼亚',
'拉脱维亚',
'捷克',
'斯洛伐克',
'匈牙利',
'斯洛文尼亚',
'克罗地亚',
'波黑',
'黑山',
'塞尔维亚',
'阿尔巴尼亚',
'罗马尼亚',
'保加利亚',
'马其顿'],
'非洲及拉美地区': [
'南非',
'摩洛哥',
'埃塞俄比亚',
'马达加斯加',
'巴拿马',
'埃及']},
'invest_countries_index_list': [
'中国',
'丹麦',
'乌克兰',
'乌干达',
'以色列',
'伊拉克',
'俄罗斯',
'保加利亚',
'克罗地亚',
'冰岛',
'加拿大',
'匈牙利',
'南非',
'博茨瓦纳',
'卡塔尔',
'卢旺达',
'卢森堡',
'印度',
'印度尼西亚',
'厄瓜多尔',
'台湾',
'哈萨克斯坦',
'哥伦比亚',
'哥斯达黎加',
'土耳其',
'坦桑尼亚',
'埃及',
'塞尔维亚',
'塞浦路斯',
'墨西哥',
'奥地利',
'委内瑞拉',
'孟加拉国',
'尼日利亚',
'巴勒斯坦领土',
'巴基斯坦',
'巴林',
'巴西',
'希腊',
'德国',
'意大利',
'拉脱维亚',
'挪威',
'捷克',
'摩洛哥',
'斯洛伐克',
'斯洛文尼亚',
'斯里兰卡',
'新加坡',
'新西兰',
'日本',
'智利',
'比利时',
'毛里求斯',
'沙特阿拉伯',
'法国',
'波兰',
'波黑',
'泰国',
'津巴布韦',
'澳大利亚',
'爱尔兰',
'爱沙尼亚',
'牙买加',
'瑞典',
'瑞士',
'科威特',
'科特迪亚',
'秘鲁',
'突尼斯',
'立陶宛',
'约旦',
'纳米比亚',
'罗马尼亚',
'美国',
'肯尼亚',
'芬兰',
'英国',
'荷兰',
'菲律宾',
'葡萄牙',
'蒙古',
'西班牙',
'赞比亚',
'越南',
'阿拉伯联合酋长国',
'阿曼',
'阿根廷',
'韩国',
'香港',
'马尔他',
'马拉维',
'马来西亚',
'黎巴嫩',
'黑山'
],
'report_map_invest': {
"中国": "中国",
"蒙古国": "蒙古",
"韩国": "韩国",
"新西兰": "新西兰",
"东帝汶": "",
"新加坡": "新加坡",
"马来西亚": "马来西亚",
"泰国": "泰国",
"印度尼西亚": "印度尼西亚",
"菲律宾": "菲律宾",
"文莱": "",
"柬埔寨": "",
"缅甸": "",
"老挝": "",
"越南": "越南",
"哈萨克斯坦": "哈萨克斯坦",
"乌兹别克斯坦": "",
"土库曼斯坦": "",
"塔吉克斯坦": "",
"吉尔吉斯斯坦": "",
"格鲁吉亚": "",
"阿塞拜疆": "",
"亚美尼亚": "",
"伊朗": "",
"伊拉克": "伊拉克",
"土耳其": "土耳其",
"叙利亚": "",
"约旦": "约旦",
"黎巴嫩": "黎巴嫩",
"以色列": "以色列",
"巴勒斯坦": "巴勒斯坦领土",
"沙特阿拉伯": "沙特阿拉伯",
"也门": "",
"阿曼": "阿曼",
"阿联酋": "阿拉伯联合酋长国",
"卡塔尔": "卡塔尔",
"科威特": "科威特",
"巴林": "巴林",
"印度": "印度",
"巴基斯坦": "巴基斯坦",
"孟加拉": "孟加拉国",
"阿富汗": "",
"斯里兰卡": "斯里兰卡",
"马尔代夫": "",
"尼泊尔": "",
"不丹": "",
"俄罗斯": "俄罗斯",
"乌克兰": "乌克兰",
"白俄罗斯": "",
"摩尔多瓦": "",
"波兰": "波兰",
"立陶宛": "立陶宛",
"爱沙尼亚": "爱沙尼亚",
"拉脱维亚": "拉脱维亚",
"捷克": "捷克",
"斯洛伐克": "斯洛伐克",
"匈牙利": "匈牙利",
"斯洛文尼亚": "斯洛文尼亚",
"克罗地亚": "克罗地亚",
"波黑": "波黑",
"黑山": "黑山",
"塞尔维亚": "塞尔维亚",
"阿尔巴尼亚": "",
"罗马尼亚": "罗马尼亚",
"保加利亚": "保加利亚",
"马其顿": "",
"南非": "南非",
"摩洛哥": "摩洛哥",
"埃塞俄比亚": "",
"马达加斯加": "",
"巴拿马": "",
"埃及": "埃及"
}
}
|
[
"jindaxiang@163.com"
] |
jindaxiang@163.com
|
663db3904d257f8945a91a4d06debc05572251ec
|
da622b0923c94e9fa6e52269492fb41b0c43df33
|
/climate_app.py
|
b78aaa69d0d438fbe06b3c47952c83e0369603f3
|
[] |
no_license
|
ronbfernandez/sqlalchemy-challenge
|
e7a00935600ca8fd4fe6d3dbc800f908e7f139ad
|
2f3d6187db218fb6c169dc2d4f3f9ea8deadca1a
|
refs/heads/main
| 2023-04-07T01:52:30.754464
| 2021-04-16T19:27:33
| 2021-04-16T19:27:33
| 357,422,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,847
|
py
|
######################
# Import dependencies
######################
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import datetime as dt
#Import Flask
from flask import Flask, redirect, jsonify
#######################
# Database Setup
#######################
# Create connection the sqllite
engine = create_engine("sqlite:///hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
#######################
# Flask Setup
#######################
app = Flask(__name__)
########################
# Flask Routes
########################
""" * Home page.
* List all routes that are available."""
@app.route("/")
def home():
print("Server received request for 'Home' page.")
return ("Welcome to the Surfs Up Weather API!<br><br>"
f"Available Routes:<br>"
f"/api/v1.0/precipitation<br>"
f"/api/v1.0/Station<br>"
f"/api/v1.0/tobs<br>"
f"/api/v1.0/(Y-M-D)<br>"
            f"/api/v1.0/(start=Y-M-D)/(end=Y-M-D)<br>"
)
""" * Convert the query results to a Dictionary using date as the key and prcp as the value.
    * Return the JSON representation of your dictionary."""
@app.route("/api/v1.0/precipitation")
def precipitation():
# Query all Measurements
results = session.query(Measurement).all()
#Close the Query
session.close()
# Create a dictionary using 'date' as the key and 'prcp' as the value.
year_prcp = []
for result in results:
year_prcp_dict = {}
year_prcp_dict["date"] = result.date
year_prcp_dict["prcp"] = result.prcp
year_prcp.append(year_prcp_dict)
# Jsonify summary
return jsonify(year_prcp)
""" * Return a JSON list of stations from the dataset."""
@app.route("/api/v1.0/Station")
def stations():
"""Return a list of all station names"""
# Query all stations
results = session.query(Station.station).all()
# Close the Query
session.close()
# Convert list of tuples into normal list
all_station = list(np.ravel(results))
# Jsonify summary
return jsonify(all_station)
""" * Query the dates and temperature observations of the most active station for the last year of data.
* Return a JSON list of Temperature Observations (tobs) for the previous year."""
@app.route("/api/v1.0/tobs")
def temperature():
    # Last date in the dataset (2017-08-23) minus one year
Last_Year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
# Query temperature observations
temperature_results = session.query(Measurement.tobs).filter(Measurement.date > Last_Year).all()
# Close the Query
session.close()
# Convert list of tuples into normal list
temperature_list = list(np.ravel(temperature_results))
# Jsonify summary
return jsonify(temperature_list)
""" * Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
* When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
* When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive."""
@app.route("/api/v1.0/<start>")
def single_date(start):
# Set up for user to enter date
Start_Date = dt.datetime.strptime(start,"%Y-%m-%d")
#Query Min, Max, and Avg based on date
summary_stats = session.query(func.min(Measurement.tobs),func.max(Measurement.tobs),func.round(func.avg(Measurement.tobs))).\
filter(Measurement.date >= Start_Date).all()
#Close the Query
session.close()
summary = list(np.ravel(summary_stats))
# Jsonify summary
return jsonify(summary)
# Same as above with the inclusion of an end date
@app.route("/api/v1.0/<start>/<end>")
def trip_dates(start,end):
# Setup for user to enter dates
Start_Date = dt.datetime.strptime(start, "%Y-%m-%d")
End_Date = dt.datetime.strptime(end,"%Y-%m-%d")
# Query Min, Max, and Avg based on dates
summary_stats = session.query(func.min(Measurement.tobs),func.max(Measurement.tobs),func.round(func.avg(Measurement.tobs))).\
filter(Measurement.date.between(Start_Date,End_Date)).all()
# Close the Query
session.close()
summary = list(np.ravel(summary_stats))
# Jsonify summary
return jsonify(summary)
if __name__ == "__main__":
app.run(debug=True)
|
[
"ronbfernandez@gmail.com"
] |
ronbfernandez@gmail.com
|
b208c49f7a5f1a04b28d5a11c3bd6d1d73cd42b2
|
3cd5e6706a7638bdc72750bfde977c4292e32d6f
|
/ch4/mysite/migrations/0003_product.py
|
9fa04dbd7d41b371effecca17126a39408dbfb3c
|
[] |
no_license
|
kairulin/Study-Django
|
149a86227ae4179da3026186ae634c3167b621c6
|
b478f96c349eb64286f05d2d567e510141c7575f
|
refs/heads/master
| 2022-09-30T06:40:41.687626
| 2019-07-14T14:17:31
| 2019-07-14T14:17:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
# Generated by Django 2.2.2 on 2019-07-01 09:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mysite', '0002_auto_20190701_1716'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sku', models.CharField(max_length=5)),
('name', models.CharField(max_length=20)),
('price', models.PositiveIntegerField()),
                ('size', models.CharField(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')], max_length=1)),
],
),
]
|
[
"10646033@ntub.edu.tw"
] |
10646033@ntub.edu.tw
|
77b6e017c7e358defb524581e88e75f906ae18f7
|
a097ecf40fee329cfa9e3f77e4b6e9e29a8f148a
|
/20_section/20_2.py
|
a6f1bce3a75f2d2201078b7309e5b1b92534cbb6
|
[] |
no_license
|
FumihisaKobayashi/The_self_taught_python
|
1e7008b17050db3e615c2f3aa68df2edc7f93192
|
329d376689029b75da73a6f98715cc7e83e8cc2c
|
refs/heads/master
| 2021-01-06T16:04:13.382955
| 2020-07-28T14:39:24
| 2020-07-28T14:39:24
| 241,389,313
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
import urllib.request
from bs4 import BeautifulSoup
class Scraper:
def __init__(self, site):
self.site = site
def scrape(self):
r = urllib.request.urlopen(self.site)
html = r.read()
        # urlopen() sends the HTTP request to the website.
        # Calling read() on the response returns the HTML data from the Response object.
|
[
"ruruku717@gmial.com"
] |
ruruku717@gmial.com
|
80d15851e50886a0953750ccc365cfa788a5dc34
|
dfac2f15641a99278ae2e8549362f9fe05cbd75b
|
/PSET 2/paying-Fixed.py
|
67c9849655c8e4950d81f76fd7ba6417fd9330fa
|
[] |
no_license
|
Srishruthik/MIT-6.001x
|
a9dc5cde6be91902a65dee486c955f370dba56ee
|
47731e6fc3937c0ff199928f15c534b11507d0ea
|
refs/heads/master
| 2022-12-12T07:16:18.052947
| 2020-09-08T22:40:19
| 2020-09-08T22:40:19
| 281,755,489
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
balance = 320000
annualInterestRate = 0.2
current_balance = balance
monthlyinterest = annualInterestRate / 12.0
lowerBound = balance / 12
upperBound = balance * ((1 + (annualInterestRate / 12.0)) ** 12) / 12
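# Bisection search: try the midpoint payment, simulate 12 months of interest,
# then tighten whichever bound the remaining balance rules out.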
while abs(balance) >= 0.01:
balance = current_balance
current_payment = (lowerBound + upperBound)/2.0
for i in range(12):
balance = balance - current_payment
balance += balance * monthlyinterest
if balance < 0:
upperBound = current_payment
elif balance > 0:
lowerBound = current_payment
else:
break
print("Lowest Payment: " + str(round(current_payment,2)))
|
[
"68618564+Srishruthik@users.noreply.github.com"
] |
68618564+Srishruthik@users.noreply.github.com
|
b7e07d3962e41d243b3037ca5f0f2db1acfe6d78
|
cc53c3d4df1feea254c44defac74fb5e86d8bbd8
|
/schedule/schedule/tasks.py
|
1aab91a1cafe8fa417b1f043b9127394e799f323
|
[] |
no_license
|
Maxbey/schedule-backend
|
2fd82ea7627ec7e7671b58425100c07bf3924245
|
5013520ea79a7a3aab5b1d2dddbe55dff380a133
|
refs/heads/develop
| 2020-04-05T09:16:48.377689
| 2017-10-05T13:54:06
| 2017-10-05T13:54:06
| 81,668,153
| 0
| 0
| null | 2017-10-05T13:54:07
| 2017-02-11T17:13:30
|
Python
|
UTF-8
|
Python
| false
| false
| 287
|
py
|
from datetime import datetime
from celery import shared_task
from .builder import ScheduleBuilder
@shared_task
def build_schedule(date, term_length):
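    # Parse the ISO-formatted date string and delegate schedule generation to ScheduleBuilder.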
date_instance = datetime.strptime(date, '%Y-%m-%d')
builder = ScheduleBuilder()
builder.build(date_instance, term_length)
|
[
"mbeiner@crystalnix.com"
] |
mbeiner@crystalnix.com
|
5b7111b5f571a5291d0d691459eb3795303df796
|
55b57d64ec547869835334318f3059fbb507558c
|
/Fred2/Data/pssms/tepitopepan/mat/DRB1_1491_9.py
|
529f9822dcb55f2f590e83101fb86aceb8378059
|
[
"BSD-3-Clause"
] |
permissive
|
FRED-2/Fred2
|
9845f6678d4011cb746c7a5a6f283eea68077a02
|
b3e54c8c4ed12b780b61f74672e9667245a7bb78
|
refs/heads/master
| 2021-07-12T05:05:54.515427
| 2020-05-25T06:56:25
| 2020-05-25T06:56:25
| 16,275,425
| 42
| 35
| null | 2021-07-07T12:05:11
| 2014-01-27T10:08:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,175
|
py
|
DRB1_1491_9 = {0: {'A': -999.0, 'E': -999.0, 'D': -999.0, 'G': -999.0, 'F': -0.98558, 'I': -0.014418, 'H': -999.0, 'K': -999.0, 'M': -0.014418, 'L': -0.014418, 'N': -999.0, 'Q': -999.0, 'P': -999.0, 'S': -999.0, 'R': -999.0, 'T': -999.0, 'W': -0.98558, 'V': -0.014418, 'Y': -0.98558}, 1: {'A': 0.0, 'E': 0.1, 'D': -1.3, 'G': 0.5, 'F': 0.8, 'I': 1.1, 'H': 0.8, 'K': 1.1, 'M': 1.1, 'L': 1.0, 'N': 0.8, 'Q': 1.2, 'P': -0.5, 'S': -0.3, 'R': 2.2, 'T': 0.0, 'W': -0.1, 'V': 2.1, 'Y': 0.9}, 2: {'A': 0.0, 'E': -1.2, 'D': -1.3, 'G': 0.2, 'F': 0.8, 'I': 1.5, 'H': 0.2, 'K': 0.0, 'M': 1.4, 'L': 1.0, 'N': 0.5, 'Q': 0.0, 'P': 0.3, 'S': 0.2, 'R': 0.7, 'T': 0.0, 'W': 0.0, 'V': 0.5, 'Y': 0.8}, 3: {'A': 0.0, 'E': -1.413, 'D': -0.59541, 'G': -1.0962, 'F': 0.089501, 'I': 0.63965, 'H': -0.27703, 'K': -0.40522, 'M': 0.75757, 'L': 0.73443, 'N': 0.051289, 'Q': -0.2262, 'P': -1.4623, 'S': -0.34089, 'R': -0.59039, 'T': -0.76496, 'W': -0.25713, 'V': 0.17454, 'Y': -0.54047}, 4: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 5: {'A': 0.0, 'E': -1.4124, 'D': -2.3809, 'G': -0.70879, 'F': -1.3948, 'I': 0.68888, 'H': -0.11719, 'K': 1.2505, 'M': -0.90135, 'L': 0.18327, 'N': -0.57408, 'Q': -0.31604, 'P': 0.49269, 'S': -0.085018, 'R': 0.9563, 'T': 0.81302, 'W': -1.3936, 'V': 1.1943, 'Y': -1.3991}, 6: {'A': 0.0, 'E': -0.79277, 'D': -1.2459, 'G': -0.7096, 'F': -0.15733, 'I': 0.066354, 'H': -0.47376, 'K': -0.82466, 'M': 0.67126, 'L': 0.33385, 'N': 0.0045172, 'Q': -0.361, 'P': -0.45654, 'S': -0.19575, 'R': -0.74293, 'T': -0.43948, 'W': -0.75274, 'V': -0.18667, 'Y': -0.43394}, 7: {'A': 0.0, 'E': 0.0, 'D': 0.0, 'G': 0.0, 'F': 0.0, 'I': 0.0, 'H': 0.0, 'K': 0.0, 'M': 0.0, 'L': 0.0, 'N': 0.0, 'Q': 0.0, 'P': 0.0, 'S': 0.0, 'R': 0.0, 'T': 0.0, 'W': 0.0, 'V': 0.0, 'Y': 0.0}, 8: {'A': 0.0, 'E': -0.90493, 'D': -1.0724, 'G': -0.2193, 'F': 0.057698, 'I': 0.22715, 'H': -0.2517, 'K': -0.30896, 'M': 0.44032, 'L': -0.40372, 'N': -0.9256, 'Q': 0.057376, 'P': -0.75109, 'S': 0.87213, 'R': -0.19283, 'T': -0.76762, 'W': -0.99887, 'V': -0.12038, 'Y': -0.27794}}
|
[
"schubert@informatik.uni-tuebingen.de"
] |
schubert@informatik.uni-tuebingen.de
|
1bb9536caf5d22ac9551ca39916696001cf2e8f3
|
c32a96bb06f46d4519f6db0ef6f4051491f007d3
|
/day_8/puzzle.py
|
eba7f668934f4d1d0d6a974915ee431ff5e72507
|
[] |
no_license
|
noMad1717/aoc_2020
|
ae01594a2b5a8483ad637957d4f8de1f449dfbae
|
4741eb74990f370c0b993ee94decacd6cfda6973
|
refs/heads/master
| 2023-02-10T06:05:55.718854
| 2020-12-30T19:02:36
| 2020-12-30T19:02:36
| 317,627,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
"""
Advent of Code 2020 - Day 8
"""
import util
def parseInstruction(inst):
parts = inst.split()
return parts[0], int(parts[1])
def executeInstruction(action, offset, nextInst, acc):
if action == 'nop':
return acc, nextInst + 1
elif action == 'jmp':
return acc, nextInst + offset
else:
return acc + offset, nextInst + 1
def partOne(data):
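    # Execute instructions until one is about to run a second time; return the accumulator at that point.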
executed = []
acc = nextInst = 0
while nextInst not in executed:
executed.append(nextInst)
action, offset = parseInstruction(data[nextInst])
acc, nextInst = executeInstruction(action, offset, nextInst, acc)
return acc
def partTwo(data):
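    # Flip one jmp/nop at a time; if the run loops, backtrack to the change point and try the next candidate.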
executed = []
acc = prevAcc = nextInst = prevInst = 0
changed = False
while nextInst < len(data):
executed.append(nextInst)
action, offset = parseInstruction(data[nextInst])
if action != 'acc' and changed == False:
prevAcc, prevInst, changed = acc, nextInst, True
action = 'nop' if action == 'jmp' else 'jmp'
acc, nextInst = executeInstruction(action, offset, nextInst, acc)
if nextInst in executed:
changed = False
executed = executed[0:executed.index(prevInst)]
action, offset = parseInstruction(data[prevInst])
acc, nextInst = executeInstruction(action, offset, prevInst, prevAcc)
return acc
bootCode = util.fileToStringList('input')
print('Part one: Acc = %d!' % partOne(bootCode))
print('Part two: Acc = %d!' % partTwo(bootCode))
|
[
"erik.jonsson17@gmail.com"
] |
erik.jonsson17@gmail.com
|
91cec64cdb46a995121bc7bb0615b910cd8f8f32
|
dda3bece634321edb4cfb6483c95eac83ca7cf35
|
/lutin_ege.py
|
7d34eb2a4b8f1534b292a524f901a4d08860f381
|
[
"Apache-2.0"
] |
permissive
|
BlenderCN-Org/ege
|
6a6bf47b5f1b5365f28320ef24541f1f6fdf111d
|
73558e63ec485e9e45015bf46ee4302f58fc8ebc
|
refs/heads/master
| 2020-05-23T01:11:27.473726
| 2016-10-23T13:02:20
| 2016-10-26T21:47:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,106
|
py
|
#!/usr/bin/python
import lutin.debug as debug
import lutin.tools as tools
def get_type():
return "LIBRARY"
def get_desc():
return "Ewol Game engine (based on bullet lib)"
def get_licence():
return "APACHE-2"
def get_compagny_type():
return "com"
def get_compagny_name():
return "atria-soft"
def get_maintainer():
return "authors.txt"
def get_version():
return "version.txt"
def configure(target, my_module):
my_module.add_src_file([
'ege/debug.cpp',
'ege/AudioElement.cpp',
'ege/AudioEngine.cpp',
'ege/camera/Camera.cpp',
'ege/camera/View.cpp',
'ege/camera/FPS.cpp',
'ege/CollisionShapeCreator.cpp',
'ege/physics/Engine.cpp',
'ege/elements/Element.cpp',
'ege/elements/ElementBase.cpp',
'ege/elements/ElementPhysic.cpp',
'ege/Particule.cpp',
'ege/ParticuleEngine.cpp',
'ege/ParticuleSimple.cpp',
'ege/widget/Mesh.cpp',
'ege/widget/Scene.cpp',
'ege/Environement.cpp',
'ege/resource/Mesh.cpp',
'ege/resource/MeshEmf.cpp',
'ege/resource/MeshGird.cpp',
'ege/resource/MeshCube.cpp',
'ege/resource/MeshObj.cpp',
'ege/resource/ParticuleMesh.cpp',
'ege/resource/tools/icoSphere.cpp',
'ege/resource/tools/isoSphere.cpp',
'ege/resource/tools/viewBox.cpp',
'ege/Light.cpp',
'ege/Material.cpp',
'ege/physicsShape/PhysicsShape.cpp',
'ege/physicsShape/PhysicsBox.cpp',
'ege/physicsShape/PhysicsCapsule.cpp',
'ege/physicsShape/PhysicsCone.cpp',
'ege/physicsShape/PhysicsConvexHull.cpp',
'ege/physicsShape/PhysicsCylinder.cpp',
'ege/physicsShape/PhysicsSphere.cpp',
'ege/Ray.cpp',
])
my_module.copy_path('data/ParticuleMesh.*')
my_module.add_depend(['ewol', 'bullet-physics'])
my_module.add_flag('c++', [
'-Wno-write-strings',
'-Wmissing-field-initializers',
'-Wall'])
my_module.add_header_file([
'ege/debug.hpp',
'ege/AudioElement.hpp',
'ege/AudioEngine.hpp',
'ege/camera/Camera.hpp',
'ege/camera/View.hpp',
'ege/camera/FPS.hpp',
'ege/CollisionShapeCreator.hpp',
'ege/physics/Engine.hpp',
'ege/elements/Element.hpp',
'ege/elements/ElementBase.hpp',
'ege/elements/ElementPhysic.hpp',
'ege/Particule.hpp',
'ege/ParticuleEngine.hpp',
'ege/ParticuleSimple.hpp',
'ege/widget/Mesh.hpp',
'ege/widget/Scene.hpp',
'ege/Environement.hpp',
'ege/resource/Mesh.hpp',
'ege/resource/ParticuleMesh.hpp',
'ege/resource/tools/icoSphere.hpp',
'ege/resource/tools/isoSphere.hpp',
'ege/resource/tools/viewBox.hpp',
'ege/resource/tools/Face.hpp',
'ege/resource/tools/FaceIndexing.hpp',
'ege/Light.hpp',
'ege/Material.hpp',
'ege/physicsShape/PhysicsShape.hpp',
'ege/physicsShape/PhysicsBox.hpp',
'ege/physicsShape/PhysicsCapsule.hpp',
'ege/physicsShape/PhysicsCone.hpp',
'ege/physicsShape/PhysicsConvexHull.hpp',
'ege/physicsShape/PhysicsCylinder.hpp',
'ege/physicsShape/PhysicsSphere.hpp',
'ege/Ray.hpp',
])
my_module.add_path(".")
return True
|
[
"yui.heero@gmail.com"
] |
yui.heero@gmail.com
|
653f76c90b37e1c287509ee72476693d05375c91
|
ceb8047b5c18a558eccf983ab6ac0b4340040f84
|
/OpenCV_Python/pillow_crop.py
|
80619d24cd5adcfad3053ba148e0fb97e634bef5
|
[] |
no_license
|
ksehan89/LinuxGUIEx
|
9f8b97d86d88308db84f8ebf2185434d92670927
|
9728712cc70cceac5707bf31b081aa32f0a29f10
|
refs/heads/master
| 2020-07-16T11:12:10.673977
| 2019-09-09T09:28:39
| 2019-09-09T09:28:39
| 205,778,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
from PIL import Image
# image open
im = Image.open('data/lena.jpg')
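# crop() takes a box as a (left, upper, right, lower) pixel tuple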
cropImage = im.crop((100, 100, 350, 350))
cropImage.show()
# save the cropped image as a .jpg file
cropImage.save('data/lena-crop.jpg')
|
[
"ksehan89@naver.com"
] |
ksehan89@naver.com
|
dda3cb403939ba5b2472fd6fcdc1003cb2dbcb42
|
4410bb34f5d210892fbdf225df2932892ad1d5c7
|
/params.py
|
6c4cda21c13bb572e96346821905b93fb3d8d983
|
[] |
no_license
|
hotpepper/CLION
|
5a3e45cdd6b5dc06362187c30fc13c4de5e531ff
|
8c2795d32edf55024cb8d87a5a93cd0444004034
|
refs/heads/master
| 2021-01-19T05:33:12.331720
| 2019-11-07T22:14:08
| 2019-11-07T22:14:08
| 87,434,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
from collections import defaultdict
__author__ = 'SHostetter'
# set up base globals
DB_HOST = ''  # database host (placeholder; value omitted in source)
DB_NAME = ''  # database name (placeholder; value omitted in source)
WORKING_SCHEMA = 'working' # 'working'
FINAL_SCHEMA = 'public'
ARCHIVE_SCHEMA = 'archive'
LION = 'lion'
NODE = 'node'
RPL = 'tbl_rpl'
RPL_TXT = 'RPL.txt'
VERSION = '18d' # '15b'
PRECINCTS = 'districts_police_precincts'
BOROUGHS = 'districts_boroughs'
HIGHWAYS = True
SRID = 2263
FOLDER = ''  # working folder (placeholder; value omitted in source)
# global dictionaries
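# Factory for defaultdict entries: a set of street names plus a master-node id slot.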
def st_name_factory():
return [set(), 0]
nodeStreetNames = defaultdict(st_name_factory)  # {node: [{street names}, masterid]}
nodeIsIntersection = {} # {node: True or False}
nodeNextSteps = defaultdict(lambda: defaultdict(set)) # {node: {street: fromNode, toNode}}
segmentBlocks = {} # {segmentID: fromMaster, toMaster}
nodeMaster = {} # {node: masterid}
masterNodes = defaultdict(list) # {masterid: [nodeid, nodeid, ...]}
clusterIntersections = defaultdict(st_name_factory) # {sorted-street-names: [set([nodes]), masterID]}
mfts = []
coordFromMaster = {}  # {master: [x, y]}
# minor datastores - can be deleted after use?
streetSet = []
mft1Dict = defaultdict(list) # mft: [segmentid, segmentid]
altGraph = defaultdict(list)
|
[
"aasere@gmail.com"
] |
aasere@gmail.com
|
600bcfc54ec52a7e9ed018b6588b6f80e757078a
|
ec4eebc7be09002d1128d695b0b8eccb4296286c
|
/templates/library_project/hooks/post_gen_project.py
|
e75e0aa687d118230ce7f0f7979c63964ccebf40
|
[
"MIT"
] |
permissive
|
cmstead/python-automation
|
b11288b85b641aec0c12e32ff6dceabb85e13617
|
4238ebf76b2e0ef62e34f360c47447a775264b73
|
refs/heads/master
| 2023-03-27T13:09:11.244132
| 2021-04-02T22:18:13
| 2021-04-02T22:18:13
| 295,536,736
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
py
|
from os import system
system("pipenv install --dev")
|
[
"cmstead@gmail.com"
] |
cmstead@gmail.com
|