blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
19f79b9b91ce7137e633b48e72d90d58347c037b
|
fa6f5f76ba65ce885a5b7bf41e81295a117e8822
|
/login/views.py
|
279f6a6c31d0f2deb44ec69f6bf54758835d0cde
|
[] |
no_license
|
royce1121/login-page
|
0f2bd462dc859a803589e29b05f2434cd684f9ff
|
6d5a440adbdb62bc4e9bbacfd7ab1ad8f0c686cd
|
refs/heads/master
| 2023-03-02T22:26:41.093143
| 2021-02-16T04:58:28
| 2021-02-16T04:58:28
| 339,283,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,655
|
py
|
from .forms import BaseAccountForm
from .forms import FirstStepForm
from .forms import SecondStepForm
from .forms import ThirdStepForm
from .forms import UserForm
from .models import BaseAccountModel
from braces.views import AjaxResponseMixin
from braces.views import JsonRequestResponseMixin
from django.contrib import messages
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import gettext as _
from django.views.generic import CreateView
from django.views.generic import FormView
from django.views.generic import ListView
from django.views.generic import TemplateView
from django.views.generic import UpdateView
from django.views.generic import View
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
class ViewsDispatcher(View):
    """Dispatch guard that pushes users with an unfinished profile back
    into the sign-up wizard before the wrapped view runs."""

    def dispatch(self, request, *args, **kwargs):
        account = BaseAccountModel.objects.filter(user=self.request.user).first()
        if account and not account.is_data_complete():
            # Resume the wizard at the first step that is still open.
            if account.is_step_two_complete():
                target = 'steps_3'
            elif account.is_step_one_complete():
                target = 'steps_2'
            else:
                target = 'steps'
            return redirect(reverse(target))
        return super().dispatch(request, *args, **kwargs)
class StartPageView(FormView):
    """Combined login / registration page.

    GET renders the form; POST either registers a new account or
    authenticates an existing one, depending on the submitted
    ``login_type`` field.
    """

    template_name = "index.html"
    form_class = UserForm

    def dispatch(self, request, *args, **kwargs):
        # Signed-in users skip straight to the landing page.
        if self.request.user.is_authenticated:
            return redirect(reverse('landing_page'))
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'login_type': 'Login',
            'form': UserForm(),
        })
        return context

    def post(self, request, *args, **kwargs):
        posted = self.request.POST
        username = posted.get('username')
        password = posted.get('password')
        form = UserForm(data=posted)
        if posted.get('login_type') == 'Register':
            if not form.is_valid():
                return render(
                    request,
                    'index.html',
                    {'form': form, 'login_type': 'Register'},
                )
            # Email is intentionally omitted at registration time.
            user = User.objects.create_user(username, None, password)
            login(request, user)
        else:
            user = authenticate(username=username, password=password)
            if user:
                login(request, user)
            else:
                messages.error(self.request, _('User not existing in the database.'))
                return render(
                    request,
                    'index.html',
                    {'form': form, 'login_type': 'Login'},
                )
        return redirect(reverse('landing_page'))
class LandingPageView(
    LoginRequiredMixin,
    FormView,
    ViewsDispatcher,
):
    """Authenticated home page; POSTing to it logs the user out."""

    template_name = "landing_page.html"
    login_url = 'start_page'
    form_class = BaseAccountForm

    def get_object(self):
        # The current user's profile row, or None if not created yet.
        return BaseAccountModel.objects.filter(user=self.request.user).first()

    def get_form_kwargs(self, **kwargs):
        form_kwargs = super().get_form_kwargs(**kwargs)
        form_kwargs['user'] = self.request.user
        form_kwargs['instance'] = self.get_object()
        return form_kwargs

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['object'] = self.get_object()
        return context

    def post(self, request, *args, **kwargs):
        logout(request)
        return redirect(reverse('start_page'))
class StepPageView(
    LoginRequiredMixin,
    FormView,
):
    """Step 1 of the profile-completion wizard.

    Subclasses override ``form_class`` / ``prev_page`` / ``next_page``
    to provide the later steps.
    """

    template_name = "steps.html"
    form_class = FirstStepForm
    prev_page = None
    next_page = 'steps_2'

    def dispatch(self, request, *args, **kwargs):
        # Users with a fully completed profile do not belong in the
        # wizard; send them to the landing page.
        basic_info = BaseAccountModel.objects.filter(
            user=self.request.user
        ).first()
        if basic_info and basic_info.is_data_complete():
            return redirect(reverse('landing_page'))
        return super().dispatch(request, *args, **kwargs)

    def get_object(self):
        """Return the user's profile row, or None before step 1 saves it."""
        return BaseAccountModel.objects.filter(user=self.request.user).first()

    def get_form_kwargs(self, **kwargs):
        extra_kwargs = super().get_form_kwargs(**kwargs)
        extra_kwargs.update({
            'user': self.request.user,
            'instance': self.get_object(),
        })
        return extra_kwargs

    @staticmethod
    def _step_entry(complete, url_name, text):
        """Build one progress-bar entry; only reachable steps get a link."""
        return {
            'active': complete,
            'url': reverse(url_name) if complete else '',
            'text': text,
        }

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Perf fix: the original called self.get_object() up to five
        # times here, issuing one DB query per call. Fetch once.
        obj = self.get_object()
        steps = [
            {
                'active': True,
                'url': reverse('steps'),
                'text': 'First Step',
            },
        ]
        if obj:
            steps.append(self._step_entry(
                obj.is_step_one_complete(), 'steps_2', 'Second Step'))
            steps.append(self._step_entry(
                obj.is_step_two_complete(), 'steps_3', 'Third Step'))
        else:
            steps.append({'active': False, 'url': '', 'text': 'Second Step'})
            steps.append({'active': False, 'url': '', 'text': 'Third Step'})
        context.update({
            'prev_page_url': self.prev_page,
            'steps': steps,
        })
        return context

    def post(self, request, *args, **kwargs):
        if 'next' in self.request.POST:
            form = self.form_class(
                instance=self.get_object(),
                data=self.request.POST,
                user=self.request.user,
            )
            if form.is_valid():
                form.save()
            # NOTE(review): an invalid form is silently dropped and the
            # user is advanced anyway; consider re-rendering with errors.
        if 'logout' in self.request.POST:
            logout(request)
            return redirect(reverse('start_page'))
        return redirect(self.get_success_url())

    def get_success_url(self):
        return reverse(self.next_page)
class SecondStepPageView(StepPageView):
    """Wizard step 2; reachable only once step 1 has been completed."""

    form_class = SecondStepForm
    prev_page = 'steps'
    next_page = 'steps_3'

    def dispatch(self, request, *args, **kwargs):
        account = BaseAccountModel.objects.filter(user=self.request.user).first()
        if account and not account.is_step_one_complete():
            # Step 1 unfinished: bounce the user back to it.
            return redirect(reverse('steps'))
        return super().dispatch(request, *args, **kwargs)
class ThirdStepPageView(StepPageView):
    """Wizard step 3; reachable only once step 2 has been completed."""

    form_class = ThirdStepForm
    prev_page = 'steps_2'
    next_page = 'landing_page'

    def dispatch(self, request, *args, **kwargs):
        account = BaseAccountModel.objects.filter(user=self.request.user).first()
        if account and not account.is_step_two_complete():
            # Step 2 unfinished: bounce the user back to it.
            return redirect(reverse('steps_2'))
        return super().dispatch(request, *args, **kwargs)
class AccountDataAjax(
    JsonRequestResponseMixin,
    AjaxResponseMixin,
    View,
):
    """AJAX endpoint for reading and saving the user's profile data.

    GET returns the raw field values for the profile identified by
    ``pk``; POST validates/saves a :class:`BaseAccountForm` and returns
    display-ready values.
    """

    def get_ajax(self, request, *args, **kwargs):
        pk = request.GET.get('pk', None)
        user_data = BaseAccountModel.objects.get(pk=pk)
        data = {
            'f_name': user_data.first_name,
            'l_name': user_data.last_name,
            # The 'adrress' misspelling is kept deliberately: the
            # client-side JS keys on it.
            'adrress': user_data.address,
            'gender': user_data.gender,
            'date_of_birth': user_data.date_of_birth,
            'email': user_data.email,
        }
        return self.render_json_response(data)

    def post_ajax(self, request, *args, **kwargs):
        user = self.request.user
        basic_info = BaseAccountModel.objects.filter(
            user=user
        ).first()
        # Update the existing profile if there is one, create otherwise.
        if basic_info:
            form = BaseAccountForm(
                data=self.request.POST,
                instance=basic_info,
                user=user,
            )
        else:
            form = BaseAccountForm(
                data=self.request.POST,
                user=user,
            )
        if form.is_valid():
            person_data = form.save()
            return self.render_json_response({
                "status": "OK",
                # Bug fix: this branch previously reported
                # "success": False even though the save succeeded.
                "success": True,
                'name': person_data.full_name(),
                'adrress': person_data.address,
                'gender': person_data.get_gender_display(),
                'date_of_birth': person_data.date_of_birth.strftime("%b. %d, %Y"),
                'email': person_data.email,
            })
        else:
            error_dict = form.errors.as_json()
            return self.render_json_response({
                "status": "OK",
                "success": False,
                "message": error_dict
            })
|
[
"royce@test.com"
] |
royce@test.com
|
053e43676a2bf4488dd298cc5c303b57165e7714
|
251f998a2feae210d573ebd489c066f81be3d1e1
|
/test/functional/rpc_invalidateblock.py
|
b4ce401a99f58832818d0e83cdc1a9aca06297e2
|
[
"MIT"
] |
permissive
|
Yash02012/bitkincoin
|
93a7f4aa8781da0b3fa811416733af6aaa99550d
|
dcfd8575e03ffe654e16cabb5ff6daabae7e24ab
|
refs/heads/master
| 2022-01-06T09:51:36.114441
| 2019-05-09T10:19:23
| 2019-05-09T10:19:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,937
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitkincoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the invalidateblock RPC."""
import time
from test_framework.test_framework import BitkincoinTestFramework
from test_framework.util import assert_equal, connect_nodes_bi, sync_blocks
class InvalidateTest(BitkincoinTestFramework):
    """Exercise the invalidateblock RPC on a three-node network.

    Checks that invalidating a block re-populates the set of candidate
    tips (allowing a reorg back to a previously-shorter chain) and that
    a node never reorgs to a lower-work chain.
    """

    def set_test_params(self):
        # Fresh chain so the block heights asserted below are exact.
        self.setup_clean_chain = True
        self.num_nodes = 3

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def setup_network(self):
        # Nodes start disconnected; the test connects them explicitly
        # at the points where a sync is wanted.
        self.setup_nodes()

    def run_test(self):
        self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
        self.log.info("Mine 4 blocks on Node 0")
        self.nodes[0].generate(4)
        assert(self.nodes[0].getblockcount() == 4)
        besthash = self.nodes[0].getbestblockhash()
        self.log.info("Mine competing 6 blocks on Node 1")
        self.nodes[1].generate(6)
        assert(self.nodes[1].getblockcount() == 6)
        self.log.info("Connect nodes to force a reorg")
        connect_nodes_bi(self.nodes,0,1)
        sync_blocks(self.nodes[0:2])
        # Node 0 adopts node 1's longer (6-block) chain.
        assert(self.nodes[0].getblockcount() == 6)
        badhash = self.nodes[1].getblockhash(2)
        self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
        self.nodes[0].invalidateblock(badhash)
        newheight = self.nodes[0].getblockcount()
        newhash = self.nodes[0].getbestblockhash()
        if (newheight != 4 or newhash != besthash):
            raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
        self.log.info("Make sure we won't reorg to a lower work chain:")
        connect_nodes_bi(self.nodes,1,2)
        self.log.info("Sync node 2 to node 1 so both have 6 blocks")
        sync_blocks(self.nodes[1:3])
        assert(self.nodes[2].getblockcount() == 6)
        self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
        self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
        assert(self.nodes[1].getblockcount() == 4)
        self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
        self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
        assert(self.nodes[2].getblockcount() == 2)
        self.log.info("..and then mine a block")
        self.nodes[2].generate(1)
        self.log.info("Verify all nodes are at the right height")
        # Fixed sleep rather than a sync barrier: gives propagation a
        # chance while still asserting node 1 did NOT reorg down.
        time.sleep(5)
        assert_equal(self.nodes[2].getblockcount(), 3)
        assert_equal(self.nodes[0].getblockcount(), 4)
        node1height = self.nodes[1].getblockcount()
        if node1height < 4:
            raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)

# Standard test-framework entry point.
if __name__ == '__main__':
    InvalidateTest().main()
|
[
"josephdisuja08@gmail.com"
] |
josephdisuja08@gmail.com
|
68aabc2c27d0560ddba1c52b5fc0655252d361dc
|
7021151239ac415b16799fc81b7ff6d905a108eb
|
/arrus.py
|
5d564e0ebfa9a9812a408e057b194b380931c3b5
|
[] |
no_license
|
mahmoudzeyada/problemsolving-leanring
|
e6e9798b3010df3b45384fa4eddc2e36ecd4db4c
|
4e66f301494855183c793fff9903dd93a1e3c329
|
refs/heads/master
| 2020-04-22T20:58:54.201887
| 2019-05-07T16:03:31
| 2019-05-07T16:03:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the hourglassSum function below.
def hourglassSum(arr):
    """Return the maximum "hourglass" sum in a 6x6 grid.

    An hourglass is the 7-cell pattern
        a b c
          d
        e f g
    with its top-left cell at (i, j) for 0 <= i, j <= 3.

    Bug fixes vs. the original:
    * ``list.append`` was called with two arguments (a TypeError);
    * the middle cell indexed ``arr[i+1][j+8]`` (wrong cell / IndexError);
    * sums were printed instead of collected, so ``max(subset)`` always
      ran on an empty list.
    """
    sums = []
    for i in range(4):
        for j in range(4):
            top = arr[i][j:j + 3]
            middle = [arr[i + 1][j + 1]]
            bottom = arr[i + 2][j:j + 3]
            sums.append(sum(top + middle + bottom))
    return max(sums)
if __name__ == '__main__':
    # Read the 6x6 grid, one whitespace-separated row per line.
    arr = []
    for _ in range(6):
        arr.append(list(map(int, input().rstrip().split())))
    # Bug fix: the original built the grid but never computed or
    # emitted the answer.
    print(hourglassSum(arr))
|
[
"mahmoudzeyada440@gmail.com"
] |
mahmoudzeyada440@gmail.com
|
0ddabb1bbb824ae1a30fc43b16800b8d22816ba1
|
dafba3fc1e816dbe18c8ec402f5d8108da75ddd7
|
/eas/api/tests/int/test_secret_santa.py
|
d2095983f4c509cdbfdfbb479c543ee6e06d89b9
|
[] |
no_license
|
etcaterva/eas-backend
|
5a76c6545504078968928f3c5715e1da162d96a8
|
cc47396c5b8b0a8bfea17ccb7f605ed43a79b76d
|
refs/heads/master
| 2023-05-04T03:41:24.973929
| 2023-02-03T21:55:40
| 2023-02-04T07:55:49
| 139,360,853
| 0
| 1
| null | 2023-04-21T22:37:27
| 2018-07-01T20:43:49
|
Python
|
UTF-8
|
Python
| false
| false
| 9,568
|
py
|
import datetime as dt
from unittest import mock
import freezegun
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APILiveServerTestCase
from eas.api import models
NOW = dt.datetime.now()
class SecretSantaTest(APILiveServerTestCase):
    """Integration tests for the secret-santa API endpoints.

    Covers draw creation (with and without exclusions), result
    retrieval, the admin view, and the resend-email endpoint. The boto3
    SQS client used for outgoing mail is mocked out in setUp, so no
    real messages are ever sent.
    """

    def setUp(self):
        self.list_url = reverse("secret-santa-list")
        self.client.default_format = "json"
        # Baseline valid payload; individual tests override it.
        self.secret_santa_data = {
            "language": "en",
            "participants": [
                {"name": "First Name", "email": "email@address1.com"},
                {"name": "Second Name", "email": "email@address2.com"},
                {"name": "Third Name", "email": "email@address2.com"},
            ],
        }
        # Patch boto3 so self.sqs is the mocked SQS client.
        boto_patcher = mock.patch("eas.api.amazonsqs.boto3")
        self.sqs = boto_patcher.start().client.return_value
        self.addCleanup(boto_patcher.stop)

    def test_create_secret_santa(self):
        """A minimal valid payload creates a draw and returns its id."""
        response = self.client.post(self.list_url, self.secret_santa_data)
        self.assertEqual(
            response.status_code, status.HTTP_201_CREATED, response.content
        )
        assert response.json() == {"id": mock.ANY}

    def test_create_with_exclusions(self):
        """Exclusions leave exactly one valid assignment, asserted below."""
        self.secret_santa_data = {
            "language": "en",
            "participants": [
                {
                    "name": "First Name",
                    "email": "email@address1.com",
                    "exclusions": ["Third Name"],
                },
                {
                    "name": "Second Name",
                    "email": "email@address2.com",
                    "exclusions": ["First Name"],
                },
                {
                    "name": "Third Name",
                    "email": "email@address2.com",
                    "exclusions": ["Second Name"],
                },
            ],
        }
        response = self.client.post(self.list_url, self.secret_santa_data)
        self.assertEqual(
            response.status_code, status.HTTP_201_CREATED, response.content
        )
        results = list(models.SecretSantaResult.objects.all())
        assert len(results) == 3
        print(results)
        # The only cycle compatible with the exclusions: 1->2->3->1.
        assert any(
            r.source == "First Name" and r.target == "Second Name" for r in results
        )
        assert any(
            r.source == "Second Name" and r.target == "Third Name" for r in results
        )
        assert any(
            r.source == "Third Name" and r.target == "First Name" for r in results
        )

    def test_create_impossible(self):
        """Unsatisfiable exclusions return a 400 with a general error."""
        self.secret_santa_data = {
            "language": "en",
            "participants": [
                {
                    "name": "First Name",
                    "email": "email@address1.com",
                    "exclusions": ["Third Name"],
                },
                {
                    "name": "Second Name",
                    "email": "email@address2.com",
                    "exclusions": ["Third Name"],
                },
                {
                    "name": "Third Name",
                    "email": "email@address2.com",
                    "exclusions": ["Second Name"],
                },
            ],
        }
        response = self.client.post(self.list_url, self.secret_santa_data)
        self.assertEqual(
            response.status_code, status.HTTP_400_BAD_REQUEST, response.content
        )
        assert response.json() == {
            "general": [{"message": "Unable to match participants", "code": "invalid"}]
        }

    def test_retrieve(self):
        """A stored result can be fetched by its primary key."""
        result = models.SecretSantaResult(source="From name", target="To Name")
        result.save()
        url = reverse("secret-santa-detail", kwargs=dict(pk=result.id))
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        self.assertEqual(response.data, {"source": "From name", "target": "To Name"})

    def test_missing_fields(self):
        """Empty payloads fail schema validation on both required fields."""
        secret_santa_data = {}
        response = self.client.post(self.list_url, secret_santa_data)
        self.assertEqual(
            response.status_code, status.HTTP_400_BAD_REQUEST, response.content
        )
        assert response.json() == {
            "schema": {
                "participants": [
                    {"message": "This field is required.", "code": "required"}
                ],
                "language": [
                    {"message": "This field is required.", "code": "required"}
                ],
            }
        }

    def test_fecth_secret_santa_admin(self):
        """Admin view lists participants and tracks who revealed a result."""
        # Create draw
        response = self.client.post(self.list_url, self.secret_santa_data)
        self.assertEqual(
            response.status_code, status.HTTP_201_CREATED, response.content
        )
        draw_id = response.json()["id"]
        # Fetch admin
        response = self.client.get(
            reverse("secret-santa-admin", kwargs=dict(pk=draw_id))
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = response.json()
        assert "id" in result
        assert "created_at" in result
        original_participants = {
            p["name"] for p in self.secret_santa_data["participants"]
        }
        returned_participants = {
            p["name"]: p["revealed"] for p in result["participants"]
        }
        # Nobody has looked at their result yet.
        assert set(returned_participants) == original_participants
        assert not any(returned_participants.values())
        assert result["participants"][0]["id"]
        # Fetch one result
        draw = models.SecretSanta.objects.get(pk=result["id"])
        draw_result = models.SecretSantaResult.objects.filter(draw=draw).all()[0]
        response = self.client.get(
            reverse("secret-santa-detail", kwargs=dict(pk=draw_result.id))
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        fetched_participant = response.json()["source"]
        # Fetch admin
        response = self.client.get(
            reverse("secret-santa-admin", kwargs=dict(pk=draw_id))
        ).json()
        returned_participants = {
            p["name"]: p["revealed"] for p in response["participants"]
        }
        # The participant whose result was fetched is now marked revealed.
        assert len([1 for x in returned_participants.values() if x])
        assert returned_participants[fetched_participant]

    def test_resend_email_success(self):
        """After the cool-down period a resend request queues one message."""
        draw = models.SecretSanta()
        draw.save()
        result = models.SecretSantaResult(
            source="From name", target="To Name", draw=draw
        )
        result.save()
        assert self.sqs.send_message.call_count == 0
        url = reverse(
            "secret-santa-resend-email",
            kwargs=dict(draw_pk=draw.id, result_pk=result.id),
        )
        # Jump past the anti-spam window before resending.
        with freezegun.freeze_time(NOW + dt.timedelta(days=3)):
            response = self.client.post(
                url, {"language": "en", "email": "mail@mail.com"}
            )
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        assert self.sqs.send_message.call_count == 1

    def test_resend_email_unlinked_result_fails(self):
        """Resend is rejected when the result does not belong to the draw."""
        draw = models.SecretSanta()
        draw.save()
        result = models.SecretSantaResult(
            source="From name", target="To Name", draw=draw
        )
        result.save()
        draw2 = models.SecretSanta()
        draw2.save()
        result2 = models.SecretSantaResult(
            source="From name", target="To Name", draw=draw2
        )
        result2.save()
        # draw2 with draw1's result...
        url = reverse(
            "secret-santa-resend-email",
            kwargs=dict(draw_pk=draw2.id, result_pk=result.id),
        )
        response = self.client.post(url, {"language": "en", "email": "mail@mail.com"})
        self.assertEqual(
            response.status_code, status.HTTP_400_BAD_REQUEST, response.content
        )
        # ...and draw1 with draw2's result both fail.
        url = reverse(
            "secret-santa-resend-email",
            kwargs=dict(draw_pk=draw.id, result_pk=result2.id),
        )
        response = self.client.post(url, {"language": "en", "email": "mail@mail.com"})
        self.assertEqual(
            response.status_code, status.HTTP_400_BAD_REQUEST, response.content
        )

    def test_resend_email_revealed_result_fails(self):
        """Once a result has been viewed, it can no longer be re-sent."""
        draw = models.SecretSanta()
        draw.save()
        result = models.SecretSantaResult(
            source="From name", target="To Name", draw=draw
        )
        result.save()
        # Viewing the detail endpoint marks the result as revealed.
        response = self.client.get(
            reverse("secret-santa-detail", kwargs=dict(pk=result.id))
        )
        url = reverse(
            "secret-santa-resend-email",
            kwargs=dict(draw_pk=draw.id, result_pk=result.id),
        )
        response = self.client.post(url, {"language": "en", "email": "mail@mail.com"})
        self.assertEqual(
            response.status_code, status.HTTP_400_BAD_REQUEST, response.content
        )

    def test_resend_email_too_recent_fails(self):
        """Resending immediately after creation is rejected (anti-spam)."""
        draw = models.SecretSanta()
        draw.save()
        result = models.SecretSantaResult(
            source="From name", target="To Name", draw=draw
        )
        result.save()
        url = reverse(
            "secret-santa-resend-email",
            kwargs=dict(draw_pk=draw.id, result_pk=result.id),
        )
        response = self.client.post(url, {"language": "en", "email": "mail@mail.com"})
        self.assertEqual(
            response.status_code, status.HTTP_400_BAD_REQUEST, response.content
        )
|
[
"mariocj89@gmail.com"
] |
mariocj89@gmail.com
|
6ba85a9a9247a3d8c945dc9f5b324c4bc61f4c92
|
82bccfd9c7db0fed0954ef31aadb7444f155d45c
|
/draw.py
|
57decbfb08bd075ac867590271d2975aeaba9497
|
[] |
no_license
|
Mabedin00/3d
|
cb0ca938ed70f28e7cd81441de32184e8908538b
|
50c09ffc194769bc8f3406e7833915182a5aee9c
|
refs/heads/master
| 2021-03-27T06:01:42.376243
| 2020-03-16T19:57:50
| 2020-03-16T19:57:50
| 247,794,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,593
|
py
|
from display import *
from matrix import *
# ====================
# add the points for a rectagular prism whose
# upper-left corner is (x, y, z) with width,
# height and depth dimensions.
# ====================
def add_box( points, x, y, z, width, height, depth ):
    """Add the 12 edges of a rectangular prism to the edge matrix.

    (x, y, z) is the upper-left-front corner; the box extends width to
    the right, height down, and depth back (negative z).
    """
    x1 = x + width
    y1 = y - height
    z1 = z - depth
    edges = [
        # front face
        ((x, y, z), (x1, y, z)),
        ((x, y, z), (x, y1, z)),
        ((x1, y, z), (x1, y1, z)),
        ((x, y1, z), (x1, y1, z)),
        # back face
        ((x, y, z1), (x1, y, z1)),
        ((x, y, z1), (x, y1, z1)),
        ((x1, y, z1), (x1, y1, z1)),
        ((x, y1, z1), (x1, y1, z1)),
        # connectors between the faces
        ((x, y, z), (x, y, z1)),
        ((x1, y, z), (x1, y, z1)),
        ((x, y1, z), (x, y1, z1)),
        ((x1, y1, z), (x1, y1, z1)),
    ]
    for (ax, ay, az), (bx, by, bz) in edges:
        add_edge(points, ax, ay, az, bx, by, bz)
# ====================
# Generates all the points along the surface
# of a sphere with center (cx, cy, cz) and
# radius r.
# Returns a matrix of those points
# ====================
def generate_sphere( points, cx, cy, cz, r, step ):
    """Generate points on a sphere of radius r centred at (cx, cy, cz).

    rot sweeps the full rotation over [0, 1] and cir sweeps the
    semicircle over [0, 1], both advancing by step. Points are appended
    via add_point and the matrix is returned.
    """
    rot = 0
    while rot <= 1:
        cir = 0
        while cir <= 1:
            phi = math.pi * cir        # angle along the semicircle
            theta = 2 * math.pi * rot  # rotation angle about the axis
            add_point(points,
                      r * math.cos(phi) + cx,
                      r * math.sin(phi) * math.cos(theta) + cy,
                      r * math.sin(phi) * math.sin(theta) + cz)
            cir += step
        rot += step
    return points
# ====================
# adds all the points for a sphere with center
# (cx, cy, cz) and radius r to points
# should call generate_sphere to create the
# necessary points
# ====================
def add_sphere( points, cx, cy, cz, r, step ):
    """Append every surface point of the sphere to the point matrix."""
    sphere_points = generate_sphere(new_matrix(0, 0), cx, cy, cz, r, step)
    for px, py, pz, _ in sphere_points:
        add_point(points, px, py, pz)
# ====================
# Generates all the points along the surface
# of a torus with center (cx, cy, cz) and
# radii r0 and r1.
# Returns a matrix of those points
# ====================
def generate_torus( points, cx, cy, cz, r0, r1, step ):
    """Generate points on a torus centred at (cx, cy, cz).

    r0 is the cross-section (tube) radius and r1 the distance from the
    centre to the middle of the tube. cir sweeps the full cross-section
    circle over [0, 2]; rot sweeps the rotation over [0, 1]. Appends to
    and returns points.
    """
    rot = 0
    while rot <= 1:
        cir = 0
        while cir <= 2:
            phi = math.pi * cir
            theta = 2 * math.pi * rot
            ring = r0 * math.cos(phi) + r1  # distance from the torus axis
            add_point(points,
                      math.cos(theta) * ring + cx,
                      r0 * math.sin(phi) + cy,
                      -1 * math.sin(theta) * ring + cz)
            cir += step
        rot += step
    return points
# ====================
# adds all the points for a torus with center
# (cx, cy, cz) and radii r0, r1 to points
# should call generate_torus to create the
# necessary points
# ====================
def add_torus( points, cx, cy, cz, r0, r1, step ):
    """Append every surface point of the torus to the point matrix."""
    torus_points = generate_torus(new_matrix(0, 0), cx, cy, cz, r0, r1, step)
    for px, py, pz, _ in torus_points:
        add_point(points, px, py, pz)
def add_circle( points, cx, cy, cz, r, step ):
    """Approximate a circle of radius r centred at (cx, cy) in the
    z = cz plane with `step` connected edges."""
    prev_x, prev_y = r + cx, cy  # the point at t = 0
    for i in range(1, step + 1):
        t = float(i) / step
        cur_x = r * math.cos(2 * math.pi * t) + cx
        cur_y = r * math.sin(2 * math.pi * t) + cy
        add_edge(points, prev_x, prev_y, cz, cur_x, cur_y, cz)
        prev_x, prev_y = cur_x, cur_y
def add_curve( points, x0, y0, x1, y1, x2, y2, x3, y3, step, curve_type ):
    """Add a cubic parametric curve, approximated by `step` edges.

    Coefficients come from generate_curve_coefs (first row of each
    returned matrix); curve_type selects which basis that helper uses.
    """
    xc = generate_curve_coefs(x0, x1, x2, x3, curve_type)[0]
    yc = generate_curve_coefs(y0, y1, y2, y3, curve_type)[0]
    for i in range(1, step + 1):
        t = float(i) / step
        # Horner-style evaluation of the cubic at t.
        x = t * (t * (xc[0] * t + xc[1]) + xc[2]) + xc[3]
        y = t * (t * (yc[0] * t + yc[1]) + yc[2]) + yc[3]
        add_edge(points, x0, y0, 0, x, y, 0)
        x0, y0 = x, y
def draw_lines( matrix, screen, color ):
    """Render an edge matrix: each consecutive pair of rows is one line."""
    if len(matrix) < 2:
        print('Need at least 2 points to draw')
        return
    for edge_start in range(0, len(matrix) - 1, 2):
        p0 = matrix[edge_start]
        p1 = matrix[edge_start + 1]
        draw_line(int(p0[0]), int(p0[1]),
                  int(p1[0]), int(p1[1]),
                  screen, color)
def add_edge( matrix, x0, y0, z0, x1, y1, z1 ):
    """Append an edge (a pair of homogeneous points) to the matrix."""
    for px, py, pz in ((x0, y0, z0), (x1, y1, z1)):
        add_point(matrix, px, py, pz)
def add_point( matrix, x, y, z=0 ):
    """Append the point (x, y, z) to matrix as a homogeneous row."""
    row = [x, y, z, 1]
    matrix.append(row)
def draw_line( x0, y0, x1, y1, screen, color ):
    """Rasterize the line from (x0, y0) to (x1, y1) onto screen.

    Integer midpoint-style line algorithm: A = 2*dy and B = -2*dx come
    from the implicit line equation, and d is the running decision
    variable. The four branches cover octants 1/8 (|dx| >= |dy|) and
    2/7 (|dy| > |dx|); endpoints are always swapped so drawing proceeds
    left to right.
    """
    #swap points if going right -> left
    if x0 > x1:
        xt = x0
        yt = y0
        x0 = x1
        y0 = y1
        x1 = xt
        y1 = yt
    x = x0
    y = y0
    A = 2 * (y1 - y0)
    B = -2 * (x1 - x0)
    #octants 1 and 8
    if ( abs(x1-x0) >= abs(y1 - y0) ):
        #octant 1
        if A > 0:
            d = A + B/2
            while x < x1:
                plot(screen, color, x, y)
                if d > 0:
                    y+= 1
                    d+= B
                x+= 1
                d+= A
            #end octant 1 while
            plot(screen, color, x1, y1)
        #end octant 1
        #octant 8
        else:
            d = A - B/2
            while x < x1:
                plot(screen, color, x, y)
                if d < 0:
                    y-= 1
                    d-= B
                x+= 1
                d+= A
            #end octant 8 while
            plot(screen, color, x1, y1)
        #end octant 8
    #end octants 1 and 8
    #octants 2 and 7
    else:
        #octant 2
        if A > 0:
            d = A/2 + B
            while y < y1:
                plot(screen, color, x, y)
                if d < 0:
                    x+= 1
                    d+= A
                y+= 1
                d+= B
            #end octant 2 while
            plot(screen, color, x1, y1)
        #end octant 2
        #octant 7
        else:
            d = A/2 - B;
            while y > y1:
                plot(screen, color, x, y)
                if d > 0:
                    x+= 1
                    d+= A
                y-= 1
                d-= B
            #end octant 7 while
            plot(screen, color, x1, y1)
        #end octant 7
    #end octants 2 and 7
#end draw_line
|
[
"mabedin00@stuy.edu"
] |
mabedin00@stuy.edu
|
0db846a6e5146c39dbdb5d2ce4971b61cc766b67
|
78392e1fbb5e5cb73148c9972c9c1df418fbccc0
|
/hello.py
|
d101a41630eeb92d77c7ce6c5f0f493320af10c9
|
[] |
no_license
|
gaomiup20/mine
|
9ebfc1db6e1642e3ca3b6b22320314d0509f8169
|
948b5d9932209be16e9ee85553b0de4f55de1a97
|
refs/heads/master
| 2021-01-21T10:52:56.929494
| 2017-02-28T02:42:57
| 2017-02-28T02:42:57
| 83,496,567
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47
|
py
|
#!/usr/bin/env python
# Print a greeting.
# Fixes: the shebang contained a stray space ("/usr/bin /env"), and the
# Python 2 print statement was a syntax error under Python 3; the
# function-call form below behaves identically on both.
print("hello world")
|
[
"tom@up.com"
] |
tom@up.com
|
722c89f7c8bd05cab2f40bb54b795413c498ec55
|
223632474c834264c9f7e4c4c525cf19ca923f7e
|
/Practices/Practice01/mysite/mysite/urls.py
|
de5f63a6e035fc2a500f82ec3bef07be0eed2cd8
|
[] |
no_license
|
englam/Django
|
2a2a6fd374169a5f5bb779138e4f5c13d3a987b3
|
db267371bac955254fe76e86447396107d69dfe3
|
refs/heads/master
| 2021-01-19T15:35:01.512703
| 2017-05-28T08:42:44
| 2017-05-28T08:42:44
| 88,223,570
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from views import here, add, math, math2, math3, math4, math5
# Route table. Built with the ``patterns()`` helper that was removed in
# Django 1.8, so this module targets an old Django release.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'mysite.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^here1/$',here), #path , object
    # Each pattern below captures two 1-2 digit numbers and passes them
    # to the view as positional string arguments.
    url(r'^(\d{1,2})/plus/(\d{1,2})/$',add),
    url(r'^(\d{1,2})/math/(\d{1,2})/$',math),
    url(r'^(\d{1,2})/math2/(\d{1,2})/$',math2),
    url(r'^(\d{1,2})/math3/(\d{1,2})/$',math3),
    url(r'^(\d{1,2})/math4/(\d{1,2})/$',math4),
    url(r'^(\d{1,2})/math5/(\d{1,2})/$',math5),
)
|
[
"englam3345678@gmail.com"
] |
englam3345678@gmail.com
|
d95a888b85475398916a2bb4c33af9ac9d29e373
|
491d92e941aea7b07390a9310a595d685d83632b
|
/setup.py
|
7ab03c954dd08a8eaadc948f409a16aba1a86ccf
|
[] |
no_license
|
williamschmitt21/ncan_bibrun
|
94dc7cbb7ace4d2ffb28e3b3f58de891a91559bc
|
5c15877c5d5e5324528d1561972b4106ab337e78
|
refs/heads/master
| 2021-09-16T01:52:34.079686
| 2018-06-14T20:48:13
| 2018-06-14T20:48:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
# Setup File for Python Module ncan-bib-assess
# Created by Billy Schmitt
from setuptools import setup
def readme():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as fh:
        return fh.read()
# Package metadata and build configuration for ncan_bibrun.
setup(name='ncan_bibrun',
    version='0.2',
    description='NCAN Bibliometric Assessment',
    url='http://github.com/Schmill731/NCAN-Bibliometric-Analysis',
    author='Billy Schmitt',
    author_email='williamschmitt@college.harvard.edu',
    license='NCAN',
    packages=['ncan_bibrun'],
    install_requires=['requests', 'xlsxwriter'],  # third-party runtime deps
    scripts=['bin/ncan-bibrun'],  # installed as a console script
    include_package_data=True,
    long_description=readme(),  # read from README.rst at build time
    zip_safe=False)
|
[
"williamschmitt@college.harvard.edu"
] |
williamschmitt@college.harvard.edu
|
0d5ca9d725f861d5c49cc304d16055ca776924a7
|
02c5c3329fd00a4261c69dd65bd6b19a8caeaa3e
|
/lc/hymns/views.py
|
36a302e1ba444964605eb6314334a80e4030c102
|
[] |
no_license
|
pantlmn/liturcorpora
|
c8246f8295fc1cb63c34b68d87335937f8f4949d
|
4ac52d588e8d21b8e24402663c7b9eec9fe33ac4
|
refs/heads/github
| 2022-05-09T17:19:49.654601
| 2021-04-10T00:29:55
| 2021-04-10T00:29:55
| 188,422,528
| 2
| 1
| null | 2022-04-22T21:20:07
| 2019-05-24T13:00:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from django.core.paginator import Paginator
from django.db.models import Q
import re
from .models import TextBlock, Paragraph
def index(request, block_id=None):
    """Show a text block's children (or the root blocks when no id is
    given) together with the breadcrumb path and the block's paragraphs."""
    breadcrumbs = []
    paragraphs = None
    if block_id is None:
        # Top level: blocks without a parent.
        blocks = TextBlock.objects.filter(parent_block=None)
    else:
        blocks = (TextBlock.objects
                  .filter(parent_block__id=block_id)
                  .order_by('order_id'))
        # Walk up the ancestry to build the breadcrumb trail, root first.
        node = TextBlock.objects.get(id=block_id)
        while node is not None:
            breadcrumbs.insert(0, [node.id, node.name_short])
            node = node.parent_block
        paragraphs = (Paragraph.objects
                      .filter(parent_block__id=block_id)
                      .order_by('order_id'))
    return render(request, 'textblocks.html',
                  {'path' : breadcrumbs, 'blocks' : blocks, 'paragraphs' : paragraphs})
def paragraph(request, paragraph_id=None):
    """Render a single paragraph together with its breadcrumb path."""
    para = Paragraph.objects.get(id=paragraph_id)
    breadcrumbs = []
    # Walk up the ancestry to build the breadcrumb trail, root first.
    node = para.parent_block
    while node is not None:
        breadcrumbs.insert(0, [node.id, node.name_short])
        node = node.parent_block
    return render(request, 'paragraph.html',
                  {'path' : breadcrumbs, 'paragraph' : para})
|
[
"pantlmn@gmail.com"
] |
pantlmn@gmail.com
|
c3332593ef978c5b3b55b4b58e3d4b49a1116ba5
|
392fe549572572b77f9fc1b852a34ebae519087b
|
/project/settings.py
|
55254c5029fae879f43669a2b1cae589d513f37b
|
[] |
no_license
|
mamee93/like_disqus
|
cd2cd90c1cf456015d6ee9d51a56a84fc42494f2
|
9516d714804b067bbaa20e35d8bbf8779bebaa67
|
refs/heads/main
| 2022-12-26T12:14:24.904770
| 2020-10-11T19:49:36
| 2020-10-11T19:49:36
| 303,204,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,218
|
py
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control - rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '-9(k9d3k_qw7rj1(iv@k+d$$_fwaf-ht$o*)30=#ohv3#wa403'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty while DEBUG is True: Django then only accepts localhost-style hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app.
    'TopicComm',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, in addition to per-app templates.
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    BASE_DIR / "static",
]
# User-uploaded files (served by Django only during development).
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
|
[
"mameeal@gmail.com"
] |
mameeal@gmail.com
|
31ed002b7e046f083d95c8d5950e4686b457017a
|
67c26df6c59f2564082e9cf7caf04ab25c929cb5
|
/ika_classifier/app/api/tests/Main.py
|
76bfff3535ff87cadb7fb025d08aea0809fdfe3f
|
[
"Apache-2.0"
] |
permissive
|
Harisonm/Ika
|
46e5f51212999a16cadb619c3aafb8c11e39964a
|
243ceab532007ee4fb05b205e1125fab5d3d325b
|
refs/heads/master
| 2023-05-29T16:55:41.854929
| 2021-01-13T14:48:49
| 2021-01-13T14:48:49
| 198,652,172
| 6
| 0
|
Apache-2.0
| 2021-06-10T15:54:31
| 2019-07-24T14:30:21
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,076
|
py
|
from api.classifier_mail.model.KMeansModel import *
from api.classifier_mail.model.Metrics import *
from ikamail.GmailHelper import GmailHelper
import pandas as pd
import nltk
import sys
import os
PATH = os.environ.get("PATH_SAVE_TRANSFORM", default=False)
nltk.download("punkt")
nltk.download("stopwords")
def build_label_mail(data):
    """Cluster mails from a csv and apply the derived labels via Gmail.

    Reads ``PATH + data``, pre-processes the mail texts, clusters them with
    K-Means over a tf-idf matrix (cluster count picked from the WCSS elbow),
    derives labels per cluster, then creates each mail's first label in Gmail
    and attaches it to the corresponding message.

    Args:
        data: csv filename, relative to the PATH_SAVE_TRANSFORM directory.

    Returns:
        None; all effects happen through the Gmail API.
    """
    train = pd.read_csv(PATH + data, encoding="utf-8")
    clean_train_reviews = pre_processing_dataset(train)
    vocab_frame = build_vocab_frame(clean_train_reviews)
    tfidf_matrix, tfidf_vectorizer = build_tfidf_matrix_vector(clean_train_reviews)
    # calculating the within clusters sum-of-squares for 19 cluster amounts
    sum_of_squares = calculate_wcss(tfidf_matrix)
    # calculating the optimal number of clusters
    n_clusters = optimal_number_of_clusters(sum_of_squares)
    clusters, k_means_model = build_cluster_from_model(n_clusters, tfidf_matrix)
    # cluster_labels, silhouette_avg, sample_silhouette_values = predict_clustering_group(k_means_model, tfidf_matrix)
    labels = build_label_mails(
        vocab_frame,
        k_means_model,
        tfidf_vectorizer,
        clusters,
        clean_train_reviews,
        n_clusters,
    )
    for mail in clean_train_reviews:
        # Only the first label of each mail is materialised in Gmail ([:1]).
        for lbl in mail["label"][:1]:
            GmailHelper("dev").create_label(
                "me",
                name_label=lbl,
                label_list_visibility="labelShow",
                message_list_visibility="show",
            )
        labels_ids = GmailHelper("dev").get_label_ids("me", mail["label"])
        GmailHelper("dev").modify_message(
            user_id="me",
            mail_id=mail["idMail"],
            mail_labels=create_msg_labels(labels_ids[:1]),
        )
def create_msg_labels(labels_ids):
    """Build the Gmail ``users.messages.modify`` label-update payload.

    Args:
        labels_ids: label ids to add to the message.

    Returns:
        A dict with the ids to add and an empty removal list.
    """
    payload = {"removeLabelIds": []}
    payload["addLabelIds"] = labels_ids
    return payload
if __name__ == "__main__":
arg1 = str(sys.argv[1])
build_label_mail(arg1)
|
[
"mranaivoharison@gmail.com"
] |
mranaivoharison@gmail.com
|
4dc41f57067c9b4327b7884014ede075cf83c66b
|
509bc8038c4ce8f04130b583d4c70bd851f9b83d
|
/python/weather/weather.py
|
e86676654ea84e974a4efdbaaa0291eadc55dae8
|
[] |
no_license
|
AlbertoCastelo/weather-kata
|
4162fd87ea0f12ab65909f0ef7e3e4201cb2c38f
|
faa78305050152104d81b3554c0c59e5a2b9d0de
|
refs/heads/master
| 2020-03-22T11:17:09.946722
| 2018-07-06T07:12:29
| 2018-07-06T07:12:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
import urllib.request
import json
import datetime
class Weather:
    """Fetches weather predictions for a city from metaweather.com."""

    def predict(self, city, aDateTime = None, wind = False):
        """Return the predicted weather state (or wind speed) for a city.

        Args:
            city: city name understood by the metaweather location search.
            aDateTime: datetime of the wanted prediction; defaults to now.
            wind: when True, return the wind speed instead of the state name.

        Returns:
            The weather state name (or wind speed when ``wind`` is True), or
            "" when no prediction is available for the requested date.
        """
        # When date is not provided we look for the current prediction
        if aDateTime is None:
            aDateTime = datetime.datetime.now()
        # Metaweather only publishes predictions for roughly the next 6 days.
        if (aDateTime < (datetime.datetime.now() + datetime.timedelta(days=6)).replace(hour=0, minute=0, second=0)):
            # Find the id of the city on metawheather
            woeid = json.loads(urllib.request.urlopen(
                "https://www.metaweather.com/api/location/search/?query=" + city).read())[0]['woeid']
            # Find the predictions for the city
            results = json.loads(urllib.request.urlopen(
                "https://www.metaweather.com/api/location/" + str(woeid)).read())['consolidated_weather']
            for result in results:
                # When the date is the expected
                if result['applicable_date'] == aDateTime.strftime('%Y-%m-%d'):
                    # If we have to return the wind information
                    if wind:
                        return result['wind_speed']
                    else:
                        return result['weather_state_name']
        # BUG FIX: previously the function fell through and implicitly
        # returned None when the date was in range but missing from the
        # results; return "" consistently with the out-of-range case.
        return ""
|
[
"luisrovirosa@gmail.com"
] |
luisrovirosa@gmail.com
|
f7e6e122d6cea6eb411ade6ecff40bd031ddd6bd
|
5e4d4a4f2c27c5ed4b86e67464d5890873857798
|
/setup/02-robot_syntax_rule.py
|
9fbe741963dc1c4ad38dd87cc2a6b0682ed2c4b4
|
[
"MIT"
] |
permissive
|
venetastefanova/rf-katas
|
a4941cb278c173bb09e567e962a801c9e02290bd
|
ce693e31303c19d73ed6e3e779a46f7d364a2069
|
refs/heads/master
| 2022-12-24T11:27:53.519564
| 2020-10-07T08:51:54
| 2020-10-07T08:51:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
from rflint.common import SuiteRule, TestRule, ERROR
MUST_KEYWORDS = [
"Open Browser",
"Input Text",
"Page Should Contain"
]
class TestCaseImplementation02(TestRule):
    """Flags test cases that do not use every keyword in MUST_KEYWORDS."""

    severity = ERROR

    def apply(self, test):
        # The keyword name sits in the second column of each step row.
        used_keywords = [row[1] for row in test.steps if len(row) > 1]
        missing = [kw for kw in MUST_KEYWORDS if kw.strip() not in used_keywords]
        if missing:
            self.report(test, "Did you find all keywords from seleniumlibrary documentation?, expected: {}".format(", ".join(MUST_KEYWORDS)), test.linenumber)
class CheckTestCasesName02(SuiteRule):
    """Flags test cases whose name differs (case-insensitively) from the expected one."""

    severity = ERROR

    def apply(self, suite):
        expected_name = "Welcome Page Should Be Visible After Successful Login"
        wanted = expected_name.lower()
        for testcase in suite.testcases:
            if testcase.name.lower() != wanted:
                self.report(suite, "Check test case name: {}, expected: {}".format(testcase.name, expected_name), 0)
|
[
"joonas.jauhiainen@eficode.com"
] |
joonas.jauhiainen@eficode.com
|
5ec57948a7c2ac91e0df862f3e5ca07b9d830b37
|
e7340c09cfea3270b067f667c58757f5aa6db449
|
/users/models.py
|
546dd7c513c706b437b64ba6098034257824ef12
|
[] |
no_license
|
emma-code187/blogprojects
|
737a338e08fa1ea2642139076fa625bba8d82c8e
|
36cb6880608dbadd946e335a86751371b08cb931
|
refs/heads/main
| 2023-03-20T01:55:58.978864
| 2021-03-21T13:55:25
| 2021-03-21T13:55:25
| 305,835,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
from django.db.models.signals import post_save
from django.dispatch import receiver
#from django.utils.translation import ugettext_lazy as _
class Profile(models.Model):
    # One profile per auth user; removed together with the user (CASCADE).
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Avatar image; falls back to the bundled default picture.
    image = models.ImageField(default='default.jpg', upload_to='profile_pics')
    def __str__(self):
        return f'{self.user.username} Profile'
    def save(self, *args, **kwargs):
        """Persist the profile, then downscale oversized avatars in place."""
        # Save first so self.image.path points at the stored file.
        super().save(*args, **kwargs)
        img = Image.open(self.image.path)
        if img.height > 300 or img.width > 300:
            # thumbnail() preserves aspect ratio within a 300x300 box.
            output_size = (300, 300)
            img.thumbnail(output_size)
            img.save(self.image.path)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    # Auto-create a Profile the first time a User row is saved.
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # Keep the related Profile persisted whenever its User is saved.
    instance.profile.save()
|
[
"noreply@github.com"
] |
emma-code187.noreply@github.com
|
993c350eb67aa4ef8bfc9ea7648e760612129dac
|
2d31c6cc2c0f6280e91ca22ef1f4c6da9a0bb165
|
/src/Filter.py
|
c70e9e9222777a6b7d396a02e61b9b17dbf19eab
|
[] |
no_license
|
787890/Music-alib
|
717e025d02f663d28bed7d86470ef0f0b81050fa
|
260cc6305835beec42c8ac64cd6ca8a96b0b3ce3
|
refs/heads/master
| 2020-04-12T14:44:56.193918
| 2018-12-24T09:08:38
| 2018-12-24T09:08:38
| 162,560,298
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
# -*- coding: utf-8 -*-
class SongFilter:
    """Minimum-quality criteria used to keep or discard candidate songs."""

    DEFAULT_MIN_BITRATE = 320
    DEFAULT_MIN_SIMILARITY = 0.5

    def __init__(self, min_bitrate=DEFAULT_MIN_BITRATE, min_similarity=DEFAULT_MIN_SIMILARITY):
        """Store the minimum bitrate and the minimum title-similarity score."""
        self.min_bitrate, self.min_similarity = min_bitrate, min_similarity
|
[
"787890@gmail.com"
] |
787890@gmail.com
|
51b7086bc2acd7748614797c73980218e910f92a
|
766a6fdd83608d8faf283b302fa7f6e24b7c4f85
|
/WHIProject/urls.py
|
1c6162554a8cc241acb7f37900ea57d09f49ec9f
|
[] |
no_license
|
harshakhmk/WorldHappinessIndexAPI
|
d4d1877d6fdb18e058fa6065f4de668178c51057
|
724532cd1360024a87df3040a013c9028f6764fb
|
refs/heads/master
| 2023-04-06T14:28:28.200813
| 2021-04-12T11:06:08
| 2021-04-12T11:06:08
| 355,083,395
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
"""WHIProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
# NOTE(review): star import supplies the error400/403/404/500 views
# assigned to the handlers below; an explicit import would be clearer.
from utils.views import *
from django.conf.urls import (
    handler400, handler403, handler404, handler500)
# The API app is mounted both under /v1/ and at the site root.
# static() only serves files in development (DEBUG=True).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('v1/',include('HappinessApi.urls')),
    path('',include('HappinessApi.urls')),
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Project-wide custom error handlers (views come from utils.views).
handler404 = error404
handler500 = error500
handler403 = error403
handler400 = error400
|
[
"kharshakashyap@gmail.com"
] |
kharshakashyap@gmail.com
|
3685cda859c392bd6c76ef33e00dde9c7acda5af
|
d0360de3fac36e17d4e423844a78c3fe654677ff
|
/CF/Q.py
|
f0314366c6c2856b352eb959f0e668f4b49dc78b
|
[] |
no_license
|
niki999922/Machine-Learning
|
958109a6a2bb82f9ad2d531bc952509634f568a0
|
9f5f13774a12a4e32697e10de6372d1251776007
|
refs/heads/master
| 2023-02-10T01:32:18.307707
| 2021-01-11T20:34:34
| 2021-01-11T20:34:34
| 298,417,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
import math
if __name__ == '__main__':
    # Computes the empirical conditional entropy H(Y|X) from n observed
    # (x, y) pairs. k1, k2 are the alphabet sizes of X and Y (k2 unused).
    k1, k2 = map(int, input().split(" "))
    n = int(input())
    ans = 0
    # p[x]: empirical marginal P(X = x); m: empirical joint P(X = x, Y = y),
    # keyed by the string "x y" (0-based).
    p = [0 for _ in range(k1)]
    m = {}
    for _ in range(n):
        x, y = map(int, input().split(" "))
        p[x - 1] += 1 / n
        key = f'{x - 1} {y - 1}'
        # Idiom fix: membership test instead of calling m.__contains__();
        # also dropped the unused xs/ys accumulators.
        if key not in m:
            m[key] = 0
        m[key] += 1 / n
    # H(Y|X) = -sum over (x, y) of p(x, y) * log(p(x, y) / p(x)).
    for key in m:
        x, _ = map(int, key.split(" "))
        ans += -m[key] * math.log(m[key] / p[x])
    print(ans)
|
[
"nikita.kochetkov@jetbrains.com"
] |
nikita.kochetkov@jetbrains.com
|
c2d6d0cecb6ab276a78410a0afe6e8ca0c4e41b4
|
505a155fe624fd9d028cbd960ed4c819dbfac2ed
|
/backendbase.py
|
569028aae98304a9ad1dde2beedf5e20acd58710
|
[] |
no_license
|
WalkingMachine/vizbox
|
054332c1e30297faac3084af312edbb42d1cca54
|
1d290581f16d5b775e55f5bcc4f3d03993bc6331
|
refs/heads/master
| 2021-04-03T06:48:33.881853
| 2018-05-22T20:25:32
| 2018-05-22T20:25:32
| 124,595,894
| 0
| 0
| null | 2018-05-22T19:37:02
| 2018-03-09T21:35:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,990
|
py
|
def call_callbacks_in(cb_list, converter):
    """Return a callback that converts a message once and fans it out.

    Args:
        cb_list: list of callables invoked with the converted message.
        converter: callable applied to the raw message before dispatch.

    Returns:
        A single-argument function suitable for subscription APIs.
    """
    def callback(message):
        payload = converter(message)
        for subscriber in cb_list:
            subscriber(payload)
    return callback
class BackendBase(object):
    """Callback-registry base class for backends.

    Subclasses publish operator text, robot text, challenge steps, images
    and stories by invoking the callbacks registered in the lists below.
    """

    @staticmethod
    def get_instance():
        """Return the singleton backend; subclasses must implement."""
        raise NotImplementedError()

    def __init__(self):
        # One subscriber list per event type.
        self.on_operator_text = []
        self.on_robot_text = []
        self.on_challenge_step = []
        self.on_image = []
        self.on_story = []

    def attach_operator_text(self, callback):
        """Register a callback for operator text."""
        self.on_operator_text.append(callback)

    def attach_robot_text(self, callback):
        """Register a callback for robot text."""
        self.on_robot_text.append(callback)

    def attach_challenge_step(self, callback):
        """Register a callback for challenge-step updates."""
        self.on_challenge_step.append(callback)

    def detach_operator_text(self, callback):
        """Unregister a previously attached operator-text callback."""
        self.on_operator_text.remove(callback)

    def detach_robot_text(self, callback):
        """Unregister a previously attached robot-text callback."""
        self.on_robot_text.remove(callback)

    def detach_challenge_step(self, callback):
        """Unregister a previously attached challenge-step callback."""
        self.on_challenge_step.remove(callback)

    def accept_command(self, command_text):
        """Handle an operator command; subclasses must implement."""
        raise NotImplementedError()

    def attach_image(self, callback):
        """Add a callback for when an Image is received.

        :param callback: function accepting a base64-encoded image
        :return:
        """
        self.on_image.append(callback)

    def detach_image(self, callback):
        """Remove a callback from when an Image is received.

        :param callback:
        :return:
        """
        self.on_image.remove(callback)

    def attach_story(self, callback):
        """Add a callback for when a Story is received.

        :param callback: function accepting a tuple of (title: str, storyline: [str])
        :return:
        """
        self.on_story.append(callback)

    def detach_story(self, callback):
        """Remove a callback from when a Story is received.

        :param callback:
        :return:
        """
        self.on_story.remove(callback)
|
[
"loy.vanbeek@gmail.com"
] |
loy.vanbeek@gmail.com
|
9b5752bb27cebacb768b2952a976016fc3e1f6ed
|
6649efd4a95645938221eca58404db5663cd2491
|
/official/vision/losses/retinanet_losses.py
|
91aaecf082d4d5d390a2445c5e3290767401c578
|
[
"Apache-2.0"
] |
permissive
|
Dithn/models
|
8447866855959946358f2e5160b7d31aaafcfc98
|
36a140b8765eaa07525ac42a00cbd01a8b03b98e
|
refs/heads/master
| 2023-09-01T07:41:28.596877
| 2022-03-16T18:12:00
| 2022-03-16T18:13:23
| 228,201,096
| 1
| 0
|
Apache-2.0
| 2021-09-23T21:19:16
| 2019-12-15T14:52:24
|
Python
|
UTF-8
|
Python
| false
| false
| 8,097
|
py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for detection models."""
# Import libraries
import tensorflow as tf
def focal_loss(logits, targets, alpha, gamma):
  """Compute the focal loss between `logits` and the golden `target` values.
  Focal loss = -(1-pt)^gamma * log(pt)
  where pt is the probability of being classified to the true class.
  Args:
    logits: A float32 tensor of size
      [batch, d_1, ..., d_k, n_classes].
    targets: A float32 tensor of size
      [batch, d_1, ..., d_k, n_classes].
    alpha: A float32 scalar multiplying alpha to the loss from positive examples
      and (1-alpha) to the loss from negative examples.
    gamma: A float32 scalar modulating loss from hard and easy examples.
  Returns:
    loss: A float32 Tensor of size
      [batch, d_1, ..., d_k, n_classes] representing
      normalized loss on the prediction map.
  """
  with tf.name_scope('focal_loss'):
    positive_label_mask = tf.equal(targets, 1.0)
    cross_entropy = (
        tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits))
    probs = tf.sigmoid(logits)
    # probs_gt is pt: the probability the model assigns to the true class.
    probs_gt = tf.where(positive_label_mask, probs, 1.0 - probs)
    # With small gamma, the implementation could produce NaN during back prop.
    modulator = tf.pow(1.0 - probs_gt, gamma)
    # Down-weight easy examples (pt near 1) by the (1-pt)^gamma modulator.
    loss = modulator * cross_entropy
    # alpha balances the contribution of positive vs negative examples.
    weighted_loss = tf.where(positive_label_mask, alpha * loss,
                             (1.0 - alpha) * loss)
    return weighted_loss
class FocalLoss(tf.keras.losses.Loss):
  """Implements a Focal loss for classification problems.
  Reference:
    [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
  """
  def __init__(self,
               alpha,
               gamma,
               num_classes,
               reduction=tf.keras.losses.Reduction.AUTO,
               name=None):
    """Initializes `FocalLoss`.
    Args:
      alpha: The `alpha` weight factor for binary class imbalance.
      gamma: The `gamma` focusing parameter to re-weight loss.
      num_classes: Number of foreground classes.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training) for
        more details.
      name: Optional name for the op. Defaults to 'retinanet_class_loss'.
    """
    self._num_classes = num_classes
    self._alpha = alpha
    self._gamma = gamma
    super(FocalLoss, self).__init__(reduction=reduction, name=name)
  def call(self, y_true, y_pred):
    """Invokes the `FocalLoss`.
    Args:
      y_true: Ordered Dict with level to [batch, height, width, num_anchors].
        for example,
        {3: tf.Tensor(shape=[32, 512, 512, 9], dtype=tf.float32),
        4: tf.Tensor([shape=32, 256, 256, 9, dtype=tf.float32])}
      y_pred: Ordered Dict with level to [batch, height, width, num_anchors *
        num_classes]. for example,
        {3: tf.Tensor(shape=[32, 512, 512, 9], dtype=tf.int64),
        4: tf.Tensor(shape=[32, 256, 256, 9 * 21], dtype=tf.int64)}
    Returns:
      Summed loss float `Tensor`.
    """
    flattened_cls_outputs = []
    flattened_labels = []
    batch_size = None
    for level in y_pred.keys():
      cls_output = y_pred[level]
      label = y_true[level]
      if batch_size is None:
        # Use the static batch size when known, else fall back to dynamic.
        batch_size = cls_output.shape[0] or tf.shape(cls_output)[0]
      # Flatten each level to [batch, anchors_at_level, num_classes] /
      # [batch, anchors_at_level] so levels can be concatenated.
      flattened_cls_outputs.append(
          tf.reshape(cls_output, [batch_size, -1, self._num_classes]))
      flattened_labels.append(tf.reshape(label, [batch_size, -1]))
    # Concatenate the anchors of all feature levels along one axis.
    cls_outputs = tf.concat(flattened_cls_outputs, axis=1)
    labels = tf.concat(flattened_labels, axis=1)
    # Integer class ids -> one-hot targets for the sigmoid focal loss.
    cls_targets_one_hot = tf.one_hot(labels, self._num_classes)
    return focal_loss(
        tf.cast(cls_outputs, dtype=tf.float32),
        tf.cast(cls_targets_one_hot, dtype=tf.float32), self._alpha,
        self._gamma)
  def get_config(self):
    # Serialization support so the loss can be re-created from a config dict.
    config = {
        'alpha': self._alpha,
        'gamma': self._gamma,
        'num_classes': self._num_classes,
    }
    base_config = super(FocalLoss, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class RetinanetBoxLoss(tf.keras.losses.Loss):
  """RetinaNet box Huber loss."""
  def __init__(self,
               delta,
               reduction=tf.keras.losses.Reduction.AUTO,
               name=None):
    """Initializes `RetinanetBoxLoss`.
    Args:
      delta: A float, the point where the Huber loss function changes from a
        quadratic to linear.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial](
        https://www.tensorflow.org/tutorials/distribute/custom_training) for
        more details.
      name: Optional name for the op. Defaults to 'retinanet_class_loss'.
    """
    # Inner Huber loss uses NONE so the outer `reduction` is applied once by
    # the Loss base class, not twice.
    self._huber_loss = tf.keras.losses.Huber(
        delta=delta, reduction=tf.keras.losses.Reduction.NONE)
    self._delta = delta
    super(RetinanetBoxLoss, self).__init__(reduction=reduction, name=name)
  def call(self, y_true, y_pred):
    """Computes box detection loss.
    Computes total detection loss including box and class loss from all levels.
    Args:
      y_true: Ordered Dict with level to [batch, height, width,
        num_anchors * 4] for example,
        {3: tf.Tensor(shape=[32, 512, 512, 9 * 4], dtype=tf.float32),
        4: tf.Tensor([shape=32, 256, 256, 9 * 4, dtype=tf.float32])}
      y_pred: Ordered Dict with level to [batch, height, width,
        num_anchors * 4]. for example,
        {3: tf.Tensor(shape=[32, 512, 512, 9 * 4], dtype=tf.int64),
        4: tf.Tensor(shape=[32, 256, 256, 9 * 4], dtype=tf.int64)}
    Returns:
      an integer tensor representing total box regression loss.
    """
    # Sums all positives in a batch for normalization and avoids zero
    # num_positives_sum, which would lead to inf loss during training
    flattened_box_outputs = []
    flattened_labels = []
    batch_size = None
    for level in y_pred.keys():
      box_output = y_pred[level]
      label = y_true[level]
      if batch_size is None:
        # Use the static batch size when known, else fall back to dynamic.
        batch_size = box_output.shape[0] or tf.shape(box_output)[0]
      # Flatten each level to [batch, anchors_at_level, 4] box coordinates.
      flattened_box_outputs.append(tf.reshape(box_output, [batch_size, -1, 4]))
      flattened_labels.append(tf.reshape(label, [batch_size, -1, 4]))
    # Concatenate the anchors of all feature levels along one axis.
    box_outputs = tf.concat(flattened_box_outputs, axis=1)
    labels = tf.concat(flattened_labels, axis=1)
    loss = self._huber_loss(labels, box_outputs)
    return loss
  def get_config(self):
    # Serialization support so the loss can be re-created from a config dict.
    config = {
        'delta': self._delta,
    }
    base_config = super(RetinanetBoxLoss, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
023837f61204966e0a2fbe0fd3a4300a089a5156
|
bf461a8a1b29b305073f7123205a0071f439fe37
|
/Day 9/travel_log.py
|
5e1c98b53d984fe2036c59abde4a214af6250898
|
[] |
no_license
|
d0ntblink/100-Days-of-Code
|
ab45b878d2a3de57a5f7dacd706b16425e9497aa
|
f8c00469284934e65672f6f2925784225b0e8e99
|
refs/heads/master
| 2023-07-04T12:21:10.169523
| 2021-02-23T06:49:34
| 2021-02-23T06:49:34
| 341,440,513
| 0
| 0
| null | 2021-07-27T22:24:37
| 2021-02-23T05:34:16
|
Python
|
UTF-8
|
Python
| false
| false
| 849
|
py
|
travel_log = [
    {
        "country": "France",
        "visits": 12,
        "cities": ["Paris", "Lille", "Dijon"]
    },
    {
        "country": "Germany",
        "visits": 5,
        "cities": ["Berlin", "Hamburg", "Stuttgart"]
    },
]
#🚨 Do NOT change the code above
'''
add_new_country("Russia", 2, ["Moscow", "Saint Petersburg"])
add_new_country("Russia", 2, ["Moscow", "Saint Petersburg"])
You've visited Russia 2 times.
You've been to Moscow and Saint Petersburg.
'''
#TODO: Write the function that will allow new countries
#to be added to the travel_log. 👇
def add_new_country(country, visits, city):
    """Append a visited-country record to the module-level travel_log."""
    travel_log.append({
        "country": country,
        "visits": visits,
        "cities": city,
    })
#🚨 Do not change the code below
add_new_country("Russia", 2, ["Moscow", "Saint Petersburg"])
print(travel_log)
|
[
"33300872+d0ntblink@users.noreply.github.com"
] |
33300872+d0ntblink@users.noreply.github.com
|
71974246c15cd9d899006c0243f48b88b426b3cf
|
cad2b9873e641862c0896c68cfc12620a34cce03
|
/src/controller.py
|
398e578f49070f28f614651dbc0dac291c21e7c7
|
[] |
no_license
|
xiaoniaoyou/Galieo
|
de5113f5dcf20518d966c3a474066bda3cf408a7
|
5c9cac0914b871245dc75d63714fd6eaf2a20ad5
|
refs/heads/master
| 2020-03-12T12:29:37.243459
| 2018-04-23T02:38:23
| 2018-04-23T02:38:23
| 130,619,592
| 0
| 0
| null | 2018-04-23T02:38:24
| 2018-04-23T00:27:08
| null |
UTF-8
|
Python
| false
| false
| 3,221
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
此界面用于上传指定目录的文件夹,打包该文件夹并压缩,然后上传到远程服务器上指定目录(默认桌面下)
版本3.0.0,此版本用于上传打包压缩后的文件
版本2.0.0,此版本用于上传文件夹
'''
__version__ = '3.0.2'
__author__ = 'Eric'
import MainFrame as mainFrame
import wx
from wx.lib.pubsub import pub
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import model
import view
class Controller:
    """Wires the wx views to the model and relays pubsub status messages."""
    def __init__(self, app):
        self.model = model.Model()
        self.main_view = view.MainView(None)
        self.main_view.Show()
        # Main-window buttons.
        self.main_view.m_button_config.Bind(wx.EVT_BUTTON, self.on_show_config_dlg)
        self.main_view.m_button_choose_dir.Bind(wx.EVT_BUTTON, self.on_choose_dir)
        self.main_view.m_button_choose_single_file.Bind(wx.EVT_BUTTON, self.on_choose_file)
        self.main_view.m_button_upload.Bind(wx.EVT_BUTTON, self.on_upload_file)
        self.main_view.m_button_quit.Bind(wx.EVT_BUTTON, self.on_quit_main_frame)
        # Created lazily when the config dialog is opened.
        self.config_view = None
        # The model publishes status text via pubsub; route it to the views.
        pub.subscribe(self.show_main_msg, 'show_main_msg')
        pub.subscribe(self.show_dlg_msg, 'show_dlg_msg')
        # msg = self.model.display_main_msg()
        # self.show_main_msg(str=msg)
        self.model.display_main_msg()
    def on_show_config_dlg(self, evt):
        """Build and show the modal server-configuration dialog."""
        self.config_view = view.ConfigView(None)
        self.config_view.m_button_ip.Bind(wx.EVT_BUTTON, self.on_config_ip)
        self.config_view.m_button_file_path.Bind(wx.EVT_BUTTON, self.on_config_file_path)
        self.config_view.m_button_password.Bind(wx.EVT_BUTTON, self.on_config_password)
        self.config_view.m_button_quit_dlg.Bind(wx.EVT_BUTTON, self.on_quit_config_dlg)
        # msg = self.model.display_dlg_msg()
        # self.show_dlg_msg(str=msg)
        self.model.display_dlg_msg()
        self.config_view.ShowModal()
    def on_quit_main_frame(self, evt):
        # Destroying the main frame ends the application's event loop.
        self.main_view.Destroy()
    def on_choose_dir(self, evt):
        # Delegate directory selection to the model.
        self.model.on_choose_dir()
    def on_choose_file(self, evt):
        # Delegate single-file selection to the model.
        self.model.on_choose_file()
    def on_upload_file(self, evt):
        # Delegate the upload of the chosen file/directory to the model.
        self.model.on_upload_file()
    # --------------------------------------------
    def on_config_ip(self, evt):
        """Forward the server address entered in the dialog to the model."""
        server_address = self.config_view.m_textCtrl_server_address.GetValue()
        self.model.on_config_ip(server_address)
    def on_config_file_path(self, evt):
        """Forward the remote target directory entered in the dialog to the model."""
        server_file_dir = self.config_view.m_textCtrl_server_file_dir.GetValue()
        self.model.on_config_file_path(server_file_dir)
    def on_config_password(self, evt):
        """Forward the password entered in the dialog to the model."""
        password = self.config_view.m_textCtrl_password.GetValue()
        self.model.on_config_password(password)
    def on_quit_config_dlg(self, evt):
        # Close the dialog and refresh the main-window status text.
        self.config_view.Destroy()
        self.model.display_main_msg()
    # --------------------------------------------
    def show_main_msg(self, str):
        # pubsub handler: display status text in the main window.
        self.main_view.show_main_msg(str)
    def show_dlg_msg(self, str):
        # pubsub handler: display status text in the config dialog.
        self.config_view.show_dlg_msg(str)
if __name__ == '__main__':
    app = wx.App()
    # Keep a reference so the controller (and its views) outlive this scope
    # while the event loop runs.
    frame = Controller(app)
    app.MainLoop()
|
[
"wu_yj@piec.com.cn"
] |
wu_yj@piec.com.cn
|
b834c33ade319d41d9a8a61c437fabc082d3bf4f
|
2527a4d313cf10e38a1efdd90ec8031aa6538be7
|
/examples/juniper_bring_up_interface/post_check_delete_disable_statement.py
|
9150fae9c45e7965bdc34edee455d4ec17c0d67b
|
[] |
no_license
|
syynack/moss
|
db4d9b2203494374b62891ab8cb32a033baa760b
|
5cc3cb1c99af3edf78a002d37ae02bdab7062da0
|
refs/heads/master
| 2021-03-27T20:08:00.479428
| 2018-05-08T17:42:56
| 2018-05-08T17:42:56
| 73,017,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,544
|
py
|
#! /usr/bin/env python
# Created by mcli.
# This file should be used as a template for any user created modules.
from moss import ModuleResult, execute_device_operation, register
# ModuleResult can be used to influence the outcome of a task.
# ModuleResult.end Module will be marked as successful but task will not continue
# ModuleResult.branch Module will branch to another module
# ModuleResult.fail Module will be marked as a failure and task will not continue
# ModuleResult.success Module will be marked as a success and will continue (this is implicit)
# ModuleResult.retry Module will be retried
# It is not required that a module result must be returned, by default the module will
# be marked as a success if not specified otherwise.
#
VENDOR = 'juniper'
@register(vendor = VENDOR)
def post_check_delete_disable_statement(connection, store):
    ''' Checks configuration to make sure the deactivation config has been applied '''
    # Ask the device whether a "disable" statement exists under the
    # interface stanza named in the task store's arguments.
    configuration_check_output = execute_device_operation(
        'juniper_check_configuration',
        connection,
        config_statements = ["disable"],
        area = 'interfaces {}'.format(store["arguments"]["interface"])
    )
    # The configuration check itself failed (e.g. config unreadable).
    if configuration_check_output["result"] == "fail":
        return ModuleResult.fail
    # The delete should have removed "disable"; finding it still present
    # means the change did not take effect.
    for statement in configuration_check_output["stdout"]["present_config_statements"]:
        if "disable;" in statement:
            return ModuleResult.fail
    return ModuleResult.success
|
[
"mlhome96@gmail.com"
] |
mlhome96@gmail.com
|
9692ae2f3ad46ef6e51bf5de0d8e41074f3d0c79
|
5141a0ec55675aebf6b2a3f2454022e3fd8ad639
|
/bin/ExpRunner
|
e4640a0b471ef966b328437e4779c8d8f89c1edf
|
[
"Apache-2.0"
] |
permissive
|
bbastardes/TractSeg
|
303871586c682983b451885e069a75822a6c09db
|
70714fca0e0dc241946ffc704f05b65095c7effb
|
refs/heads/master
| 2020-05-27T03:02:27.806224
| 2019-05-16T14:50:12
| 2019-05-16T14:50:12
| 188,459,492
| 0
| 0
| null | 2019-05-24T17:04:07
| 2019-05-24T17:04:06
| null |
UTF-8
|
Python
| false
| false
| 14,469
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is for training the model. See Readme.md for more details about training your own model.
How to use this module:
#Run local:
ExpRunner --config=XXX
#Run slurm cluster:
sbatch --job-name=XXX ~/runner.sh (runner.sh not provided)
#Predicting with new config setup:
ExpRunner --train=False --seg --lw --config=XXX
ExpRunner --train=False --test=True --lw --config=XXX
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
warnings.simplefilter("ignore", UserWarning) #hide scipy warnings
warnings.simplefilter("ignore", FutureWarning) #hide h5py warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed") #hide Cython benign warning
warnings.filterwarnings("ignore", message="numpy.ufunc size changed") #hide Cython benign warning
import os
import importlib
import argparse
import pickle
import time
from pprint import pprint
import distutils.util
from os.path import join
import nibabel as nib
import numpy as np
import pickle as pkl
from tractseg.libs import direction_merger
from tractseg.libs import exp_utils
from tractseg.libs import img_utils
from tractseg.libs import metric_utils
from tractseg.libs.system_config import SystemConfig as C
from tractseg.libs import trainer
from tractseg.data.data_loader_training import DataLoaderTraining as DataLoaderTraining2D
from tractseg.data.data_loader_training_3D import DataLoaderTraining as DataLoaderTraining3D
from tractseg.data.data_loader_inference import DataLoaderInference
from tractseg.models.base_model import BaseModel
def main():
    """Command-line entry point.

    Parses arguments, assembles the experiment ``Config`` (base config plus
    optional custom config plus CLI overrides), then runs training, testing,
    binary segmentation and/or probability-map export as requested.
    """
    parser = argparse.ArgumentParser(description="Train a network on your own data to segment white matter bundles.",
                                     epilog="Written by Jakob Wasserthal. Please reference 'Wasserthal et al. "
                                            "TractSeg - Fast and accurate white matter tract segmentation. "
                                            "https://doi.org/10.1016/j.neuroimage.2018.07.070)'")
    parser.add_argument("--config", metavar="name", help="Name of configuration to use")
    parser.add_argument("--train", metavar="True/False", help="Train network",
                        type=distutils.util.strtobool, default=True)
    parser.add_argument("--test", metavar="True/False", help="Test network",
                        type=distutils.util.strtobool, default=False)
    parser.add_argument("--seg", action="store_true", help="Create binary segmentation")
    parser.add_argument("--probs", action="store_true", help="Create probmap segmentation")
    parser.add_argument("--lw", action="store_true", help="Load weights of pretrained net")
    parser.add_argument("--en", metavar="name", help="Experiment name")
    parser.add_argument("--fold", metavar="N", help="Which fold to train when doing CrossValidation", type=int)
    parser.add_argument("--verbose", action="store_true", help="Show more intermediate output", default=True)
    args = parser.parse_args()
    # Start from the base Config; a custom experiment config replaces it entirely.
    Config = getattr(importlib.import_module("tractseg.experiments.base"), "Config")()
    if args.config:
        # Config.__dict__ does not work properly
        Config = getattr(importlib.import_module("tractseg.experiments.custom." + args.config), "Config")()
    # Apply command-line overrides on top of the loaded config.
    if args.en:
        Config.EXP_NAME = args.en
    Config.TRAIN = bool(args.train)
    Config.TEST = bool(args.test)
    if args.seg:
        Config.SEGMENT = True
    if args.probs:
        Config.GET_PROBS = True
    if args.lw:
        Config.LOAD_WEIGHTS = args.lw
    if args.fold:
        # NOTE(review): a fold index of 0 is falsy and would be silently ignored
        # here — confirm folds are 1-based before relying on this.
        Config.CV_FOLD= args.fold
    Config.VERBOSE = args.verbose
    Config.MULTI_PARENT_PATH = join(C.EXP_PATH, Config.EXP_MULTI_NAME)
    Config.EXP_PATH = join(C.EXP_PATH, Config.EXP_MULTI_NAME, Config.EXP_NAME)
    Config.TRAIN_SUBJECTS, Config.VALIDATE_SUBJECTS, Config.TEST_SUBJECTS = exp_utils.get_cv_fold(Config.CV_FOLD,
                                                                                                  dataset=Config.DATASET)
    if Config.WEIGHTS_PATH == "":
        Config.WEIGHTS_PATH = exp_utils.get_best_weights_path(Config.EXP_PATH, Config.LOAD_WEIGHTS)
    #Autoset input dimensions based on settings
    Config.INPUT_DIM = exp_utils.get_correct_input_dim(Config)
    Config = exp_utils.get_labels_filename(Config)
    # Peak regression predicts a 3D vector per bundle -> 3x the class count.
    if Config.EXPERIMENT_TYPE == "peak_regression":
        Config.NR_OF_CLASSES = 3*len(exp_utils.get_bundle_names(Config.CLASSES)[1:])
    else:
        Config.NR_OF_CLASSES = len(exp_utils.get_bundle_names(Config.CLASSES)[1:])
    if Config.TRAIN:
        Config.EXP_PATH = exp_utils.create_experiment_folder(Config.EXP_NAME, Config.MULTI_PARENT_PATH, Config.TRAIN)
    if Config.DIM == "2D":
        Config.EPOCH_MULTIPLIER = 1
    else:
        Config.EPOCH_MULTIPLIER = 3
    if Config.VERBOSE:
        print("Hyperparameters:")
        exp_utils.print_Configs(Config)
    # Persist the hyperparameters human-readable (txt) and re-loadable (pkl).
    with open(join(Config.EXP_PATH, "Hyperparameters.txt"), "w") as f:
        Config_dict = {attr: getattr(Config, attr) for attr in dir(Config)
                       if not callable(getattr(Config, attr)) and not attr.startswith("__")}
        pprint(Config_dict, f)
    Config = exp_utils.get_correct_labels_type(Config)  # do after saving Hyperparameters as txt
    pkl.dump(Config, open(join(Config.EXP_PATH, "Hyperparameters.pkl"), "wb"))
    def test_whole_subject(Config, model, subjects, type):
        """Run whole-subject inference over *subjects*, accumulate loss/f1
        metrics (overall and per bundle), average them, write them to
        score_<type>-set.txt / score_<type>.pkl and return the overall dict."""
        metrics = {
            "loss_" + type: [0],
            "f1_macro_" + type: [0],
        }
        # Metrics per bundle
        metrics_bundles = {}
        for bundle in exp_utils.get_bundle_names(Config.CLASSES)[1:]:
            metrics_bundles[bundle] = [0]
        for subject in subjects:
            print("{} subject {}".format(type, subject))
            start_time = time.time()
            data_loader = DataLoaderInference(Config, subject=subject)
            img_probs, img_y = trainer.predict_img(Config, model, data_loader, probs=True)
            # img_probs_xyz, img_y = DirectionMerger.get_seg_single_img_3_directions(Config, model, subject=subject)
            # img_probs = DirectionMerger.mean_fusion(Config.THRESHOLD, img_probs_xyz, probs=True)
            print("Took {}s".format(round(time.time() - start_time, 2)))
            if Config.EXPERIMENT_TYPE == "peak_regression":
                f1 = metric_utils.calc_peak_length_dice(Config, img_probs, img_y,
                                                        max_angle_error=Config.PEAK_DICE_THR,
                                                        max_length_error=Config.PEAK_DICE_LEN_THR)
                peak_f1_mean = np.array([s for s in f1.values()]).mean()  # if f1 for multiple bundles
                metrics = metric_utils.calculate_metrics(metrics, None, None, 0, f1=peak_f1_mean,
                                                         type=type, threshold=Config.THRESHOLD)
                metrics_bundles = metric_utils.calculate_metrics_each_bundle(metrics_bundles, None, None,
                                                                             exp_utils.get_bundle_names(Config.CLASSES)[1:],
                                                                             f1, threshold=Config.THRESHOLD)
            else:
                img_probs = np.reshape(img_probs, (-1, img_probs.shape[-1]))  #Flatten all dims except nrClasses dim
                img_y = np.reshape(img_y, (-1, img_y.shape[-1]))
                metrics = metric_utils.calculate_metrics(metrics, img_y, img_probs, 0,
                                                         type=type, threshold=Config.THRESHOLD)
                metrics_bundles = metric_utils.calculate_metrics_each_bundle(metrics_bundles, img_y, img_probs,
                                                                             exp_utils.get_bundle_names(Config.CLASSES)[1:],
                                                                             threshold=Config.THRESHOLD)
        # Average the accumulated metrics over all subjects.
        metrics = metric_utils.normalize_last_element(metrics, len(subjects), type=type)
        metrics_bundles = metric_utils.normalize_last_element_general(metrics_bundles, len(subjects))
        print("WHOLE SUBJECT:")
        pprint(metrics)
        print("WHOLE SUBJECT BUNDLES:")
        pprint(metrics_bundles)
        with open(join(Config.EXP_PATH, "score_" + type + "-set.txt"), "w") as f:
            pprint(metrics, f)
            f.write("\n\nWeights: {}\n".format(Config.WEIGHTS_PATH))
            f.write("type: {}\n\n".format(type))
            pprint(metrics_bundles, f)
        pickle.dump(metrics, open(join(Config.EXP_PATH, "score_" + type + ".pkl"), "wb"))
        return metrics
    model = BaseModel(Config)
    if Config.DIM == "2D":
        data_loader = DataLoaderTraining2D(Config)
    else:
        data_loader = DataLoaderTraining3D(Config)
    if Config.TRAIN:
        print("Training...")
        model = trainer.train_model(Config, model, data_loader)
    # After Training
    if Config.TRAIN:
        # Have to load other weights, because after training it has the weights of the last epoch
        print("Loading best epoch: {}".format(Config.BEST_EPOCH))
        Config.WEIGHTS_PATH = Config.EXP_PATH + "/best_weights_ep" + str(Config.BEST_EPOCH) + ".npz"
        Config.LOAD_WEIGHTS = True
        model.load_model(join(Config.EXP_PATH, Config.WEIGHTS_PATH))
        model_test = model
    else:
        # Weight_path already set to best model (when reading program parameters) -> will be loaded automatically
        model_test = model
    if Config.SEGMENT:
        exp_utils.make_dir(join(Config.EXP_PATH, "segmentations"))
        # all_subjects = Config.VALIDATE_SUBJECTS + Config.TEST_SUBJECTS
        all_subjects = ["599469"]  # NOTE(review): hard-coded single subject — presumably left in for debugging
        for subject in all_subjects:
            print("Get_segmentation subject {}".format(subject))
            if Config.EXPERIMENT_TYPE == "peak_regression":
                data_loader = DataLoaderInference(Config, subject=subject)
                img_probs, img_y = trainer.predict_img(Config, model_test, data_loader,
                                                       probs=True)  # only x or y or z
                img_seg = img_utils.peak_image_to_binary_mask(img_probs,
                                                              len_thr=0.4)  # thr: 0.4 slightly better than 0.2
            else:
                # returns probs not binary seg
                img_seg, img_y = direction_merger.get_seg_single_img_3_directions(Config, model, subject)
                img_seg = direction_merger.mean_fusion(Config.THRESHOLD, img_seg, probs=False)
            #TractSeg
            # img_utils.save_multilabel_img_as_multiple_files(Config, img_seg,
            #                                                 img_utils.get_dwi_affine(Config.DATASET, Config.RESOLUTION),
            #                                                 join(Config.EXP_PATH, "segmentations_" + subject))
            #Tract Beginnings and Endings
            # img_utils.save_multilabel_img_as_multiple_files_endings(Config, img_seg,
            #                                                         img_utils.get_dwi_affine(Config.DATASET, Config.RESOLUTION),
            #                                                         join(Config.EXP_PATH, "segmentations_" + subject))
            img = nib.Nifti1Image(img_seg.astype(np.uint8), img_utils.get_dwi_affine(Config.DATASET, Config.RESOLUTION))
            nib.save(img, join(Config.EXP_PATH, "segmentations", subject + "_segmentation.nii.gz"))
    if Config.TEST:
        test_whole_subject(Config, model_test, Config.VALIDATE_SUBJECTS, "validate")
        # test_whole_subject(Config, model_test, Config.TEST_SUBJECTS, "test")
    if Config.GET_PROBS:
        exp_utils.make_dir(join(Config.EXP_PATH, "probmaps"))
        # all_subjects = Config.TRAIN_SUBJECTS + Config.VALIDATE_SUBJECTS + Config.TEST_SUBJECTS
        all_subjects = ["599469", "992774", "994273"]  # NOTE(review): hard-coded subjects — presumably for debugging
        for subject in all_subjects:
            print("Get_probs subject {}".format(subject))
            data_loader = DataLoaderInference(Config, subject=subject)
            if Config.EXPERIMENT_TYPE == "peak_regression":
                # trainerSingle = Trainer(model_test, dataManagerSingle)
                img_probs, img_y = trainer.predict_img(Config, model_test, data_loader, probs=True)
                img_probs = img_utils.remove_small_peaks(img_probs, len_thr=0.4)
            else:
                img_probs, img_y = direction_merger.get_seg_single_img_3_directions(Config, model, subject=subject)
                img_probs = direction_merger.mean_fusion(Config.THRESHOLD, img_probs, probs=True)
            # img_utils.save_multilabel_img_as_multiple_files_peaks(Config, img_probs,
            #                                                       img_utils.get_dwi_affine(Config.DATASET, Config.RESOLUTION),
            #                                                       join(Config.EXP_PATH, "probmaps_" + subject))
            # img_utils.save_multilabel_img_as_multiple_files(Config, img_probs,
            #                                                 img_utils.get_dwi_affine(Config.DATASET, Config.RESOLUTION),
            #                                                 join(Config.EXP_PATH, "probmaps_" + subject))
            img = nib.Nifti1Image(img_probs, img_utils.get_dwi_affine(Config.DATASET, Config.RESOLUTION))
            nib.save(img, join(Config.EXP_PATH, "probmaps", subject + "_peak.nii.gz"))
    # Optional post-hoc evaluation on non-HCP data if the external script exists.
    eval_script_path = join(os.path.expanduser("~"), "dev/bsp/eval_nonHCP.py")
    if os.path.exists(eval_script_path):
        print("Evaluating on non HCP data...")
        os.system("python " + eval_script_path + " " + Config.EXP_NAME)
if __name__ == '__main__':
    main()
|
[
"j.wasserthal@dkfz.de"
] |
j.wasserthal@dkfz.de
|
|
8b6ffcd96094ac867fd890ed8f60f53285d739e4
|
90bce847e6b10f5a71c88231059b3ab8d35d9f0a
|
/4.Talleres/Taller2/EjemploEntradaP3.py
|
adcdf9c734954e3616388812406163ec273048da
|
[] |
no_license
|
smonsalve/ST0240-20162
|
66f929e7e541845df658206d25640367eb03f379
|
74ee1f2c9d2106fb7ec7a736b6f33d70325219b4
|
refs/heads/master
| 2020-04-13T04:02:19.854744
| 2016-10-07T19:27:42
| 2016-10-07T19:27:42
| 61,818,937
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
t = int(raw_input())  # number of test cases (Python 2 input)
def zanb(lista):
    """Placeholder: should compute the number of imported turtles for *lista*.

    Currently always returns 0 — the algorithm is left as an exercise.
    """
    imported = 0
    # TODO: write the algorithm that counts the imported turtles here
    return imported
for i in range(t):
    # Read one line of space-separated integers per test case.
    # a = [int(x) for x in raw_input().split()]  # this line does the same as the following lines
    a = raw_input()
    b = a.split()
    c = []
    for item in b:
        d = int(item)
        c.append(d)
    #print c
    print zanb(c)
|
[
"smonsalve@gmail.com"
] |
smonsalve@gmail.com
|
87c550f3b569961d6e961f57d086f3d2d140b32b
|
1f38af9bae11acbe20dd8f5057b374b9760e6659
|
/pyscf/grad/ucisd.py
|
74df32da3f812e9fb33e3a66d1db2e66c274c27d
|
[
"Apache-2.0"
] |
permissive
|
highlight0112/pyscf
|
d36104ef727f593d46fbfd3e5d865c6cd0316d84
|
4afbd42bad3e72db5bb94d8cacf1d5de76537bdd
|
refs/heads/master
| 2020-03-25T01:16:59.927859
| 2019-03-06T01:11:59
| 2019-03-06T01:11:59
| 143,229,588
| 0
| 0
|
Apache-2.0
| 2019-03-06T01:12:00
| 2018-08-02T02:05:59
|
Python
|
UTF-8
|
Python
| false
| false
| 4,303
|
py
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
UCISD analytical nuclear gradients
'''
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.ci import ucisd
from pyscf.grad import cisd as cisd_grad
from pyscf.grad import uccsd as uccsd_grad
def kernel(myci, civec=None, eris=None, atmlst=None, mf_grad=None,
           verbose=logger.INFO):
    """Analytical nuclear gradients for UCISD.

    Builds the CISD one- and two-particle density-matrix intermediates,
    symmetrizes/packs the (vv|vv) blocks, and delegates the actual gradient
    contraction to the UCCSD gradient kernel, passing the CI vector in place
    of the t1/t2/l1/l2 amplitudes.
    """
    if civec is None: civec = myci.ci  # bugfix: was `mycc.ci` — NameError, `mycc` is undefined here
    nocc = myci.nocc
    nmo = myci.nmo
    d1 = ucisd._gamma1_intermediates(myci, civec, nmo, nocc)
    d2 = ucisd._gamma2_intermediates(myci, civec, nmo, nocc)
    dovov, dovOV, dOVov, dOVOV = d2[0]
    dvvvv, dvvVV, dVVvv, dVVVV = d2[1]
    doooo, dooOO, dOOoo, dOOOO = d2[2]
    doovv, dooVV, dOOvv, dOOVV = d2[3]
    dovvo, dovVO, dOVvo, dOVVO = d2[4]
    dvvov, dvvOV, dVVov, dVVOV = d2[5]
    dovvv, dovVV, dOVvv, dOVVV = d2[6]
    dooov, dooOV, dOOov, dOOOV = d2[7]
    nocca, nvira, noccb, nvirb = dovOV.shape
    # Symmetrize and pack the virtual-virtual blocks to 4-fold symmetry to save memory.
    dvvvv = dvvvv + dvvvv.transpose(1,0,2,3)
    dvvvv = ao2mo.restore(4, dvvvv, nvira) * .5
    dvvVV = dvvVV + dvvVV.transpose(1,0,2,3)
    dvvVV = lib.pack_tril(dvvVV[numpy.tril_indices(nvira)]) * .5
    dVVVV = dVVVV + dVVVV.transpose(1,0,2,3)
    dVVVV = ao2mo.restore(4, dVVVV, nvirb) * .5
    d2 = ((dovov, dovOV, dOVov, dOVOV),
          (dvvvv, dvvVV, dVVvv, dVVVV),
          (doooo, dooOO, dOOoo, dOOOO),
          (doovv, dooVV, dOOvv, dOOVV),
          (dovvo, dovVO, dOVvo, dOVVO),
          (dvvov, dvvOV, dVVov, dVVOV),
          (dovvv, dovVV, dOVvv, dOVVV),
          (dooov, dooOV, dOOov, dOOOV))
    # CISD has no separate lambda amplitudes: reuse the CI vector for all four.
    t1 = t2 = l1 = l2 = civec
    return uccsd_grad.kernel(myci, t1, t2, l1, l2, eris, atmlst, mf_grad,
                             d1, d2, verbose)
class Gradients(cisd_grad.Gradients):
    """Gradient driver for UCISD.

    Reuses the restricted-CISD gradient machinery but swaps in the
    UCISD-specific `kernel` defined above via the `_kern` hook.
    """
    def kernel(self, civec=None, eris=None, atmlst=None, mf_grad=None,
               verbose=None):
        # Delegate to the CISD driver; `_kern` injects this module's kernel.
        return cisd_grad.Gradients.kernel(self, civec, eris, atmlst, mf_grad,
                                          verbose, _kern=kernel)
Grad = Gradients  # backward-compatible alias
ucisd.UCISD.Gradients = lib.class_as_method(Gradients)  # enables myci.Gradients()
if __name__ == '__main__':
    from pyscf import gto
    from pyscf import scf
    from pyscf import ao2mo
    # Smoke test 1: triplet water, all orbitals active; compare the gradient
    # fingerprint against a stored reference value.
    mol = gto.M(
        atom = [
            ["O" , (0. , 0.     , 0.)],
            [1   , (0. ,-0.757  , 0.587)],
            [1   , (0. , 0.757  , 0.587)]],
        basis = '631g',
        spin = 2,
    )
    mf = scf.UHF(mol).run()
    myci = ucisd.UCISD(mf).run()
    g1 = myci.Gradients().kernel()
# Reference gradient (hartree/bohr):
# O     0.0000000000    -0.0000000000     0.1456473095
# H    -0.0000000000     0.1107223084    -0.0728236548
# H     0.0000000000    -0.1107223084    -0.0728236548
    print(lib.finger(g1) - -0.22651886837710072)
    print('-----------------------------------')
    # Smoke test 2: same molecule with frozen orbitals and tight memory cap.
    mol = gto.M(
        atom = [
            ["O" , (0. , 0.     , 0.)],
            [1   , (0. ,-0.757  , 0.587)],
            [1   , (0. , 0.757  , 0.587)]],
        basis = '631g',
        spin = 2,
    )
    mf = scf.UHF(mol).run()
    myci = ucisd.UCISD(mf)
    myci.frozen = [0,1,10,11,12]
    myci.max_memory = 1
    myci.kernel()
    g1 = Gradients(myci).kernel()
# Reference gradient:
# O    -0.0000000000    -0.0000000000     0.1540204772
# H     0.0000000000     0.1144196177    -0.0770102386
# H     0.0000000000    -0.1144196177    -0.0770102386
    print(lib.finger(g1) - -0.23578589551312196)
    # Smoke test 3: stretched H2 in Bohr units.
    mol = gto.M(
        atom = 'H 0 0 0; H 0 0 1.76',
        basis = '631g',
        unit='Bohr')
    mf = scf.UHF(mol).run(conv_tol=1e-14)
    myci = ucisd.UCISD(mf)
    myci.conv_tol = 1e-10
    myci.kernel()
    g1 = Gradients(myci).kernel()
# Reference gradient:
#[[ 0.  0. -0.07080036]
# [ 0.  0.  0.07080036]]
|
[
"osirpt.sun@gmail.com"
] |
osirpt.sun@gmail.com
|
039bf8cf22fcf317f864eef838cb0f33bc452080
|
155fa6aaa4ef31cc0dbb54b7cf528f36743b1663
|
/Exam/project/baked_food/bread.py
|
c8aab8182c29a939a53facfa397544e410d27da2
|
[] |
no_license
|
GBoshnakov/SoftUni-OOP
|
efe77b5e1fd7d3def19338cc7819f187233ecab0
|
0145abb760b7633ca326d06a08564fad3151e1c5
|
refs/heads/main
| 2023-07-13T18:54:39.761133
| 2021-08-27T08:31:07
| 2021-08-27T08:31:07
| 381,711,275
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
from project.baked_food.baked_food import BakedFood
class Bread(BakedFood):
    """A bread product: a baked food whose portion size is fixed at 200."""
    def __init__(self, name: str, price: float) -> None:
        # 200 is the fixed portion passed to BakedFood
        # (presumably grams — confirm against BakedFood's constructor).
        super().__init__(name, 200, price)
|
[
"boshnakov.g@gmail.com"
] |
boshnakov.g@gmail.com
|
60d6d4f3fdc42b1fe667839a45df723147035b3a
|
a058b1a86822a3ebd324908b4cf07529b9bb615b
|
/make_data/gen_data.py
|
8ce63d53a120ace052e493870069b801cb0744e0
|
[] |
no_license
|
powern90/Category-Model
|
8f085640782313de2ec85b75b248374b113944c9
|
664474eccf08d319a58659ab21d8380aeab0b7de
|
refs/heads/main
| 2023-05-08T20:55:55.435135
| 2021-06-07T10:29:11
| 2021-06-07T10:29:11
| 374,625,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,370
|
py
|
from konlpy.tag import Mecab
import pandas as pd
import pymysql
import re
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
# Word list and embedding vectors (tab-separated, embedding-projector style exports).
metadata = pd.read_csv("metadata_1.tsv", delimiter='\t')
vector = pd.read_csv("vectors_1.tsv", delimiter='\t')
# One stop word per line; strip the trailing newline from each.
with open('stop_words.csv', 'r', encoding='utf-8') as f:
    stop_words = []
    while x := f.readline():
        stop_words.append(x.replace('\n', ''))
# POS tags to keep — presumably Mecab-ko noun/root tags (confirm against tagset).
tags = ['NNG', 'NNP', 'NNB', 'XR']
def get_vector(sentence):
    """Return the embedding vectors (lists of floats) for the kept words of *sentence*.

    The text is stripped of HTML-like tags, square brackets and CR/LF, then
    POS-tagged with the module-level ``mecab``. A token contributes a vector
    when its tag is in ``tags``, it is not a stop word, and it matches exactly
    one row of ``metadata`` (that row index selects the row of ``vector``).
    """
    cleaned = re.sub('<.+?>', '', sentence, 0, re.I | re.S)
    for junk in ('[', ']', '\r', '\n'):
        cleaned = cleaned.replace(junk, '')
    vectors = []
    for word, tag in mecab.pos(cleaned):
        matches = metadata.index[metadata['word'] == word].tolist()
        if tag in tags and len(matches) == 1 and word not in stop_words:
            vectors.append(vector.iloc[matches[0]].tolist())
    return vectors
if __name__ == "__main__":
data = []
label = []
count = {}
conn = pymysql.connect(host='192.168.1.10', user='gajok', password='1234!', charset='utf8', db='crolls')
cursor = conn.cursor()
sql = "SELECT title, content, etc FROM data_set2"
cursor.execute(sql)
res = cursor.fetchall()
mecab = Mecab()
for one in tqdm(res):
article = get_vector(one[0])
article += get_vector(one[1])
word_count = len(article)
if word_count > 40:
data.append(article[0:40])
elif word_count < 40:
for i in range(0, 40-word_count):
article.append([0 for j in range(0, 16)])
data.append(article)
else:
data.append(article)
if one[2].split(',')[0] == "장애인":
label.append([1, 0, 0, 0, 0])
elif one[2].split(',')[0] == "저소득":
label.append([0, 1, 0, 0, 0])
elif one[2].split(',')[0] == "다문화":
label.append([0, 0, 1, 0, 0])
elif one[2].split(',')[0] == "고령자":
label.append([0, 0, 0, 1, 0])
elif one[2].split(',')[0] == "한부모":
label.append([0, 0, 0, 0, 1])
data = np.array(data)
label = np.array(label)
np.save("data2", data)
np.save("label2", label)
plt.plot(count.values(), count.keys())
plt.show()
|
[
"rlghd1996@gmail.com"
] |
rlghd1996@gmail.com
|
b40d7372d41bbefde5b77bb5572d16513ce31e82
|
eed1701c99b2fdf778f24898c793c77f3dfcd9eb
|
/a32_nbc/mnb_spam.py
|
f96064fdabf37cb723d42c1770a925dcc3cf033f
|
[] |
no_license
|
hnmspirit/mlcb
|
b47eef614229752d0cef1e24336b3c5c39c1e624
|
1a27462053b003c5631526040734e845744a3f76
|
refs/heads/master
| 2020-06-27T21:56:34.282786
| 2020-03-01T05:04:48
| 2020-03-01T05:04:48
| 200,060,596
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
import os
from os.path import join
import numpy as np
from scipy.sparse import coo_matrix as cmat
from sklearn.naive_bayes import MultinomialNB as MNB, BernoulliNB as BNB
from sklearn.metrics import accuracy_score
# Paths to the preprocessed spam dataset: each feature line is a
# (document, word, count) triple with 1-based indices.
path = 'ex6_prep/'
X_trn_fn = join(path, 'train-features.txt')
y_trn_fn = join(path, 'train-labels.txt')
X_tst_fn = join(path, 'test-features.txt')
y_tst_fn = join(path, 'test-labels.txt')
nwords = 2500  # vocabulary size of the bag-of-words representation
def read_data(X_fn, y_fn):
    """Load a sparse bag-of-words matrix and its label vector from text files.

    Each row of *X_fn* holds ``(doc, word, count)`` with 1-based indices; the
    result is a COO matrix of shape ``(n_docs, nwords)`` plus the label array
    loaded from *y_fn*.
    """
    triples = np.loadtxt(X_fn)
    labels = np.loadtxt(y_fn)
    doc_idx = triples[:, 0] - 1   # convert to 0-based rows
    word_idx = triples[:, 1] - 1  # convert to 0-based columns
    counts = triples[:, 2]
    features = cmat((counts, (doc_idx, word_idx)), shape=(len(labels), nwords))
    return features, labels
# Load the pre-vectorised train/test splits.
X_trn, y_trn = read_data(X_trn_fn, y_trn_fn)
X_tst, y_tst = read_data(X_tst_fn, y_tst_fn)
print('train size: ', y_trn.shape)
print('test size: ', y_tst.shape)
# Fit a multinomial Naive Bayes spam classifier and report test accuracy.
model = MNB()
model.fit(X_trn, y_trn)
y_prd = model.predict(X_tst)
score = accuracy_score(y_tst, y_prd)
print('score: ', score)
|
[
"52270404+hnmspirit@users.noreply.github.com"
] |
52270404+hnmspirit@users.noreply.github.com
|
8ead285ef7462584e08b05468fc7e005a53f560f
|
25ebf19f163e707f9adbac81503a65912248c614
|
/CompareView.pyw
|
fad96963b88809b8426bcbade5b5add21f5bfc0d
|
[] |
no_license
|
clalanliu/CompareView
|
373bd2d40f7b7fa15225ab55b263d0d8efebba6b
|
a1f2e4b415d92260323d2c26e599b3a2182775a1
|
refs/heads/master
| 2020-03-18T06:14:51.055388
| 2019-10-07T06:38:14
| 2019-10-07T06:38:14
| 134,383,904
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,562
|
pyw
|
import tkinter as tk
from tkinter import ttk
from tkinter.filedialog import askopenfilename
import glob
import Util, GlobalVar,ImgSeries,ThumbNailSeries
# root and frame container
GlobalVar.width = 540
GlobalVar.height =360
root = tk.Tk()
root.title('CompareView')
root.geometry("540x360")
GlobalVar.DefClr = root.cget("bg")  # remember the default widget background colour
imgSeri = ImgSeries.ImgSeries(root)  # main image-series view
# Thumbnail window: a secondary top-level window docked at the right screen edge.
thumbWin = tk.Toplevel(root)
thumbWin.geometry("%dx%d+%d+%d" % (240, root.winfo_screenheight(), root.winfo_screenwidth()-240, 0))
thumbWin.title('Thumbnails')
thumbWin.protocol('WM_DELETE_WINDOW', lambda:None)  # disable closing the thumbnail window
thumbWin.resizable(False, False)
thumbNails = ThumbNailSeries.ThumbNailSeries(thumbWin, imgSeri)
### mouse and keyboard event
def HotkeySetRef(event):
    # 's': mark the current image as the comparison reference
    imgSeri.SetReference()
def HotkeyMSE(event):
    # 'm': compare the current image against the reference via MSE
    imgSeri.CompareMSE()
def HotkeyPSNR(event):
    # 'p': compare the current image against the reference via PSNR
    imgSeri.ComparePSNR()
def HotkeyOpenFile(event):
    # Ctrl+O: open a file and add its thumbnail
    thumbNails.AddTN(imgSeri.OpenFile())
def LeftKey(event):
    # step to the previous image in both the main view and the thumbnails
    imgSeri.Last()
    thumbNails.Last()
def RightKey(event):
    # step to the next image in both views
    imgSeri.Next()
    thumbNails.Next()
def DeleteKey(event):
    # remove the current image from both views
    imgSeri.Delete()
    thumbNails.Delete()
def Reload(event):
    # reload the image series in both views
    imgSeri.Reload()
    thumbNails.Reload()
def MouseWheel(event):
    global mouseCount  # NOTE(review): mouseCount is never used in this function
    # respond to Linux or Windows wheel event
    if event.num == 5 or event.delta == -120:
        RightKey(event)
    if event.num == 4 or event.delta == 120:
        LeftKey(event)
# menu bar
menubar = tk.Menu(root)
filemenu = tk.Menu(menubar, tearoff=0)
compmenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label = 'Open', command = lambda: thumbNails.AddTN(imgSeri.OpenFile()),accelerator='Ctrl+O')
# bugfix: was `command = lambda:Reload`, which returns the function object when
# clicked and never reloads anything. Call it instead; Reload ignores its
# event argument, so None is fine.
filemenu.add_command(label = 'Reload', command = lambda: Reload(None),accelerator='R')
filemenu.add_command(label = 'Help', command = Util.HelpCallBack)
filemenu.add_command(label = 'Exit', command = lambda:exit())
menubar.add_cascade(label = 'File', menu = filemenu)
# Comparison operations mirror the single-key hotkeys bound below.
compmenu.add_command(label = 'ZoomIn', command = imgSeri.ZoomIn,accelerator='Z')
compmenu.add_command(label = 'ZoomOut', command = imgSeri.ZoomBack)
compmenu.add_command(label = 'SetReference', command = imgSeri.SetReference,accelerator='S')
compmenu.add_command(label = 'MSE', command = imgSeri.CompareMSE,accelerator='M')
compmenu.add_command(label = 'PSNR', command = imgSeri.ComparePSNR,accelerator='P')
compmenu.add_command(label = 'SSIM', command = imgSeri.CompareSSIM)
menubar.add_cascade(label = 'Compare', menu = compmenu)
root.config(menu=menubar)
### Hot key
root.bind_all('<Control-O>', HotkeyOpenFile)
root.bind_all('<Control-o>', HotkeyOpenFile)
root.bind_all('<S>', HotkeySetRef)
root.bind_all('<s>', HotkeySetRef)
root.bind_all('<M>', HotkeyMSE)
root.bind_all('<m>', HotkeyMSE)
root.bind_all('<P>', HotkeyPSNR)
root.bind_all('<p>', HotkeyPSNR)
### switch
# Arrow keys / delete / reload / mouse wheel navigate the image series;
# the same bindings are installed on the thumbnail window below.
root.bind_all('<Left>', LeftKey)
root.bind_all('<Right>', RightKey)
root.bind_all('<Up>', LeftKey)
root.bind_all('<Down>', RightKey)
root.bind_all('<Delete>', DeleteKey)
root.bind_all('<R>', Reload)
root.bind_all('<r>', Reload)
root.bind_all('<MouseWheel>', MouseWheel)
thumbWin.bind_all('<Left>', LeftKey)
thumbWin.bind_all('<Right>', RightKey)
thumbWin.bind_all('<Up>', LeftKey)
thumbWin.bind_all('<Down>', RightKey)
thumbWin.bind_all('<Delete>', DeleteKey)
thumbWin.bind_all('<R>', Reload)
thumbWin.bind_all('<r>', Reload)
thumbWin.bind_all('<MouseWheel>', MouseWheel)
### zoom
# Double-click zooms out, 'z' zooms in; digit keys presumably clone the
# zoom region across viewers — confirm in ImgSeries.ZoomClone.
root.bind('<Double-Button-1>', imgSeri.ZoomBack)
root.bind("<z>", imgSeri.ZoomIn)
root.bind("<Z>", imgSeri.ZoomIn)
for i in range(9):
    root.bind(str(i), imgSeri.ZoomClone)
root.mainloop()
|
[
"b05901017@ntu.edu.tw"
] |
b05901017@ntu.edu.tw
|
b3242bf14d3c53d2d0cb82d43a20ee2c7a29b957
|
1f0557882b8e545625ad3f6f33bfe62647a010b1
|
/main.py
|
0d5de2199e65b133821b8f6aabdaefe1ab2d90d4
|
[] |
no_license
|
imdeepansht/DRS-Review-System
|
a026368960e5e6ea2e49b7b2d63cd64159996249
|
091675f21043b731e5e6eb1d509f10aff3c70097
|
refs/heads/main
| 2023-06-04T18:30:46.496304
| 2021-06-22T07:53:10
| 2021-06-22T07:53:10
| 379,182,675
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,234
|
py
|
import tkinter
import cv2  # pip install opencv-python
import PIL.Image, PIL.ImageTk  # pip install pillow
from functools import partial
import threading
import time
import imutils  # pip install imutils
stream = cv2.VideoCapture("VIDEO.mp4")  # reviewed delivery footage
flag = True  # toggled every play() call to blink the "Decision Pending" banner
def play(speed):
    """Step the video by *speed* frames (negative rewinds) and draw the frame
    on the canvas, blinking a "Decision Pending" banner on alternate calls."""
    global flag
    print("You clicked on play. Speed is ", speed)
    # Play the video in reverse mode
    frame1 = stream.get(cv2.CAP_PROP_POS_FRAMES)
    stream.set(cv2.CAP_PROP_POS_FRAMES, frame1 + speed)
    grabbed, frame = stream.read()
    if not grabbed:
        exit()  # end of stream: quit the application
    # NOTE(review): unlike the verdict images below, this frame is not converted
    # BGR->RGB before handing it to PIL, so colours may appear swapped — confirm.
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
    frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    canvas.image = frame  # keep a reference so Tk doesn't garbage-collect the image
    canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
    if flag:
        canvas.create_text(134, 26, fill="black", font="Times 26 bold", text="Decision Pending")
    flag = not flag
def pending(decision):
    """Show the 'decision pending' splash, wait, then display the verdict image
    ('out' -> out.png, anything else -> NotOut.png). Runs on a worker thread."""
    # 1. Display decision pending image
    frame = cv2.cvtColor(cv2.imread("DECISIONPENDING.jpg"), cv2.COLOR_BGR2RGB)
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
    frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    canvas.image = frame
    canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
    # 2. Wait for 1 second
    time.sleep(1.5)
    # 4. Wait for 1.5 second
    time.sleep(2.5)
    # NOTE(review): the sleep durations do not match the numbered step comments
    # above, and step 3 is missing — confirm the intended timings.
    # 5. Display out/notout image
    if decision == 'out':
        decisionImg = ("out.png")
    else:
        decisionImg = ("NotOut.png")
    frame = cv2.cvtColor(cv2.imread(decisionImg), cv2.COLOR_BGR2RGB)
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
    frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    canvas.image = frame
    canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
def out():
    """Give the batsman OUT: show the verdict asynchronously via pending()."""
    thread = threading.Thread(target=pending, args=("out",))
    thread.daemon = 1
    thread.start()
    print("Player is out")
    # bugfix: removed `cv2.imshow("out.png")` — cv2.imshow requires a window
    # name AND an image array, so the call raised TypeError; pending() already
    # displays the verdict image on the Tk canvas.
def not_out():
    """Give the batsman NOT OUT: show the verdict asynchronously via pending()."""
    thread = threading.Thread(target=pending, args=("not out",))
    thread.daemon = 1
    thread.start()
    print("Player is not out")
    # bugfix: removed `cv2.imshow("NotOut.png")` — cv2.imshow requires a window
    # name AND an image array, so the call raised TypeError; pending() already
    # displays the verdict image on the Tk canvas.
# Width and height of our main screen
SET_WIDTH = 650
SET_HEIGHT = 368
# Tkinter gui starts here
window = tkinter.Tk()
window.title("DRS System")
cv_img = cv2.cvtColor(cv2.imread("DRS.png"), cv2.COLOR_BGR2RGB)
canvas = tkinter.Canvas(window, width=SET_WIDTH, height=SET_HEIGHT)
photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))
# fix: the keyword was misspelled `ancho=`, silently relying on Tk's
# option-name abbreviation; spell out `anchor=` explicitly.
image_on_canvas = canvas.create_image(0, 0, anchor=tkinter.NW, image=photo)
canvas.pack()
# Buttons to control playback
btn = tkinter.Button(window, text="<< Previous (fast)", width=50, command=partial(play, -25))
btn.pack()
btn = tkinter.Button(window, text="<< Previous (slow)", width=50, command=partial(play, -2))
btn.pack()
btn = tkinter.Button(window, text="Next (slow) >>", width=50, command=partial(play, 2))
btn.pack()
btn = tkinter.Button(window, text="Next (fast) >>", width=50, command=partial(play, 25))
btn.pack()
btn = tkinter.Button(window, text="Give Out", width=50, command=out)
btn.pack()
btn = tkinter.Button(window, text="Give Not Out", width=50, command=not_out)
btn.pack()
window.mainloop()
|
[
"noreply@github.com"
] |
imdeepansht.noreply@github.com
|
2ed84ab34e0aedb307ed52ec2436fdd561b03530
|
949c118b663b626ce8e9966b45226bb506da9939
|
/facebooklikes/__init__.py
|
f80d7b526864ccbe41d34c1ae91d7e776b6832ca
|
[
"MIT"
] |
permissive
|
sloev/facebooklikes
|
6d4b3ecfcea854a72e6fee6829986d639e031359
|
1e9a29db63d5310ed27fdd9527878a53934dec68
|
refs/heads/master
| 2021-07-07T01:38:52.302443
| 2020-09-02T21:43:57
| 2020-09-02T21:43:57
| 170,492,814
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
__author__='johannes.valbjorn+facebooklikes@gmail.com'
__version__='0.1.1'
import urllib.parse
import urllib.request
# Suffix multipliers used by Facebook's abbreviated like counts ("1.2k", "3m").
__unit_lookup = {
    'k':1000,
    'm':1000000
}
def get_facebook_likes_for_page(page='OfficialEyeHateGod'):
    """Scrape the public like count for a Facebook page from the like-button plugin.

    Returns a float when the count is abbreviated (e.g. "1.2k" -> 1200.0),
    otherwise an int. NOTE(review): relies on the plugin HTML containing
    "<count> people like" inside a span — fragile against markup changes.
    """
    url = "https://www.facebook.com/plugins/like.php?locale=en_US&{}%2F&width=450&layout=standard&action=like&size=small&show_faces=false&share=false&height=35&appId"
    args = { 'href': 'https://www.facebook.com/{}'.format(page)}
    encoded_args = urllib.parse.urlencode(args)
    source = urllib.request.urlopen(url.format(encoded_args)).read().decode('ascii', 'ignore')
    # The text just before "people like" ends with the count, e.g. "... 1.2k".
    likes = source[:source.index('people like')].rsplit('span>', 1)[1].strip().lower()
    unit = likes[-1]
    if not unit.isdigit():
        # Abbreviated count: split off the unit suffix and scale by its multiplier.
        likes, unit = likes[:-1], likes[-1:]
        multiplier = __unit_lookup[unit]
        likes = float(likes) * multiplier
        return likes
    return int(likes)
def get_facebook_likes_from_facebook_url(facebook_url):
    """Extract the page name from a full Facebook URL and look up its likes.

    A single trailing slash is tolerated; the last path component is taken
    to be the page name.
    """
    trimmed = facebook_url[:-1] if facebook_url.endswith('/') else facebook_url
    page_name = trimmed.rsplit('/', 1)[1]
    return get_facebook_likes_for_page(page_name)
|
[
"666@nabovarme.dk"
] |
666@nabovarme.dk
|
f240b0a0fff15b9d505a726b8e0ac80da6530f31
|
70739cea3abb3dc45beb71d2df776830dce5e01d
|
/Tagger/taggersql.py
|
6d4ecd7375642ad09bb38c0213d088a61a698a0b
|
[
"MIT"
] |
permissive
|
Jonas-Wennerstrom/Tagger
|
f5b6e6ab6cd0c653631dd74f8ed6c9ad9af62b6f
|
4720304a07e58c7c4d390ff0c0703d936d195187
|
refs/heads/main
| 2023-01-31T19:37:01.111265
| 2020-12-18T08:07:16
| 2020-12-18T08:07:16
| 317,174,833
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,374
|
py
|
from sqlalchemy import insert, select, asc, func, exists
from taggermodels import *
##Selection
#Selects a subset of File
def select_file(session, taglist):
    """Return the File rows that are tagged with *every* tag in taglist.

    Joins File to its tags, keeps rows whose tag name is in taglist, and
    groups by title requiring the match count to equal len(taglist), i.e.
    all requested tags must be present.

    Parameters:
        session: An SQLAlchemy database session.
        taglist (string list): A list of tag names.

    Returns:
        list of File objects matching all tags in taglist.
    """
    matches = (
        session.query(File)
        .join(File.contains)
        .filter(Tag.name.in_(taglist))
        .group_by(File.title)
        .having(func.count() == len(taglist))
        .all()
    )
    return matches
def get_tags_from_names(session, taglist):
    """Fetch the Tag objects whose names appear in *taglist*."""
    name_filter = Tag.name.in_(taglist)
    return session.query(Tag).filter(name_filter).all()
def get_file(session, title):
    """Return the single File object titled *title* (or None if absent)."""
    by_title = session.query(File).filter(File.title == title)
    return by_title.scalar()
def get_all_file_titles(session):
    """Return every File.title value in the database (list of 1-tuples)."""
    title_query = session.query(File.title)
    return title_query.all()
def get_all_tag_names(session):
    """Return every Tag.name value in the database (list of 1-tuples)."""
    name_query = session.query(Tag.name)
    return name_query.all()
##Insertion
def insert_file(session, fileinfo, taglist):
    """Insert a new File entry and associate it with the named tags.

    Parameters:
        session: An SQLAlchemy database session.
        fileinfo (list): [length, title, link] for the new File row.
        taglist (string list): Names of tags to attach to the new file.

    Returns:
        True on success; False if a file with the same title already exists.

    Side-effects:
        Adds one File row and the Match associations for each existing tag
        named in taglist, then commits.
    """
    # Guard clause: titles must be unique.
    if file_exists(session, fileinfo[1]):
        return False
    length, title, link = fileinfo[0], fileinfo[1], fileinfo[2]
    fresh_file = File(length=length, title=title, link=link)
    for tag_obj in get_tags_from_names(session, taglist):
        fresh_file.contains.append(tag_obj)
    session.add(fresh_file)
    session.commit()
    return True
def insert_tag(session, taglist):
    """Insert each name in *taglist* into table Tag, skipping duplicates.

    Parameters:
        session: An SQLAlchemy database session.
        taglist (string list): Candidate tag names.

    Returns:
        Sorted list of the names that already existed (and were skipped).

    Side-effects:
        Bulk-inserts the non-duplicate names into Tag and commits.
    """
    fresh = []
    duplicates = []
    for candidate in taglist:
        (duplicates if tag_exists(session, candidate) else fresh).append(candidate)
    session.execute(Tag.__table__.insert(),
                    [{"name": name} for name in fresh])
    session.commit()
    return sorted(duplicates)
##Deletion
def delete_tag(session,taglist):
    """Deletes all 'tags' in taglist from session.

    Parameters:
        session: An SQLAlchemy database session.
        taglist (string list): A list of tag names.

    Side-effects:
        All entries in table Tag in session with name in taglist
    deleted.
        All entries in table Match in session matching tags in
    taglist deleted.
    """
    for t in taglist:
        # bugfix: the original called session.query(Tag.name == t), which
        # queries the *boolean expression* rather than the Tag entity, so the
        # intended rows were never matched. Query the entity and filter instead.
        session.query(Tag).filter(Tag.name == t).delete()
    session.commit()
def delete_file(session, title):
    """Remove the File row titled *title* from the database.

    Parameters:
        session: An SQLAlchemy database session.
        title (string): Title of the file to delete.

    Side-effects:
        Deletes the matching File row (and its Match associations) and commits.
    """
    doomed = session.query(File).filter(File.title == title)
    doomed.delete()
    session.commit()
def cleanup_files(session):
    """Deletes every File row that has no remaining Match entry.

    Parameters:
        session: An SQLAlchemy database session.

    Side-effects:
        All entries in table File whose id does not appear in
        Match.file_id are deleted; the change is committed.
    """
    orphan_ids = session.query(File.id).filter(~File.contains.any()).all()
    if orphan_ids:
        session.execute(File.__table__.delete(),
                        [{"id": row[0]} for row in orphan_ids])
    session.commit()
##Confirm functions
#These functions check if data exists in db
def file_exists(session,title):
    """Returns true if a File row with a matching title exists, else false."""
    title_match = exists().where(File.title == title)
    return session.query(title_match).scalar()
def tag_exists(session,name):
    """Returns true if a Tag row with a matching name exists, else false."""
    name_match = exists().where(Tag.name == name)
    return session.query(name_match).scalar()
|
[
"noreply@github.com"
] |
Jonas-Wennerstrom.noreply@github.com
|
b1160545d138dab92ee143df02286fc7352ef441
|
17a2401c7d61f0c57f94926ba74e58152fc6bf6e
|
/lstmcpipe/config/dl1ab_tuning.py
|
75efcce9e516b678a8c84510149ef5965dadeb49
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
cta-observatory/lstmcpipe
|
f2af8e06e8f1c0b0b1c1c70c83d4fc938ed0e7ce
|
c15c16fa60c437b8b018d2353cc8ea614d1c3677
|
refs/heads/master
| 2023-08-31T13:08:14.954315
| 2023-08-30T08:12:30
| 2023-08-30T08:12:30
| 207,280,754
| 4
| 12
|
MIT
| 2023-09-02T12:05:50
| 2019-09-09T10:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
import json
import subprocess
import logging
log = logging.getLogger(__name__)
def create_dl1ab_tuned_config(base_config_path, config_output_path, observed_data_path, mc_path):
    """
    Create a new lstchain config with noise parameters added.
    If there are noise parameters in the input config,
    this will fail with an Exception.

    Parameters:
    -----------
    base_config_path: str or Path-like object
        Input config. Will not be overwritten.
    config_output_path: str or Path-like object
        Where to save the new config file.
    observed_data_path: str or Path-like object
        Path to a dl1 file with observed data from which to
        calculate the parameters.
    mc_path: str or Path-like object
        Path to a dl1 file with simulated data from which to
        calculate the parameters.

    Return:
    -------
    config_output_path

    Raises:
    -------
    RuntimeError
        If the lstchain_tune_nsb subprocess exits with a non-zero code.
    """
    with open(base_config_path) as f:
        base = json.load(f)

    log.info("Running lstchain_tune_nsb to get the noise tuning parameters")
    log.info(f"Comparing mc file {mc_path} and observed file {observed_data_path}")
    cmd = [
        "lstchain_tune_nsb",
        f"--config={base_config_path}",
        f"--input-mc={mc_path}",
        f"--input-data={observed_data_path}",
    ]
    res = subprocess.run(cmd, capture_output=True)
    # Fail loudly on tool failure; the original fell through to a
    # confusing json.JSONDecodeError on the truncated output instead.
    if res.returncode != 0:
        raise RuntimeError(
            f"lstchain_tune_nsb failed with exit code {res.returncode}: "
            f"{res.stderr.decode()}"
        )
    # Log output is directed to stderr not stdout
    log_output = res.stderr.decode()
    # The script dumps the parameter dict as the very last "{...}" block,
    # so everything after the final opening brace is the JSON payload.
    dict_string = "{" + log_output.rsplit("{", 1)[-1].strip()
    parameters = json.loads(dict_string)

    # Merge the tuned parameters into (a possibly empty) image_modifier
    # section, warning when a base-config value is overridden.
    modifier_settings = base.get("image_modifier", {})
    for key, value in parameters.items():
        if key in modifier_settings:
            log.warning(
                f"Overwriting image modifier parameter {key} from the base config with " f"the new value {value}."
            )
        modifier_settings[key] = value
    base["image_modifier"] = modifier_settings

    with open(config_output_path, "w") as f:
        json.dump(base, f, indent=2)
    return config_output_path
|
[
"noreply@github.com"
] |
cta-observatory.noreply@github.com
|
d0add2b46580ce0c3ba728f98b030a8d847cc95a
|
307634b38988a23ecc560e5820ccd0ce02bc326f
|
/hog/tests/05.py
|
aca49047a1922d7256340ce3735456e4c7045f67
|
[] |
no_license
|
wbailey1/CS61A
|
4c6c4b6a71d3f1f3b087646831d9d44f40046acc
|
130c0ff12ad62dbc1a36a9e947092b4f9139122e
|
refs/heads/master
| 2020-03-11T06:14:51.787442
| 2018-04-17T01:06:17
| 2018-04-17T01:06:17
| 129,825,422
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,074
|
py
|
test = {
'name': 'Question 5',
'points': 3,
'suites': [
{
'cases': [
{
'answer': 'While score0 and score1 are both less than goal',
'choices': [
'While score0 and score1 are both less than goal',
'While at least one of score0 or score1 is less than goal',
'While score0 is less than goal',
'While score1 is less than goal'
],
'hidden': False,
'locked': False,
'question': r"""
The variables score0 and score1 are the scores for both
players. Under what conditions should the game continue?
"""
},
{
'answer': 'strategy1(score1, score0)',
'choices': [
'strategy1(score1, score0)',
'strategy1(score0, score1)',
'strategy1(score1)',
'strategy1(score0)'
],
'hidden': False,
'locked': False,
'question': r"""
If strategy1 is Player 1's strategy function, score0 is
Player 0's current score, and score1 is Player 1's current
score, then which of the following demonstrates correct
usage of strategy1?
"""
}
],
'scored': False,
'type': 'concept'
},
{
'cases': [
{
'code': r"""
>>>
>>> # Play function stops at goal
>>> s0, s1 = hog.play(always(5), always(3), score0=91, score1=10)
>>> s0
106
>>> s1
10
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>>
>>> # Goal score is not hardwired
>>> s0, s1 = hog.play(always(5), always(5), goal=10)
>>> s0
0
>>> s1
20
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>>
>>> # Swine Swap
>>> s0,s1 = hog.play(always(5), always(3), score0=36, score1=15, goal=50)
>>> s0
15
>>> s1
51
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import hog
>>> hog.four_sided = hog.make_test_dice(1)
>>> hog.six_sided = hog.make_test_dice(3)
>>> always = hog.always_roll
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>>
>>> # Swine swap applies to 3 digit scores
>>> s0, s1 = hog.play(always(5), always(3), score0=98, score1=31)
>>> s0
31
>>> s1
113
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>>
>>> # Goal edge case
>>> s0, s1 = hog.play(always(4), always(3), score0=88, score1=20)
>>> s0
100
>>> s1
20
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>>
>>> # Player 1 win
>>> s0, s1 = hog.play(always(4), always(4), score0=87, score1=88)
>>> s0
87
>>> s1
104
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>>
>>> # Swine swap applies during Player 1 turn
>>> s0, s1 = hog.play(always(3), always(5), score0=22, score1=98)
>>> s0
113
>>> s1
31
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Check strategies are actually used correctly
>>> strat0 = lambda score, opponent: opponent % 10
>>> strat1 = lambda score, opponent: opponent // 10
>>> s0, s1 = hog.play(strat0, strat1, score0=40, score1=92)
>>> s0
46
>>> s1
104
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>>
>>> # Free bacon refers to correct opponent score
>>> s0, s1 = hog.play(always(0), always(0), score0=11, score1=99)
>>> s0
21
>>> s1
104
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import hog
>>> always = hog.always_roll
>>> hog.four_sided = hog.make_test_dice(1)
>>> hog.six_sided = hog.make_test_dice(3)
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> random.seed(1337)
>>> i = 0
>>> secret = 1
>>> while i < 100:
... s0, s1 = hog.play(make_random_strat(), make_random_strat())
... secret = pow(s0 * s1, secret, 1234567891)
... i += 1
>>> secret
477587826
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> # Fuzz Testing
>>> # Plays a lot of random games, and calculates a secret value.
>>> # Failing this test means something is wrong, but you should
>>> # look at other tests to see where the problem might be.
>>> # Hint: make sure you're only calling take_turn once per turn!
>>>
>>> import hog
>>> import random
>>> hog.four_sided = lambda: random.randrange(1, 5)
>>> hog.six_sided = lambda: random.randrange(1, 7)
>>> def make_random_strat():
... seed = random.randint(1, 65535)
... def random_strat(score, opponent_score):
... return pow(score * opponent_score, seed, 11)
... return random_strat
""",
'teardown': r"""
>>> random.seed()
""",
'type': 'doctest'
}
]
}
|
[
"williambailey97@berkeley.edu"
] |
williambailey97@berkeley.edu
|
a20f3c7273117c730c78f820a3df633014a81ed4
|
a4de57b6d17fb35cde263c025ddb5ef93a5c3475
|
/src/robot_emulator/scripts/gate.py
|
1896c4c3736caf3ae65338a36fcc84ac442cfd35
|
[] |
no_license
|
ericdanz/simpleRosG
|
99264dd4a7c2c49128cb2dc2200961799baeabd1
|
d06f81ff36fd798c859535eb2640e4ed89aabb06
|
refs/heads/master
| 2020-06-03T08:55:22.013337
| 2014-07-09T03:42:49
| 2014-07-09T03:42:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
#!/usr/bin/env python
from robot_emulator.msg import *
from modulemodel import *
import moduleconnection as mc
from geometry_msgs.msg import Twist
import rospy
import time, serial, sys
import maketranslations as mt
class Gate:
    """ROS node front-end for one serial-attached hardware module.

    Subscribes to the 'reqs' topic; a 'boot' request boots the module
    over serial, publishes its reported type on the 'boot' topic and,
    for locomotion modules, starts forwarding Twist commands to the
    serial port. (Python 2 code: note the print statements.)
    """
    def __init__(self, gnumber=0):
        rospy.Subscriber('reqs', Request, self.parseReq)
        self.number = gnumber
        self.module = Module()
        #is gnumber going to be port number as well?
        port = '/dev/ttyACM{}'.format(self.number)
        # NOTE(review): 57600 baud / 0.4 s read timeout -- confirm these
        # match the module firmware.
        self.serPer = serial.Serial(port, baudrate=57600, timeout=.4)

    def parseReq(self,data):
        """Handle a Request message; only 'boot' is recognized for now."""
        rospy.loginfo('this is parse Req')
        thisReq = data.request
        if thisReq == 'boot':
            rospy.loginfo(thisReq)
            self.bootResponder()
            # Only locomotion modules receive Twist inputs.
            if (self.module.mtype == "locomotion"):
                print 'gate is locomotion'
                rospy.Subscriber('locomotionInputs', Twist, self.doInput)
        #set up elifs eventually
        #else:
        #    print "didn't get the type{}".format mc.readunreliable('b#',self.number))

    def bootResponder(self):
        """Boot the module over serial and latch its type on the 'boot' topic."""
        rospy.loginfo('inside boot responder')
        # 'l' is presumably the module's code for a locomotion unit --
        # TODO confirm against moduleconnection.bootModule.
        if ( mc.bootModule(self.serPer) == 'l' ):
            self.module.settype( "locomotion" )
        print self.module.mtype
        # latch=True so late subscribers still see the boot announcement.
        bootPub = rospy.Publisher('boot', BootResponse, queue_size=1, latch=True)
        #need a name inside the boot message, so this module will
        #be able to identify messages sent to itself
        bootString = BootResponse()
        bootString.gatenumber = self.number
        bootString.gatetype = self.module.mtype
        bootPub.publish(bootString)

    def doInput(self,data):
        """Translate a Twist message and forward it to the module over serial."""
        #check the name on the input, if it matches this module
        #do the input if possible or publish an error
        #inputs will become custom type - gatenumber, gatetype, and Twist
        #will add gate names later
        #print 'at input'
        #need more efficiency - only numbers transmitted
        #format is lx,ly,lz/ax,ay,az
        inputString = mt.makeLocInput(data)
        #inputString = 'i/{},{}#'.format(data.linear.x, data.angular.z)
        rospy.loginfo('instring {}'.format(inputString))
        rospy.loginfo( mc.readunreliable(inputString, self.serPer) )
        #mc.sendblind(inputString, self.serPer)
#mc.sendblind(inputString, self.serPer)
if __name__ == '__main__':
    # Start a single gate on serial port 0 and hand control to ROS.
    rospy.init_node('gate', anonymous=True)
    gate = Gate(0)
    rospy.loginfo("Gate Node Started")
    rospy.spin()
|
[
"ericdanziger@gmail.com"
] |
ericdanziger@gmail.com
|
55108395c637ef48281003c0edffe500a4968d13
|
5d45364c56d80ceb63d4cea3bf37d2b7bc20710b
|
/Q4.py
|
b649ae2824326cae3430b0845188264e3a6e7197
|
[] |
no_license
|
Anamikaswt/file
|
d3dcfee9817b6934c19f432287087e89c15750aa
|
7f870602e58acce71b013ba8ac60393516d45c05
|
refs/heads/master
| 2023-03-28T00:51:31.679507
| 2021-04-02T16:04:46
| 2021-04-02T16:04:46
| 354,066,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# Route each line of question3.txt into a city-specific file. Output
# files are opened in append mode and only created when a matching line
# exists, matching the original behavior -- but every handle is now
# closed via `with`, fixing the original's leaked file handles.
with open("question3.txt", "r") as source:
    for line in source:
        if "delhi" in line:
            target = "delhi.txt"
        elif "shimla" in line:
            target = "simla.txt"
        else:
            target = "other city.txt"
        with open(target, "a") as out:
            out.write(line)
|
[
"you@example.com"
] |
you@example.com
|
5cb325c3f33fcb2a31c628b28503a26b01429328
|
5d81c0293fc118fb15a5ba8dfd5a61689967c86b
|
/script.py
|
647580da0c26a2a879e0dc212968750abb396a74
|
[] |
no_license
|
NatashaSay/car_price
|
5b0c5268decc376d499a1f531e2c06dec087b05a
|
4a9f7862e3d6bf3671f849bf56dd05b5fe98a9b1
|
refs/heads/main
| 2023-02-16T20:02:15.471534
| 2021-01-15T17:44:51
| 2021-01-15T17:44:51
| 329,974,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,038
|
py
|
import os
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import xgboost
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import make_scorer, r2_score
import warnings
warnings.filterwarnings("ignore")
import joblib
# Year
def change_years(df):
    """Normalize two-digit registration years to four digits.

    Values below 20 are mapped into the 2000s (e.g. 7 -> 2007) and
    values from 20 to 99 into the 1900s (e.g. 85 -> 1985); four-digit
    years are left untouched. The input frame is not mutated.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame with a numeric 'registration_year' column.

    Returns
    -------
    pandas.DataFrame
        Copy of *df* with normalized registration years.
    """
    df = df.copy()
    years = df['registration_year']
    # Vectorized replacement for the original O(n^2) per-value
    # Series.replace() loop; same mapping, one pass.
    df['registration_year'] = np.where(
        years < 20, years + 2000,
        np.where(years < 100, years + 1900, years),
    )
    return df
# Variable transformation
def change_power(train, test):
    """Replace zero 'power' values with each frame's modal power.

    A power of 0 is treated as missing: it becomes NaN and is then
    filled with the most frequent non-missing value of the same frame.
    Inputs are not mutated; cleaned copies are returned.

    Parameters
    ----------
    train, test : pandas.DataFrame
        Frames containing a numeric 'power' column.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        The cleaned (train, test) pair.
    """
    def _fill(frame):
        # One shared helper replaces the original's duplicated block.
        frame = frame.copy()
        col = frame['power'].replace(0, np.nan)
        # On a modal tie, take the smallest mode; int(Series) (as in the
        # original) fails when mode() returns more than one value.
        fill_value = int(col.mode(dropna=True).iloc[0])
        frame['power'] = col.fillna(fill_value)
        return frame

    return _fill(train), _fill(test)
def change_capacity(train, test):
    """Replace zero 'engine_capacity' values with each frame's mode.

    A capacity of 0 is treated as missing: it becomes NaN and is then
    filled with the most frequent non-missing value of the same frame.
    Inputs are not mutated; cleaned copies are returned. The stray
    debug print of the mode value was removed.

    Parameters
    ----------
    train, test : pandas.DataFrame
        Frames containing a numeric 'engine_capacity' column.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        The cleaned (train, test) pair.
    """
    def _fill(frame):
        # One shared helper replaces the original's duplicated block.
        frame = frame.copy()
        col = frame['engine_capacity'].replace(0, np.nan)
        # On a modal tie, take the smallest mode; int(Series) (as in the
        # original) fails when mode() returns more than one value.
        fill_value = int(col.mode(dropna=True).iloc[0])
        frame['engine_capacity'] = col.fillna(fill_value)
        return frame

    return _fill(train), _fill(test)
def find_frequent_labels(df, var, rare_perc):
    """Return the categories of *var* whose share of rows exceeds *rare_perc*.

    The share is the fraction of rows carrying each category, computed
    from the (non-null) 'price' counts per group.
    """
    working = df.copy()
    shares = working.groupby(var)['price'].count() / len(working)
    frequent = shares[shares > rare_perc]
    return frequent.index
def replace_categories(train, test, var, target, test_data):
    """Ordinal-encode *var* in place across the three frames.

    Categories are ranked by ascending mean of *target* in *train*
    (lowest mean -> 0), and the same mapping is applied to train, test
    and test_data. Returns None; the frames are mutated.
    """
    means = train.groupby([var])[target].mean().sort_values()
    encoding = {label: rank for rank, label in enumerate(means.index)}
    for frame in (train, test, test_data):
        frame[var] = frame[var].map(encoding)
def test_model(model, X_train, y_train):
    """Return the mean 3-fold cross-validated R^2 of *model* as a 1-item list."""
    splitter = KFold(n_splits=3, shuffle=True, random_state=45)
    scorer = make_scorer(r2_score)
    fold_scores = cross_val_score(model, X_train, y_train, cv=splitter, scoring=scorer)
    return [fold_scores.mean()]
# Data
def pred():
    """Train an XGBoost car-price model and write predictions to disk.

    Pipeline: load the train/test CSVs, impute missing categorical and
    numerical values, normalize registration years, log-transform the
    skewed numeric columns, ordinal-encode categoricals, min-max scale,
    fit an XGBRegressor, and dump a submission CSV plus the fitted
    model under ./models.

    Returns
    -------
    numpy.ndarray
        Predicted prices (exp back-transformed) for the test data.
    """
    data = pd.read_csv('./tables/train.csv')
    test_data = pd.read_csv('./tables/test_no_target1.csv')

    # Splitting
    X_train, X_test, y_train, y_test = train_test_split(data, data['price'],
                                                        test_size=0.1,
                                                        random_state=0)

    # Categorical columns missing in BOTH train and test (stacked ifs
    # in the comprehension act as a logical AND).
    vars_with_na = [
        var for var in data.columns
        if X_train[var].isnull().sum() > 0 and X_train[var].dtypes == 'O'
        if test_data[var].isnull().sum() > 0 and test_data[var].dtypes == 'O'
    ]
    X_train[vars_with_na] = X_train[vars_with_na].fillna('Missing')
    X_test[vars_with_na] = X_test[vars_with_na].fillna('Missing')
    test_data[vars_with_na] = test_data[vars_with_na].fillna('Missing')

    X_train = change_years(X_train)
    test_data = change_years(test_data)

    # Numerical columns missing in both frames.
    vars_with_na = [
        var for var in data.columns
        if X_train[var].isnull().sum() > 0 and X_train[var].dtypes != 'O'
        if test_data[var].isnull().sum() > 0 and test_data[var].dtypes != 'O'
    ]
    for var in vars_with_na:
        mode_val = X_train[var].mode()[0]
        mode_val_test = test_data[var].mode()[0]
        # Missingness indicator columns, then modal imputation.
        X_train[var + '_na'] = np.where(X_train[var].isnull(), 1, 0)
        X_test[var + '_na'] = np.where(X_test[var].isnull(), 1, 0)
        test_data[var + '_na'] = np.where(test_data[var].isnull(), 1, 0)
        X_train[var] = X_train[var].fillna(mode_val)
        X_test[var] = X_test[var].fillna(mode_val)
        test_data[var] = test_data[var].fillna(mode_val_test)

    X_train, X_test = change_power(X_train, X_test)
    test_data, X_test = change_power(test_data, X_test)
    X_train, X_test = change_capacity(X_train, X_test)
    test_data, X_test = change_capacity(test_data, X_test)

    # Log-transform the skewed variables ('price' exists only in
    # train/test, not in the unlabeled test_data).
    for var in ['engine_capacity', 'insurance_price', 'price', 'power', 'mileage']:
        X_train[var] = np.log(X_train[var])
        X_test[var] = np.log(X_test[var])
    for var in ['engine_capacity', 'insurance_price', 'power', 'mileage']:
        test_data[var] = np.log(test_data[var])

    # Categorical variables: group rare labels, then ordinal-encode.
    cat_vars = [var for var in X_train.columns if X_train[var].dtype == 'O']
    for var in cat_vars:
        frequent_ls = find_frequent_labels(X_train, var, 0.01)
        X_train[var] = np.where(X_train[var].isin(frequent_ls), X_train[var], 'Rare')
        X_test[var] = np.where(X_test[var].isin(frequent_ls), X_test[var], 'Rare')
        test_data[var] = np.where(test_data[var].isin(frequent_ls), test_data[var], 'Rare')
    for var in cat_vars:
        replace_categories(X_train, X_test, var, 'price', test_data)

    # Feature scaling (fit on train only, applied to all frames).
    train_vars = [var for var in X_train.columns if var not in ['Id', 'price', 'zipcode']]
    scaler = MinMaxScaler()
    scaler.fit(X_train[train_vars])
    X_train[train_vars] = scaler.transform(X_train[train_vars])
    X_test[train_vars] = scaler.transform(X_test[train_vars])
    test_data[train_vars] = scaler.transform(test_data[train_vars])
    X_train.to_csv('./tables/xtrain.csv', index=False)
    X_test.to_csv('./tables/xtest.csv', index=False)
    test_data.to_csv('./tables/test_data.csv', index=False)

    # Step 2: reload the processed frames, select features, fit.
    X_train = pd.read_csv('./tables/xtrain.csv')
    X_test = pd.read_csv('./tables/xtest.csv')
    y_train = X_train['price']
    y_test = X_test['price']
    features = ['type', 'registration_year', 'gearbox', 'power', 'model', 'mileage',
                'fuel', 'brand', 'damage', 'insurance_price']
    X_train = X_train[features]
    X_test = X_test[features]
    # Bug fix: the keyword was misspelled 'mon_child_weiht', so XGBoost
    # silently ignored the intended 'min_child_weight' hyperparameter.
    xgb2_reg = xgboost.XGBRegressor(n_estimators=899,
                                    min_child_weight=2,
                                    max_depth=4,
                                    learning_rate=0.05,
                                    booster='gbtree')
    test_model(xgb2_reg, X_train, y_train)

    test_data = pd.read_csv('./tables/test_data.csv')
    test_data_id = pd.read_csv('./tables/test_data.csv')
    test_data = test_data[features]
    xgb2_reg.fit(X_train, y_train)
    # Back-transform from log space to actual prices.
    y_pred = np.exp(xgb2_reg.predict(test_data))
    submit_test = pd.concat([test_data_id['Id'], pd.DataFrame(y_pred)], axis=1)
    submit_test.columns = ['Id', 'Predicted']
    submit_test.to_csv('./tables/submission1.csv', index=False)
    # Persist the fitted model under a timestamped filename.
    file = f'v1-{datetime.now()}.model'.replace(' ', '_')
    joblib.dump(xgb2_reg, 'models/' + file)
    return y_pred
# Execute the full train/predict pipeline when the script runs.
pred()
|
[
"nataliia.saichyshyna@nure.ua"
] |
nataliia.saichyshyna@nure.ua
|
8fbc882f62ae45fbd343faf3abb3bef1abf2a11d
|
8814c911f8ad490b712b3235d787abc8e930a2fc
|
/manage.py
|
fa6c0b453395ef88114c8c7d09a6a5812e90f514
|
[] |
no_license
|
ronak007mistry/Django-Money-Management
|
6094890564796912e5229d0a16009b02ff5077dc
|
6e11a7b819f0443a60e5b0f5bf451059b04aadb7
|
refs/heads/main
| 2023-01-25T03:29:11.944350
| 2020-12-13T19:44:43
| 2020-12-13T19:44:43
| 320,367,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks: point Django at our settings and dispatch
    the command line to Django's management utility."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'expenseapp.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while keeping the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
[
"59952103+ronak007mistry@users.noreply.github.com"
] |
59952103+ronak007mistry@users.noreply.github.com
|
a7619f22d79e104f13f14f5ad73685679f20016d
|
00c9f4c7e371fc8e48295a85aca5d347ae943fe2
|
/AviationSite/home/urls.py
|
3774752af6c9789a570b2cc1456e2316eb20ab02
|
[] |
no_license
|
Marlster/AviationWebsite
|
19d06baf01efc305f580013524c5a060b6940171
|
2392405439fb406c70b386d3a72bba17be4aafd2
|
refs/heads/master
| 2021-06-19T15:27:50.149194
| 2019-11-06T23:08:47
| 2019-11-06T23:08:47
| 154,491,695
| 1
| 0
| null | 2021-06-10T21:11:54
| 2018-10-24T11:46:46
|
Python
|
UTF-8
|
Python
| false
| false
| 288
|
py
|
from django.conf.urls import url
from django.urls import path
from django.views.generic import RedirectView
from . import views
# Route table for the home app. (NOTE(review): `path` and `RedirectView`
# are imported at the top of the file but unused here.)
urlpatterns = [
    # a blank url displays the home page
    url(r'^success$', views.success),
    url(r'^trials$', views.trials),
    url(r'^$', views.home)
]
|
[
"marley7898@gmail.com"
] |
marley7898@gmail.com
|
39aa9b9ea6a9922048aef593dc626aed1f30a5eb
|
95db0a2b870a4eeb2ff906213e6038337c877e1e
|
/Module_system 1.134/header_mission_templates.py
|
05843a7abcae6afba6e16f183454ef7f4e7c4bc1
|
[] |
no_license
|
mbw-economy/Economy-Mod-for-Mount-and-Blade--Warband
|
1a2e3e79d1e9a153b078011e2c8c9d4b4b5814f5
|
0c8109dc44226d451005aadce12ef8584f122979
|
refs/heads/master
| 2021-01-13T16:14:33.383950
| 2011-05-17T02:06:00
| 2011-05-17T02:06:00
| 1,758,841
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,079
|
py
|
###################################################
# header_mission_templates.py
# This file contains declarations for mission templates
# DO NOT EDIT THIS FILE!
###################################################
from header_common import *
from header_operations import *
from header_triggers import *
from header_troops import *
from ID_scenes import *
from ID_items import *
from ID_strings import *
from header_mission_types import *
# Agent AI flags: low nibble holds the group id, bit 4 starts alarmed.
aif_group_bits = 0
aif_group_mask = 0xF
aif_start_alarmed = 0x00000010
# Group classes.
grc_infantry = 0
grc_archers = 1
grc_cavalry = 2
grc_heroes = 3
grc_everyone = 9
# Movement orders.
mordr_hold = 0
mordr_follow = 1
mordr_charge = 2
mordr_mount = 3
mordr_dismount = 4
mordr_advance = 5
mordr_fall_back = 6
mordr_stand_closer = 7
mordr_spread_out = 8
mordr_use_blunt_weapons = 9
mordr_use_any_weapon = 10
mordr_stand_ground = 11
mordr_hold_fire = 12
mordr_fire_at_will = 13
mordr_retreat = 14
# Riding orders.
rordr_free = 0
rordr_mount = 1
rordr_dismount = 2
# Weapon-usage orders.
wordr_use_any_weapon = 0
wordr_use_blunt_weapons = 1
# Archery orders.
aordr_fire_at_will = 0
aordr_hold_your_fire = 1
# Agent AI Simple Behaviors
aisb_hold = 0
aisb_go_to_pos = 1
aisb_mount = 2
aisb_dismount = 3
aisb_melee = 4
aisb_ranged = 5
aisb_ranged_horseback = 6
aisb_charge_horseback = 7
aisb_maneuver_horseback = 8
aisb_flock = 9
aisb_race = 10
aisb_patrol = 11
aisb_no_enemies = 12
aisb_horse_hold = 13
aisb_horse_run_away = 14
# filter flags (entry-point troop filters for mission templates)
mtef_enemy_party = 0x00000001
mtef_ally_party = 0x00000002
mtef_scene_source = 0x00000004
mtef_conversation_source = 0x00000008
mtef_visitor_source = 0x00000010
mtef_defenders = 0x00000040
mtef_attackers = 0x00000080
mtef_no_leader = 0x00000100
mtef_no_companions = 0x00000200
mtef_no_regulars = 0x00000400
#mtef_team_0 = 0x00001000
mtef_team_0 = 0x00001000
mtef_team_1 = 0x00002000
mtef_team_2 = 0x00003000
mtef_team_3 = 0x00004000
mtef_team_4 = 0x00005000
mtef_team_5 = 0x00006000
mtef_team_member_2 = 0x00008000
mtef_infantry_first = 0x00010000
mtef_archers_first = 0x00020000
mtef_cavalry_first = 0x00040000
mtef_no_auto_reset = 0x00080000
mtef_reverse_order = 0x01000000
mtef_use_exact_number = 0x02000000
# Convenience combinations.
mtef_leader_only = mtef_no_companions | mtef_no_regulars
mtef_regulars_only = mtef_no_companions | mtef_no_leader
#alter flags (equipment-override flags for spawned agents)
af_override_weapons = 0x0000000f
af_override_weapon_0 = 0x00000001
af_override_weapon_1 = 0x00000002
af_override_weapon_2 = 0x00000004
af_override_weapon_3 = 0x00000008
af_override_head = 0x00000010
af_override_body = 0x00000020
#af_override_leg = 0x00000040
af_override_foot = 0x00000040
af_override_gloves = 0x00000080
af_override_horse = 0x00000100
af_override_fullhelm = 0x00000200
#af_override_hands = 0x00000100
af_require_civilian = 0x10000000
#af_override_all_but_horse = 0x000000ff
af_override_all_but_horse = af_override_weapons | af_override_head | af_override_body | af_override_gloves
af_override_all = af_override_horse | af_override_all_but_horse
af_override_everything = af_override_all | af_override_foot
requires_third_party = 0x00000001
#mission template flags. also in mission_template.h
#use only the lower 12 bits. Upper 20 is taken up by xsize and ysize.
mtf_arena_fight = 0x00000001 #identify enemies through team_no
mtf_team_fight = 0x00000001 #identify enemies through team_no
mtf_battle_mode = 0x00000002 #No inventory access
mtf_commit_casualties = 0x00000010
mtf_no_blood = 0x00000100
# Scene sizes are packed as 10-bit fields above the 12 flag bits:
# bits 12-21 hold the x size, bits 22-31 the y size.
max_size = 1023
xsize_bits = 12
ysize_bits = 22

def _pack_dim(value, shift):
    # Mask to the 10-bit payload, then move it into its bit field.
    return (value & max_size) << shift

def xsize(n):
    """Pack an x dimension (clamped to 10 bits) into its flag-word field."""
    return _pack_dim(n, xsize_bits)

def ysize(n):
    """Pack a y dimension (clamped to 10 bits) into its flag-word field."""
    return _pack_dim(n, ysize_bits)
#Mission result flags. also in mission.h
mc_loot = 0x0001
mc_imprison_unconscious = 0x0002
|
[
"Nicholas@.(none)"
] |
Nicholas@.(none)
|
257c12eb61d44015157ebd96fee8c5366c00f03d
|
6e6a082325a9b38ce9b5258e6171a776636fdd68
|
/turtle-snake/snake.py
|
846ca34299b846e2bf22eef0a342ee8ee61b6b02
|
[] |
no_license
|
bunja/python-100days
|
5d8d0e7739e642ab16289c6a1b371776a761370a
|
494e945a9e5d4cecff6583f5851ee18564a1f2e9
|
refs/heads/main
| 2023-08-30T17:12:41.244183
| 2021-11-04T13:45:28
| 2021-11-04T13:45:28
| 369,013,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
from turtle import Turtle
# Initial body: head at the origin plus two segments extending left,
# spaced one 20-pixel grid cell apart.
STARTING_POSITION = [(0, 0), (-20, 0), (-40, 0)]
MOVE_DIST = 20  # pixels advanced per tick
# Turtle headings in degrees.
UP = 90
DOWN = 270
RIGHT = 0
LEFT = 180
class Snake:
    """A multi-segment snake rendered with turtle graphics."""

    def __init__(self):
        self.segments = []
        self.create_snake()
        self.head = self.segments[0]

    def create_snake(self):
        """Build the initial body from the starting positions."""
        for position in STARTING_POSITION:
            self.add_segment(position)

    def add_segment(self, pos):
        """Append one white square segment at *pos*."""
        segment = Turtle("square")
        segment.speed("slowest")
        segment.color("white")
        segment.penup()
        segment.goto(pos)
        self.segments.append(segment)

    def extend(self):
        """Grow by one segment placed on the current tail."""
        self.add_segment(self.segments[-1].position())

    def reset(self):
        """Park the old body off-screen and start a fresh snake."""
        for leftover in self.segments:
            leftover.goto(1000, 1000)
        self.segments.clear()
        self.create_snake()
        self.head = self.segments[0]

    def move(self):
        """Advance one step: each segment moves to its predecessor's
        spot (tail first), then the head steps forward."""
        for follower, leader in zip(reversed(self.segments[1:]),
                                    reversed(self.segments[:-1])):
            follower.goto(leader.xcor(), leader.ycor())
        self.head.forward(MOVE_DIST)

    def _turn(self, new_heading, opposite):
        # Refuse 180-degree reversals into the snake's own body.
        if self.head.heading() != opposite:
            self.head.setheading(new_heading)

    def up(self):
        self._turn(UP, DOWN)

    def down(self):
        self._turn(DOWN, UP)

    def left(self):
        self._turn(LEFT, RIGHT)

    def right(self):
        self._turn(RIGHT, LEFT)
|
[
"kos.lv@yandex.com"
] |
kos.lv@yandex.com
|
1157bbc242bff1549e9e91e11094f5ef9e248a3a
|
fccb4ee993e9164fc3ca111505141f00c79a5534
|
/source code/session 2 - Text preprocessing/2-2-4-inception.py
|
1439f3e04cd20b2f42d95fc72544fd01a649e825
|
[] |
no_license
|
buomsoo-kim/Sentiment-Analysis-with-Python
|
de8c99ceb9001c9ddb2483c08e4c150b605dfbc9
|
3281f8a4556cffc862a18a00dad56e92129e5bec
|
refs/heads/master
| 2021-07-03T13:15:02.239720
| 2017-09-25T02:09:26
| 2017-09-25T02:09:26
| 103,010,880
| 21
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
# Preprocess the crawled movie reviews: tokenize, drop English stopwords,
# lemmatize, and write one cleaned review per line.
# (Comments translated from Korean; redundant f.close() calls inside the
# `with` blocks were removed -- `with` already closes the file.)
import nltk
from nltk.corpus import stopwords

# WordNet-based lemmatizer and the English stopword list.
lemmatizer = nltk.wordnet.WordNetLemmatizer()
stopWords = stopwords.words('english')

# Read the raw reviews produced by the crawling step, one review per line.
with open('result-1-3-5-inception.txt', 'r', encoding = 'utf-8') as f:
    lines = f.readlines()

reviewProcessedList = []
for line in lines:
    tokens = nltk.word_tokenize(line)
    # Keep non-stopword tokens (case-insensitive check), lemmatized and
    # each prefixed with a space -- byte-identical to the original
    # string-concatenation output format.
    reviewProcessed = ''.join(
        ' ' + lemmatizer.lemmatize(token)
        for token in tokens
        if token.lower() not in stopWords
    )
    reviewProcessedList.append(reviewProcessed)

# Write the cleaned reviews, one per line.
with open('result-2-2-4-inception.txt', 'w', encoding = 'utf-8') as f:
    for reviewProcessed in reviewProcessedList:
        f.write(reviewProcessed + '\n')
|
[
"noreply@github.com"
] |
buomsoo-kim.noreply@github.com
|
500ff4e5900396a5fbb390f58f1a83465f1e71c7
|
b6ede31e37d504b5c60d5b5e95d502808d868659
|
/src/svs/user_interaction.py
|
64e8b8768ecbb1bcf8d5e1d2fb865f564b6614f5
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
licehammer/svs
|
6c45530d947299f883e42bc9af3eaf6814979a03
|
b4e6af8e97761d13256e8ecd67ee5ba6f1dbd063
|
refs/heads/master
| 2021-01-18T18:49:51.454366
| 2016-06-14T14:38:23
| 2016-06-14T14:38:41
| 61,113,469
| 0
| 0
| null | 2016-06-14T10:22:48
| 2016-06-14T10:22:48
| null |
UTF-8
|
Python
| false
| false
| 2,137
|
py
|
import cherrypy
from mako.lookup import TemplateLookup
import pkg_resources
from svs.i18n_tool import ugettext_lazy as N_
__author__ = 'regu0004'
LOOKUP = TemplateLookup(directories=[pkg_resources.resource_filename("svs", "templates")], module_directory='modules/',
input_encoding='utf-8', output_encoding='utf-8',
imports=["from svs.i18n_tool import ugettext as _"])
class EndUserErrorResponse(cherrypy.HTTPError):
    """HTTP 400 error whose body is a rendered, localized error page."""

    def __init__(self, timestamp, uid, message, form_action="/error"):
        # Values exposed to the 'error.mako' template.
        error = {
            "uid": uid,
            "timestamp": self._format_timestamp(timestamp),
            "message": message,
        }
        argv = {
            "error": error,
            # Current request locale, set up by the cherrypy i18n tool.
            "language": cherrypy.response.i18n.locale.language,
            "form_action": form_action
        }
        self.error_page = LOOKUP.get_template("error.mako").render(**argv)
        super(EndUserErrorResponse, self).__init__(400, self.error_page)

    def _format_timestamp(self, timestamp):
        # Plain str() of whatever timestamp object the caller passed.
        return str(timestamp)

    def get_error_page(self, *args, **kwargs):
        # cherrypy calls this hook to obtain the error response body.
        return self.error_page
class ConsentPage(object):
    """
    Render the consent page.
    """
    TEMPLATE = "consent.mako"

    @classmethod
    def render(cls, client_name, idp_entity_id, released_claims, relay_state, form_action="/consent"):
        """Render the consent form asking the user to approve releasing
        *released_claims* to *client_name*."""
        # NOTE(review): client_name is interpolated into HTML unescaped;
        # confirm upstream sanitization or escape it here.
        client_name_display_string = "<strong>'{}'</strong>".format(client_name)
        # N_ marks the sentence for lazy translation before formatting.
        question = N_("{client_name} requires the information below to be transferred:").format(
            client_name=client_name_display_string)
        # Opaque state round-tripped through the form submission.
        state = {
            "idp_entity_id": idp_entity_id,
            "state": relay_state,
        }
        return LOOKUP.get_template(cls.TEMPLATE).render(consent_question=question,
                                                        released_claims=released_claims,
                                                        state=state,
                                                        form_action=form_action,
                                                        language=cherrypy.response.i18n.locale.language)
|
[
"rebecka.gulliksson@umu.se"
] |
rebecka.gulliksson@umu.se
|
f3215469c3db9f1e25a2cce29c6512d71545aeed
|
783a300adcca3f7bbba47a786b0a557c6ada705e
|
/MPUtils/umxnet/transform.py
|
a7a5aa6120b79e105dbb9d97832ba3568ea5c892
|
[] |
no_license
|
yinglang/MPUtils
|
81e1dc1012d4845a6bdabde662bd5282e7adab41
|
79329292c9454ea29d8e1a4658846124076b1003
|
refs/heads/master
| 2021-07-12T09:58:40.338541
| 2019-02-26T13:37:14
| 2019-02-26T13:37:14
| 150,934,903
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,233
|
py
|
from mxnet import nd
from random import uniform, randint
import math
class RandomPadCrop:
    """Zero-pad an image on all four sides, then crop a random window of
    the original size (standard shift augmentation)."""

    def __init__(self, pad):
        """
        pad: tuple of (lh, rh, lw, rw) of padding lengths
        """
        self.pad = pad
        # Total padding per axis = the range of valid random crop
        # offsets along (height, width).
        self.random_range = nd.array([pad[0]+pad[1], pad[2]+pad[3]])

    def __call__(self, data):
        # data: assumed to be a CHW tensor (channels, height, width) --
        # TODO confirm with callers.
        pad, h, w = self.pad, data.shape[1], data.shape[2]
        # nd.pad needs a 4D input, hence the temporary batch axis.
        data = data.expand_dims(axis=0).pad(mode="constant", constant_value=0,
                                            pad_width=(0, 0, 0, 0, pad[0], pad[1], pad[2], pad[3]))
        # Random top-left corner of the crop window; uint8 truncation is
        # presumably fine for the small pad sizes used -- verify.
        x0, y0 = (nd.random.uniform(shape=(2,)) * self.random_range).astype('uint8')
        x0, y0 = x0.asscalar(), y0.asscalar()
        return data[0, :, x0:x0+h, y0:y0+w]
class RandomErasing(object):
    """Randomly zero out one rectangular patch of a CHW image.

    Implements 'Random Erasing Data Augmentation' by Zhong et al.
    (https://arxiv.org/pdf/1708.04896.pdf); apply after ToTensor and
    Normalize.

    Args:
        probability: chance that the erasing operation is performed.
        sl: minimum erased area as a fraction of the image area.
        sh: maximum erased area as a fraction of the image area.
        r1: minimum aspect ratio of the erased patch.
    """

    def __init__(self, probability = 0.5, sl = 0.02, sh = 0.4, r1 = 0.3):
        self.probability = probability
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        # RNG draw order (gate, area, aspect, x, y) matches the original
        # implementation so seeded runs reproduce identical crops.
        if uniform(0, 1) > self.probability:
            return img
        total = img.shape[1] * img.shape[2]
        for _ in range(100):
            patch_area = uniform(self.sl, self.sh) * total
            aspect = uniform(self.r1, 1 / self.r1)
            patch_h = int(round(math.sqrt(patch_area * aspect)))
            patch_w = int(round(math.sqrt(patch_area / aspect)))
            # Retry until the patch fits strictly inside the image.
            if patch_h < img.shape[1] and patch_w < img.shape[2]:
                top = randint(0, img.shape[1] - patch_h)
                left = randint(0, img.shape[2] - patch_w)
                img[:, top:top + patch_h, left:left + patch_w] = 0
                return img
        return img
# from gluoncv.data.transforms.presets import experimental
# import gluoncv.data.transforms.presets.experimental as experimental
from gluoncv.data.transforms import presets
import numpy as np
from mxnet import nd
import mxnet as mx
import random
def random_crop_with_constraints(bbox, size, min_scale=0.3, max_scale=1,
                                 max_aspect_ratio=2, constraints=None,
                                 max_trial=50):
    """Crop an image randomly with bounding box constraints (SSD-style).

    See 'SSD: Single Shot MultiBox Detector', Liu et al., ECCV 2016.

    Parameters
    ----------
    bbox : numpy.ndarray
        Shape (N, 4+): (x_min, y_min, x_max, y_max) plus optional extra
        attributes that are carried through unchanged.
    size : tuple
        Image (width, height).
    min_scale, max_scale : float
        Min/max ratio between the cropped region and the original image.
    max_aspect_ratio : float
        Maximum aspect ratio of the cropped region.
    constraints : iterable of (min_iou, max_iou) tuples or None
        IoU constraints; None entries mean unconstrained.  Defaults to the
        paper's ((0.1, None), ..., (None, 1)).
    max_trial : int
        Maximum sampling trials per constraint.

    Returns
    -------
    numpy.ndarray
        Cropped bounding boxes, shape (M, 4+) with M <= N.
    tuple
        (x_offset, y_offset, new_width, new_height) of the chosen crop.
    """
    # default params in paper
    if constraints is None:
        constraints = (
            (0.1, None),
            (0.3, None),
            (0.5, None),
            (0.7, None),
            (0.9, None),
            (None, 1),
        )
    if len(bbox) == 0:
        # no ground truth boxes: nothing to constrain, keep only the full image
        constraints = []
    w, h = size
    candidates = [(0, 0, w, h)]
    for min_iou, max_iou in constraints:
        min_iou = -np.inf if min_iou is None else min_iou
        max_iou = np.inf if max_iou is None else max_iou
        for _ in range(max_trial):
            scale = random.uniform(min_scale, max_scale)
            aspect_ratio = random.uniform(
                max(1 / max_aspect_ratio, scale * scale),
                min(max_aspect_ratio, 1 / (scale * scale)))
            crop_h = int(h * scale / np.sqrt(aspect_ratio))
            crop_w = int(w * scale * np.sqrt(aspect_ratio))
            crop_t = random.randrange(h - crop_h)
            crop_l = random.randrange(w - crop_w)
            crop_bb = np.array((crop_l, crop_t, crop_l + crop_w, crop_t + crop_h))
            if len(bbox) == 0:
                # BUGFIX: previously appended undefined names (left/top/right/bottom),
                # which would raise NameError.  Record the sampled crop itself.
                # (Currently unreachable because constraints == [] when bbox is
                # empty, but kept correct in case constraints are passed in.)
                candidates.append((crop_l, crop_t, crop_w, crop_h))
                break
            iou = presets.ssd.experimental.bbox.bbox_iou(bbox, crop_bb[np.newaxis])
            if min_iou <= iou.min() and iou.max() <= max_iou:
                top, bottom = crop_t, crop_t + crop_h
                left, right = crop_l, crop_l + crop_w
                candidates.append((left, top, right-left, bottom-top))
                break
    # randomly pick one candidate crop that keeps at least one box
    while candidates:
        crop = candidates.pop(np.random.randint(0, len(candidates)))
        if len(bbox) == 0:
            return bbox, crop
        new_bbox = presets.ssd.experimental.bbox.bbox_crop(bbox, crop, allow_outside_center=False)
        if new_bbox.size < 1:
            continue
        new_crop = (crop[0], crop[1], crop[2], crop[3])
        return new_bbox, new_crop
    # fall back to the original image when no candidate survives
    return bbox, (0, 0, w, h)
def returned_value(res, args, kwargs):
    """Append any extra positional/keyword arguments onto the result list.

    ``args`` (a tuple) and ``kwargs`` (a dict) are each appended as a single
    item, and only when non-empty, so transform wrappers can pass extras
    through unchanged.  Returns ``res`` for convenience.
    """
    if args:
        res.append(args)
    if kwargs:
        res.append(kwargs)
    return res
class TrainTransform(presets.ssd.SSDDefaultTrainTransform):
    """SSD training transform (extends SSDDefaultTrainTransform).

    Pipeline: random color distort -> random expand (p=0.5) -> random crop
    -> resize -> random horizontal flip -> to_tensor/normalize.  When anchors
    are given, also generates per-anchor classification/box-regression targets.
    """
    def __init__(self, width, height, anchors=None, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), iou_thresh=0.5, box_norm=(0.1, 0.1, 0.2, 0.2),
                 color_distort_kwargs={}, random_expand_kwargs={}, random_crop_kwargs={},
                 **kwargs):
        """
        The three kwargs dicts are forwarded to, respectively:
        - presets.ssd.experimental.image.random_color_distort
        - presets.ssd.timage.random_expand
        - random_crop_with_constraints (defined in this module)
        NOTE(review): the mutable default dicts are shared across instances;
        callers must not mutate them in place.
        """
        super(TrainTransform, self).__init__(width, height, anchors, mean,
                                             std, iou_thresh, box_norm, **kwargs)
        self.color_distort_kwargs = color_distort_kwargs
        self.random_expand_kwargs = random_expand_kwargs
        self.random_crop_kwargs = random_crop_kwargs
    def __call__(self, src, label, *args, **kwargs):
        """Apply transform to training image/label."""
        # random color jittering
        img = presets.ssd.experimental.image.random_color_distort(src, **self.color_distort_kwargs)
        # random expansion (zoom-out) with prob 0.5; boxes translated accordingly
        if np.random.uniform(0, 1) > 0.5:
            img, expand = presets.ssd.timage.random_expand(img, fill=[m * 255 for m in self._mean], **self.random_expand_kwargs)
            if label.shape[0] > 0:
                bbox = presets.ssd.tbbox.translate(label, x_offset=expand[0], y_offset=expand[1])
            else:
                bbox = label
        else:
            img, bbox = img, label
        # random cropping constrained by box IoU (see random_crop_with_constraints)
        h, w, _ = img.shape
        bbox, crop = random_crop_with_constraints(bbox, (w, h), **self.random_crop_kwargs)
        x0, y0, w, h = crop
        img = mx.image.fixed_crop(img, x0, y0, w, h)
        # resize with a randomly chosen interpolation method (0..4)
        h, w, _ = img.shape
        interp = np.random.randint(0, 5)
        img = presets.ssd.timage.imresize(img, self._width, self._height, interp=interp)
        if len(bbox) > 0: bbox = presets.ssd.tbbox.resize(bbox, (w, h), (self._width, self._height))
        # random horizontal flip
        h, w, _ = img.shape
        img, flips = presets.ssd.timage.random_flip(img, px=0.5)
        if len(bbox) > 0: bbox = presets.ssd.tbbox.flip(bbox, (w, h), flip_x=flips[0])
        # to tensor
        img = mx.nd.image.to_tensor(img)
        img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
        if self._anchors is None:
            # image-level output: pad an all -1 dummy box when there is no gt
            if len(bbox) == 0: bbox = np.array([[-1] * 6])
            return returned_value([img, bbox.astype(img.dtype)], args, kwargs)
        # generate training target so cpu workers can help reduce the workload on gpu
        if len(bbox) > 0:
            gt_bboxes = mx.nd.array(bbox[np.newaxis, :, :4])
            gt_ids = mx.nd.array(bbox[np.newaxis, :, 4:5])
            cls_targets, box_targets, _ = self._target_generator(
                self._anchors, None, gt_bboxes, gt_ids)
        else:
            # no ground truth at all: emit all-background targets
            cls_targets = nd.zeros(shape=(1, self._anchors.size//4))
            box_targets = nd.zeros(shape=(1, self._anchors.size//4, 4))
        return returned_value([img, cls_targets[0], box_targets[0]], args, kwargs)
class ValidTransform(object):
    """Default SSD validation transform.

    Resizes to a fixed (width, height), rescales boxes to match, then applies
    to_tensor + normalize.  No random augmentation is performed.

    Parameters
    ----------
    width : int
        Image width.
    height : int
        Image height.
    mean : array-like of size 3
        Mean pixel values to be subtracted from image tensor. Default is [0.485, 0.456, 0.406].
    std : array-like of size 3
        Standard deviation to be divided from image. Default is [0.229, 0.224, 0.225].
    """
    def __init__(self, width, height, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        self._width = width
        self._height = height
        self._mean = mean
        self._std = std
    def __call__(self, src, label, *args, **kwargs):
        """Apply transform to validation image/label."""
        # resize (interp=9: auto-select method based on scaling direction)
        h, w, _ = src.shape
        img = presets.ssd.timage.imresize(src, self._width, self._height, interp=9)
        if label.shape[0] > 0:
            bbox = presets.ssd.tbbox.resize(label, in_size=(w, h), out_size=(self._width, self._height))
        # pad a dummy all -1 box when there is no ground truth
        else: bbox=np.array([[-1]*6])
        img = mx.nd.image.to_tensor(img)
        img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
        return returned_value([img, bbox.astype(img.dtype)], args, kwargs)
class SSDTransform(presets.ssd.SSDDefaultTrainTransform):
    """SSD training transform (extends SSDDefaultTrainTransform).

    Pipeline: random color distort -> random expand (p=0.5) -> random crop
    -> resize -> random horizontal flip -> to_tensor/normalize.  Depending on
    ``level`` it returns image-level ground truth, anchor-level targets, or
    both.
    """
    def __init__(self, width, height, anchors=None, mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225), iou_thresh=0.5, box_norm=(0.1, 0.1, 0.2, 0.2),
                 color_distort_kwargs={}, random_expand_kwargs={}, random_crop_kwargs={},
                 level='image', **kwargs):
        """
        The three kwargs dicts are forwarded to, respectively:
        - presets.ssd.experimental.image.random_color_distort
        - presets.ssd.timage.random_expand
        - random_crop_with_constraints (defined in this module)
        level: 'image' return [data, gt(pad_val=-1)], 'anchor' return [data, cls_target, bbox_target],
            'image,anchor' return [data, gt, cls_target, bbox_target]
        """
        super(SSDTransform, self).__init__(width, height, anchors, mean,
                                           std, iou_thresh, box_norm, **kwargs)
        self.color_distort_kwargs = color_distort_kwargs
        self.random_expand_kwargs = random_expand_kwargs
        self.random_crop_kwargs = random_crop_kwargs
        self.level = level = level.lower()
        assert level in ['image', 'anchor', 'image,anchor'], "level must be one of ['image', 'anchor', 'image,anchor']"
        assert (anchors is not None) or level=='image', "anchors must be specified when level != {}".format(level)
    def __call__(self, src, label, *args, **kwargs):
        """Apply transform to training image/label."""
        # random color jittering
        img = presets.ssd.experimental.image.random_color_distort(src, **self.color_distort_kwargs)
        # random expansion (zoom-out) with prob 0.5; boxes translated accordingly
        if np.random.uniform(0, 1) > 0.5:
            img, expand = presets.ssd.timage.random_expand(img, fill=[m * 255 for m in self._mean], **self.random_expand_kwargs)
            if label.shape[0] > 0:
                bbox = presets.ssd.tbbox.translate(label, x_offset=expand[0], y_offset=expand[1])
            else:
                bbox = label
        else:
            img, bbox = img, label
        # random cropping constrained by box IoU
        h, w, _ = img.shape
        bbox, crop = random_crop_with_constraints(bbox, (w, h), **self.random_crop_kwargs)
        x0, y0, w, h = crop
        img = mx.image.fixed_crop(img, x0, y0, w, h)
        # resize with a randomly chosen interpolation method (0..4)
        h, w, _ = img.shape
        interp = np.random.randint(0, 5)
        img = presets.ssd.timage.imresize(img, self._width, self._height, interp=interp)
        if len(bbox) > 0: bbox = presets.ssd.tbbox.resize(bbox, (w, h), (self._width, self._height))
        # random horizontal flip
        h, w, _ = img.shape
        img, flips = presets.ssd.timage.random_flip(img, px=0.5)
        if len(bbox) > 0: bbox = presets.ssd.tbbox.flip(bbox, (w, h), flip_x=flips[0])
        # to tensor
        img = mx.nd.image.to_tensor(img)
        img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)
        if self._anchors is None or self.level == 'image':
            # image-level output: pad an all -1 dummy box when there is no gt
            if len(bbox) == 0: bbox = np.array([[-1] * 6])
            return returned_value([img, bbox.astype(img.dtype)], args, kwargs)
        # generate training target so cpu workers can help reduce the workload on gpu
        if len(bbox) > 0:
            gt_bboxes = mx.nd.array(bbox[np.newaxis, :, :4])
            gt_ids = mx.nd.array(bbox[np.newaxis, :, 4:5])
            cls_targets, box_targets, _ = self._target_generator(
                self._anchors, None, gt_bboxes, gt_ids)
        else:
            # no ground truth at all: emit all-background targets
            cls_targets = nd.zeros(shape=(1, self._anchors.size//4))
            box_targets = nd.zeros(shape=(1, self._anchors.size//4, 4))
        # BUGFIX: was the bare name `level` (NameError at runtime); use the
        # attribute stored in __init__.
        if self.level == 'anchor':
            return returned_value([img, cls_targets[0], box_targets[0]], args, kwargs)
        else:
            # NOTE(review): in the 'image,anchor' path an empty bbox is returned
            # as-is (not padded with -1) -- confirm downstream handles that.
            return returned_value([img, bbox.astype(img.dtype), cls_targets[0], box_targets[0]], args, kwargs)
|
[
"y19941010@126.com"
] |
y19941010@126.com
|
0f7eb8486eb91647c6acab95fb94e10d4ecc1a1e
|
6d162c19c9f1dc1d03f330cad63d0dcde1df082d
|
/util/test/tests/Vulkan/VK_Dedicated_Allocation.py
|
c06da545539026efa6c1692bd928e9ca41a25124
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"CC-BY-3.0",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
baldurk/renderdoc
|
24efbb84446a9d443bb9350013f3bfab9e9c5923
|
a214ffcaf38bf5319b2b23d3d014cf3772cda3c6
|
refs/heads/v1.x
| 2023-08-16T21:20:43.886587
| 2023-07-28T22:34:10
| 2023-08-15T09:09:40
| 17,253,131
| 7,729
| 1,358
|
MIT
| 2023-09-13T09:36:53
| 2014-02-27T15:16:30
|
C++
|
UTF-8
|
Python
| false
| false
| 2,336
|
py
|
import renderdoc as rd
import rdtest
class VK_Dedicated_Allocation(rdtest.TestCase):
    """Checks a capture of the VK_Dedicated_Allocation demo renders correctly."""
    # Name of the demo program the test framework launches and captures
    demos_test_name = 'VK_Dedicated_Allocation'
    def check_capture(self):
        # Focus the replay on the draw call we want to inspect
        action = self.find_action("Draw")
        self.controller.SetFrameEvent(action.eventId, True)
        # Fetch post-vertex-shader mesh output for the whole draw
        postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut, 0, action.numIndices)
        # Expected post-VS values: a pass-through triangle with green colour
        postvs_ref = {
            0: {
                'vtx': 0,
                'idx': 0,
                'gl_PerVertex_var.gl_Position': [-0.5, 0.5, 0.0, 1.0],
                'vertOut.pos': [-0.5, 0.5, 0.0, 1.0],
                'vertOut.col': [0.0, 1.0, 0.0, 1.0],
                'vertOut.uv': [0.0, 0.0, 0.0, 1.0],
            },
            1: {
                'vtx': 1,
                'idx': 1,
                'gl_PerVertex_var.gl_Position': [0.0, -0.5, 0.0, 1.0],
                'vertOut.pos': [0.0, -0.5, 0.0, 1.0],
                'vertOut.col': [0.0, 1.0, 0.0, 1.0],
                'vertOut.uv': [0.0, 1.0, 0.0, 1.0],
            },
            2: {
                'vtx': 2,
                'idx': 2,
                'gl_PerVertex_var.gl_Position': [0.5, 0.5, 0.0, 1.0],
                'vertOut.pos': [0.5, 0.5, 0.0, 1.0],
                'vertOut.col': [0.0, 1.0, 0.0, 1.0],
                'vertOut.uv': [1.0, 0.0, 0.0, 1.0],
            },
        }
        self.check_mesh_data(postvs_ref, postvs_data)
        rdtest.log.success('Mesh data is as expected')
        pipe: rd.PipeState = self.controller.GetPipelineState()
        # Spot-check pixels of the first colour target (RGBA floats, 1/255 tolerance)
        self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 155, 195, [1.0, 0.0, 0.09, 1.0], eps=1.0/255.0)
        self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 190, 195, [0.0, 1.0, 0.09, 1.0], eps=1.0/255.0)
        self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 255, 195, [1.0, 0.0, 0.09, 1.0], eps=1.0/255.0)
        self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 230, 150, [0.723, 1.0, 1.0, 1.0], eps=1.0/255.0)
        self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 190, 80, [0.2, 0.2, 0.2, 1.0], eps=1.0/255.0)
        self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 200, 80, [0.723, 1.0, 1.0, 1.0], eps=1.0/255.0)
        self.check_pixel_value(pipe.GetOutputTargets()[0].resourceId, 210, 80, [0.2, 0.2, 0.2, 1.0], eps=1.0/255.0)
|
[
"baldurk@baldurk.org"
] |
baldurk@baldurk.org
|
c49799e3184491bde60d8e62de727e4ff19198a4
|
84b8597b5fa1fba9d7859e048d245c327a8d2428
|
/tests/integration/cmor/_fixes/cmip5/test_cesm1_waccm.py
|
20023eebfaf03f12b0f9c75fc3977606c52cb216
|
[
"Apache-2.0"
] |
permissive
|
ESMValGroup/ESMValCore
|
e648db4702e67f24b38db079c2839a6e75b64b7b
|
d5187438fea2928644cb53ecb26c6adb1e4cc947
|
refs/heads/main
| 2023-08-16T01:35:08.783000
| 2023-08-11T11:17:20
| 2023-08-11T11:17:20
| 190,192,145
| 41
| 36
|
Apache-2.0
| 2023-09-14T15:34:58
| 2019-06-04T11:58:08
|
Python
|
UTF-8
|
Python
| false
| false
| 415
|
py
|
"""Tests for CESM1-WACCM fixes."""
from esmvalcore.cmor._fixes.cmip5.cesm1_cam5 import Cl as BaseCl
from esmvalcore.cmor._fixes.cmip5.cesm1_waccm import Cl
from esmvalcore.cmor.fix import Fix
def test_get_cl_fix():
    """The CESM1-WACCM ``cl`` fix is registered for CMIP5/Amon."""
    registered = Fix.get_fixes('CMIP5', 'CESM1-WACCM', 'Amon', 'cl')
    expected = [Cl(None)]
    assert registered == expected
def test_cl_fix():
    """CESM1-WACCM reuses the CESM1-CAM5 ``cl`` fix class unchanged."""
    assert Cl is BaseCl
|
[
"noreply@github.com"
] |
ESMValGroup.noreply@github.com
|
e4d9df2c11e97a002bf4f75df361c351e7d66f56
|
1d1d46c4aeab8296c803456ad79f2bfadab5ed9c
|
/login-Pyramid/venv/login-app/login_app/views.py
|
25fa63bd2e843d9539678d7cb643c5bc38cc270c
|
[] |
no_license
|
Chanpreet08/Login-Web-App
|
8be1f2ec3a1d854688d901909198bae8661e6a99
|
40efdc582139abeb4bb927c7402c7974bfb0dd84
|
refs/heads/master
| 2021-09-06T04:30:21.164878
| 2018-02-02T10:41:57
| 2018-02-02T10:41:57
| 119,503,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
from pyramid.view import view_config
from pymongo import MongoClient
import bcrypt
from pyramid.response import Response
@view_config(route_name='home', renderer='templates/mytemplate.jinja2')
def my_view(request):
    """Render the home page template."""
    context = {'project': 'cc'}
    return context
@view_config(route_name='login', renderer='json')
def login(request):
    """Validate a username/password pair against the ``users`` collection.

    Returns a JSON-serializable dict with ``success`` and ``msg`` keys.
    (Implicitly returns None when the form was not submitted.)
    """
    if 'form.submitted' in request.params:
        username = request.params['username']
        password = request.params['password']
        loggedUser = request.db['users'].find_one({'username':username})
        if loggedUser:
            hashpwd = loggedUser['password'].encode('utf-8')
            # bcrypt re-hashes the candidate password and compares safely
            if bcrypt.checkpw(password.encode('utf-8'),hashpwd):
                request.session['username']= username
                return {'success':True,'msg':'successful login'}
            else:
                return {'success':False,'msg':'Wrong password'}
        # BUGFIX: key was misspelled 'sucess', so clients checking
        # response['success'] never saw the "no user" failure correctly
        return {'success':False,'msg':'No User'}
@view_config(route_name='signup', renderer ='templates/registertemplate.jinja2')
def signup(request):
    # Just render the registration template; no context data is needed.
    return ''
@view_config(route_name='register', renderer ='json')
def register(request):
    """Create a new user with a bcrypt-hashed password.

    Returns a dict with ``success`` and ``msg`` keys; implicitly returns None
    when the form was not submitted.
    """
    if 'form.submitted' in request.params:
        username = request.params['username']
        password = request.params['password']
        user = request.db['users'].find_one({'username':username})
        if not user:
            # store only the bcrypt hash, never the plain-text password
            hashpwd = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
            request.db['users'].insert({'username':username,'password':hashpwd})
            request.session['username'] = username
            return {'success':True,'msg':'User created'}
        else:
            # BUGFIX: key was misspelled 'sucess', breaking client-side checks
            return {'success':False,'msg':'Username exist'}
|
[
"chanpreet.chhabra@gmail.com"
] |
chanpreet.chhabra@gmail.com
|
4219e549ae8bbff7a57fd5fb2530e7fb85acd307
|
d1f19fe346b9d56416ea35b9e0a130b352b1f647
|
/youtube_clone/settings.py
|
beb5f0a39ce3b73ea54755fe67789054a5d1fcf5
|
[] |
no_license
|
mark-okoth/django-youtube-clone
|
2fd714a36ae4065b16462c5912c05c62c7e74ff3
|
67590999bf1904672c60cacb064be3f8391af23d
|
refs/heads/master
| 2021-03-05T11:22:04.665873
| 2020-03-08T12:43:36
| 2020-03-08T12:43:36
| 246,118,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,352
|
py
|
"""
Django settings for youtube_clone project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control; rotate it and load
# from the environment before any production deployment.
SECRET_KEY = '(@3k1s3ju52ehws!n%0(wgx+x1e@q8@26)+pjo9y!t^#v$y^h3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames once DEBUG is turned off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'youtube_app',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'youtube_clone.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'youtube_clone.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Target directory for `collectstatic` in deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'youtube_clone/static')
]
# User-uploaded files (videos, thumbnails) live under /media/.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
[
"markokoth96@gmail.com"
] |
markokoth96@gmail.com
|
c5b74f1c6488586c41050831e9e7b65ac3d54aa5
|
caef0308b388eee0159e4a7184c22293e69690e8
|
/leetcode/674.py
|
0a4505e2eacbcb026f0fefbcfdc85627d7ee8dc1
|
[] |
no_license
|
AlwaysOnline233/cookbook
|
440ca4325842482157484ca2df9d0f1932b0b29e
|
edebb0f98d88566977cc2514631818d8371b78dd
|
refs/heads/master
| 2022-12-14T20:03:26.527927
| 2020-09-06T16:57:36
| 2020-09-06T16:57:36
| 262,815,273
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
# 最长连续递增序列
'''
给定一个未经排序的整数数组,找到最长且连续的的递增序列,并返回该序列的长度。
示例: 输入: [1,3,5,4,7] 输出: 3 解释: 最长连续递增序列是 [1,3,5], 长度为3。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/longest-continuous-increasing-subsequence
'''
'''
滑动窗口
每个(连续)增加的子序列是不相交的,并且每当 nums[i-1]>=nums[i] 时,每个此类子序列的边界都会出现。
当它这样做时,它标志着在 nums[i] 处开始一个新的递增子序列,我们将这样的 i 存储在变量 anchor 中。
'''
class Solution(object):
    def findLengthOfLCIS(self, nums):
        """Return the length of the longest strictly increasing contiguous run."""
        best = 0
        start = 0  # index where the current increasing window begins
        for idx in range(len(nums)):
            if idx and nums[idx - 1] >= nums[idx]:
                # streak broken: a new window starts at this element
                start = idx
            best = max(best, idx - start + 1)
        return best
# Ad-hoc manual check (expected output: 3 then 1); runs on import.
a = Solution()
print(a.findLengthOfLCIS([1, 3, 5, 4, 7]))
print(a.findLengthOfLCIS([2, 2, 2, 2, 2]))
|
[
"941906145@qq.com"
] |
941906145@qq.com
|
05b4537bebdc1725ca8159bc11f0a78840762ede
|
fb9f061724925a2ac06948ae97ccfa02c5ffd366
|
/app/utils.py
|
443b843daa224f244857ee13285274cee1c40522
|
[] |
no_license
|
manish2382/plivo
|
39e018360555519f86b8b561330dd97f4a3c933d
|
d25337242ea218c865f51e709f85487407c23652
|
refs/heads/master
| 2021-05-03T11:43:26.455487
| 2016-09-27T17:59:35
| 2016-09-27T17:59:35
| 69,380,870
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,455
|
py
|
import redis
from werkzeug import exceptions
from models import Account, phone_number
from config import REDIS_HOST, REDIS_PORT
def get_redis_connection():
    """
    Returns redis connection
    :return: redis.StrictRedis client for db 0 on the configured host/port
    """
    return redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
def register_stop_request(text, key, value, expiry):
    """Register *key* -> *value* for *expiry* seconds when *text* is a STOP request.

    Anything other than an exact "STOP" message (after stripping surrounding
    whitespace) is ignored.

    :param text: incoming sms text
    :param key: redis key to register
    :param value: value stored under the key
    :param expiry: time-to-live in seconds
    """
    if text.strip() != "STOP":
        return
    connection = get_redis_connection()
    # the block auto-expires after `expiry` seconds
    connection.set(key, value, ex=expiry)
def check_stop_request(key, from_number):
    """Return an error template when *key* is registered as blocking *from_number*.

    Returns None when no STOP block is in effect.
    NOTE(review): redis-py may return bytes from get(); this equality check
    assumes str values (py2-era code) -- confirm against the client config.
    """
    connection = get_redis_connection()
    blocked = connection.get(key) == from_number
    return "sms from %s to %s blocked by STOP request" if blocked else None
def authenticate_account(request, number):
    """
    Authenticates the account and checks ownership of *number*.
    :param request: dict-like input; 'username' and 'password' (the auth_id)
    :param number: number expected to belong to the authenticated account
    :raises werkzeug.exceptions.Forbidden: when the credentials do not match
    :return: error template string when the number is not owned, else None
    """
    error = None
    username = request.get('username', None)
    auth_id = request.get('password', None)
    # authenticate the request
    account_obj = Account.query.filter_by(auth_id=auth_id, username=username).first()
    if not account_obj:
        raise exceptions.Forbidden()
    # check if number belongs to the particular account
    number_object = phone_number.query.filter_by(number=number, account_id=account_obj.id).first()
    if not number_object:
        # template: caller substitutes the parameter name ('from'/'to')
        error = "%s parameter not found"
    return error
def validate_input(request):
    """Validate the 'from', 'to' and 'text' fields of an sms request.

    Returns an error string for the *last* offending parameter (matching the
    historical check order from -> to -> text), or None when every field is
    present and within limits ('from'/'to': 6-16 chars, 'text': 1-120 chars).
    """
    fields = (
        ('from', request.get('from', None), 6, 16),
        ('to', request.get('to', None), 6, 16),
        ('text', request.get('text', None), 1, 120),
    )
    # presence check first; the last missing parameter wins
    missing = None
    for name, value, _, _ in fields:
        if not value:
            missing = "parameter '%s' is missing" % name
    if missing:
        return missing
    # then length validation; again the last invalid parameter wins
    invalid = None
    for name, value, lo, hi in fields:
        if not (lo <= len(value) <= hi):
            invalid = "parameter '%s' is invalid" % name
    return invalid
def check_and_update_usage(key, limit, timeout):
    """
    Checks the usage limit and updates the usage counter atomically.
    :param key: number that is being used for sending out smses
    :param limit: maximum sends allowed within one window
    :param timeout: seconds after which the counter window resets
    :return: error template string if the limit has been crossed, else None
    """
    redis_conn = get_redis_connection()
    # lock the section so that simultaneous requests see a consistent count
    lock = redis_conn.lock("OUTBOUND", sleep=0.1, thread_local=True, timeout=10)
    lock.acquire()
    # BUGFIX: the over-limit branch previously returned without releasing the
    # lock, stalling every other sender for up to the 10s lock timeout.
    # try/finally guarantees release on every path.
    try:
        count = redis_conn.get(key)
        if not count:
            # first use (or expired window): start a fresh window of `timeout` seconds
            redis_conn.set(key, 1, timeout)
        else:
            count = int(count)
            if count >= limit:
                return "limit reached for from %s"
            # increment the count while preserving the remaining window time
            ttl = redis_conn.pttl(key)
            redis_conn.psetex(key, ttl, count + 1)
    finally:
        lock.release()
    return None
|
[
"noreply@github.com"
] |
manish2382.noreply@github.com
|
c15882d22ea136a5878946d7fd4ffc4a7dfa12a7
|
575d197af5bbc31b89df37f8733e81707294948c
|
/Python2/examples/xml/xmldict1.py
|
126dbacf68b93b0a6e46b8d92085e7b1cb58c771
|
[] |
no_license
|
tisnik/python-programming-courses
|
5c7f1ca9cae07a5f99dd8ade2311edb30dc3e088
|
4e61221b2a33c19fccb500eb5c8cdb49f5b603c6
|
refs/heads/master
| 2022-05-13T07:51:41.138030
| 2022-05-05T15:37:39
| 2022-05-05T15:37:39
| 135,132,128
| 3
| 2
| null | 2021-04-06T12:19:16
| 2018-05-28T08:27:19
|
Python
|
UTF-8
|
Python
| false
| false
| 104
|
py
|
import xmltodict
with open("test5.xml", "r") as fin:
s = xmltodict.parse(fin.read())
print(s)
|
[
"ptisnovs@redhat.com"
] |
ptisnovs@redhat.com
|
f23056156e6da4aa3af19de37c89a96b86360bca
|
7fddea39cee2dc9a90b6c9cb44bff8bff6bdc8e1
|
/app.py
|
d0f5c5498320f72be988e387a28ecb3832a9ab27
|
[] |
no_license
|
Uche-UJO/sqlalchemy-challenge
|
b781cafff2c0bcaadb9a1161a536c191eef53fbd
|
9ee1bea0db07134c63d5c14be425c4807c4672dc
|
refs/heads/master
| 2022-04-15T01:21:18.071906
| 2020-04-13T02:36:13
| 2020-04-13T02:36:13
| 255,174,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,057
|
py
|
# Import all dependables
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify
import datetime as dt
# Setting up database
# check_same_thread=False lets Flask's worker threads share the single
# module-level session below; echo=True logs every SQL statement.
engine = create_engine("sqlite:///Resources/hawaii.sqlite", connect_args={'check_same_thread': False}, echo=True)
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB (shared by all routes)
session = Session(engine)
# Setup Flask
app = Flask(__name__)
# Routes
@app.route("/")
def welcome():
    """List all available api routes."""
    # The index page is served as inline HTML (no template engine is used).
    return"""<html>
    <h1>List of all available Honolulu, HI API routes</h1>
    <ul>
    <br>
    <li>
    Return a list of precipitations from last year:
    <br>
    <a href="/api/v1.0/precipitation">/api/v1.0/precipitation</a>
    </li>
    <br>
    <li>
    Return a JSON list of stations from the dataset:
    <br>
    <a href="/api/v1.0/stations">/api/v1.0/stations</a>
    </li>
    <br>
    <li>
    Return a JSON list of Temperature Observations (tobs) for the previous year:
    <br>
    <a href="/api/v1.0/tobs">/api/v1.0/tobs</a>
    </li>
    <br>
    <li>
    Return a JSON list of tmin, tmax, tavg for the dates greater than or equal to the date provided:
    <br>Replace <start> with a date in Year-Month-Day format.
    <br>
    <a href="/api/v1.0/2017-01-01">/api/v1.0/2017-01-01</a>
    </li>
    <br>
    <li>
    Return a JSON list of tmin, tmax, tavg for the dates in range of start date and end date inclusive:
    <br>
    Replace <start> and <end> with a date in Year-Month-Day format.
    <br>
    <br>
    <a href="/api/v1.0/2017-01-01/2017-01-07">/api/v1.0/2017-01-01/2017-01-07</a>
    </li>
    <br>
    </ul>
    </html>
    """
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return the {date: prcp} pairs for the final year of measurements."""
    # most recent measurement date in the table
    latest = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
    latest = latest[0]
    # one year (366 days, to cover a leap year) before the latest measurement
    cutoff = dt.datetime.strptime(latest, "%Y-%m-%d") - dt.timedelta(days=366)
    rows = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= cutoff).all()
    # the (date, prcp) tuples convert directly into a JSON object
    return jsonify(dict(rows))
@app.route("/api/v1.0/stations")
def stations():
    """Return a JSON list of the distinct station identifiers."""
    rows = session.query(Measurement.station).group_by(Measurement.station).all()
    # flatten the 1-tuples returned by the query into a plain list
    return jsonify(list(np.ravel(rows)))
@app.route("/api/v1.0/tobs")
def tobs():
    """Return (date, tobs) observations for the final year of measurements."""
    # most recent measurement date in the table
    latest = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
    latest = latest[0]
    # one year (366 days, to cover a leap year) before the latest measurement
    cutoff = dt.datetime.strptime(latest, "%Y-%m-%d") - dt.timedelta(days=366)
    rows = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= cutoff).all()
    return jsonify(list(rows))
@app.route("/api/v1.0/<start>")
def start(start=None):
    """Return per-date (date, tmin, tavg, tmax) for all dates >= *start*."""
    stats = (session.query(Measurement.date, func.min(Measurement.tobs),
                           func.avg(Measurement.tobs), func.max(Measurement.tobs))
             .filter(Measurement.date >= start)
             .group_by(Measurement.date)
             .all())
    return jsonify(list(stats))
@app.route("/api/v1.0/<start>/<end>")
def start_end(start=None, end=None):
    """Return per-date (date, tmin, tavg, tmax) for *start* <= date <= *end*."""
    stats = (session.query(Measurement.date, func.min(Measurement.tobs),
                           func.avg(Measurement.tobs), func.max(Measurement.tobs))
             .filter(Measurement.date >= start)
             .filter(Measurement.date <= end)
             .group_by(Measurement.date)
             .all())
    return jsonify(list(stats))
# Run the Flask development server only when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
[
"uutchei@gmail.com"
] |
uutchei@gmail.com
|
518fd1a0a4bf71eacac0439de1c651cd98d36c8f
|
e626396f4a254e960fe83a46ce026f34f4c69066
|
/controlled/urls.py
|
1f6bee6ca00b0b78e6cdfda9716d96ea72e79317
|
[] |
no_license
|
aalshrif90/djsurvey
|
b22506af2678d38ce205bf324a8a2bafc0875cd1
|
f5b5fb48eae37aef43287440351c242394d0c604
|
refs/heads/master
| 2022-12-14T06:01:36.619699
| 2019-05-27T17:03:28
| 2019-05-27T17:03:28
| 156,228,731
| 1
| 0
| null | 2022-11-22T03:47:03
| 2018-11-05T14:13:56
|
CSS
|
UTF-8
|
Python
| false
| false
| 411
|
py
|
from django.conf.urls import url
from . import views
# URLconf for this app: each route maps a regex to a view function.
# The `name` kwarg enables reverse URL resolution in templates and views.
# NOTE(review): order matters for Django URL matching — keep as-is.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^pcode/$', views.pcode, name='pcode'),
    url(r'^background/$', views.background, name='background'),
    url(r'^question/$', views.question, name='question'),
    url(r'^showProgramCode/$', views.showProgramCode, name='showProgramCode'),
    url(r'^exit/$', views.exit, name='exit'),
]
|
[
"gxr199@gmail.com"
] |
gxr199@gmail.com
|
e5f8064fbb82af59a4dd0144265b7a3fd9870f01
|
c9ad9ddd29f4e7a1dab339b23e014776dc4ade95
|
/src/users/templatetags/my_tags.py
|
b8ef0c032abdddda3f9b75228b351e4e9be2d8bd
|
[] |
no_license
|
club11/Filippow_Wlad
|
ddc06e5f59c4e8cb29db81a93b5e43568826c03f
|
36b069635139fc6b344a3ab3939a5f8eaf1a7961
|
refs/heads/master
| 2023-06-19T04:02:17.047305
| 2021-07-14T11:34:46
| 2021-07-14T11:34:46
| 367,748,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
from django import template
import requests
register = template.Library()

# NBRB (National Bank of the Republic of Belarus) endpoint; 431 is the USD rate id.
USD_ENDPOINT = 'https://www.nbrb.by/api/exrates/rates/431'


@register.simple_tag
def currency_rate():
    """Return the official USD exchange rate fetched from the NBRB API.

    Returns:
        The value of the ``Cur_OfficialRate`` field of the JSON payload,
        or ``None`` when the key is absent.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    # Without a timeout requests.get() can block forever and hang the
    # template render; bound the wait to 5 seconds.
    res = requests.get(USD_ENDPOINT, timeout=5)
    return res.json().get('Cur_OfficialRate')
|
[
"club11@bk.ru"
] |
club11@bk.ru
|
8fc1f9cede3267fb6223d0ce95a5db892a7799d1
|
84a3036916808c25ebdb8e3b1e72be178f6f196a
|
/lib/train/__init__.py
|
d817445dc52d31ca74dc3895cbed3bd0d887e06e
|
[] |
no_license
|
kadir-gunel/the-story-of-heads
|
6675829521eb3f1013a58e8ba5a22de8e507dd99
|
efaa0dd520400baa760654b5b85396c203d3cbb7
|
refs/heads/master
| 2023-07-02T21:33:52.963465
| 2021-08-02T09:06:09
| 2021-08-02T09:06:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,396
|
py
|
# Training routine
from collections import defaultdict
import tensorflow as tf
import sys
import inspect
import lib
from lib.train.saveload import initialize_uninitialized_variables
from . import algorithms, saveload, tickers
from .problem import Problem, SimpleProblem
from .tickers import Ticker, DistributedTicker, TrainService, LearningRateService, ModelService, \
LearningRateFn, SummaryWriterService, GlobalStepService
from ..session import profile_scope
from ..data import TfUploader
from ..util import nested_map
class DuplicateServiceError(Exception):
    """Raised when more than one ticker provides the same Service class."""
    pass
class DuplicateMethodError(Exception):
    """Raised when two different Service classes expose a method with the same name."""
    pass
# Main train loop
def train(problem, algorithm, iterator, tickers, tick_every_steps=0):
    """Run the training loop until a ticker stops it or the uploader is exhausted.

    Args:
        problem: training problem (Problem or SimpleProblem — wrapped by the
            internal tickers).
        algorithm: optimization algorithm providing update ops.
        iterator: batch iterator fed through a TfUploader.
        tickers: extra Ticker/Service objects, ordered by their `priority`.
        tick_every_steps: when > 0, a tick counter is also fed into the graph
            each step (see _GlobalStepService).

    Raises:
        RuntimeError: if the training set is empty.
    """
    uploader = TfUploader(iterator, capacity=5)
    if uploader.empty:
        raise RuntimeError("Trainset is empty")
    # The two built-in tickers always run first; user tickers sort by priority.
    global_step_ticker = _GlobalStepService(tick_every_steps)
    train_ticker = _TrainTicker(problem, algorithm, uploader)
    tickers = [global_step_ticker, train_ticker] + sorted(tickers, key=lambda t: t.priority)
    # Non-master MPI workers only keep tickers explicitly marked distributed.
    if not lib.ops.mpi.is_master():
        tickers = [t for t in tickers if isinstance(t, DistributedTicker)]
    real_tickers = [t for t in tickers if isinstance(t, Ticker)]
    session = tf.get_default_session()
    context = _TrainContext(uploader.iterator, tickers)
    initialize_uninitialized_variables()
    # Startup callbacks ordered by init_priority (may differ from run priority).
    for ticker in sorted(real_tickers, key=lambda t: t.init_priority):
        ticker.on_started(context)
    # second loop because tickers can depend on each other
    for ticker in real_tickers:
        ticker.prepare_ingraph_ops()
    with uploader:
        try:
            while not context.should_stop:
                # Collect each ticker's ops, run them all in one session.run,
                # then dispatch each result back to its ticker.
                batch_evals = []
                for ticker in real_tickers:
                    batch_evals.append(ticker.before_train_batch())
                with profile_scope(level=1):
                    if tick_every_steps == 0:
                        batch_results = session.run(batch_evals, feed_dict={
                            global_step_ticker.global_step_ingraph: global_step_ticker.global_step,
                            global_step_ticker.batch_no_ingraph: global_step_ticker.batch_no
                        })
                    else:
                        # Also feed the tick counter when stepping is throttled.
                        batch_results = session.run(batch_evals, feed_dict={
                            global_step_ticker.global_step_ingraph: global_step_ticker.global_step,
                            global_step_ticker.batch_no_ingraph: global_step_ticker.batch_no,
                            global_step_ticker.tick_no_ingraph: global_step_ticker.tick_no
                        })
                for ticker, result in zip(real_tickers, batch_results):
                    ticker.after_train_batch(result)
        except tf.errors.OutOfRangeError:
            # Normal termination: the input pipeline ran out of data.
            pass
    for ticker in real_tickers:
        ticker.on_finished()
## ============================================================================
# Internal functions
def _get_classes(cls, desired_cls):
bases = cls.__bases__
if desired_cls in bases:
yield cls
else:
for base in bases:
yield from _get_classes(base, desired_cls)
class _TrainContext(object):
    """Shared state handed to tickers: service dispatch, subscribers, iterator.

    Service methods of all provider objects are injected directly into this
    instance's __dict__, so tickers call them as context.method(...).
    """
    def __init__(self, iterator, objects):
        self._register_providers(objects)
        self.subscribers = self._register_subscribers(objects)
        self.iterator = iterator
        # Flag polled by the main train loop; set via stop_training().
        self.should_stop = False
    def stop_training(self, reason):
        """Ask the train loop to stop after the current batch, logging why."""
        print("Stopping because of %s" % reason, file=sys.stderr)
        self.should_stop = True
    def skip_train_data(self, batches):
        """Consume and discard *batches* batches from the iterator (e.g. on resume)."""
        print("! Skipping %d batches..." % batches, file=sys.stderr, flush=True, end='')
        for i in range(batches):
            # Progress marker at powers of two past 1000 to show liveness.
            if i > 1000 and (i & (i - 1)) == 0:
                print(" %d" % i, file=sys.stderr, flush=True, end='')
            next(self.iterator)
        print(" done", file=sys.stderr, flush=True)
    def _register_providers(self, objects):
        # Map each Service subclass to its single provider object, and splice
        # every service method onto this context instance. Duplicates are errors.
        service_providers = {}
        method_services = {}
        for obj in objects:
            for srv_class in _get_classes(type(obj), tickers.Service):
                if srv_class in service_providers:
                    raise DuplicateServiceError("Multiple providers for service %s detected: %s and %s" % (srv_class, service_providers[srv_class], obj))
                service_providers[srv_class] = obj
                for srv_method, _ in inspect.getmembers(srv_class, predicate=inspect.isfunction):
                    if srv_method in method_services:
                        raise DuplicateMethodError("Multiple services implementing %s detected: %s and %s" % (srv_method, method_services[srv_method], srv_class))
                    method_services[srv_method] = srv_class
                    # Bind the provider's method onto the context itself.
                    self.__dict__[srv_method] = getattr(obj, srv_method)
    def _register_subscribers(self, objects):
        # Group objects by every Subscriber subclass they implement.
        subscribers = defaultdict(list)
        for obj in objects:
            for subscriber_class in _get_classes(type(obj), tickers.Subscriber):
                subscribers[subscriber_class].append(obj)
        return subscribers
    def get_subscribers(self, subscriber_cls):
        """Return all registered objects implementing *subscriber_cls*."""
        return self.subscribers[subscriber_cls]
## ============================================================================
# Internal tickers
class _AnyProblem(Problem):
    """Adapter that gives SimpleProblem and full Problem a uniform interface.

    For a SimpleProblem, per-batch counters are just the loss tensor and the
    multibatch loss/summaries are derived by reduce_mean; for a full Problem
    every call is forwarded unchanged.
    """
    def __init__(self, problem):
        self.problem = problem
        # Chooses which branch each method below takes.
        self.simple = isinstance(problem, SimpleProblem)
    def parse_batch(self, batch, is_train):
        return self.problem.parse_batch(batch, is_train)
    def batch_counters(self, parsed_batch, is_train, **kwargs):
        # SimpleProblem exposes loss() directly; it doubles as the counters.
        if self.simple:
            return self.problem.loss(parsed_batch, is_train, **kwargs)
        else:
            return self.problem.batch_counters(parsed_batch, is_train, **kwargs)
    def loss_multibatch(self, counters, is_train):
        if self.simple:
            return tf.reduce_mean(counters)
        else:
            return self.problem.loss_multibatch(counters, is_train)
    def summary_multibatch(self, counters, prefix, is_train):
        if self.simple:
            return [tf.summary.scalar('%s/loss' % prefix, tf.reduce_mean(counters))]
        else:
            # Normalize to a list so callers can always iterate.
            op = self.problem.summary_multibatch(counters, prefix, is_train)
            if not isinstance(op, (list, tuple)):
                op = [op]
            return op
    def params_summary(self):
        if self.simple:
            return []
        else:
            op = self.problem.params_summary()
            if not isinstance(op, (list, tuple)):
                op = [op]
            return op
    def make_feed_dict(self, batch):
        if self.simple:
            return super(_AnyProblem, self).make_feed_dict(batch)
        else:
            return self.problem.make_feed_dict(batch)
    def get_batch_cost_fn(self):
        if self.simple:
            return super(_AnyProblem, self).get_batch_cost_fn()
        else:
            return self.problem.get_batch_cost_fn()
# - _GlobalStep - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class _GlobalStepService(
    DistributedTicker,
    GlobalStepService
):
    """Maintains global_step / batch_no (and optionally tick_no) counters.

    With tick_every_steps == 0 both counters advance every batch; otherwise
    global_step/batch_no advance only once every tick_every_steps ticks.
    The in-graph placeholders default to the variables but are fed the
    Python-side values each step by the train loop.
    """
    def __init__(self, tick_every_steps):
        # Only one global step may exist per graph.
        assert len(tf.get_collection(tf.GraphKeys.GLOBAL_STEP)) == 0, "Global step already registered!"
        # Python-side mirrors of the TF variables, updated in after_train_batch.
        self.global_step = 0
        self.global_step_var = tf.get_variable(
            'global_step', [], tf.int64,
            initializer=tf.constant_initializer(0), trainable=False
        )
        self.batch_no = 0
        self.batch_no_var = tf.get_variable(
            'batch_no', [], tf.int64,
            initializer=tf.constant_initializer(0), trainable=False
        )
        self.tick_every_steps = tick_every_steps
        if self.tick_every_steps > 0:
            self.tick_no = 0
            self.tick_no_var = tf.get_variable(
                'tick_no', [], tf.int64,
                initializer=tf.constant_initializer(0), trainable=False
            )
        with tf.name_scope("step"):
            # In on_started global_step_ingraph and batch_no_ingraph should return current value
            self.global_step_ingraph = tf.placeholder_with_default(self.global_step_var, shape=[])
            self.batch_no_ingraph = tf.placeholder_with_default(self.batch_no_var, shape=[])
            tf.add_to_collection(tf.GraphKeys.GLOBAL_STEP, self.global_step_ingraph)
            if self.tick_every_steps > 0:
                self.tick_no_ingraph = tf.placeholder_with_default(self.tick_no_var, shape=[])
                tf.add_to_collection("TICK_NO", self.tick_no_ingraph)
        # NOTE(review): redundant — tick_every_steps was already set above.
        self.tick_every_steps = tick_every_steps
    def on_finished(self):
        # Unregister the global step so a later train() run can register its own.
        tf.get_collection_ref(tf.GraphKeys.GLOBAL_STEP).clear()
    def on_train_batch_ingraph(self):
        """Return the ops that advance the counters for one training batch."""
        with tf.name_scope("step"):
            if self.tick_every_steps == 0:
                return [
                    tf.assign(self.global_step_var, self.global_step_var + 1),
                    tf.assign(self.batch_no_var, self.batch_no_var + 1)
                ]
            else:
                # Advance global_step/batch_no only on the last tick of each
                # tick_every_steps-sized window; tick_no always advances.
                is_it_time_yet = tf.equal(tf.mod(self.tick_no_var, self.tick_every_steps), self.tick_every_steps - 1)
                incr_global_step = tf.cond(is_it_time_yet,
                                           lambda: tf.assign(self.global_step_var, self.global_step_var + 1),
                                           lambda: tf.identity(self.global_step_var))
                incr_batch_no = tf.cond(is_it_time_yet,
                                        lambda: tf.assign(self.batch_no_var, self.batch_no_var + 1),
                                        lambda: tf.identity(self.batch_no_var))
                # tick_no increments strictly after the conditional increments.
                with tf.control_dependencies([incr_global_step, incr_batch_no]):
                    incr_tick_no = tf.assign(self.tick_no_var, self.tick_no_var + 1)
                return [
                    incr_global_step,
                    incr_batch_no,
                    incr_tick_no
                ]
    def after_train_batch(self, ingraph_result):
        # Mirror the freshly-assigned variable values on the Python side.
        if self.tick_every_steps == 0:
            self.global_step, self.batch_no = ingraph_result
        else:
            self.global_step, self.batch_no, self.tick_no = ingraph_result
    def get_batch_no(self):
        return self.batch_no
    def get_batch_no_ingraph(self):
        return self.batch_no_ingraph
    def get_global_step(self):
        return self.global_step
    def get_global_step_ingraph(self):
        return self.global_step_ingraph
    def set_global_step(self, global_step):
        """Force the global step to a given value (e.g. after checkpoint restore)."""
        with tf.name_scope("step"):
            self.global_step = tf.get_default_session().run(
                tf.assign(self.global_step_var, global_step))
# - _TrainTicker - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class _TrainTicker(DistributedTicker, ModelService, TrainService, LearningRateService):
    """Builds the loss/update graph and runs one optimizer step per batch.

    Also serves as the Model/Train/LearningRate services for other tickers.
    Counters and loss are aggregated across MPI workers (allgather/allreduce).
    """
    def __init__(self, problem, algorithm, uploader):
        # Bind to learning rate multiplier here
        LearningRateService.__init__(self, algorithm.learning_rate)
        self.problem = _AnyProblem(problem)
        self.algorithm = algorithm
        with tf.name_scope("counters"):
            # expand_dims adds a leading multibatch axis to each counter tensor.
            self.local_counters = nested_map(lambda t: tf.expand_dims(t, 0), self.problem.batch_counters(uploader.get_next(), True))
        with tf.name_scope("loss"):
            self.local_loss = self.problem.loss_multibatch(self.local_counters, True)
        with tf.name_scope("aggregate"):
            # Gather counters from all workers; average the loss across them.
            self.counters = nested_map(lambda t: lib.ops.mpi.allgather(t), self.local_counters)
            self.loss = lib.ops.mpi.allreduce(self.local_loss, name='TrainLoss')
        with tf.name_scope("update"):
            self.update_op = self.algorithm.create_update_ops(self.local_loss, self.loss)
    def on_train_batch_ingraph(self):
        # Evaluate loss + parameter update + current LR in one fetch.
        return [self.local_loss, self.update_op, self.get_learning_rate_ingraph()]
    def after_train_batch(self, ingraph_result):
        # Print dot.
        lr = ingraph_result[-1]
        self.set_learning_rate(lr)
        print('.', end='', file=sys.stderr, flush=True)
    def get_problem(self):
        return self.problem
    def get_model(self, name):
        # Unwrap _AnyProblem to reach the user problem's model registry.
        return self.problem.problem.models[name]
    def get_train_counters_ingraph(self):
        return self.counters
    def get_train_loss_ingraph(self):
        return self.loss
|
[
"voita-e-a@yandex.ru"
] |
voita-e-a@yandex.ru
|
0c24ca66b1dbbb2192380a4100670313cb1f5bde
|
6572cd664a7bcc3820d83192922a0c789d56106a
|
/notebooks_for_development/logg3pt0only_find_layden_coeffs_for_synthetic_spectra_no_errors.py
|
f1f6fa4b9282c060cd7714ac50a4d4cdca0ca52a
|
[
"MIT"
] |
permissive
|
mwanakijiji/rrlyrae_metallicity
|
3ac98d64595d73d1540cb428c7f08140d14617f3
|
e716a58443c015a1a209d78afb7728bdcdf1bd39
|
refs/heads/master
| 2023-01-07T04:17:08.671642
| 2022-01-09T23:10:04
| 2022-01-09T23:10:04
| 74,923,374
| 0
| 0
|
MIT
| 2022-12-27T15:35:49
| 2016-11-28T00:33:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 14,910
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# This tries a couple function-fitting routines to find the best-fit
# Layden coefficients if the input data is synthetic data with no errors
# Created 2020 Jan. 25 by E.S.
# #### In the following, we plot fits in KH space and write out data including the BIC to select
# #### the best model among variations that consider up to third-degree terms involving H (Balmer
# #### EW) and F (Fe/H), viz.
#
# #### $K = a + bH + cF + dHF + f(H^{2}) + g(F^{2}) + h(H^{2})F + kH(F^{2}) $
# #### $+ m(H^{3}) + n(F^{3}) $
#
# #### N.b. The BIC is
#
# #### $BIC = kln(n) -2ln(L)$
#
# #### where
# #### $k$: number of free parameters
# #### $n$: number of data points
# #### $L$: maximized likelihood function of model
# In[2]:
import pandas as pd
import numpy as np
import astropy
import itertools
import multiprocessing
import random
import string
from astropy import stats
from scipy import optimize
import matplotlib
matplotlib.use('Agg') # necessary in the cloud
import matplotlib.pyplot as plt
# In[3]:
# read in data
df = pd.read_csv("data/test_hk_data_winnowed_20200210_comparison.csv")
# remove the three really bad datapoints (known-bad synthetic spectra)
index_names1 = df[ df["original_spec_file_name"]=="600025p00.smo" ].index
df.drop(index_names1 , inplace=True)
index_names2 = df[ df["original_spec_file_name"]=="625025p02.smo" ].index
df.drop(index_names2 , inplace=True)
index_names3 = df[ df["original_spec_file_name"]=="600030p02.smo" ].index
df.drop(index_names3 , inplace=True)
index_names4 = df[ df["logg"]==2.5 ].index # test of individual values of logg
df.drop(index_names4 , inplace=True)
# re-number rows after the drops so positional indexing is contiguous
df = df.reset_index(drop = True)
df_choice = df
# set name of csv written-out data to which we will append BIC info
csv_file_name = "junk.csv"
# In[4]:
# figure out all subsets of coefficients beyond [a,b,c,d]
coeffs_strings = ["f","g","h","k","m","n"]
# sentinel list: a one-element "subset" standing for no extra coefficients
coeffs_strings_nan = ["NaN1"]
new_coeffs_6 = list(itertools.combinations(coeffs_strings, 6))
new_coeffs_5 = list(itertools.combinations(coeffs_strings, 5))
new_coeffs_4 = list(itertools.combinations(coeffs_strings, 4))
new_coeffs_3 = list(itertools.combinations(coeffs_strings, 3))
new_coeffs_2 = list(itertools.combinations(coeffs_strings, 2))
new_coeffs_1 = list(itertools.combinations(coeffs_strings, 1))
baseline = list(itertools.combinations(coeffs_strings_nan, 1)) # original Layden [a,b,c,d] coefficients only
# In[7]:
# create the array of arrays, so we can map them across cores
new_coeffs_mother_array = [baseline,new_coeffs_1,new_coeffs_2,new_coeffs_3,new_coeffs_4,new_coeffs_5,new_coeffs_6]
# In[8]:
def expanded_layden_all_coeffs(coeff_array, H, F):
    """Evaluate the expanded Layden relation for CaII K EW.

    Coefficient order (as of 2020 Mar 9):
    K = a + bH + cF + dHF + f(H^2) + g(F^2) + hF(H^2) + kH(F^2) + m(H^3) + n(F^3)

    Args:
        coeff_array: sequence of at least 10 coefficients [a..n] in the order above.
        H: Balmer EW (scalar or array-like).
        F: metallicity Fe/H (scalar or array-like).

    Returns:
        The modeled K EW, broadcast over H and F.
    """
    a, b, c, d, f, g, h, k, m, n = coeff_array[:10]
    H_sq = np.power(H, 2.)
    F_sq = np.power(F, 2.)
    return (a
            + b * H
            + c * F
            + d * H * F
            + f * H_sq
            + g * F_sq
            + h * F * H_sq
            + k * H * F_sq
            + m * np.power(H, 3.)
            + n * np.power(F, 3.))
# In[9]:
def original_layden_abcd(coeff_array, H, F):
    """Evaluate the original 4-term Layden calibration: K = a + bH + cF + dHF.

    Args:
        coeff_array: sequence whose first four entries are [a, b, c, d].
        H: Balmer EW (scalar or array-like).
        F: metallicity Fe/H (scalar or array-like).

    Returns:
        The modeled K EW, broadcast over H and F.
    """
    a = coeff_array[0]
    b = coeff_array[1]
    c = coeff_array[2]
    d = coeff_array[3]
    return a + b * H + c * F + d * H * F
# In[10]:
def find_bic_of_1_subarray(new_coeffs_array):
    """For each coefficient subset, least-squares fit the expanded Layden model,
    compute its BIC, append results to the global csv_file_name, and save a plot.

    Reads the module-level globals df_choice (the winnowed data) and
    csv_file_name. Coefficients absent from a subset are pinned to ~0 via
    near-zero bounds so the fitter cannot move them.

    Args:
        new_coeffs_array: list of tuples of coefficient letters beyond [a,b,c,d]
            (e.g. ('f','g')); the sentinel ('NaN1',) means "baseline abcd only".
    """
    for t in range(0, len(new_coeffs_array)):
        print("----------")
        print("coefficients being tested:")
        print(new_coeffs_array[t])
        # initialize initial values to 1
        pinit = np.ones(10, dtype=np.float)
        # initialize bounds
        # elements represent, in order, [a,b,c,d,f,g,k,h,m,n]
        bounds_upper_array = (np.inf,np.inf,np.inf,np.inf,np.inf,np.inf,np.inf,np.inf,np.inf,np.inf)
        bounds_lower_array = (-np.inf,-np.inf,-np.inf,-np.inf,-np.inf,-np.inf,-np.inf,-np.inf,-np.inf,-np.inf)
        # convert to a list so values can be changed
        bounds_upper_array_list = list(bounds_upper_array)
        bounds_lower_array_list = list(bounds_lower_array)
        # if certain coefficients don't appear, set them to effectively zero
        # (start at 0 and clamp to [0, 1e-40])
        if (new_coeffs_array[t].count("f") == 0):
            pinit[4] = 0
            bounds_upper_array_list[4] = 1e-40
            bounds_lower_array_list[4] = 0
        if (new_coeffs_array[t].count("g") == 0):
            pinit[5] = 0
            bounds_upper_array_list[5] = 1e-40
            bounds_lower_array_list[5] = 0
        if (new_coeffs_array[t].count("k") == 0):
            pinit[6] = 0
            bounds_upper_array_list[6] = 1e-40
            bounds_lower_array_list[6] = 0
        if (new_coeffs_array[t].count("h") == 0):
            pinit[7] = 0
            bounds_upper_array_list[7] = 1e-40
            bounds_lower_array_list[7] = 0
        if (new_coeffs_array[t].count("m") == 0):
            pinit[8] = 0
            bounds_upper_array_list[8] = 1e-40
            bounds_lower_array_list[8] = 0
        if (new_coeffs_array[t].count("n") == 0):
            pinit[9] = 0
            bounds_upper_array_list[9] = 1e-40
            bounds_lower_array_list[9] = 0
        # convert back to tuple
        bounds_upper_array = tuple(bounds_upper_array_list)
        bounds_lower_array = tuple(bounds_lower_array_list)
        bounds_array = [bounds_lower_array,bounds_upper_array]
        print("----------")
        print("bounds array:")
        print(bounds_array)
        # the error function: residuals weighted by the K-EW uncertainty
        errfunc_coeffs = lambda p, H, F, K, err_K: (K - expanded_layden_all_coeffs(p, H, F)) / err_K
        # the least-squares fit
        out = optimize.least_squares(errfunc_coeffs, pinit, bounds=bounds_array,
                                     args=(df_choice["balmer"], df_choice["final_feh_center"],
                                           df_choice["K"], df_choice["err_K"]))
        pfinal = out.x
        print("----------")
        print("pfinal:")
        print(pfinal)
        #####################
        # calculate BIC
        # N.b. astropy BIC assumes Gaussian distribution
        # retrieved K values, using best-fit params
        retrieved_K = expanded_layden_all_coeffs(pfinal, df_choice["balmer"], df_choice["final_feh_center"])
        # 'sum of squared residuals between model and data'
        ssr = np.sum(np.power(np.subtract(df_choice["K"],retrieved_K),2.))
        # number of parameters that were actually varying
        n_params = len(np.where(np.abs(pfinal) > 1e-15)[0]) # [a,b,c,d] + ...
        # number of datapoints
        n_samples = len(df_choice["balmer"])
        bic = astropy.stats.bayesian_info_criterion_lsq(ssr, n_params, n_samples)
        print("----------")
        print("n_params:")
        print(n_params)
        print("n_samples:")
        print(n_samples)
        print("----------")
        print("BIC:")
        print(bic)
        print(pfinal[0])
        print(pfinal[1])
        print(pfinal[-1])
        #####################
        # generate random string to tag the plot (so parallel workers don't collide)
        N_string = 7
        res = ''.join(random.choices(string.ascii_uppercase + string.digits, k=N_string))
        # record in csv (semicolon-separated; one row per tested subset)
        file_object = open(csv_file_name, 'a')
        file_object.write(str(new_coeffs_array[t])+";"+
                          str(bic)+";"+
                          str(n_params)+";"+
                          str(ssr)+";"+
                          str(n_samples)+";"+
                          str(pfinal[0])+";"+
                          str(pfinal[1])+";"+
                          str(pfinal[2])+";"+
                          str(pfinal[3])+";"+
                          str(pfinal[4])+";"+
                          str(pfinal[5])+";"+
                          str(pfinal[6])+";"+
                          str(pfinal[7])+";"+
                          str(pfinal[8])+";"+
                          str(pfinal[9])+";"+
                          str(res)+"\n")
        # Close the file
        file_object.close()
        # make some isometallicity lines for the plot
        isometal_balmer_abcissa = np.arange(2,12,0.2)
        retrieved_K_isometal_neg3pt0 = expanded_layden_all_coeffs(pfinal, isometal_balmer_abcissa, -3.0)
        retrieved_K_isometal_neg2pt5 = expanded_layden_all_coeffs(pfinal, isometal_balmer_abcissa, -2.5)
        retrieved_K_isometal_neg2pt0 = expanded_layden_all_coeffs(pfinal, isometal_balmer_abcissa, -2.)
        retrieved_K_isometal_neg1pt5 = expanded_layden_all_coeffs(pfinal, isometal_balmer_abcissa, -1.5)
        retrieved_K_isometal_neg1pt0 = expanded_layden_all_coeffs(pfinal, isometal_balmer_abcissa, -1.)
        retrieved_K_isometal_neg0pt5 = expanded_layden_all_coeffs(pfinal, isometal_balmer_abcissa, -0.5)
        retrieved_K_isometal_pos0pt0 = expanded_layden_all_coeffs(pfinal, isometal_balmer_abcissa, 0.0)
        retrieved_K_isometal_pos0pt2 = expanded_layden_all_coeffs(pfinal, isometal_balmer_abcissa, 0.2)
        # plot it
        plt.clf()
        plt.figure(figsize=(20,10))
        # underplot isometallicity lines
        plt.plot(isometal_balmer_abcissa, retrieved_K_isometal_neg3pt0, linestyle="--", label="Isometal, Fe/H=-3.0")
        plt.plot(isometal_balmer_abcissa, retrieved_K_isometal_neg2pt5, linestyle="--", label="Isometal, Fe/H=-2.5")
        plt.plot(isometal_balmer_abcissa, retrieved_K_isometal_neg2pt0, linestyle="--", label="Isometal, Fe/H=-2.0")
        plt.plot(isometal_balmer_abcissa, retrieved_K_isometal_neg1pt5, linestyle="--", label="Isometal, Fe/H=-1.5")
        plt.plot(isometal_balmer_abcissa, retrieved_K_isometal_neg1pt0, linestyle="--", label="Isometal, Fe/H=-1.0")
        plt.plot(isometal_balmer_abcissa, retrieved_K_isometal_neg0pt5, linestyle="--", label="Isometal, Fe/H=-0.5")
        plt.plot(isometal_balmer_abcissa, retrieved_K_isometal_pos0pt0, linestyle="--", label="Isometal, Fe/H=+0.0")
        plt.plot(isometal_balmer_abcissa, retrieved_K_isometal_pos0pt2, linestyle="--", label="Isometal, Fe/H=+0.2")
        # data points
        #print(len(df_choice["final_feh_center"]))
        plt.errorbar(df_choice["balmer"], df_choice["K"], yerr=df_choice["err_K"], marker="o", markerfacecolor="orange", ecolor="k", ls="none", label="Empirical")
        plt.scatter(df_choice["balmer"], retrieved_K,
                    label="Retrieved, Modified Layden eqn")
        # connect the empirical-retrieved dots, using list comprehension
        [plt.plot([df_choice["balmer"][j],df_choice["balmer"][j]],
                  [df_choice["K"][j],retrieved_K[j]], color="k") for j in range(len(df_choice["final_feh_center"]))]
        plt.ylabel("K EW ($\AA$)")
        plt.xlabel("Balmer EW ($\AA$)")
        plt.title(str(new_coeffs_array[t]) + "\nBIC = " + str(bic))
        plt.legend()
        plt.savefig("plot_"+res+".pdf")
        plt.close()
        print("----------")
        print("----------")
# In[13]:
# map the function across all available cores
ncpu = multiprocessing.cpu_count()
pool = multiprocessing.Pool(ncpu)
pool.map(find_bic_of_1_subarray, new_coeffs_mother_array)
# In[9]:
# baseline check
'''
abcd_older = [12.513685,-0.78716521,3.8778512,-0.24297523]
abcd_now = [1.21768692e+01,-7.52340434e-01,3.76117382e+00,-2.30912220e-01]
K_baseline = original_layden_abcd(coeff_array=abcd_older,H=df_choice["balmer"],F=df_choice["final_feh_center"])
ssr = np.sum(np.power(np.subtract(df_choice["K"],K_baseline),2.))
n_params = 4
n_samples = len(df_choice["balmer"])
bic = astropy.stats.bayesian_info_criterion_lsq(ssr, n_params, n_samples)
print(bic)
'''
# ### Compare BICs
# In[6]:
'''
BIC_orig = 245.1464970126952
BIC_1_f = 245.36213036898667
BIC_2_g = 197.42180787854656
BIC_3_h = 226.17141772986807
BIC_4_fg = 196.28752773438762
BIC_5_fh = 226.36328694853378
BIC_6_gh = 203.0216458555668
BIC_7_fgh = 116.29437000510694
print("$\Delta$ BIC_1_f, BIC_orig:")
print(np.subtract(BIC_1_f,BIC_orig))
print("------")
print("$\Delta$ BIC_2_g, BIC_orig:")
print(np.subtract(BIC_2_g,BIC_orig))
print("------")
print("$\Delta$ BIC_3_h, BIC_orig:")
print(np.subtract(BIC_3_h,BIC_orig))
print("------")
print("$\Delta$ BIC_4_fg, BIC_orig:")
print(np.subtract(BIC_4_fg,BIC_orig))
print("------")
print("$\Delta$ BIC_5_fh, BIC_orig:")
print(np.subtract(BIC_5_fh,BIC_orig))
print("------")
print("$\Delta$ BIC_6_gh, BIC_orig:")
print(np.subtract(BIC_6_gh,BIC_orig))
print("------")
print("$\Delta$ BIC_7_fgh, BIC_orig:")
print(np.subtract(BIC_7_fgh,BIC_orig))
print("------")
'''
# In[54]:
# Find some metallicities
'''
H = df_choice["balmer"]
K = df_choice["K"]
## calculate retrieved Fe/H using solution with [a,b,c,d,f,g,h]
modified_soln_7_abcdfgh = [16.92437966,-0.98640101,5.2261726,0.53344007,-0.06341921,0.27027538,-0.02034332]
coeff_a = modified_soln_7_abcdfgh[0]
coeff_b = modified_soln_7_abcdfgh[1]
coeff_c = modified_soln_7_abcdfgh[2]
coeff_d = modified_soln_7_abcdfgh[3]
coeff_f = modified_soln_7_abcdfgh[4]
coeff_g = modified_soln_7_abcdfgh[5]
coeff_h = modified_soln_7_abcdfgh[6]
A_cap = coeff_g*H + coeff_h*np.power(H,2)
B_cap = coeff_c + coeff_d*H + coeff_f*np.power(H,2)
C_cap = -K + coeff_a + coeff_b*H
F_pos = np.divide(-B_cap + np.sqrt(np.power(B_cap,2.)-4*A_cap*C_cap),2*A_cap)
F_neg = np.divide(-B_cap - np.sqrt(np.power(B_cap,2.)-4*A_cap*C_cap),2*A_cap)
## and calculate retrieved Fe/H using just [a,b,c,d] (the Layden fit, but with our best-fit values)
original_layden_our_fit_soln_0_abcd = [12.51368502,-0.78716519,3.87785117,-0.24297523]
coeff_a_original = original_layden_our_fit_soln_0_abcd[0]
coeff_b_original = original_layden_our_fit_soln_0_abcd[1]
coeff_c_original = original_layden_our_fit_soln_0_abcd[2]
coeff_d_original = original_layden_our_fit_soln_0_abcd[3]
F_original_our_fit = np.divide(K-coeff_a_original-coeff_b_original*H,coeff_c_original+coeff_d_original*H)
plt.clf()
plt.scatter(df_choice["final_feh_center"],F_original_our_fit,facecolors="none",
edgecolors="k",label="Layden-style abcd")
plt.scatter(df_choice["final_feh_center"],F_pos,facecolors="orange",edgecolors="r",
label="Modified abcdfgh: positive soln")
for i in range(0,len(df)):
plt.annotate(df["original_spec_file_name"].iloc[i],
xy=(df["final_feh_center"].iloc[i],F_pos.iloc[i]),
xytext=(df["final_feh_center"].iloc[i],F_pos.iloc[i]))
plt.scatter(df_choice["final_feh_center"],F_neg,label="Modified abcdfgh: negative soln")
plt.plot([-3,2],[-3,2],linestyle="--")
plt.xlabel("Injected Fe/H")
plt.ylabel("Retrieved Fe/H")
plt.legend()
plt.show()
'''
|
[
"spalding@email.arizona.edu"
] |
spalding@email.arizona.edu
|
49a3c945fc0bb39a04e1e7830e495de0bffae069
|
2471db05fe8dc2b6186c7485f58217c7663f723e
|
/storage_table_demo_v3.py
|
06b586876d49d40492e88f1e8ec41e013073f24e
|
[] |
no_license
|
psoe195/azurehw
|
b1d898ee7f634aea3366e731dbfa27056efe43c8
|
b3aa106272ebb5a32e28a4f51c501a4f0489070e
|
refs/heads/master
| 2021-08-15T19:10:26.133187
| 2017-11-18T05:17:13
| 2017-11-18T05:17:13
| 111,159,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,209
|
py
|
import string,random,time,azurerm,json
from azure.storage.table import TableService, Entity
# NOTE: this is a Python 2 script (print statements, raw_input below).
# Authenticate against Azure using the credentials cached by the Azure CLI.
auth_token = azurerm.get_access_token_from_cli()
subscription_id = azurerm.get_subscription_from_cli()
print "subscription_id = " + subscription_id
print "auth_token = "+ auth_token
# Define variables with random resource group and storage account names
# (storage account names must be globally unique, hence the random suffix)
resourcegroup_name = 'ps'+''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(6))
storageaccount_name = 'ps'+''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(6))
location = 'westus'
print "resourcegroup_name = " + resourcegroup_name
print "storageaccount_name = " + storageaccount_name
print "location = " + location
###
# Create a resource group for our demo
# We need a resource group and a storage account. A random name is generated, as each storage account name must be globally unique.
###
response = azurerm.create_resource_group(auth_token, subscription_id, resourcegroup_name, location)
# 200 = already exists / updated, 201 = created
if response.status_code == 200 or response.status_code == 201:
    print('Resource group: ' + resourcegroup_name + ' created successfully.')
else:
    print('Error creating resource group')
# Create a storage account for our demo (202 = accepted, provisioning is async)
response = azurerm.create_storage_account(auth_token, subscription_id, resourcegroup_name, storageaccount_name, location, storage_type='Standard_LRS')
if response.status_code == 202:
    print('Storage account: ' + storageaccount_name + ' created successfully.')
    time.sleep(2)
else:
    print('Error creating storage account')
###
# Use the Azure Storage SDK for Python to create a Table
###
print('\nLet\'s create an Azure Storage Table to store some data.')
raw_input('Press Enter to continue...')
# Each storage account has a primary and secondary access key.
# These keys are used by aplications to access data in your storage account, such as Tables.
# Obtain the primary storage access key for use with the rest of the demo
response = azurerm.get_storage_account_keys(auth_token, subscription_id, resourcegroup_name, storageaccount_name)
storageaccount_keys = json.loads(response.text)
storageaccount_primarykey = storageaccount_keys['keys'][0]['value']
# Create the Table with the Azure Storage SDK and the access key obtained in the previous step
table_service = TableService(account_name=storageaccount_name, account_key=storageaccount_primarykey)
response = table_service.create_table('itemstable')
# create_table returns a boolean, not an HTTP response object
if response == True:
    print('Storage Table: itemstable created successfully.\n')
else:
    print('Error creating Storage Table.\n')
time.sleep(1)
###
# Use the Azure Storage SDK for Python to create some entries in the Table
###
print('Now let\'s add some entries to our Table.\nRemember, Azure Storage Tables is a NoSQL datastore, so this is similar to adding records to a database.')
raw_input('Press Enter to continue...')
# Each entry in a Table is called an 'Entity'.
# Here, we add car entities, each with make/model/year/color/cost properties.
#
# A partition key tracks how like-minded entries in the Table are created and queried.
# A row key is a unique ID for each entity in the partition
# These two properties are used as a primary key to index the Table. This makes queries much quicker.
car = Entity()
car.PartitionKey = 'carmenu'
car.RowKey = '001'
car.make = 'Toyota'
car.model = 'Avalon'
car.year = 2017
car.color = 'Blue'
car.cost = 35000
table_service.insert_entity('itemstable', car)
print('Created entry for Toyota Avalon...')
car = Entity()
car.PartitionKey = 'carmenu'
car.RowKey = '002'
car.make = 'Toyota'
car.model = 'Corolla'
car.year = 2017
car.color = 'White'
car.cost = 18000
table_service.insert_entity('itemstable', car)
print('Created entry for Toyota Corolla...')
car = Entity()
car.PartitionKey = 'carmenu'
car.RowKey = '003'
car.make = 'Honda'
car.model = 'Accord'
car.year = 2017
car.color = 'Red'
car.cost = 25000
table_service.insert_entity('itemstable', car)
print('Created entry for Honda Accord...\n')
# A second partition ('coffeestore') holds coffee entities with a different
# property set — Tables are schemaless, so partitions can differ in shape.
coffee = Entity()
coffee.PartitionKey = 'coffeestore'
coffee.RowKey = '006'
coffee.brand = 'Star bucks'
coffee.flavor = 'dark'
coffee.size = 'small'
coffee.cost = 1.5
table_service.insert_entity('itemstable', coffee)
print('Created entry for Star bucks dark small...\n')
time.sleep(1)
coffee = Entity()
coffee.PartitionKey = 'coffeestore'
coffee.RowKey = '007'
coffee.brand = 'Star bucks'
coffee.flavor = 'sweet'
coffee.size = 'medium'
coffee.cost = 2.5
table_service.insert_entity('itemstable', coffee)
print('Created entry for Star bucks sweet medium...\n')
time.sleep(1)
coffee = Entity()
coffee.PartitionKey = 'coffeestore'
coffee.RowKey = '008'
coffee.brand = 'Peets'
coffee.flavor = 'Extra dark'
coffee.size = 'Large'
coffee.cost = 3.5
table_service.insert_entity('itemstable', coffee)
print('Created entry for Peets extra dark large...\n')
time.sleep(1)
###
# Use the Azure Storage SDK for Python to query for entities in our Table
###
print('With some data in our Azure Storage Table, we can query the data.\nLet\'s see what the car menu looks like.')
raw_input('Press Enter to continue...')
# In this query, you define the partition key to search within, and then which properties to retrieve
# Structuring queries like this improves performance as your application scales up and keeps the queries efficient
items = table_service.query_entities('itemstable', filter="PartitionKey eq 'carmenu'", select='make,model,year, color,cost')
for item in items:
    print('Make: ' + str(item.make))
    print('Model: ' + str(item.model))
    print('Year: ' + str(item.year))
    print('color: ' + str(item.color))
    print('Price: ' + str(item.cost) + '\n')
# Same pattern for the coffee partition, with its own property list.
items = table_service.query_entities('itemstable', filter="PartitionKey eq 'coffeestore'", select='brand,flavor,size,cost')
for item in items:
    print('Brand: ' + str(item.brand))
    print('Flavor: ' + str(item.flavor))
    print('Size: ' + str(item.size))
    print('Price: ' + str(item.cost) + '\n')
time.sleep(1)
###
# This was a quick demo to see Tables in action.
# Although the actual cost is minimal (fractions of a cent per month) for the three entities we created, it's good to clean up resources when you're done
###
print('\nThis is a basic example of how Azure Storage Tables behave like a database.\nTo keep things tidy, let\'s clean up the Azure Storage resources we created.')
raw_input('Press Enter to continue...')
response = table_service.delete_table('itemstable')
if response == True:
    print('Storage table: itemstable deleted successfully.')
else:
    print('Error deleting Storage Table')
# Deleting the resource group also removes the storage account inside it.
response = azurerm.delete_resource_group(auth_token, subscription_id, resourcegroup_name)
if response.status_code == 202:
    print('Resource group: ' + resourcegroup_name + ' deleted successfully.')
else:
    print('Error deleting resource group.')
|
[
"petersoe@gmail.com"
] |
petersoe@gmail.com
|
77f354f61023fa4a42e61899214b2e46445fd876
|
0b842bcb3bf20e1ce628d39bf7e11abd7699baf9
|
/oscar/a/api/yang/modules/tech/common/qwilt_tech_platform_temperature/tech/platform/temperature/status/status_maapi_gen.py
|
03bc31a5274180986553996d0f3063aedcce7b29
|
[] |
no_license
|
afeset/miner2-tools
|
75cc8cdee06222e0d81e39a34f621399e1ceadee
|
81bcc74fe7c0ca036ec483f634d7be0bab19a6d0
|
refs/heads/master
| 2016-09-05T12:50:58.228698
| 2013-08-27T21:09:56
| 2013-08-27T21:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,266
|
py
|
# Copyright Qwilt, 2012
#
# The code contained in this file may not be used by any other entities without explicit written permission from Qwilt.
#
# Author: naamas
from a.infra.misc.enum_with_value import EnumWithValue
from a.infra.basic.return_codes import ReturnCodes
from a.infra.misc.init_guard import InitGuard
from a.sys.confd.pyconfdlib.tag_values import TagValues
from a.sys.confd.pyconfdlib.value import Value
from a.sys.confd.pyconfdlib.key_path import KeyPath
from status_maapi_base_gen import StatusMaapiBase
from a.api.yang.modules.tech.common.qwilt_tech_platform_temperature.qwilt_tech_platform_temperature_module_gen import TemperatureStatusType
from a.api.yang.modules.tech.common.qwilt_tech_platform_temperature.qwilt_tech_platform_temperature_module_gen import TemperatureOperationalStatusReasonType
from a.api.yang.modules.tech.common.qwilt_tech_platform_temperature.qwilt_tech_platform_temperature_module_gen import TemperatureOperationalStatusType
class BlinkyStatusMaapi(StatusMaapiBase):
    """MAAPI accessor for the tech/platform/temperature/status container.

    For each YANG leaf of the container (operational-status,
    operational-status-reason, temperature-status-raw, temperature-status)
    this class keeps a (requested, value, set) state triple and exposes
    request*/get*/has*/set* accessors, plus read/write entry points that
    move the values over the bound domain's MAAPI interface.

    NOTE(review): this is machine-generated code (see the template data in
    the string at the bottom of this module) - prefer regenerating over
    hand-editing the logic.
    """
    def __init__ (self, logger):
        # Guards against use-before-init and double-init of this object.
        self.myInitGuard = InitGuard()
        self._log=logger.createLogger("sys-blinky-oper-example","blinky-maapi-status")
        # MAAPI domain; bound later by init().
        self.domain = None
        # Per-leaf state triples: <leaf>Requested - include the leaf in the
        # next read; <leaf> - cached value; <leaf>Set - cached value is valid.
        self.operationalStatusRequested = False
        self.operationalStatus = None
        self.operationalStatusSet = False
        self.operationalStatusReasonRequested = False
        self.operationalStatusReason = None
        self.operationalStatusReasonSet = False
        self.temperatureStatusRawRequested = False
        self.temperatureStatusRaw = None
        self.temperatureStatusRawSet = False
        self.temperatureStatusRequested = False
        self.temperatureStatus = None
        self.temperatureStatusSet = False
    def init (self, domain):
        # Bind this accessor to a MAAPI domain. Must be called exactly once
        # before any other method (enforced by the init guard).
        self.myInitGuard.crashIfInitDone()
        for logFunc in self._log('init').debug3Func(): logFunc('called. domain=%s', domain)
        self.domain = domain
        self.myInitGuard.initDone()
    # ---- bulk "requested" toggles ------------------------------------------
    def requestConfigAndOper (self):
        # Request every leaf (this container only has operational leaves).
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-config-and-oper').debug3Func(): logFunc('called, PARAMS')
        self.requestOperationalStatus(True)
        self.requestOperationalStatusReason(True)
        self.requestTemperatureStatusRaw(True)
        self.requestTemperatureStatus(True)
    def requestConfig (self):
        # No config leaves in this container, so everything is un-requested.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-config').debug3Func(): logFunc('called, PARAMS')
        self.requestOperationalStatus(False)
        self.requestOperationalStatusReason(False)
        self.requestTemperatureStatusRaw(False)
        self.requestTemperatureStatus(False)
    def requestOper (self):
        # All four leaves are operational - request them all.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-oper').debug3Func(): logFunc('called, PARAMS')
        self.requestOperationalStatus(True)
        self.requestOperationalStatusReason(True)
        self.requestTemperatureStatusRaw(True)
        self.requestTemperatureStatus(True)
    def clearAllRequested (self):
        # Un-request every leaf (also clears the per-leaf "set" flags,
        # since request<Leaf>(False) resets them).
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('clear-all-requested').debug3Func(): logFunc('called, PARAMS')
        self.requestOperationalStatus(False)
        self.requestOperationalStatusReason(False)
        self.requestTemperatureStatusRaw(False)
        self.requestTemperatureStatus(False)
    def clearAllSet (self):
        # Generated no-op: no config leaves exist, so there is nothing to clear.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('clear-all-set').debug3Func(): logFunc('called, PARAMS')
    # ---- public read/write entry points ------------------------------------
    def write (self
               , trxContext=None
               ):
        # Write the currently-set values to MAAPI (no-op set here: the
        # container has no writable leaves, see _fillWriteTagValues).
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('write').debug3Func(): logFunc('called, PARAMS')
        return self._internalWrite(trxContext)
    def read (self
              , trxContext=None):
        # Best-effort read: leaves that fail to parse are simply left unset.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('read').debug3Func(): logFunc('called, PARAMS')
        return self._internalRead(
            False,
            trxContext)
    def readAllOrFail (self
                       , trxContext=None):
        # Strict read: any unparsable requested leaf fails the whole read.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('read-all-or-fail').debug3Func(): logFunc('called, PARAMS')
        return self._internalRead(
            True,
            trxContext)
    # ---- operational-status leaf accessors ---------------------------------
    def requestOperationalStatus (self, requested):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-operationalstatus').debug3Func(): logFunc('called. requested=%s', requested)
        self.operationalStatusRequested = requested
        self.operationalStatusSet = False
    def isOperationalStatusRequested (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('is-operationalstatus-requested').debug3Func(): logFunc('called. requested=%s', self.operationalStatusRequested)
        return self.operationalStatusRequested
    def getOperationalStatus (self):
        # Returns the cached value, or None if it has not been set/read.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-operationalstatus').debug3Func(): logFunc('called. self.operationalStatusSet=%s, self.operationalStatus=%s', self.operationalStatusSet, self.operationalStatus)
        if self.operationalStatusSet:
            return self.operationalStatus
        return None
    def hasOperationalStatus (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-operationalstatus').debug3Func(): logFunc('called. self.operationalStatusSet=%s, self.operationalStatus=%s', self.operationalStatusSet, self.operationalStatus)
        if self.operationalStatusSet:
            return True
        return False
    def setOperationalStatus (self, operationalStatus):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-operationalstatus').debug3Func(): logFunc('called. operationalStatus=%s, old=%s', operationalStatus, self.operationalStatus)
        self.operationalStatusSet = True
        self.operationalStatus = operationalStatus
    # ---- operational-status-reason leaf accessors --------------------------
    def requestOperationalStatusReason (self, requested):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-operationalstatusreason').debug3Func(): logFunc('called. requested=%s', requested)
        self.operationalStatusReasonRequested = requested
        self.operationalStatusReasonSet = False
    def isOperationalStatusReasonRequested (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('is-operationalstatusreason-requested').debug3Func(): logFunc('called. requested=%s', self.operationalStatusReasonRequested)
        return self.operationalStatusReasonRequested
    def getOperationalStatusReason (self):
        # Returns the cached value, or None if it has not been set/read.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-operationalstatusreason').debug3Func(): logFunc('called. self.operationalStatusReasonSet=%s, self.operationalStatusReason=%s', self.operationalStatusReasonSet, self.operationalStatusReason)
        if self.operationalStatusReasonSet:
            return self.operationalStatusReason
        return None
    def hasOperationalStatusReason (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-operationalstatusreason').debug3Func(): logFunc('called. self.operationalStatusReasonSet=%s, self.operationalStatusReason=%s', self.operationalStatusReasonSet, self.operationalStatusReason)
        if self.operationalStatusReasonSet:
            return True
        return False
    def setOperationalStatusReason (self, operationalStatusReason):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-operationalstatusreason').debug3Func(): logFunc('called. operationalStatusReason=%s, old=%s', operationalStatusReason, self.operationalStatusReason)
        self.operationalStatusReasonSet = True
        self.operationalStatusReason = operationalStatusReason
    # ---- temperature-status-raw leaf accessors -----------------------------
    def requestTemperatureStatusRaw (self, requested):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-temperaturestatusraw').debug3Func(): logFunc('called. requested=%s', requested)
        self.temperatureStatusRawRequested = requested
        self.temperatureStatusRawSet = False
    def isTemperatureStatusRawRequested (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('is-temperaturestatusraw-requested').debug3Func(): logFunc('called. requested=%s', self.temperatureStatusRawRequested)
        return self.temperatureStatusRawRequested
    def getTemperatureStatusRaw (self):
        # Returns the cached value, or None if it has not been set/read.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-temperaturestatusraw').debug3Func(): logFunc('called. self.temperatureStatusRawSet=%s, self.temperatureStatusRaw=%s', self.temperatureStatusRawSet, self.temperatureStatusRaw)
        if self.temperatureStatusRawSet:
            return self.temperatureStatusRaw
        return None
    def hasTemperatureStatusRaw (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-temperaturestatusraw').debug3Func(): logFunc('called. self.temperatureStatusRawSet=%s, self.temperatureStatusRaw=%s', self.temperatureStatusRawSet, self.temperatureStatusRaw)
        if self.temperatureStatusRawSet:
            return True
        return False
    def setTemperatureStatusRaw (self, temperatureStatusRaw):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-temperaturestatusraw').debug3Func(): logFunc('called. temperatureStatusRaw=%s, old=%s', temperatureStatusRaw, self.temperatureStatusRaw)
        self.temperatureStatusRawSet = True
        self.temperatureStatusRaw = temperatureStatusRaw
    # ---- temperature-status leaf accessors ---------------------------------
    def requestTemperatureStatus (self, requested):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-temperaturestatus').debug3Func(): logFunc('called. requested=%s', requested)
        self.temperatureStatusRequested = requested
        self.temperatureStatusSet = False
    def isTemperatureStatusRequested (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('is-temperaturestatus-requested').debug3Func(): logFunc('called. requested=%s', self.temperatureStatusRequested)
        return self.temperatureStatusRequested
    def getTemperatureStatus (self):
        # Returns the cached value, or None if it has not been set/read.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-temperaturestatus').debug3Func(): logFunc('called. self.temperatureStatusSet=%s, self.temperatureStatus=%s', self.temperatureStatusSet, self.temperatureStatus)
        if self.temperatureStatusSet:
            return self.temperatureStatus
        return None
    def hasTemperatureStatus (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-temperaturestatus').debug3Func(): logFunc('called. self.temperatureStatusSet=%s, self.temperatureStatus=%s', self.temperatureStatusSet, self.temperatureStatus)
        if self.temperatureStatusSet:
            return True
        return False
    def setTemperatureStatus (self, temperatureStatus):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-temperaturestatus').debug3Func(): logFunc('called. temperatureStatus=%s, old=%s', temperatureStatus, self.temperatureStatus)
        self.temperatureStatusSet = True
        self.temperatureStatus = temperatureStatus
    # ---- internal machinery ------------------------------------------------
    def _clearAllReadData (self):
        # Reset all leaf values before a strict read. NOTE(review): the
        # generated code resets values to 0 (not None) here; getters still
        # return None because the "set" flags are cleared.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('clear-all-read-data').debug3Func(): logFunc('called')
        self.operationalStatus = 0
        self.operationalStatusSet = False
        self.operationalStatusReason = 0
        self.operationalStatusReasonSet = False
        self.temperatureStatusRaw = 0
        self.temperatureStatusRawSet = False
        self.temperatureStatus = 0
        self.temperatureStatusSet = False
    def _getSelfKeyPath (self
                         , junkForTemplate):
        # Build the confd key path /tech/platform/temperature/status by
        # prepending ancestors innermost-first.
        for logFunc in self._log('get-self-key-path').debug3Func(): logFunc('called. PARAMS, junkForTemplate=%s', junkForTemplate)
        keyPath = KeyPath()
        xmlVal = Value()
        xmlVal.setXmlTag(("status", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature", "qt-pltf-temperature"))
        keyPath.addKeyPathPrefix(xmlVal)
        xmlVal = Value()
        xmlVal.setXmlTag(("temperature", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature", "qt-pltf-temperature"))
        keyPath.addKeyPathPrefix(xmlVal)
        xmlVal = Value()
        xmlVal.setXmlTag(("platform", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform", "qt-pltf"))
        keyPath.addKeyPathPrefix(xmlVal)
        xmlVal = Value()
        xmlVal.setXmlTag(("tech", "http://qwilt.com/ns/yang/device/tech/qwilt-tech", "qt"))
        keyPath.addKeyPathPrefix(xmlVal)
        for logFunc in self._log('get-self-key-path-done').debug3Func(): logFunc('done. keyPath=%s. PARAMS', keyPath)
        return keyPath
    def _internalWrite (self,
                        trxContext):
        # Collect tag-values and deletions, then push them via the domain.
        # Returns ReturnCodes.kOk on success.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('internal-write').debug3Func(): logFunc('called. PARAMS')
        tagValueList = TagValues()
        res = self._fillWriteTagValues(tagValueList)
        if res != ReturnCodes.kOk:
            for logFunc in self._log('write-fill-write-tag-value-failed').errorFunc(): logFunc('_fillWriteTagValues() failed. PARAMS')
            return ReturnCodes.kGeneralError
        itemsToDelete = []
        res = self._collectItemsToDelete(
            itemsToDelete)
        if res != ReturnCodes.kOk:
            for logFunc in self._log('write-collect-items-to-delete-failed').errorFunc(): logFunc('_collectItemsToDelete() failed. PARAMS')
            return ReturnCodes.kGeneralError
        keyPath = self._getSelfKeyPath(
            None)
        res = self.domain.writeMaapi(tagValueList, keyPath, trxContext, itemsToDelete)
        if res != ReturnCodes.kOk:
            for logFunc in self._log('write-domain-failed').errorFunc(): logFunc('domain.writeMaapi() failed. PARAMS')
            return ReturnCodes.kGeneralError
        for logFunc in self._log('internal-write-done').debug3Func(): logFunc('done. PARAMS')
        return ReturnCodes.kOk
    def _internalRead (self,
                       readAllOrFail,
                       trxContext):
        # Read all requested leaves. With readAllOrFail=True any parse
        # failure clears all data and returns kGeneralError.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('internal-read').debug3Func(): logFunc('called. PARAMS, readAllOrFail=%s', readAllOrFail)
        if readAllOrFail:
            self._clearAllReadData()
        tagValueList = TagValues()
        res = self._fillReadTagValues(tagValueList)
        if res != ReturnCodes.kOk:
            for logFunc in self._log('read-fill-read-tag-value-failed').errorFunc(): logFunc('_fillReadTagValues() failed. PARAMS')
            return ReturnCodes.kGeneralError
        keyPath = self._getSelfKeyPath(
            None)
        res = self.domain.readMaapi(tagValueList, keyPath, trxContext)
        if res != ReturnCodes.kOk:
            for logFunc in self._log('read-domain-failed').errorFunc(): logFunc('domain.readMaapi() failed. PARAMS')
            return ReturnCodes.kGeneralError
        res = self._readTagValues(tagValueList, readAllOrFail)
        if res != ReturnCodes.kOk:
            for logFunc in self._log('read-read-tag-values-failed').errorFunc(): logFunc('_readTagValues() failed. PARAMS')
            return ReturnCodes.kGeneralError
        for logFunc in self._log('internal-read-done').debug3Func(): logFunc('done. PARAMS, readAllOrFail=%s', readAllOrFail)
        return ReturnCodes.kOk
    def _collectItemsToDelete (self,
                               itemsToDelete):
        # Generated no-op: nothing in this container is ever deleted.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('collect-items-to-delete').debug3Func(): logFunc('called: itemsToDelete=%s. PARAMS', itemsToDelete)
        for logFunc in self._log('collect-items-to-delete-done').debug3Func(): logFunc('done: itemsToDelete=%s. PARAMS', itemsToDelete)
        return ReturnCodes.kOk
    def _fillWriteTagValues (self, tagValueList):
        # Generated no-op: this container has no writable (config) leaves.
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('fill-write-tag-values').debug3Func(): logFunc('called: tagValueList=%s', tagValueList)
        return ReturnCodes.kOk
    def _fillReadTagValues (self, tagValueList):
        # Push one empty placeholder per requested leaf; the MAAPI read
        # fills them in, in this exact order (see _readTagValues).
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('fill-read-tag-values').debug3Func(): logFunc('called: tagValueList=%s', tagValueList)
        if self.isOperationalStatusRequested():
            valOperationalStatus = Value()
            valOperationalStatus.setEmpty()
            tagValueList.push(("operational-status", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature"), valOperationalStatus)
        if self.isOperationalStatusReasonRequested():
            valOperationalStatusReason = Value()
            valOperationalStatusReason.setEmpty()
            tagValueList.push(("operational-status-reason", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature"), valOperationalStatusReason)
        if self.isTemperatureStatusRawRequested():
            valTemperatureStatusRaw = Value()
            valTemperatureStatusRaw.setEmpty()
            tagValueList.push(("temperature-status-raw", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature"), valTemperatureStatusRaw)
        if self.isTemperatureStatusRequested():
            valTemperatureStatus = Value()
            valTemperatureStatus.setEmpty()
            tagValueList.push(("temperature-status", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature"), valTemperatureStatus)
        return ReturnCodes.kOk
    def _readTagValues (self, tagValueList, readAllOrFail):
        # Pop the answers in the same order _fillReadTagValues pushed them,
        # verifying tag/namespace, converting, and caching each leaf.
        # NOTE(review): `res` is never reassigned after kOk, so the
        # `res != ReturnCodes.kOk` parts of the checks below are vestigial
        # generated code - the effective check is `tempVar is None`.
        __pychecker__ = 'maxlines=300'
        __pychecker__ = 'maxreturns=30'
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('read-tag-values').debug3Func(): logFunc('called. readAllOrFail=%s, tagValueList=%s', readAllOrFail, tagValueList)
        res = ReturnCodes.kOk
        for logFunc in self._log('read-tag-values-leaves').debug3Func(): logFunc('reading leaves. tagValueList=%s', tagValueList)
        if self.isOperationalStatusRequested():
            ((tag, ns), tempValue) = tagValueList.popFront()
            if (tag != "operational-status") or \
                (ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature"):
                for logFunc in self._log('reag-tag-values-unexpected-tag-leaf-operationalstatus').errorFunc(): logFunc('got unexpected tag-value for leaf: %s. expected: (%s, %s), got: (%s, %s)',
                                                                                                                      "operationalStatus", "operational-status", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature", tag, ns)
                self._clearAllReadData()
                return ReturnCodes.kGeneralError
            tempVar = None
            tempVar = tempValue.asEnum()
            if res != ReturnCodes.kOk or tempVar is None:
                for logFunc in self._log('read-tag-values-operational-status-bad-value').infoFunc(): logFunc('operationalStatus not read')
                if readAllOrFail:
                    self._clearAllReadData()
                    return ReturnCodes.kGeneralError
            if tempVar is not None:
                self.setOperationalStatus(tempVar)
            for logFunc in self._log('read-tag-values-operational-status').debug3Func(): logFunc('read operationalStatus. operationalStatus=%s, tempValue=%s', self.operationalStatus, tempValue.getType())
        if self.isOperationalStatusReasonRequested():
            ((tag, ns), tempValue) = tagValueList.popFront()
            if (tag != "operational-status-reason") or \
                (ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature"):
                for logFunc in self._log('reag-tag-values-unexpected-tag-leaf-operationalstatusreason').errorFunc(): logFunc('got unexpected tag-value for leaf: %s. expected: (%s, %s), got: (%s, %s)',
                                                                                                                            "operationalStatusReason", "operational-status-reason", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature", tag, ns)
                self._clearAllReadData()
                return ReturnCodes.kGeneralError
            tempVar = None
            tempVar = tempValue.asEnum()
            if res != ReturnCodes.kOk or tempVar is None:
                for logFunc in self._log('read-tag-values-operational-status-reason-bad-value').infoFunc(): logFunc('operationalStatusReason not read')
                if readAllOrFail:
                    self._clearAllReadData()
                    return ReturnCodes.kGeneralError
            if tempVar is not None:
                self.setOperationalStatusReason(tempVar)
            for logFunc in self._log('read-tag-values-operational-status-reason').debug3Func(): logFunc('read operationalStatusReason. operationalStatusReason=%s, tempValue=%s', self.operationalStatusReason, tempValue.getType())
        if self.isTemperatureStatusRawRequested():
            ((tag, ns), tempValue) = tagValueList.popFront()
            if (tag != "temperature-status-raw") or \
                (ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature"):
                for logFunc in self._log('reag-tag-values-unexpected-tag-leaf-temperaturestatusraw').errorFunc(): logFunc('got unexpected tag-value for leaf: %s. expected: (%s, %s), got: (%s, %s)',
                                                                                                                         "temperatureStatusRaw", "temperature-status-raw", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature", tag, ns)
                self._clearAllReadData()
                return ReturnCodes.kGeneralError
            tempVar = None
            tempVar = tempValue.asString()
            if res != ReturnCodes.kOk or tempVar is None:
                for logFunc in self._log('read-tag-values-temperature-status-raw-bad-value').infoFunc(): logFunc('temperatureStatusRaw not read')
                if readAllOrFail:
                    self._clearAllReadData()
                    return ReturnCodes.kGeneralError
            if tempVar is not None:
                self.setTemperatureStatusRaw(tempVar)
            for logFunc in self._log('read-tag-values-temperature-status-raw').debug3Func(): logFunc('read temperatureStatusRaw. temperatureStatusRaw=%s, tempValue=%s', self.temperatureStatusRaw, tempValue.getType())
        if self.isTemperatureStatusRequested():
            ((tag, ns), tempValue) = tagValueList.popFront()
            if (tag != "temperature-status") or \
                (ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature"):
                for logFunc in self._log('reag-tag-values-unexpected-tag-leaf-temperaturestatus').errorFunc(): logFunc('got unexpected tag-value for leaf: %s. expected: (%s, %s), got: (%s, %s)',
                                                                                                                      "temperatureStatus", "temperature-status", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature", tag, ns)
                self._clearAllReadData()
                return ReturnCodes.kGeneralError
            tempVar = None
            tempVar = tempValue.asEnum()
            if res != ReturnCodes.kOk or tempVar is None:
                for logFunc in self._log('read-tag-values-temperature-status-bad-value').infoFunc(): logFunc('temperatureStatus not read')
                if readAllOrFail:
                    self._clearAllReadData()
                    return ReturnCodes.kGeneralError
            if tempVar is not None:
                self.setTemperatureStatus(tempVar)
            for logFunc in self._log('read-tag-values-temperature-status').debug3Func(): logFunc('read temperatureStatus. temperatureStatus=%s, tempValue=%s', self.temperatureStatus, tempValue.getType())
        for logFunc in self._log('read-tag-values-done').debug3Func(): logFunc('done. readAllOrFail=%s, tagValueList=%s', readAllOrFail, tagValueList)
        return ReturnCodes.kOk
"""
Extracted from the below data:
{
"node": {
"name": "status",
"namespace": "status",
"className": "StatusMaapi",
"importStatement": "from a.api.yang.modules.tech.common.qwilt_tech_platform_temperature.tech.platform.temperature.status.status_maapi_gen import StatusMaapi",
"baseClassName": "StatusMaapiBase",
"baseModule": "status_maapi_base_gen"
},
"ancestors": [
{
"moduleYangNamespacePrefix": "qt",
"yangName": "tech",
"namespace": "tech",
"isCurrent": false,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech",
"name": "tech"
},
{
"moduleYangNamespacePrefix": "qt-pltf",
"yangName": "platform",
"namespace": "platform",
"isCurrent": false,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform",
"name": "platform"
},
{
"moduleYangNamespacePrefix": "qt-pltf-temperature",
"yangName": "temperature",
"namespace": "temperature",
"isCurrent": false,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature",
"name": "temperature"
},
{
"moduleYangNamespacePrefix": "qt-pltf-temperature",
"yangName": "status",
"namespace": "status",
"isCurrent": true,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature",
"name": "status"
}
],
"descendants": [],
"conditionalDebugName": null,
"operLeaves": [
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature",
"moduleYangNamespacePrefix": "qt-pltf-temperature",
"typeHandler": "handler: EnumHandlerPy",
"memberName": "operationalStatus",
"yangName": "operational-status",
"object": "",
"leafrefPath": null,
"defaultVal": null,
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature",
"moduleYangNamespacePrefix": "qt-pltf-temperature",
"typeHandler": "handler: EnumHandlerPy",
"memberName": "operationalStatusReason",
"yangName": "operational-status-reason",
"object": "",
"leafrefPath": null,
"defaultVal": null,
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature",
"moduleYangNamespacePrefix": "qt-pltf-temperature",
"typeHandler": "handler: StringHandler",
"memberName": "temperatureStatusRaw",
"yangName": "temperature-status-raw",
"object": "",
"leafrefPath": null,
"defaultVal": null,
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature",
"moduleYangNamespacePrefix": "qt-pltf-temperature",
"typeHandler": "handler: EnumHandlerPy",
"memberName": "temperatureStatus",
"yangName": "temperature-status",
"object": "",
"leafrefPath": null,
"defaultVal": null,
"hasDefaultRef": false
}
],
"module": {},
"configLeaves": [],
"env": {
"namespaces": [
"a",
"api",
"yang",
"modules",
"tech",
"common",
"qwilt_tech_platform_temperature"
]
},
"leaves": [
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature",
"moduleYangNamespacePrefix": "qt-pltf-temperature",
"typeHandler": "handler: EnumHandlerPy",
"memberName": "operationalStatus",
"yangName": "operational-status",
"object": "",
"leafrefPath": null,
"defaultVal": null,
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature",
"moduleYangNamespacePrefix": "qt-pltf-temperature",
"typeHandler": "handler: EnumHandlerPy",
"memberName": "operationalStatusReason",
"yangName": "operational-status-reason",
"object": "",
"leafrefPath": null,
"defaultVal": null,
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature",
"moduleYangNamespacePrefix": "qt-pltf-temperature",
"typeHandler": "handler: StringHandler",
"memberName": "temperatureStatusRaw",
"yangName": "temperature-status-raw",
"object": "",
"leafrefPath": null,
"defaultVal": null,
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-platform-temperature",
"moduleYangNamespacePrefix": "qt-pltf-temperature",
"typeHandler": "handler: EnumHandlerPy",
"memberName": "temperatureStatus",
"yangName": "temperature-status",
"object": "",
"leafrefPath": null,
"defaultVal": null,
"hasDefaultRef": false
}
],
"createTime": "2013"
}
"""
|
[
"afeset@gmail.com"
] |
afeset@gmail.com
|
4958347475528010f6f727089254a0e308ba70ba
|
d447d0b9287caeaf10714bb3c749acf9f8667538
|
/python/withstatement.py
|
f1f0d452c86437c2385c06ccec3feb5b7b1f2dd9
|
[] |
no_license
|
zchen24/tutorial
|
cbcfc4d461869300ff7fc5c9d66337b7faf336f9
|
e323c7b1536915683ef7ad312af99012fec2e66c
|
refs/heads/master
| 2023-08-18T12:14:10.223230
| 2023-08-13T11:55:38
| 2023-08-13T11:55:38
| 122,897,217
| 13
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,738
|
py
|
#!/usr/bin/env python
"""
Example shows how with statement works
The example shows three approaches
1. manual approach: try / finally block
2. classic class approach
3. contextlib.contextmanager approach
Paradigm:
setup code
try:
user code
finally:
tear down (clean up)
"""
from contextlib import contextmanager
import logging
def my_function():
    """Emit a debug/error/debug message triple on the root logger.

    Which of the three messages actually appear depends on the logger's
    effective level at call time.
    """
    emitters = (
        (logging.debug, 'First debug data'),
        (logging.error, 'Error data'),
        (logging.debug, 'Second debug data'),
    )
    for emit, text in emitters:
        emit(text)
# standard class way
# standard class way
class debug_logging_class(object):
    """Context manager that temporarily sets the root logger's level.

    On entry the current effective level is saved and replaced by `level`;
    on exit the saved level is restored (even if the body raised).
    """

    def __init__(self, level):
        self.level = level

    def __enter__(self):
        self.logger = logging.getLogger()
        self.old_level = self.logger.getEffectiveLevel()
        self.logger.setLevel(self.level)
        # Return self (the original returned None) so `with ... as cm`
        # binds the manager - backward compatible for plain `with` use.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the saved level; returning None propagates any exception.
        self.logger.setLevel(self.old_level)
@contextmanager
def debug_logging(level):
    """Temporarily run the root logger at *level*, restoring it afterwards."""
    root_logger = logging.getLogger()
    previous_level = root_logger.getEffectiveLevel()
    root_logger.setLevel(level)
    try:
        yield
    finally:
        # Runs whether or not the with-body raised.
        root_logger.setLevel(previous_level)
if __name__ == '__main__':
    # Demo 1: default root level (WARNING) - the debug messages are dropped.
    logging.warning('\n\n------ default -----')
    my_function()
    # Demo 2: the raw paradigm - manual save/set with try/finally restore.
    logging.warning('\n\n----- manual try -----')
    my_logger = logging.getLogger()
    my_old_level = my_logger.getEffectiveLevel()
    my_logger.setLevel(logging.DEBUG)
    try:
        my_function()
    finally:
        my_logger.setLevel(my_old_level)
    # Demo 3: same effect via the @contextmanager-decorated generator.
    logging.warning('\n\n------ contextmanager -----')
    with debug_logging(logging.DEBUG):
        my_function()
    # Demo 4: same effect via the classic __enter__/__exit__ class.
    logging.warning('\n\n----- with class -----')
    with debug_logging_class(logging.DEBUG):
        my_function()
    # After every manager has exited, the default level is back in force.
    logging.warning('\n\n----- after all -----')
    my_function()
|
[
"zihan.chen.jhu@gmail.com"
] |
zihan.chen.jhu@gmail.com
|
2e99493bbffcbb5c8d7859d60518ed5de170707d
|
fed93c5054545d927f3695b51f3a8c9dafb90086
|
/Python/tagiofuns/gcite.py
|
86b01680c7c695dd462823bfbbe8101ad5d0acf2
|
[] |
no_license
|
spluque/TagTools
|
34629e360afd3170aa167437cccfd72001b2c69c
|
5f150109114cbbdf551cbf8a02e335006613d332
|
refs/heads/master
| 2021-12-07T10:54:11.656760
| 2021-10-14T20:36:29
| 2021-10-14T20:36:29
| 233,162,704
| 0
| 0
| null | 2020-01-11T02:11:30
| 2020-01-11T02:11:29
| null |
UTF-8
|
Python
| false
| false
| 2,882
|
py
|
def gcite(doi=None, opage=False):
    """Get citation (gcite) information from the web for a doi number.

    Inputs:
    doi      doi as a string, either a full url ('https://doi.org/10.1109/JOE.2002.808212')
             or a bare number ('10.1109/JOE.2002.808212'); the resolver host is
             prepended when missing.
    opage    optional boolean - open the project archive / webpage.

    Outputs:
    cite     APA-formatted citation
    bibf     bibtex-formatted citation

    Example
    cite, bibf = gcite('10.1109/JOE.2002.808212')
    print(cite) returns:
    Johnson, M. P., & Tyack, P. L. (2003). A digital acoustic recording tag for measuring the response of wild marine mammals to sound. IEEE Journal of Oceanic Engineering, 28(1), 3-12. doi:10.1109/joe.2002.808212

    Valid: Python
    rjs30@st-andrews.ac.uk
    Python implementation dmwisniewska@gmail.com
    """
    import subprocess

    cite, bibf = [], []
    # Guard: no doi (or a non-string) -> show usage and return empty results.
    if not doi or not isinstance(doi, str):
        print(help(gcite))
        return (cite, bibf)

    # Normalize: prepend the doi.org resolver when it is missing.
    if 'https://doi.org/' not in doi:
        doi = 'https://doi.org/' + doi

    # Pass curl's arguments as a list so no shell is involved. The original
    # single-string form only works on Windows (where the whole string is
    # handed to CreateProcess); on POSIX, check_output would treat the entire
    # string as the executable name and fail.
    # Get formatted citation (APA style).
    res = subprocess.check_output(
        ['curl', '-LH', 'Accept: text/x-bibliography; style=apa', doi])
    cite = res.decode("utf-8")
    # Get bibtex-formatted citation info.
    res = subprocess.check_output(
        ['curl', '-LH', 'Accept: application/x-bibtex', doi])
    bibf = res.decode("utf-8")

    # Optionally open the landing page in the default browser.
    if opage:
        import webbrowser
        webbrowser.open_new_tab(doi)
    return (cite, bibf)
|
[
"dmwisniewska@gmail.com"
] |
dmwisniewska@gmail.com
|
35c0fadc45d6dc61d71f740259dd9338eb4c2c47
|
9399e4a5b1870b931625a2d5a1424e2a130e960e
|
/PyBer_Challenge.py
|
66a8a8e175b425be30b131b6a6616e087e78ea44
|
[] |
no_license
|
ebskii52/PyberAnalysis
|
e4dba557414e846f683d2a2f3ae09e594bac44f6
|
3a7cc8d0bb53f4353eacd0eea434b6e01eb5e467
|
refs/heads/master
| 2020-11-26T06:52:42.835049
| 2019-12-24T09:23:56
| 2019-12-24T09:23:56
| 228,995,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,504
|
py
|
#%%
# Add Matplotlib inline magic command
# (Jupyter/VS Code cell magic - only valid when this file is run as
# notebook cells, not as a plain Python script.)
%matplotlib inline
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import os
# %%
# Files to load
# Paths are resolved relative to the current working directory, so this
# must be run from the project root containing Resources/.
city_data_to_load = os.path.abspath("Resources/city_data.csv")
ride_data_to_load = os.path.abspath("Resources/ride_data.csv")
# %%## Module 5 Challenge
## Create a PyBer Summary DataFrame
# Combine the ride and city data into a single dataset, joined on "city".
ride_data_df = pd.read_csv(ride_data_to_load)
city_data_df = pd.read_csv(city_data_to_load)
# Single shared key column: on="city". (The original on=["city", "city"]
# listed the same key twice, which is redundant - the join is unchanged.)
pyber_data_df = pd.merge(ride_data_df, city_data_df, on="city")
# Display the DataFrame
pyber_data_df.head()
# %%
# Create the Urban city DataFrame.
urban_cities_df = pyber_data_df[pyber_data_df["type"] == "Urban"]
urban_cities_df.head()
# %%
# Create the Suburban and Rural city DataFrames.
suburban_cities_df = pyber_data_df[pyber_data_df["type"] == "Suburban"]
rural_cities_df = pyber_data_df[pyber_data_df["type"] == "Rural"]
# %%
# Get the number of rides for urban cities.
urban_ride_count = urban_cities_df.groupby(["city"]).count()["ride_id"]
# %%
# Create the suburban and rural ride count.
suburban_ride_count = suburban_cities_df.groupby(["city"]).count()["ride_id"]
rural_ride_count = rural_cities_df.groupby(["city"]).count()["ride_id"]
# %%
# Get average fare for each city in the urban cities.
urban_avg_fare = urban_cities_df.groupby(["city"]).mean()["fare"]
urban_avg_fare.head()
# %%
# Get average fare for each city in the suburban and rural cities.
suburban_avg_fare = suburban_cities_df.groupby(["city"]).mean()["fare"]
rural_avg_fare = rural_cities_df.groupby(["city"]).mean()["fare"]
# %%
# Total Number of Urban Drivers
# NOTE(review): driver_count appears constant per city in this dataset,
# so mean() per city recovers that constant - confirm against the CSVs.
urban_driver_count = urban_cities_df.groupby(["city"]).mean()["driver_count"]
suburban_driver_count = suburban_cities_df.groupby(["city"]).mean()["driver_count"]
rural_driver_count = rural_cities_df.groupby(["city"]).mean()["driver_count"]
#%%
# Get the average number of drivers for each urban city.
# Total rides per city type (count of rows per type).
Total_Rides = pyber_data_df.groupby(["type"]).count()["city"]
# Sum the per-city driver counts into one total per city type.
urban_driver_sum = urban_driver_count.sum()
rural_driver_sum= rural_driver_count.sum()
suburban_driver_sum = suburban_driver_count.sum()
# Build a Series of total drivers indexed by city type, matching Total_Rides.
TotalDriverDF = pd.DataFrame({"type": ["Rural", "Suburban", "Urban"], "Total Drivers":[rural_driver_sum,suburban_driver_sum,urban_driver_sum]})
TotalDriverDF = TotalDriverDF.set_index(["type"])["Total Drivers"]
Total_Fares = pyber_data_df.groupby(["type"]).sum()["fare"]
Avg_PerRide = pyber_data_df.groupby(["type"]).mean()["fare"]
# Fare per driver = total fares / total drivers, aligned on the type index.
Avg_FarePerDriver = Total_Fares/TotalDriverDF
# %%
#To create the summary DataFrame, follow these steps:
#Get the total rides, total drivers, and total fares for each city type using the groupby() function on the city type using the merged DataFrame or separate DataFrames.
#Calculate the average fare per ride and the average fare per driver by city type.
#Delete the index name.
#Create the summary DataFrame with the appropriate columns and apply formatting where appropriate.
SummaryDF = pd.DataFrame({"Total Rides": Total_Rides, "Total Drivers":TotalDriverDF, "Total Fares": Total_Fares,
                          "Average Fare per ride":Avg_PerRide, "Average Per Driver": Avg_FarePerDriver})
SummaryDF.index.name = None
# NOTE(review): these map(format) calls convert the numeric columns to
# strings - fine for display, but no further arithmetic on SummaryDF.
SummaryDF["Total Fares"] = SummaryDF["Total Fares"].map("${:.2f}".format)
SummaryDF["Average Fare per ride"] = SummaryDF["Average Fare per ride"].map("${:.2f}".format)
SummaryDF["Total Drivers"] = SummaryDF["Total Drivers"].map("{:.0f}".format)
SummaryDF["Average Per Driver"] = SummaryDF["Average Per Driver"].map("${:.2f}".format)
SummaryDF["Total Rides"] = SummaryDF["Total Rides"].map("{:,}".format)
# %%
## Rename columns
# Rename merged-frame columns to display names (in place).
pyber_data_df.rename(columns={'city': 'City', 'date':'Date','fare':'Fare', 'ride_id': 'Ride Id','driver_count': 'No. Drivers', 'type':'City Type'}, inplace=True)
pyber_data_df
# %%
## Set the index to the Date column
# Reorder the columns in the order you want them to appear.
new_column_order = ['Date', 'City', 'Fare', 'Ride Id','No. Drivers', 'City Type']
pyber_data_df = pyber_data_df[new_column_order]
pyber_data_df
# %%
# Create a new DataFrame for fares and include only the
##Date, City Type, and Fare columns using the copy() method on the merged DataFrame
new_pyber_data_df = pyber_data_df[["Date", "City", "Fare", "City Type"]].copy()
# %%
new_pyber_data_df.set_index("Date")
DateTypes = new_pyber_data_df["Date"].tolist()
Date_fixed = []
for name in DateTypes:
if len(name.split()) >= 1:
Date_fixed.append(name.split()[0])
new_pyber_data_df["Date"] = Date_fixed
# %%
new_pyber_data_df.head()
# %%
# 7. Calculate the sum() of fares by the type of city and date using groupby() to create a new DataFrame.
FaresDF = new_pyber_data_df.groupby(["City Type", "Date"]).sum()["Fare"]
FaresDF = FaresDF.to_frame()
FaresDF
# %%
## 8. Reset the index, which is needed for Step 10.
FaresDF.reset_index(inplace = True)
FaresDF
#%%
#from datetime import datetime
#FaresDF['Date'] = pd.to_datetime(FaresDF['Date']).dt.date
FaresDF
#FaresDF.index.name = "Date"
#FaresDF
#%%
##
## 9. Create a pivot table DataFrame with the Date as the index and columns = 'City Type' with the Fare for each Date in each row
import numpy as np
import scipy.stats as sts
FarePVTable = pd.pivot_table(FaresDF, values='Fare', index=['Date'], columns=['City Type'], aggfunc=np.sum)
FarePVTable
#%%
## 10. Create a new DataFrame from the pivot table DataFrame on the given dates '2019-01-01':'2019-04-28' using loc .
FarePVTableDF = FarePVTable.reset_index()
FarePVTableDFTrial = FarePVTableDF.set_index("Date")
FarePVTableDF = FarePVTableDFTrial.loc['2019-01-01':'2019-04-28']
FarePVTableDF
#%%
from datetime import datetime
FaresDFDate = FarePVTableDF.copy()
FaresDFDate = FaresDFDate.reset_index()
FaresDFDate["Date"] = pd.to_datetime(FaresDFDate["Date"])
FaresDFDate = FaresDFDate.reset_index(drop=True)
FaresDFDate = FaresDFDate.set_index(["Date"])
FaresDFDate
#%%
## 11. Create a new DataFrame by setting the DataFrame you created in Step 11
## with resample() in weekly bins, and calculate the sum() of the fares for each week.
TotalFaresWeekly = FaresDFDate.resample("W").sum()
TotalFaresWeekly
# %%
# Using the object-oriented interface method, plot the DataFrame you created in
# Step 12 using the df.plot() function. Things to consider with your plotting:
import numpy as np
import scipy.stats as sts
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
plt.figure(figsize=(15,8))
plt.xlabel("Date")
plt.ylabel("Fare($)")
# Create a title.
plt.title("PyBer Fare by Month")
plt.plot((TotalFaresWeekly.index.to_pydatetime()).astype('datetime64[W]'), TotalFaresWeekly["Urban"], marker="*", color="green", linewidth=2, label='Urban')
plt.plot((TotalFaresWeekly.index.to_pydatetime()).astype('datetime64[W]'), TotalFaresWeekly["Rural"], marker="*", color="black", linewidth=2, label='Rural')
plt.plot((TotalFaresWeekly.index.to_pydatetime()).astype('datetime64[W]'), TotalFaresWeekly["Suburban"], marker="*", color="red", linewidth=2, label='Suburban')
# Add a grid.
plt.grid()
#plt.xticks(RuralDateTrialType.index,Rotation="vertical")
# Add the legend.
# Create a legend
lgnd = plt.legend(fontsize="12", mode="Expanded",
scatterpoints=1, loc="best", title="City Types")
lgnd.legendHandles[0]._sizes = [75]
lgnd.legendHandles[1]._sizes = [75]
lgnd.legendHandles[2]._sizes = [75]
lgnd.get_title().set_fontsize(12)
plt.savefig(os.path.abspath("analysis/module_challenge.png"))
# %%
|
[
"ebskii52@users.noreply.github.com"
] |
ebskii52@users.noreply.github.com
|
683ed72a45c224d95f506a1e8c1a5060eab42a12
|
2a9b4b0f9cfe7d32434128776037ea6b5ff9800b
|
/day14.py
|
6f91c0a4042e6d3f4df7d0a2360f664efaf895d2
|
[] |
no_license
|
hacktoon/adventofcode
|
5d71fa6957397cf627adca048602fdda874c4ed0
|
810686eeb3b8c627fa75fb5010b63e1beaf909ba
|
refs/heads/master
| 2021-01-10T11:05:11.535312
| 2015-12-19T19:08:47
| 2015-12-19T19:08:47
| 47,684,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,157
|
py
|
# Advent of code
# Day 14: Reindeer Olympics
'''
This year is the Reindeer Olympics! Reindeer can fly at high speeds, but must rest occasionally to recover their energy. Santa would like to know which of his reindeer is fastest, and so he has them race.
Reindeer can only either be flying (always at their top speed) or resting (not moving at all), and always spend whole seconds in either state.
For example, suppose you have the following Reindeer:
Comet can fly 14 km/s for 10 seconds, but then must rest for 127 seconds.
Dancer can fly 16 km/s for 11 seconds, but then must rest for 162 seconds.
After one second, Comet has gone 14 km, while Dancer has gone 16 km. After ten seconds, Comet has gone 140 km, while Dancer has gone 160 km. On the eleventh second, Comet begins resting (staying at 140 km), and Dancer continues on for a total distance of 176 km. On the 12th second, both reindeer are resting. They continue to rest until the 138th second, when Comet flies for another ten seconds. On the 174th second, Dancer flies for another 11 seconds.
In this example, after the 1000th second, both reindeer are resting, and Comet is in the lead at 1120 km (poor Dancer has only gotten 1056 km by that point). So, in this situation, Comet would win (if the race ended at 1000 seconds).
Given the descriptions of each reindeer (in your puzzle input), after exactly 2503 seconds, what distance has the winning reindeer traveled?
'''
import re
TOTAL_SECONDS = 2503  # race duration given by the puzzle statement
def init_speed_info(line):
    """Parse one puzzle-input line into a fresh reindeer state dict.

    The first whitespace-free word is the name; the first three numbers are
    speed (km/s), flying duration (s) and resting duration (s).
    """
    name, speed, fly_secs, rest_secs = re.match(
        '^([^\s]+).*?(\d+).*?(\d+).*?(\d+)', line).groups()
    return {
        'name': name,
        'kms': int(speed),
        'time': int(fly_secs),
        'rest': int(rest_secs),
        'resting': 0,          # seconds of rest still owed
        'distance': 0,         # km travelled so far
        'limit': int(fly_secs),  # seconds of flight left in the current burst
        'points': 0,           # part-two score
    }
def update_speed_info(rd):
    """Advance the reindeer state *rd* by one second (mutates in place).

    Priority per second: keep resting if rest is owed; otherwise fly while
    the current burst lasts; otherwise start a new rest period.
    """
    if rd['resting'] > 0:
        # Still resting: burn one second of the owed rest.
        rd['resting'] -= 1
    elif rd['limit'] > 0:
        # Flying: consume one second of the burst and cover some ground.
        rd['limit'] -= 1
        rd['distance'] += rd['kms']
    else:
        # Burst exhausted: this second is the first of the rest period,
        # so only rest-1 seconds remain owed; re-arm the next burst.
        rd['resting'] = rd['rest'] - 1
        rd['limit'] = rd['time']
def update_points(reindeers):
    """Award one point to every reindeer currently tied for the lead."""
    ranked = sorted(reindeers, key=lambda r: r['distance'], reverse=True)
    leading_distance = ranked[0]['distance']
    for contender in ranked:
        if contender['distance'] == leading_distance:
            contender['points'] += 1
# --- Driver: simulate the race second by second for both puzzle parts. ---
with open('input/day14.txt') as f:
    speeds = f.readlines()
reindeers = []
for i in speeds:
    reindeers.append(init_speed_info(i))
for i in range(TOTAL_SECONDS):
    for reindeer in reindeers:
        update_speed_info(reindeer)
    # Part two scoring: the leader(s) earn a point after every second.
    update_points(reindeers)
# Part one answer: the greatest distance covered.
winning_distance = sorted(reindeers, key=lambda x: x['distance'], reverse=True)
winner_distance = winning_distance[0]
print('The winning reindeer in first part is {} who traveled {} km'.format(winner_distance['name'], winner_distance['distance']))
'''
--- Part Two ---
Seeing how reindeer move in bursts, Santa decides he's not pleased with the old scoring system.
Instead, at the end of each second, he awards one point to the reindeer currently in the lead. (If there are multiple reindeer tied for the lead, they each get one point.) He keeps the traditional 2503 second time limit, of course, as doing otherwise would be entirely ridiculous.
Given the example reindeer from above, after the first second, Dancer is in the lead and gets one point. He stays in the lead until several seconds into Comet's second burst: after the 140th second, Comet pulls into the lead and gets his first point. Of course, since Dancer had been in the lead for the 139 seconds before that, he has accumulated 139 points by the 140th second.
After the 1000th second, Dancer has accumulated 689 points, while poor Comet, our old champion, only has 312. So, with the new scoring system, Dancer would win (if the race ended at 1000 seconds).
Again given the descriptions of each reindeer (in your puzzle input), after exactly 2503 seconds, how many points does the winning reindeer have?
'''
# Part two answer: the highest accumulated score.
winning_points = sorted(reindeers, key=lambda x: x['points'], reverse=True)
winner_points = winning_points[0]
print('The winning reindeer in second part is {} who has {} points'.format(winner_points['name'], winner_points['points']))
|
[
"karlisson.bezerra@titansgroup.com.br"
] |
karlisson.bezerra@titansgroup.com.br
|
1978ad9916a047e0be6f26fe06c4759fb69c63ff
|
e30b8b65a12253b323d847636a751f6ff75d757c
|
/Clientes.py
|
0acb82c90edc888c9ea15c47c9b50a0a0d0ae232
|
[] |
no_license
|
C4RL0M/Sistema-de-Gestao
|
009d73dc3f75b4b0033ed76a53366e35b6041fa4
|
fab8b70d55fc0724f9ad0497323b2dcbfd07a8f3
|
refs/heads/main
| 2023-04-05T13:41:28.045247
| 2021-04-15T03:10:49
| 2021-04-15T03:10:49
| 345,491,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
# Simple in-memory client registry driven by a text menu (Portuguese UI).
# 'geral' accumulates all registered clients; 'pessoa' is the record
# currently being filled in.
geral = []
pessoa = {}
print("1-Cadastrar pessoa")
print("2-Lista Cadastros")
print("3-Procurar Pessoa Especifica")
print("4-Encerrar")
op = input("Digite a opção desejada: ")
# Main menu loop; option "4" exits.
while op !="4":
    if op == '1':
        # Register a new client: name, age, postal code.
        pessoa ['nome'] = input("informe o nome do cliente: ")
        pessoa ['idade'] = input("informe a idade do cliente: ")
        pessoa ['cep'] = input("informe o cep do cliente: ")
        perg = 's'
        perg = input("quer cadastrar outro? S/N ")
        # copy() so the appended record is not mutated by the next round.
        geral.append(pessoa.copy())
        if perg in 'sS':
            pessoa.clear()
            continue
        # NOTE(review): if the answer is neither s/S nor n/N, neither branch
        # runs and the loop re-enters registration with op still '1'.
        if perg in 'Nn':
            pessoa.clear()
            print("1-Cadastrar pessoa")
            print("2-Lista Cadastros")
            print("3-Procurar Pessoa Especifica")
            print("4-Encerrar")
            op = input("Digite a opção desejada: ")
    if op == '2':
        # List every registered client.
        for pessoa in geral:
            print(pessoa)
        p = 's'
        p = input("quer escolher outra opção? S/N ")
        if p in 'Ss':
            print("1-Cadastrar pessoa")
            print("2-Lista Cadastros")
            print("3-Procurar Pessoa Especifica")
            print("4-Encerrar")
            op = input("Digite a opção desejada: ")
        # NOTE(review): 'n' here leaves the whole program (falls through to
        # the final "Obrigado!" print), not just this menu entry.
        if p in 'nN':
            break
    if op == '3':
        # Look up one client by exact name and print its field values.
        busca = input("Digite o nome do cliente que deseja buscar: ")
        for pessoa in geral:
            if busca == pessoa ['nome']:
                for k,v in pessoa.items():
                    print (f"{v}")
        p = 's'
        p = input("quer escolher outra opção? S/N ")
        if p in 'Ss':
            print("1-Cadastrar pessoa")
            print("2-Lista Cadastros")
            print("3-Procurar Pessoa Especifica")
            print("4-Encerrar")
            op = input("Digite a opção desejada: ")
        if p in 'nN':
            break
    if op not in '1234':
        # Invalid option: warn and show the menu again.
        print("digite apenas 1, 2, 3 ou 4")
        print("1-Cadastrar pessoa")
        print("2-Lista Cadastros")
        print("3-Procurar Pessoa Especifica")
        print("4-Encerrar")
        op = input("Digite a opção desejada: ")
print("Obrigado!")
|
[
"noreply@github.com"
] |
C4RL0M.noreply@github.com
|
99ee2513f8b245f847babb718f5a265872e2e7bb
|
c76c37611bf5b437f821efbfeb33fdc2e2bdd0ec
|
/scripts/solve_problems.py
|
160771036f8edb28e9d87a8f55f8764ae24ca362
|
[
"MIT"
] |
permissive
|
LucasLeandro1204/PCCoder
|
618f84a27b2841cfb7b365c3480802ef38d81ca1
|
82bfc5a2c58e4d2f276a3ce2be3718ec19083142
|
refs/heads/master
| 2023-04-07T04:54:44.501309
| 2021-04-17T10:34:03
| 2021-04-17T10:34:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,735
|
py
|
import argparse
import json
import multiprocessing
import torch
import params
from model.model import PCCoder
from env.env import ProgramEnv
from env.search import cab, dfs
from dsl.example import Example
from dsl.program import Program
from dsl.value import Value
def load_problems(path):
    """Read a JSON-lines file and return its records as a list of dicts."""
    with open(path) as fh:
        return [json.loads(raw_line.rstrip()) for raw_line in fh]
def init_worker(*args):
    """Pool initializer: stash the shared search configuration in module
    globals so solve_problem_worker can reach it inside each process."""
    global method, counter, fail_counter, model, timeout, max_program_len, max_beam_size
    (method, counter, fail_counter, model,
     timeout, max_program_len, max_beam_size) = args
def solve_problems(problems, method, model, timeout, max_program_len, max_beam_size, num_workers):
    """
    Attempts to predict programs for the given I/O sample sets.

    Runs in a multiprocessing pool when num_workers is None or > 1;
    otherwise runs sequentially in-process (useful for debugging).
    Returns the list of per-problem solution dicts.
    """
    # Prevents deadlocks due to torch's problems with GPUs on multi processes.
    # This line is here for convenience, but it is recommended to solve problems on CPU since the overhead
    # in this case is minimal.
    torch.set_num_threads(1)

    # Shared cross-process progress counters, incremented by the workers.
    counter = multiprocessing.Value('i', 0)
    fail_counter = multiprocessing.Value('i', 0)

    worker_args = (method, counter, fail_counter, model, timeout, max_program_len, max_beam_size)
    if num_workers is None or num_workers > 1:
        # Fix: manage the pool with a context manager so worker processes are
        # reliably cleaned up (the original never closed/joined the pool).
        with multiprocessing.Pool(processes=num_workers, initializer=init_worker,
                                  initargs=worker_args) as pool:
            return pool.map(solve_problem_worker, problems)
    else:
        # Don't run in pool to enable debugging
        init_worker(*worker_args)
        return [solve_problem_worker(data) for data in problems]
def solve_problem_worker(data):
    """Search for a program solving one I/O sample set.

    Runs inside a pool worker; reads the configuration globals installed by
    init_worker. On success, 'result' is replaced with the encoded program;
    on failure it becomes the string "Failed".
    """
    env = ProgramEnv(Example.from_line(data))

    if method == 'beam':
        solution = cab(env, max_program_len, model, params.cab_beam_size, params.cab_width,
                       params.cab_width_growth, timeout, max_beam_size=max_beam_size)
    elif method == 'dfs':
        solution = dfs(env, max_program_len, model, params.dfs_max_width, timeout)

    # Progress line shared across workers (\r keeps it on one console line).
    counter.value += 1
    print("\rSolving problems... %d (failed: %d)" % (counter.value, fail_counter.value), end="")

    if solution['result'] is False:
        fail_counter.value += 1
        solution['result'] = "Failed"
    else:
        input_values = [Value.construct(x) for x in data['examples'][0]['inputs']]
        input_types = [v.type for v in input_values]
        solution['result'] = Program(input_types, solution['result']).encode()
    return solution
def main():
    """CLI entry point: load problems, run program search with the trained
    model, print a solve-rate summary, and write one JSON result per line."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input_path', type=str)
    parser.add_argument('output_path', type=str)
    parser.add_argument('model_path', type=str)
    parser.add_argument('timeout', type=int)
    parser.add_argument('max_program_len', type=int)
    parser.add_argument('--num_workers', type=int, default=None)
    parser.add_argument('--max_beam_size', type=int, default=819200)
    parser.add_argument('--search_method', choices=['beam', 'dfs'], default='beam')
    args = parser.parse_args()

    problems = load_problems(args.input_path)
    model = PCCoder()
    model.load(args.model_path)
    model.eval()

    res = solve_problems(problems, args.search_method, model, args.timeout, args.max_program_len,
                         args.max_beam_size, args.num_workers)
    print("")

    solved = len([x for x in res if x['result'] != 'Failed'])
    print("Solved: %d\\%d:" % (solved, len(res)), str(100.0 * solved / len(res)) + '%')

    # Fix: write through a context manager so the output file is flushed and
    # closed deterministically (the original leaked the open handle).
    with open(args.output_path, 'w') as out_file:
        out_file.write('\n'.join([json.dumps(x) for x in res]))
if __name__ == '__main__':
main()
|
[
"amit.zhr@gmail.com"
] |
amit.zhr@gmail.com
|
c42682f31c8b5151cf482f4dea52a56ae050c364
|
e16a51b35795956afa2d2b2c370ad6afb0d6ed7b
|
/pi_for_physicists.py
|
4310fd2caa685a6c6a94ed7964761389691889eb
|
[] |
no_license
|
lgsteele/simple-python-programs
|
8bb88e088859bea658d9c7ab157f789641bc3270
|
e09c9503dc9a8f3d20a4b066b4a05ccc02fe6c2c
|
refs/heads/master
| 2021-05-31T16:57:44.111071
| 2016-05-05T18:58:32
| 2016-05-05T18:58:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
#This is a program that determines if you know the value of pi. It assumes you are a physicist.
# To run in Notepad ++ type F5, then enter the following command C:\Python27\Lib\idlelib\idle.py "$(FULL_CURRENT_PATH)"
# NOTE: Python 2 code (print statement, raw_input).
def pi():
    """Ask for the value of pi and judge it with a physicist's tolerance."""
    a = raw_input("You are a physicist, eh? What is the value of pi?\n")
    a = float(a)
    # Anything in [1, 5] counts as "close enough" -- a deliberate joke range.
    if a <= 5 and a >= 1:
        print "Eh, close enough..."
    else:
        print "*facepalm*"
pi()
|
[
"louis.g.steele@gmail.com"
] |
louis.g.steele@gmail.com
|
a02a0761414356e6711d7127dc1787b474585e81
|
363d4005947f7a69021488d75db1ae5c5897775f
|
/rabish/blazeLearn.py
|
308e90bf5e3dccdd8bc2aa0fa2c804d770c16e14
|
[] |
no_license
|
anxu5829/MatrixCalculate
|
c09d87397edc6f26ee152783c186e13741161b5c
|
9e1d9f770082e5ac46dbda9a6f67ebde8b6235c8
|
refs/heads/master
| 2021-08-30T06:14:21.795069
| 2017-12-08T11:42:52
| 2017-12-08T11:42:52
| 111,750,588
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,017
|
py
|
# use dask to calculate and save data
# (exploratory scratch notes on dask / h5py / h5sparse; comments translated
# to English from the original Chinese)
import numpy as np
import dask.array as da
import os
os.chdir("D:")
#os.chdir("C:\\Users\\22560\\Desktop\\recommand Sys\\recommand Sys")
# generate test data
z=np.arange(2e5)
z = z.reshape((int(1e5),2))
# this will raise a memory error
#z = z.dot(z.transpose())
#we can do it like this
#change to da
z = da.from_array(z,chunks=(10000,2))
# use da dot method
# finishes instantly (the dask graph is lazy; nothing is materialized yet)
zdot = z.dot(z.transpose())
# pieces of data can be extracted from zdot very conveniently
arrayNeed = zdot[1,:].compute()
# saving and re-loading
# save data so that it can be used repeatedly
# the real data probably would not fit here; it might need >100 GB of storage,
# which this machine cannot handle
da.to_hdf5('data.hdf5','/zdata/zdot',zdot)
# load data from hdf5
import h5py
f = h5py.File("data.hdf5")
# it seems possible to read only a part of the data?
# d is a pointer to the data on disk
d = f['/zdata/zdot']
# you can extract data in d like this
data = d[1:20,:]
# test big multiply
dd = da.from_array(d,chunks=(10000,10000))
x = np.ones((2,100000))
y = x.dot(dd)
# on managing hdf5 files
# remember : Groups work like dictionaries,
# and datasets work like NumPy arrays
import h5py
import numpy as np
f = h5py.File("mytestfile.hdf5", "w")
# create an empty 'array' on your disk
# (100,) is its shape
# "mydatasets" is the name of this 'array'
# dset is like a pointer so that you can manipulate it
dset = f.create_dataset("mydataset", (100,), dtype='i')
dset[:] = np.arange(100)
# it has some attr
dset.shape
dset.dtype
# you can use it like array
dset[:10]
# how hdf5 organize data?
# it organize data using their 'name' ,
# like you organize your file on your pc
dset.name
# you can add some meta data on dset:
dset.attrs['create Time'] = '2017-11-26'
# you can obtain part of data from hdf5
# this will create a pointer
pointer = f['mydataset']
# slice method will load data into mem
pointer[:10]
# and 'f' also has a name , point to the root
f.name
# you can use create_group to create a "folder":
xuan = f.create_group("xuan")
# now var xuan is point to the 'folder' '/xuan'
z = np.arange(10)
z_hdf5 = xuan.create_dataset("z",data = z )
# if you want to get the content under the 'folder':
[i for i in f.items()]
# if you want to drop a folder/dataset :
del f['xuan']
# about odo
import blaze as bz
import pandas as pd
x= bz.data("test.csv")
x = bz.odo(x,pd.DataFrame)
# about dask: a powerful data-processing module
# 1. the array method
## create and store
# generate test data
z=np.arange(2e4)
z = z.reshape((int(1e4),2))
# this will raise a memory error
#z = z.dot(z.transpose())
#we can do it like this
#change to da
z = da.from_array(z,chunks=(1000,2))
# store
# NOTE(review): dask's to_hdf5 is usually z.to_hdf5(filename, datapath) or
# da.to_hdf5(filename, datapath, z) -- this three-argument method call looks
# suspicious; confirm against the dask docs.
z.to_hdf5("z.hdf5","/z",z)
# about sparse matrix:
import dask.array as da
import numpy as np
import sparse
from scipy.sparse import csr_matrix
sprs = csr_matrix((1e4, 1e4))
sprs[1,1] = 1000
sprs_da = da.from_array(sprs,chunks=(10,2))
# note: map_blocks essentially maps a function over every chunk; in
# principle arbitrarily complex custom functions can be used
sprs_da = sprs_da.map_blocks(sparse.COO)
# 2. the dataframe method
#A Dask DataFrame is a large parallel dataframe
# composed of many smaller Pandas dataframes,
# split along the index.
# create and store
import dask.dataframe as dd
# dd does not load the data into memory right away
train = dd.read_csv("train.csv")
train.head()
# dask.dataframe offers not only DataFrame but also Series
# api
# you need to learn it someday
# http://dask.pydata.org/en/latest/dataframe-api.html#series
# 3. the delayed method: parallel computation
# implements a computation graph
from dask import delayed
def inc(x):
    return x + 1
def double(x):
    return x + 2
def add(x, y):
    return x + y
data = [1, 2, 3, 4, 5]
output = []
for x in data:
    a = delayed(inc)(x)
    b = delayed(double)(x)
    c = delayed(add)(a, b)
    output.append(c)
total = delayed(sum)(output)
total.compute()
# you can design a graph:
# each step waits for the previous step's result
import numpy as np
def cal1(a,b):
    return a+b
def cal2(c,d):
    return c*d
a = np.array([1,2])
b = np.array([3,4])
d = 10
c = delayed(cal1)(a,b)
e = delayed(cal2)(c,d)
f = delayed(sum)(e)
f.visualize()
#delayed
# future : enables parallel computation
from dask.distributed import Client
# makes multi-threaded / distributed computation possible
# 4. the bag method: for piles of small scattered files (skipped for now)
# an application of dask
from sklearn import linear_model
from dask import delayed
from dask import compute
import numpy as np
import dask
reg = linear_model.LinearRegression()
Y = np.random.random((50,4))
X = np.random.random((50,3))
result = []
def regression(X,Y):
    t = reg.fit(X,Y)
    return(t.coef_)
def concat(result):
    return result
for i in range(4):
    c = delayed(regression)(X,Y[:,i])
    result.append(c)
r = delayed(concat)(result)
r.compute()
r.visualize()
# regression
import dask.array as da
y = d[10,:]
xt = np.arange(20e4).reshape((2,int(10e4)))
# about h5sparse
import scipy.sparse as ss
import h5sparse
import numpy as np
sparse_matrix = ss.csr_matrix([[0, 1, 0],
                               [0, 0, 1],
                               [0, 0, 0],
                               [1, 1, 0]],
                              dtype=np.float64)
with h5sparse.File("test.h5") as h5f:
    h5f.create_dataset('sparse/matrix', data=sparse_matrix)
with h5sparse.File("test.h5") as h5f:
    h5f.create_dataset('sparse/matrix2', data=h5f['sparse/matrix'])
# read data
h5f = h5sparse.File("C:\\Users\\22560\\Desktop\\dis.h5")
h5f['sparse/matrix'][1:3]
h5f['sparse']['matrix'][1:3].toarray()
import h5py
# allow us to use h5py to get data
h5f = h5py.File("test.h5")
h5sparse.Group(h5f)['sparse/matrix']
h5sparse.Dataset(h5f['sparse/matrix'])
# test append method in h5sparse
import h5sparse
import h5py
import scipy.sparse as ss
import h5sparse
import numpy as np
x1 = np.array([[1,2,3,4],[5,6,7,8]])
x2 = np.array([[1,2,3,4],[5,6,7,8]]) *2
x1 = ss.csr_matrix(x1)
x2 = ss.csr_matrix(x2)
# you may use h5py to control data
with h5py.File("test.h5") as h5f:
    del h5f['sparseData/data']
# use h5sparse to save data
# note: fetching data from h5sparse only works a batch of rows at a time
# briefly, how it works:
# it stores the sparse matrix's indices, indptr and data inside one h5py group
# its h5sparse.Dataset wrapper reads them back out as its own dataset type
# attention:
# 1 append method can only be used when the original data is a csr_matrix
# you must set these two paras to ensure the data is chunked :
# chunks = (100,),maxshape = (None,)
with h5sparse.File("test.h5") as h5f:
    h5f.create_dataset("sparseData/data",data=x1,chunks = (100,),maxshape = (None,))
    h5f['sparseData/data'].append(x2)
# read data from it
with h5py.File("test.h5") as h5f:
    print(h5sparse.Dataset(h5f['sparseData/data']).value.todense())
x= np.matrix(
    [
        [1,2,3,4],
        [2,3,4,4]
    ]
)
import scipy.sparse as ss
x = ss.csc_matrix(x)
y = ss.csc_matrix([1,2,3,4])
x.multiply(y)
import numpy as np
x = np.arange(20).reshape(10,2)
y = -2 * x.dot(x.transpose())
z = (x*x).sum(1)
|
[
"anxu5829@gmail.com"
] |
anxu5829@gmail.com
|
9645a78ec99d0413d31cd60d9b4e834ae1042679
|
141545126466a00f32247dfa40e067ec049b0fa4
|
/Programming Fundamentals Python/17 Lists Advanced Exercise/car race.py
|
e7f35427a8792f06507edeed4c607ebd6c79fba9
|
[] |
no_license
|
RadkaValkova/SoftUni-Web-Developer
|
83314367172a18f001e182b4e57f7ca0502ad1fc
|
61d3414373498bb6009ae70e8d17f26cd2d88ea5
|
refs/heads/main
| 2023-06-01T02:11:06.606370
| 2021-06-29T19:39:19
| 2021-06-29T19:39:19
| 325,611,606
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
# Car Race (SoftUni): the input is an odd-length list of section times whose
# middle element is the finish line -- it belongs to neither car.  Each car
# sums the times along its route; a section time of 0 cuts the accumulated
# time by 20% (the "bonus" sections).  Lower total time wins.
time_needed = list(map(int, input().split()))
finish = len(time_needed) // 2

left_range = time_needed[:finish]        # left car: indices 0 .. finish-1
right_range = time_needed[-1:finish:-1]  # right car: last index down to finish+1

left_time = 0
right_time = 0

for time in left_range:
    if time == 0:
        left_time *= 0.8  # 20% time bonus before adding the (zero) section
    left_time += time

for time in right_range:
    if time == 0:
        right_time *= 0.8
    right_time += time

if left_time < right_time:
    winner = 'left'
    print(f'The winner is {winner} with total time: {left_time:.1f}')
else:
    winner = 'right'
    # Bug fix: the format spec was '{right_time:1f}' (field width 1, default
    # six decimals) -- it must be '.1f' to match the left branch's output.
    print(f'The winner is {winner} with total time: {right_time:.1f}')
|
[
"radka_valkova@abv.bg"
] |
radka_valkova@abv.bg
|
a25a32d1e10c15d1c779c657ef58a0ceaf8c6b49
|
2e9a384cf365599d12341ff3ede54b74b2a23499
|
/Scripts/pip3-script.py
|
ec926ca2dda340e4b51d1f40c75114c139249587
|
[] |
no_license
|
ztex10009090/WhatToEat_roadQuerys
|
b0fad05d651a53c995f2d4935aff2defac92aec7
|
fc57399d68bb818fe2bbc27aec94afe703705041
|
refs/heads/master
| 2020-05-23T05:42:14.629285
| 2019-05-14T15:57:18
| 2019-05-14T15:57:18
| 186,652,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
#!D:\temp\WhatToEat_roadQuerys\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Auto-generated setuptools console-script shim: normalizes argv[0] by
# stripping the '-script.py'/'.exe' suffix, then delegates to pip's
# 'pip3' console entry point and exits with its return code.
__requires__ = 'pip==10.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
|
[
"ztex10009090@gmailcom"
] |
ztex10009090@gmailcom
|
3b0148c66e9f4aa739d321648279cbc3af9a43f5
|
68990a9361d2a3e3a7526807b29cef405a9e337b
|
/main/admin.py
|
ce42a97d43b0a4899fea544272c4be5fa20211d1
|
[] |
no_license
|
adalaw/TEA-Time
|
18fcbb10c361d565191633d5d7e84f0e7c71485f
|
554cd64cd9a4f9231c776ed1698d85fc1b99d56f
|
refs/heads/main
| 2023-03-18T02:56:12.868458
| 2021-03-11T04:11:44
| 2021-03-11T04:11:44
| 346,571,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
from django.contrib import admin
from .models import *
class ProductAdmin(admin.ModelAdmin):
    # Columns shown on the Product changelist page in the Django admin.
    list_display = ('id', 'name', 'code', 'price', 'category', 'hot', 'cold', 'date_created', 'active')
class CategoryAdmin(admin.ModelAdmin):
    # Columns shown on the Category changelist page.
    list_display = ('id', 'name', 'image', 'active')
class CustomerAdmin(admin.ModelAdmin):
    # Columns shown on the Customer changelist page.
    list_display = ('id', 'name', 'phone', 'email')
class OrderAdmin(admin.ModelAdmin):
    # Columns shown on the Order changelist page.
    list_display = ('id', 'customer', 'store', 'date_created', 'status')
class OrderItemAdmin(admin.ModelAdmin):
    # Columns shown on the OrderItem changelist page.
    list_display = ('id', 'order', 'product', 'hotOrCold', 'qty')
# Wire each model to its ModelAdmin so it appears in the Django admin site.
admin.site.register(Product, ProductAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Customer, CustomerAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderItem, OrderItemAdmin)
|
[
"freegirladalaw@hotmail.com"
] |
freegirladalaw@hotmail.com
|
4423dc9f47cd61d0fa20a672528459528704914d
|
de3231d5e55674538f8888afe94b241eb884fd34
|
/dj4e/home/migrations/0002_auto_20190322_0417.py
|
6aaa72d8662aa67fff542d1947e0a1c794446f80
|
[] |
no_license
|
hanacoon/django_projects
|
9e14371b71f5217b7e68799528825d48684881fe
|
eab1f948f339273f583bd35f7da81d17b333ba60
|
refs/heads/master
| 2020-04-18T01:13:25.816950
| 2019-04-18T04:37:01
| 2019-04-18T04:37:01
| 167,110,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
# Generated by Django 2.1.7 on 2019-03-22 04:17
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration for the 'home' app: removes Site's relation
    fields, then deletes the Site model and its lookup models
    (Category, ISO, Region, States)."""

    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        # Drop Site's relation fields first so the referenced models can be
        # deleted safely below.
        migrations.RemoveField(
            model_name='site',
            name='ISO',
        ),
        migrations.RemoveField(
            model_name='site',
            name='category',
        ),
        migrations.RemoveField(
            model_name='site',
            name='region',
        ),
        migrations.RemoveField(
            model_name='site',
            name='states',
        ),
        migrations.DeleteModel(
            name='Category',
        ),
        migrations.DeleteModel(
            name='ISO',
        ),
        migrations.DeleteModel(
            name='Region',
        ),
        migrations.DeleteModel(
            name='Site',
        ),
        migrations.DeleteModel(
            name='States',
        ),
    ]
|
[
"hanacoon@Hanas-MacBook-Pro-2.local"
] |
hanacoon@Hanas-MacBook-Pro-2.local
|
356f041e1899bb712556a43afe67e2dbe40e7453
|
b7a8582f8bd8259d2a8cf794efab238c09c10e08
|
/pa3/mylex.py
|
af38607ceb8ca7ebf6aaf0fa9f8789057f5725c3
|
[] |
no_license
|
ajvarshneya/pl
|
3f2e02fea96a031ec68a9506a32673681ae81d9e
|
33e76540acfc0895145ed673c33107c4717b461b
|
refs/heads/master
| 2021-05-31T13:11:30.519870
| 2016-04-02T00:02:17
| 2016-04-02T00:02:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
# Produces Token objects from input
class Lexer(object):
    """Iterates over raw (lineno, type[, value]) tuples, handing out Tokens."""

    def __init__(self, tokens):
        self.tokens = tokens
        self.iterator = iter(tokens)

    def token(self):
        """Return the next Token, or None once the input is exhausted."""
        try:
            raw = next(self.iterator)
        except StopIteration:
            return None
        # Two-element tuples carry no payload; the value becomes None.
        if len(raw) == 2:
            return Token(raw[0], raw[1], None)
        return Token(raw[0], raw[1], raw[2])
# A token class compatible with ply
class Token(object):
    """Minimal stand-in for a ply LexToken: line number, upper-cased type,
    optional value, and a lexpos placeholder."""

    def __init__(self, token_lineno, token_type, token_value):
        self.lineno = int(token_lineno)
        self.type = token_type.upper()  # ply token type names are upper-case
        self.value = token_value
        self.lexpos = None              # position unknown; kept for ply compat

    def __str__(self):
        fields = (str(self.lineno), str(self.type), str(self.value))
        return "<line: %s, type: %s, value: %s>" % fields
|
[
"aj@ajs-MacBook-Air.local"
] |
aj@ajs-MacBook-Air.local
|
1f4d8ef6054bf7d36be215b980e3e271a6884841
|
2d9e23e9aac796a9514709fc9585e12a9d365810
|
/venv/bin/pip
|
341ef8c8329bf74c5b531bf5fd811d2dec6a7c7a
|
[] |
no_license
|
EvelcNeerg/Jerry
|
a6ad923bf69e01c2ae646ead872c7255e78b4e0f
|
f97f3bfb59fa90f2e5efd7a31db6d6496e9c2c3a
|
refs/heads/master
| 2020-04-18T04:37:13.397852
| 2019-01-24T13:30:16
| 2019-01-24T13:30:16
| 167,245,741
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
#!/Users/cgreen/PycharmProjects/JerrysProject/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Auto-generated setuptools console-script shim: normalizes argv[0] by
# stripping the '-script.py'/'.exe' suffix, then delegates to pip's
# 'pip' console entry point and exits with its return code.
__requires__ = 'pip==10.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
|
[
"cgreen@shipleyschool.org"
] |
cgreen@shipleyschool.org
|
|
531e3867e6ed8774254ce503967e5f32c3ea8710
|
9a46907c545a3d2e09924d1431ade1cd448e1de6
|
/generate_distorted_images.py
|
bb978248468fa6042c5e39cde465b3cdef5d3842
|
[] |
no_license
|
kazmiekr/GasPumpOCR
|
126cf1bce517c53b1bfe5fcda4b8a3a23adf90d4
|
3ef1386f853176b514708cc6b30d332b3b8d5e9f
|
refs/heads/master
| 2022-08-19T21:40:11.048720
| 2022-07-20T11:20:54
| 2022-07-20T11:20:54
| 86,585,608
| 154
| 57
| null | 2022-07-20T11:20:55
| 2017-03-29T13:30:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,522
|
py
|
import cv2
import os
import sys
from DisplayUtils.TileDisplay import show_img
from ImageProcessing.OpenCVUtils import rotate_image
def dilate_img(img, file_name, file_folder, show, write):
    """Generate dilated + slightly rotated variants of img.

    Sweeps dilation kernel size (1, 3) and iteration count (1, 3), then
    rotates each result by -2/0/+2 degrees; optionally displays and/or
    writes every variant next to the source image.  (Python 2 code.)
    """
    for iterations in range(1, 4, 2):
        for dilate in range(1, 4, 2):
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (dilate, dilate))
            dilated = cv2.dilate(img, kernel, iterations=iterations)
            for rot in range(-2, 3, 2):
                dst = rotate_image(dilated, rot)
                # Encode the parameters into the variant's filename suffix.
                title = 'dilated-r-' + str(rot) + '-d-' + str(dilate) + '-i-' + str(iterations)
                if show:
                    show_img(title, dst)
                if write:
                    print 'Writing ' + file_folder + '/' + file_name + title + '.png'
                    cv2.imwrite(file_folder + '/' + file_name + title + '.png', dst)
def erode_img(img, file_name, file_folder, show, write):
    """Generate eroded + slightly rotated variants of img.

    Mirrors dilate_img but applies erosion; writes without a progress print.
    """
    for iterations in range(1, 3):
        for erode in range(1, 4, 2):
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (erode, erode))
            eroded = cv2.erode(img, kernel, iterations=iterations)
            for rot in range(-2, 3, 2):
                dst = rotate_image(eroded, rot)
                title = 'eroded-r-' + str(rot) + '-e-' + str(erode) + '-i-' + str(iterations)
                if show:
                    show_img(title, dst)
                if write:
                    cv2.imwrite(file_folder + '/' + file_name + title + '.png', dst)
def process_image(path, show=True, write=False):
    """Load the image at ``path`` and emit its distorted variants.

    Fix: the original used a Python 2 ``print path`` statement (syntax error
    on Python 3); the call form behaves identically on both versions.
    """
    print(path)
    img = cv2.imread(path)
    if show:
        show_img('orig', img)
    file_folder, full_file = os.path.split(path)
    file_name = full_file.split('.')[0]
    # erode_img(img, file_name, file_folder, show, write)
    dilate_img(img, file_name, file_folder, show, write)
def show_distorted(path, show, write):
    """Distort one image, then block until any preview windows are dismissed."""
    process_image(path, show, write)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def process_directory(folder):
    """Write distorted variants for every non-hidden file in ``folder``.

    Fix: paths are built with ``os.path.join`` instead of raw string
    concatenation, so a folder given without a trailing slash works too
    (with a trailing slash the result is identical to the original).
    """
    for file_name in os.listdir(folder):
        if not file_name.startswith('.'):
            process_image(os.path.join(folder, file_name), show=False, write=True)
def main():
    """Entry point: distort a default image, or the one named on the command line."""
    if len(sys.argv) == 2:
        img_file, show = sys.argv[1], False
    else:
        img_file, show = 'training/7/7_1079_crop_3.png', True
    show_distorted(img_file, show, True)
    # For use if you want to generate extra images for all files in a directory
    # process_directory('training/8/')


if __name__ == "__main__":
    main()
|
[
"kazmiekr@gmail.com"
] |
kazmiekr@gmail.com
|
03832ca41691bd1f684229de69ff9c9c70b06791
|
2a7597b443c5866d62f1fd3386e5699cbb3ccf5e
|
/firstproject/firstapp/forms.py
|
395e5aba181f96b226dd40f4d26c98f8a28a3aae
|
[] |
no_license
|
neha-webllisto/All_projects
|
319b211360d50d3296d872ccff70627762657dbe
|
6bb6f2550b701f8bbdc026725f3db1d781fd69d1
|
refs/heads/master
| 2020-04-15T04:51:39.070851
| 2019-01-07T08:14:22
| 2019-01-07T08:14:22
| 164,398,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
from django import forms
class User_form(forms.Form):
    """Enrolment form collecting a user's name, email, phone number and course."""

    Username = forms.CharField(label='Name',max_length=20)
    Email = forms.EmailField(label='Email id')
    Contact = forms.CharField(label='Phone no.',max_length=12)
    Course = forms.CharField(label='Course',max_length=20)
|
[
"mansi@webllisto.com"
] |
mansi@webllisto.com
|
c69d9a13942e8632f0e56c2c3af6de1f0ff2ce04
|
6974096eaf642a1c3dfbc4567d0f0776621261de
|
/pantalaimon/index.py
|
5c8e02bf60702704918aa491142f8a997272a5a8
|
[
"Apache-2.0"
] |
permissive
|
thrrgilag/pantalaimon
|
29709e1231db7655e57685babad27094f68afe5c
|
d388a21b9b1f17b7f52790f79dd571d8e75a4543
|
refs/heads/master
| 2022-11-13T12:56:14.747072
| 2020-07-02T10:19:59
| 2020-07-02T10:19:59
| 277,380,106
| 0
| 0
|
Apache-2.0
| 2020-07-05T20:41:57
| 2020-07-05T20:41:56
| null |
UTF-8
|
Python
| false
| false
| 16,796
|
py
|
# Copyright 2019 The Matrix.org Foundation CIC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class InvalidQueryError(Exception):
    """Raised when a full-text search query string cannot be parsed."""

    pass
if False:
import asyncio
import datetime
import json
import os
from functools import partial
from typing import Any, Dict, List, Optional, Tuple
import attr
import tantivy
from nio import (
RoomEncryptedMedia,
RoomMessageMedia,
RoomMessageText,
RoomNameEvent,
RoomTopicEvent,
)
from peewee import (
SQL,
DateTimeField,
ForeignKeyField,
Model,
SqliteDatabase,
TextField,
)
from pantalaimon.store import use_database
INDEXING_ENABLED = True
class DictField(TextField):
    """Peewee text field that transparently stores a dict as JSON."""

    def python_value(self, value):  # pragma: no cover
        # DB -> Python: decode the stored JSON string.
        return json.loads(value)

    def db_value(self, value):  # pragma: no cover
        # Python -> DB: encode the value as a JSON string.
        return json.dumps(value)
class StoreUser(Model):
    """A user that owns stored messages; user_id is unique."""

    user_id = TextField()

    class Meta:
        constraints = [SQL("UNIQUE(user_id)")]
class Profile(Model):
    """Snapshot of a sender's profile (display name / avatar) at event time."""

    user_id = TextField()
    avatar_url = TextField(null=True)
    display_name = TextField(null=True)

    class Meta:
        constraints = [SQL("UNIQUE(user_id,avatar_url,display_name)")]
class Event(Model):
    """A stored room event (raw JSON source) plus its sender's profile row."""

    event_id = TextField()
    sender = TextField()
    date = DateTimeField()
    room_id = TextField()
    # Raw event JSON, stored via DictField.
    source = DictField()

    profile = ForeignKeyField(model=Profile, column_name="profile_id")

    class Meta:
        constraints = [SQL("UNIQUE(event_id, room_id, sender, profile_id)")]
class UserMessages(Model):
    """Join table linking a StoreUser to the Events stored for them."""

    user = ForeignKeyField(model=StoreUser, column_name="user_id")
    event = ForeignKeyField(model=Event, column_name="event_id")
@attr.s
class MessageStore:
    """SQLite-backed store of room events, scoped to a single user."""

    user = attr.ib(type=str)
    store_path = attr.ib(type=str)
    database_name = attr.ib(type=str)
    database = attr.ib(type=SqliteDatabase, init=False)
    database_path = attr.ib(type=str, init=False)

    models = [StoreUser, Event, Profile, UserMessages]

    def __attrs_post_init__(self):
        """Open (creating if needed) the SQLite database and its tables."""
        self.database_path = os.path.join(
            os.path.abspath(self.store_path), self.database_name
        )

        self.database = self._create_database()
        self.database.connect()

        with self.database.bind_ctx(self.models):
            self.database.create_tables(self.models)

    def _create_database(self):
        # secure_delete overwrites deleted rows; foreign_keys enables FK checks.
        return SqliteDatabase(
            self.database_path, pragmas={"foreign_keys": 1, "secure_delete": 1}
        )

    @use_database
    def event_in_store(self, event_id, room_id):
        """Return True if this user's store already holds the given event."""
        user, _ = StoreUser.get_or_create(user_id=self.user)
        query = (
            Event.select()
            .join(UserMessages)
            .where(
                (Event.room_id == room_id)
                & (Event.event_id == event_id)
                & (UserMessages.user == user)
            )
            .execute()
        )

        # Any row at all means the event is present.
        for _ in query:
            return True

        return False

    def save_event(self, event, room_id, display_name=None, avatar_url=None):
        """Persist an event; return its row id, or None if already stored."""
        user, _ = StoreUser.get_or_create(user_id=self.user)

        profile_id, _ = Profile.get_or_create(
            user_id=event.sender, display_name=display_name, avatar_url=avatar_url
        )

        event_source = event.source
        event_source["room_id"] = room_id

        event_id = (
            Event.insert(
                event_id=event.event_id,
                sender=event.sender,
                date=datetime.datetime.fromtimestamp(event.server_timestamp / 1000),
                room_id=room_id,
                source=event_source,
                profile=profile_id,
            )
            .on_conflict_ignore()
            .execute()
        )

        # on_conflict_ignore() yields a non-positive id when the row existed.
        if event_id <= 0:
            return None

        _, created = UserMessages.get_or_create(user=user, event=event_id)

        if created:
            return event_id

        return None

    def _load_context(self, user, event, before, after):
        """Fetch up to `before`/`after` surrounding events in the same room."""
        context = {}

        if before > 0:
            query = (
                Event.select()
                .join(UserMessages)
                .where(
                    (Event.date <= event.date)
                    & (Event.room_id == event.room_id)
                    & (Event.id != event.id)
                    & (UserMessages.user == user)
                )
                .order_by(Event.date.desc())
                .limit(before)
            )
            context["events_before"] = [e.source for e in query]
        else:
            context["events_before"] = []

        if after > 0:
            query = (
                Event.select()
                .join(UserMessages)
                .where(
                    (Event.date >= event.date)
                    & (Event.room_id == event.room_id)
                    & (Event.id != event.id)
                    & (UserMessages.user == user)
                )
                .order_by(Event.date)
                .limit(after)
            )
            context["events_after"] = [e.source for e in query]
        else:
            context["events_after"] = []

        return context

    @use_database
    def load_events(
        self,
        search_result,  # type: List[Tuple[int, int]]
        include_profile=False,  # type: bool
        order_by_recent=False,  # type: bool
        before=0,  # type: int
        after=0,  # type: int
    ):
        # type: (...) -> Dict[Any, Any]
        """Expand (score, row-id) search hits into full result dictionaries."""
        user, _ = StoreUser.get_or_create(user_id=self.user)

        # Map row id -> score so results can be ranked after the bulk fetch.
        search_dict = {r[1]: r[0] for r in search_result}
        columns = list(search_dict.keys())

        result_dict = {"results": []}

        query = (
            UserMessages.select()
            .where(
                (UserMessages.user_id == user) & (UserMessages.event.in_(columns))
            )
            .execute()
        )

        for message in query:
            event = message.event

            event_dict = {
                "rank": 1 if order_by_recent else search_dict[event.id],
                "result": event.source,
                "context": {},
            }

            if include_profile:
                event_profile = event.profile

                event_dict["context"]["profile_info"] = {
                    event_profile.user_id: {
                        "display_name": event_profile.display_name,
                        "avatar_url": event_profile.avatar_url,
                    }
                }

            context = self._load_context(user, event, before, after)

            event_dict["context"]["events_before"] = context["events_before"]
            event_dict["context"]["events_after"] = context["events_after"]

            result_dict["results"].append(event_dict)

        return result_dict
def sanitize_room_id(room_id):
    """Make a room id facet-safe: drop every '!' and turn ':' into '/'."""
    translation = str.maketrans({":": "/", "!": None})
    return room_id.translate(translation)
class Searcher:
    """Wraps a tantivy searcher together with the schema fields it queries."""

    def __init__(
        self,
        index,
        body_field,
        name_field,
        topic_field,
        column_field,
        room_field,
        timestamp_field,
        searcher,
    ):
        self._index = index
        self._searcher = searcher

        self.body_field = body_field
        # Fix: the original swapped these two assignments
        # (name_field <- topic_field, topic_field <- name_field).  It was
        # harmless inside search() because both fields are handed to the query
        # parser together, but the attributes were misleading to any reader.
        self.name_field = name_field
        self.topic_field = topic_field
        self.column_field = column_field
        self.room_field = room_field
        self.timestamp_field = timestamp_field

    def search(self, search_term, room=None, max_results=10, order_by_recent=False):
        # type: (str, Optional[str], int, bool) -> List[Tuple[int, int]]
        """Search for events in the index.

        Returns the score and the column id for the event.
        Raises InvalidQueryError if the search term cannot be parsed.
        """
        queryparser = tantivy.QueryParser.for_index(
            self._index,
            [self.body_field, self.name_field, self.topic_field, self.room_field],
        )

        # This currently supports only a single room since the query parser
        # doesn't seem to work with multiple room fields here.
        if room:
            query_string = "{} AND room:{}".format(
                search_term, sanitize_room_id(room)
            )
        else:
            query_string = search_term

        try:
            query = queryparser.parse_query(query_string)
        except ValueError:
            raise InvalidQueryError(f"Invalid search term: {search_term}")

        if order_by_recent:
            collector = tantivy.TopDocs(
                max_results, order_by_field=self.timestamp_field
            )
        else:
            collector = tantivy.TopDocs(max_results)

        result = self._searcher.search(query, collector)

        retrieved_result = []

        for score, doc_address in result:
            doc = self._searcher.doc(doc_address)
            column = doc.get_first(self.column_field)
            retrieved_result.append((score, column))

        return retrieved_result
class Index:
    """Tantivy full-text index over room message/name/topic events."""

    def __init__(self, path=None, num_searchers=None):
        schema_builder = tantivy.SchemaBuilder()

        self.body_field = schema_builder.add_text_field("body")
        self.name_field = schema_builder.add_text_field("name")
        self.topic_field = schema_builder.add_text_field("topic")

        self.timestamp_field = schema_builder.add_unsigned_field(
            "server_timestamp", fast="single"
        )
        self.date_field = schema_builder.add_date_field("message_date")

        self.room_field = schema_builder.add_facet_field("room")

        # Row id of the event in the SQL store; stored so search hits can be
        # joined back to the full event rows.
        self.column_field = schema_builder.add_unsigned_field(
            "database_column", indexed=True, stored=True, fast="single"
        )

        schema = schema_builder.build()

        self.index = tantivy.Index(schema, path)
        self.reader = self.index.reader(num_searchers=num_searchers)
        self.writer = self.index.writer()

    def add_event(self, column_id, event, room_id):
        """Queue one event document for indexing.

        Raises ValueError for event types the index does not handle.
        """
        doc = tantivy.Document()
        room_path = "/{}".format(sanitize_room_id(room_id))
        room_facet = tantivy.Facet.from_string(room_path)

        doc.add_unsigned(self.column_field, column_id)
        doc.add_facet(self.room_field, room_facet)
        doc.add_date(
            self.date_field,
            datetime.datetime.fromtimestamp(event.server_timestamp / 1000),
        )
        doc.add_unsigned(self.timestamp_field, event.server_timestamp)

        if isinstance(event, RoomMessageText):
            doc.add_text(self.body_field, event.body)
        elif isinstance(event, (RoomMessageMedia, RoomEncryptedMedia)):
            doc.add_text(self.body_field, event.body)
        elif isinstance(event, RoomNameEvent):
            doc.add_text(self.name_field, event.name)
        elif isinstance(event, RoomTopicEvent):
            doc.add_text(self.topic_field, event.topic)
        else:
            raise ValueError("Invalid event passed.")

        self.writer.add_document(doc)

    def commit(self):
        """Flush queued documents to the on-disk index."""
        self.writer.commit()

    def searcher(self):
        """Return a fresh Searcher over the latest committed index state."""
        self.reader.reload()
        return Searcher(
            self.index,
            self.body_field,
            self.name_field,
            self.topic_field,
            self.column_field,
            self.room_field,
            self.timestamp_field,
            self.reader.searcher(),
        )
@attr.s
class StoreItem:
    """A queued event waiting to be written to the store and the index."""

    event = attr.ib()
    room_id = attr.ib()
    display_name = attr.ib(default=None)
    avatar_url = attr.ib(default=None)
@attr.s
class IndexStore:
    """Combined SQL message store and tantivy index with async helpers."""

    user = attr.ib(type=str)
    index_path = attr.ib(type=str)
    store_path = attr.ib(type=str, default=None)
    store_name = attr.ib(default="events.db")
    index = attr.ib(type=Index, init=False)
    store = attr.ib(type=MessageStore, init=False)
    event_queue = attr.ib(factory=list)
    write_lock = attr.ib(factory=asyncio.Lock)
    read_semaphore = attr.ib(type=asyncio.Semaphore, init=False)

    def __attrs_post_init__(self):
        # The store defaults to living next to the index.
        self.store_path = self.store_path or self.index_path
        num_searchers = os.cpu_count()
        self.index = Index(self.index_path, num_searchers)
        self.read_semaphore = asyncio.Semaphore(num_searchers or 1)
        self.store = MessageStore(self.user, self.store_path, self.store_name)

    def add_event(self, event, room_id, display_name, avatar_url):
        """Queue an event; it is persisted on the next commit_events()."""
        item = StoreItem(event, room_id, display_name, avatar_url)
        self.event_queue.append(item)

    @staticmethod
    def write_events(store, index, event_queue):
        """Synchronously write queued events to the store and the index."""
        with store.database.bind_ctx(store.models):
            with store.database.atomic():
                for item in event_queue:
                    column_id = store.save_event(item.event, item.room_id)

                    # Only index events that were newly stored.
                    if column_id:
                        index.add_event(column_id, item.event, item.room_id)
        index.commit()

    async def commit_events(self):
        """Flush the queue on an executor thread, serialized by write_lock."""
        loop = asyncio.get_event_loop()

        event_queue = self.event_queue

        if not event_queue:
            return

        # Swap the queue out first so new events can keep arriving.
        self.event_queue = []

        async with self.write_lock:
            write_func = partial(
                IndexStore.write_events, self.store, self.index, event_queue
            )
            await loop.run_in_executor(None, write_func)

    def event_in_store(self, event_id, room_id):
        """True if the given event is already persisted for this user."""
        return self.store.event_in_store(event_id, room_id)

    async def search(
        self,
        search_term,  # type: str
        room=None,  # type: Optional[str]
        max_results=10,  # type: int
        order_by_recent=False,  # type: bool
        include_profile=False,  # type: bool
        before_limit=0,  # type: int
        after_limit=0,  # type: int
    ):
        # type: (...) -> Dict[Any, Any]
        """Search the indexstore for an event."""
        loop = asyncio.get_event_loop()

        # Getting a searcher from tantivy may block if there is no searcher
        # available. To avoid blocking we set up the number of searchers to be
        # the number of CPUs and the semaphore has the same counter value.
        async with self.read_semaphore:
            searcher = self.index.searcher()
            search_func = partial(
                searcher.search,
                search_term,
                room=room,
                max_results=max_results,
                order_by_recent=order_by_recent,
            )

            result = await loop.run_in_executor(None, search_func)

            load_event_func = partial(
                self.store.load_events,
                result,
                include_profile,
                order_by_recent,
                before_limit,
                after_limit,
            )

            search_result = await loop.run_in_executor(None, load_event_func)

        search_result["count"] = len(search_result["results"])
        search_result["highlights"] = []

        return search_result
else:
INDEXING_ENABLED = False
|
[
"poljar@termina.org.uk"
] |
poljar@termina.org.uk
|
3f3ae230f286e6f217bdaf35e1ea3c4fe7b84beb
|
b5c01c72929a2ee1dbe75fd57ff1850be2cdc542
|
/Hackerrank/Fibonacci/Solution.py
|
28c3db56f79f0ec845b2e79e57e975bbc035b635
|
[] |
no_license
|
AnishM-8353/Coding_Questions_Hacktober21
|
9adc24298015f53aa0eea4c3e9269a8b18273ea9
|
5226f8001e6b9f388be671507e416b921b4f0d0d
|
refs/heads/main
| 2023-07-18T20:50:41.810143
| 2021-10-02T13:12:08
| 2021-10-02T13:12:08
| 412,789,563
| 0
| 0
| null | 2021-10-02T12:27:17
| 2021-10-02T12:27:16
| null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
def fibonacci(n):
    """Return the n-th Fibonacci number (fibonacci(0) == 0, fibonacci(1) == 1).

    Fixes two defects in the original:
    * it compared integers with ``is`` (an identity check that only works by
      accident of CPython's small-int caching, and a SyntaxWarning on 3.8+);
    * it recursed with exponential cost.  This version is iterative, O(n)
      time and O(1) space, with identical results.
    """
    if n in (0, 1):
        return n
    previous, current = 0, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    return current
|
[
"noreply@github.com"
] |
AnishM-8353.noreply@github.com
|
6164509e800a06dda453b4292ea669f5ee00614d
|
3ed89d8a9b626a383cf3aa40fe4c9e03c92eb35c
|
/pycodeexec/languages.py
|
261a90997543e154f217c4498d913093c1da1f19
|
[] |
no_license
|
3jackdaws/pycodeexec
|
0644b186556fc10171baad90572c11f7721940ca
|
bc073075f7cf3e2af2cce330597ae57a29ec5066
|
refs/heads/master
| 2020-04-08T05:56:02.162418
| 2018-11-26T07:26:13
| 2018-11-26T07:26:13
| 159,079,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
# Registry of runnable languages.
# Each entry: "aliases" lists alternate names accepted for the language;
# "versions" maps a version key to the docker image, tag and command template
# used to run a snippet ("{}" receives the source code).  Non-default version
# entries list only the keys that differ from "default" — presumably merged by
# the consumer; verify against the dispatch code.
LANGUAGES = {
    "python": {
        "aliases": [
        ],
        "versions": {
            "default": {
                "image": "python",
                "tag": "alpine",
                "command": 'python -c "{}"'
            },
            "3.6": {
                "tag": "3.6-alpine"
            },
            "2": {
                "tag": "2.7-alpine"
            }
        }
    },
    "javascript": {
        "aliases": [
            "js",
            "node",
            "nodejs"
        ],
        "versions": {
            "default": {
                "image": "node",
                "tag": "alpine",
                "command": 'node -e "{}"'
            },
        }
    },
    "ruby": {
        "aliases": [
            "rb"
        ],
        "versions": {
            "default": {
                "image": "ruby",
                "tag": "alpine",
                "command": 'ruby -e "{}"'
            }
        }
    },
    "c": {
        "aliases":[],
        "versions": {
            "default":{
                "image": "frolvlad/alpine-gxx",
                "tag":"latest",
                # Compiles the snippet with gcc inside the container, then runs it.
                "command":'sh -c \'echo "{}"|gcc -w -xc -o p - >/dev/null && chmod 700 p && ./p\''
            }
        }
    }
}
|
[
"3jackdaws@gmail.com"
] |
3jackdaws@gmail.com
|
893240ec55751856141e0876c160e926e05aadf2
|
d6458a979207e00da6dc653c278b9bfb818ce18d
|
/Additional Stuff/Medium Stuff/Classes/Inheritance/Polymorphism/animals.py
|
0456abd6253f6846ad767427f47eded1419f4eff
|
[] |
no_license
|
Hackman9912/05-Python-Programming
|
61ce7bb48188b4cd3cd8e585480325fdd02e579b
|
d03a319c952794b2f298a3ef4ddd09c253e24d36
|
refs/heads/master
| 2020-08-29T14:28:48.403323
| 2019-12-18T21:30:55
| 2019-12-18T21:30:55
| 218,061,276
| 0
| 0
| null | 2019-10-28T14:07:31
| 2019-10-28T14:07:31
| null |
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
# The mammal class represents a generic mammal
import time
class Mammal:
    """A generic mammal, identified by the species name it is given."""

    def __init__(self, species):
        # Name-mangled attribute keeps the species private to this class.
        self.__species = species

    def show_species(self):
        """Announce which species this mammal belongs to."""
        print(f'I am a {self.__species}')

    def make_sound(self):
        """Make the generic, unspecialized mammal sound."""
        print('Grrrrr')
# A Dog is a Mammal with its own sound.
class Dog(Mammal):
    """Dog: registers species 'Dog' and overrides the mammal sound."""

    def __init__(self):
        super().__init__('Dog')

    def make_sound(self):
        print('Woof! Woof!')
# A Cat is a Mammal with its own sound.
class Cat(Mammal):
    """Cat: registers species 'Cat' and overrides the mammal sound."""

    def __init__(self):
        super().__init__('Cat')

    def make_sound(self):
        print('Meow')
# Define the main function
def main():
    """Demonstrate polymorphism: each subclass overrides make_sound."""
    mammal = Mammal('Bigfoot')
    mammal.show_species()
    mammal.make_sound()
    time.sleep(1)
    cat = Cat()
    cat.show_species()
    cat.make_sound()
    time.sleep(1)
    dog = Dog()
    dog.show_species()
    dog.make_sound()


# Fix: the original called main() unconditionally, so merely importing this
# module ran the whole demo (including two sleeps).  Guarding the call keeps
# script behavior identical while making the module importable.
if __name__ == '__main__':
    main()
|
[
"charleshackett89@gmail.com"
] |
charleshackett89@gmail.com
|
fc262677dbd8e943479ae62b5a88260d2b44f1e0
|
fb936b0a98a6b54eb067e2aade2fde808615c901
|
/PeptideSequencingFunctions.py
|
f8c641dd3aa8af0c495a0bcb87e48ba2bf918618
|
[] |
no_license
|
jmmichaud/BioinfoTools
|
e71bd4b89072d34efaa31703c947cbab5606c256
|
7b3040c9582530494009e072c2950d461fdb254b
|
refs/heads/master
| 2020-04-09T01:03:40.663127
| 2019-04-11T03:45:11
| 2019-04-11T03:45:11
| 159,890,489
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85,950
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 22 13:41:22 2018
@author: jennifermichaud
"""
import copy
import collections
import numpy as np
def GenerateCycloSubstrings(proteinseq):
    """List every mass-spec substring of a cyclic peptide.

    The result starts with the empty fragment, then all wrap-around fragments
    of each length 1..len-1 (read off a doubled copy of the sequence, one per
    start position), and ends with the intact sequence.
    """
    n = len(proteinseq)
    doubled = proteinseq * 2
    fragments = ['']
    for size in range(1, n):
        fragments.extend(doubled[start:start + size] for start in range(n))
    fragments.append(proteinseq)
    return fragments
def SubstringCycloMasses(proteinseq):
    """Sorted integer masses of every cyclic mass-spec fragment of proteinseq.

    Covers the empty fragment (mass 0) and the intact peptide.
    """
    aa_mass = {'': 0, 'A': 71, 'C': 103, 'D': 115, 'E': 129, 'F': 147, 'G': 57, 'H': 137, 'I': 113, 'K': 128, 'L': 113, 'M': 131, 'N': 114, 'P': 97, 'Q': 128, 'R': 156, 'S': 87, 'T': 101, 'V': 99, 'W': 186, 'Y': 163}
    # An empty fragment sums to 0, matching the dict's '' entry.
    return sorted(
        sum(aa_mass[aa] for aa in frag)
        for frag in GenerateCycloSubstrings(proteinseq)
    )
def GenerateLinearSubstrings(proteinseq):
    """List every mass-spec substring of a linear peptide.

    Starts with the empty fragment, then all substrings of each length
    1..len-1 in start-position order, and ends with the intact sequence.
    """
    n = len(proteinseq)
    fragments = ['']
    for size in range(1, n):
        fragments.extend(
            proteinseq[start:start + size] for start in range(n - size + 1)
        )
    fragments.append(proteinseq)
    return fragments
def SubstringLinearMasses(proteinseq):
    """Sorted integer masses of every linear mass-spec fragment of proteinseq.

    Covers the empty fragment (mass 0) and the intact peptide.
    """
    aa_mass = {'': 0, 'A': 71, 'C': 103, 'D': 115, 'E': 129, 'F': 147, 'G': 57, 'H': 137, 'I': 113, 'K': 128, 'L': 113, 'M': 131, 'N': 114, 'P': 97, 'Q': 128, 'R': 156, 'S': 87, 'T': 101, 'V': 99, 'W': 186, 'Y': 163}
    # An empty fragment sums to 0, matching the dict's '' entry.
    return sorted(
        sum(aa_mass[aa] for aa in frag)
        for frag in GenerateLinearSubstrings(proteinseq)
    )
## TEST DATA
#protein_in = 'LEQN'
#protein_in = 'YKVRLRCFHFSTEY'
#protein_in = 'AHEWMFVKDGNE'
#
#
#protein_in = 'FFNDGPHVRQIPQYAVSQASATYGRVCMIGVDPIGFWMCKDRQWAMAQPS'
#print("Cyclospectrum")
#print(" ".join(GenerateCycloSubstrings(protein_in)))
#print(" ".join(list(map(str, SubstringCycloMasses(protein_in)))))
#
#print("Linearspectrum")
#print(" ".join(GenerateLinearSubstrings(protein_in)))
#print(" ".join(list(map(str, SubstringLinearMasses(protein_in)))))
def CalcAAFragMass(frag):
    """Return the total integer mass of an amino-acid fragment string."""
    aa_mass = {'': 0, 'A': 71, 'C': 103, 'D': 115, 'E': 129, 'F': 147, 'G': 57, 'H': 137, 'I': 113, 'K': 128, 'L': 113, 'M': 131, 'N': 114, 'P': 97, 'Q': 128, 'R': 156, 'S': 87, 'T': 101, 'V': 99, 'W': 186, 'Y': 163}
    return sum(aa_mass[aa] for aa in frag)
def Countfrags(peptidelength):
    """Number of mass-spec fragments of a linear peptide of the given length.

    Replaces the original O(n) summing loop with its closed form: the loop
    computed (n + 1) + sum(1..n-1), i.e. the empty fragment, the intact
    peptide and every proper substring: n + 1 + n*(n-1)/2.
    """
    n = peptidelength
    return n + 1 + n * (n - 1) // 2
#print(Countfrags(1024))
def RemoveListDuplicates(listoflists):
    """Return the input lists of ints with duplicate lists removed.

    Each list is keyed by its space-joined string form; output order follows
    set iteration order, exactly as in the original implementation.
    """
    unique_keys = {" ".join(map(str, sub)) for sub in listoflists}
    return [[int(token) for token in key.split(' ')] for key in unique_keys]
def Cyclize(linkedspectralist):
    """Return every rotation of each peptide (list of integer masses).

    All peptides are assumed to share the length of the first one; each
    contributes itself followed by its len-1 non-trivial rotations.
    """
    length = len(linkedspectralist[0])
    rotations = []
    for peptide in linkedspectralist:
        for offset in range(length):
            rotations.append(peptide[offset:] + peptide[:offset])
    return rotations
def SimpleCyclopeptideSequencing(spectrum):
    """Inputs a list of integer masses as that represent a mass spectrum.
    Outputs list of potential peptides represented by the mass spectrum using
    single letter AA codes.

    NOTE(review): despite the wording above, the return value is a single
    string of dash-joined integer masses, space-separated across candidate
    peptides (see the final join).  Also mutates `spectrum` in place by
    removing a leading 0, and prints a debug counter each growth round.
    """
    peptides = []
    AAs = []
    parentmass = max(spectrum)
    looper = 0  # becomes non-zero once a candidate sums to the parent mass
    AAmasslist = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
    if 0 in spectrum: #remove 0 from spectrum
        spectrum.remove(0)
    for mass in AAmasslist: #Build monopeptide list and start peptides list
        if mass in spectrum:
            peptides.append([mass])
            AAs.append(mass)
    #generate strings consistent with spectra, add all possibilitiess from
    #monopeptide/AA list and then remove combinations whose mass isn't
    #consistent with the spectra
    duppeps = copy.deepcopy(peptides)
    peptides = []
    for AA in AAs: # generate dipeptides
        subcopy = copy.deepcopy(AAs)
        for i in range(len(duppeps)):
            # Remove the fragments already used so each AA is only appended
            # when it is still available in the remaining pool.
            for frag in duppeps[i]:
                subcopy.remove(frag)
            if AA in subcopy:
                templist = copy.deepcopy(duppeps[i])
                templist.append(AA)
                peptides.append(templist)
            subcopy = copy.deepcopy(AAs)
    peppeps = copy.deepcopy(peptides)
    for j in range(len(peppeps)): #remove dipeptides not consistent with mass in spectra
        sumpeps = sum(peppeps[j])
        if sumpeps not in spectrum and sumpeps != parentmass:
            peptides.remove(peppeps[j])
        if sumpeps == parentmass:
            looper += 1
    # while the parentmass is not reached combine peptides consistent with mass
    #in spectrum to generate new peptide list and overlap excluding the first
    #element of one fragment and last element of another fragment.
    b = 0  # round counter, only used for the debug print below
    L = 2
    while looper == 0:
        print(b)  # debug output kept from the original
        b += 1
        peptidesolve = []
        for j in range(len(peptides)):
            for k in range(len(peptides)):
                if len(peptides[j]) == len(peptides[k]):
                    # Overlap-merge: j's tail must equal k's head.
                    if peptides[j][1:] == peptides[k][:-1]:
                        plist = peptides[j][:]
                        plist.append(peptides[k][-1])
                        if sum(plist) in spectrum:
                            peptidesolve.append(plist)
                        if sum(plist) == parentmass: #when sum of frags equals parent mass then looper is increased to break loop
                            peptidesolve.append(plist)
                            looper += 1
        L += 1 #length of peptide increases with each loop
        biggestlist = []
        dupps = copy.deepcopy(peptidesolve)
        for pep in dupps:
            if len(pep) == L:
                biggestlist.append(pep[:])
        peptides = copy.deepcopy(biggestlist)
        peptides = RemoveListDuplicates(peptides) #remove duplicate peptides
    cyclicpeptides = Cyclize(peptides) #generate cyclized version of peptides
    outputlist = []
    for cycle in cyclicpeptides:
        outputlist.append("-".join(list(map(str, cycle))))
    return " ".join(outputlist)
##TEST DATA
#Spectrum_in = '0 113 128 186 241 299 314 427'
#Spectrum_in = [0, 113, 114, 128, 129, 227, 242, 242, 257, 355, 356, 370, 371, 484]

# Fix: the original ran an expensive SimpleCyclopeptideSequencing call (and a
# print) unconditionally at import time, making the module unusable as a
# library.  The demo now only runs when the file is executed as a script.
if __name__ == "__main__":
    Spectrum_in = '0 113 113 114 128 128 186 186 186 227 241 242 299 299 314 314 355 355 372 413 427 427 468 485 500 500 541 541 541 613 613 613 654 654 669 686 727 727 741 782 799 799 840 840 855 855 912 913 927 968 968 968 1026 1026 1040 1041 1041 1154'
    Spectrum_in = list(map(int, Spectrum_in.split(" ")))
    answer = SimpleCyclopeptideSequencing(Spectrum_in)
    print(answer)
def CycloPeptideScoring(peptideseq, spectrum):
    """Score a cyclic peptide (AA string) against an experimental spectrum.

    One point per spectrum mass found in the peptide's theoretical
    cyclospectrum; matched theoretical masses are consumed so multiplicity
    is respected.  An empty peptide scores 0.
    """
    if peptideseq == []:
        return 0
    theoretical = SubstringCycloMasses(peptideseq)
    matches = 0
    for mass in spectrum:
        if mass in theoretical:
            matches += 1
            theoretical.remove(mass)
    return matches
def LinearPeptideScoring(peptideseq, Spectrum):
    """Score a linear peptide (AA string) against an experimental spectrum.

    One point per spectrum mass found in the peptide's theoretical linear
    spectrum; matched theoretical masses are consumed so multiplicity is
    respected.  An empty peptide scores 0.
    """
    if peptideseq == []:
        return 0
    theoretical = SubstringLinearMasses(peptideseq)
    matches = 0
    for mass in Spectrum:
        if mass in theoretical:
            matches += 1
            theoretical.remove(mass)
    return matches
#peptide_in = 'NQEL'
#Spectrum_in = "0 99 113 114 128 227 257 299 355 356 370 371 484"
#Spectrum_in = list(map(int, Spectrum_in.split(" ")))
#peptide_in = 'PEEP'
#Spectrum_in = "0 97 97 97 100 129 194 226 226 226 258 323 323 355 393 452"
#Spectrum_in = list(map(int, Spectrum_in.split(" ")))
#peptide_in ='QSSLNFQENVEYSVRNDFSDCIVSGQNEVRVAYYTADANGNPSHTMS'
#Spectrum_in = '0 57 71 71 71 87 87 87 87 87 87 97 99 99 99 99 101 101 103 113 113 113 113 113 114 114 114 114 114 115 115 128 128 128 129 129 131 137 137 144 147 147 156 156 156 163 163 163 171 172 174 184 186 186 186 200 200 212 212 213 215 216 218 218 224 227 227 227 228 228 229 232 234 236 238 241 243 243 244 250 251 255 255 257 257 257 258 262 264 269 269 270 275 275 285 287 292 299 302 314 315 315 319 325 326 328 328 331 335 337 340 340 340 342 342 342 347 349 350 356 357 358 359 359 365 368 369 369 371 372 376 379 383 385 388 391 392 397 402 403 404 413 415 415 422 427 427 428 429 430 438 439 451 454 456 456 456 459 462 464 470 470 471 471 473 474 475 478 478 484 484 496 498 498 503 505 505 505 506 509 510 516 517 518 521 521 526 527 532 532 543 543 552 562 564 567 569 569 571 573 574 575 576 577 577 583 584 585 588 592 592 597 599 602 603 608 608 611 614 617 619 619 620 629 631 634 634 640 645 646 654 656 665 666 671 674 677 682 683 684 687 688 690 690 690 690 691 691 698 707 711 718 720 721 721 728 732 733 733 734 739 744 745 746 748 748 753 755 755 759 761 762 764 765 767 770 785 789 790 791 796 802 804 804 812 815 818 818 819 820 820 821 824 824 826 838 840 843 846 847 847 847 849 853 858 861 861 863 863 866 868 874 876 877 881 883 892 899 902 903 909 911 911 923 925 930 932 933 933 937 939 939 945 946 947 948 948 952 960 961 962 967 975 976 976 976 977 978 982 987 990 996 1003 1010 1012 1016 1023 1024 1024 1024 1032 1033 1036 1037 1038 1039 1046 1047 1047 1047 1048 1059 1059 1061 1074 1074 1076 1078 1079 1088 1089 1090 1093 1095 1095 1095 1095 1104 1107 1109 1110 1111 1117 1123 1124 1130 1135 1138 1146 1146 1149 1149 1150 1158 1160 1160 1161 1161 1161 1175 1183 1187 1188 1192 1194 1194 1196 1203 1205 1206 1208 1209 1209 1210 1218 1222 1223 1223 1231 1237 1237 1245 1245 1250 1251 1251 1251 1260 1263 1274 1275 1280 1282 1288 1289 1291 1296 1298 1302 1302 1307 1309 1312 1317 1320 1321 1322 1322 1323 1324 1324 1325 1331 1332 1333 1336 1338 1350 1352 1359 
1364 1365 1367 1373 1374 1375 1378 1379 1381 1389 1397 1404 1408 1409 1411 1412 1413 1416 1423 1424 1426 1431 1435 1435 1438 1438 1445 1446 1449 1452 1463 1468 1474 1475 1475 1478 1480 1480 1486 1487 1488 1492 1493 1495 1503 1510 1511 1522 1530 1537 1537 1538 1539 1541 1544 1545 1545 1546 1551 1552 1553 1559 1564 1568 1576 1577 1577 1579 1587 1589 1591 1600 1601 1606 1608 1616 1617 1622 1624 1627 1631 1632 1636 1638 1638 1647 1651 1658 1659 1666 1666 1666 1667 1667 1667 1667 1678 1681 1690 1690 1692 1693 1703 1704 1707 1714 1714 1715 1721 1723 1724 1731 1737 1750 1754 1755 1760 1761 1766 1772 1777 1778 1780 1780 1780 1780 1780 1785 1794 1794 1795 1795 1801 1802 1805 1808 1815 1823 1827 1828 1828 1837 1839 1844 1851 1852 1853 1859 1865 1868 1876 1879 1885 1891 1894 1894 1895 1900 1902 1908 1908 1909 1914 1914 1915 1916 1917 1922 1936 1941 1942 1942 1947 1948 1955 1958 1965 1966 1967 1972 1973 1978 1999 2007 2007 2008 2008 2009 2014 2015 2016 2018 2023 2023 2028 2035 2042 2046 2047 2049 2051 2052 2055 2055 2062 2063 2070 2071 2071 2080 2086 2094 2106 2112 2120 2122 2122 2133 2134 2137 2139 2142 2145 2145 2146 2151 2152 2154 2159 2164 2165 2167 2170 2170 2172 2174 2177 2183 2183 2184 2185 2193 2199 2199 2230 2233 2235 2241 2244 2251 2254 2259 2265 2266 2267 2269 2270 2278 2279 2282 2283 2285 2286 2287 2289 2296 2298 2298 2298 2301 2306 2333 2336 2349 2356 2356 2357 2358 2369 2370 2377 2379 2380 2382 2383 2385 2386 2388 2395 2397 2400 2401 2402 2411 2412 2414 2421 2423 2426 2432 2434 2437 2470 2482 2482 2484 2485 2485 2487 2492 2498 2499 2505 2511 2513 2514 2514 2516 2516 2519 2519 2522 2524 2526 2533 2533 2535 2539 2540 2568 2569 2569 2584 2598 2598 2601 2604 2606 2613 2615 2620 2623 2626 2627 2629 2635 2643 2646 2648 2648 2653 2654 2655 2655 2656 2663 2682 2691 2691 2697 2698 2713 2716 2726 2736 2738 2740 2742 2747 2754 2756 2760 2761 2762 2762 2762 2767 2771 2783 2784 2790 2791 2811 2813 2827 2827 2839 2841 2841 2853 2854 2854 2861 2861 2867 2873 2875 2875 2877 2884 
2890 2904 2910 2912 2918 2918 2941 2948 2950 2954 2954 2955 2955 2960 2970 2983 2988 2989 2990 2991 3003 3011 3017 3024 3031 3031 3040 3046 3051 3057 3069 3075 3078 3082 3085 3090 3097 3102 3102 3104 3111 3118 3125 3130 3145 3153 3159 3159 3169 3172 3177 3182 3189 3196 3198 3206 3212 3217 3230 3240 3244 3246 3254 3258 3265 3267 3268 3269 3272 3281 3305 3313 3325 3326 3329 3331 3333 3339 3343 3345 3357 3359 3380 3382 3382 3393 3396 3416 3427 3428 3430 3444 3444 3444 3446 3453 3453 3461 3482 3494 3495 3506 3509 3511 3517 3529 3531 3531 3556 3558 3574 3581 3582 3583 3593 3600 3607 3608 3609 3623 3639 3645 3645 3657 3668 3669 3680 3682 3694 3710 3714 3722 3722 3728 3736 3738 3756 3769 3770 3786 3801 3808 3813 3823 3831 3835 3836 3841 3843 3851 3857 3857 3866 3900 3900 3914 3922 3928 3932 3944 3949 3960 3965 3971 3979 3985 3987 4013 4015 4027 4036 4059 4061 4063 4072 4078 4093 4098 4114 4126 4141 4143 4150 4160 4165 4173 4185 4192 4201 4206 4240 4254 4254 4272 4274 4279 4291 4293 4302 4329 4341 4353 4367 4368 4378 4400 4403 4405 4428 4430 4440 4454 4481 4481 4492 4531 4534 4541 4556 4568 4568 4577 4594 4621 4655 4662 4669 4678 4681 4681 4705 4749 4768 4768 4783 4806 4809 4818 4855 4896 4896 4905 4919 4937 4983 4992 5006 5024 5050 5093 5120 5137 5137 5221 5224 5224 5311 5352 5439'
#
#Spectrum_in = list(map(int, Spectrum_in.split(" ")))
#print(CycloPeptideScoring(peptide_in, Spectrum_in))
#print(LinearPeptideScoring(peptide_in, Spectrum_in))
def GenerateCycloSpectra(peptidemasses):
    """Return the theoretical cyclospectrum of a cyclic peptide.

    peptidemasses -- list of integer fragment masses (minimum 2 fragments).

    The spectrum contains every single fragment, every wrap-around run of
    2..L-1 adjacent fragments, and the full peptide mass, sorted ascending.
    The zero mass is not included.
    """
    count = len(peptidemasses)
    # Doubling the sequence turns every wrap-around window into a plain slice.
    doubled = peptidemasses + peptidemasses
    spectra = list(peptidemasses)
    spectra.append(sum(peptidemasses))
    for width in range(2, count):
        for start in range(count):
            spectra.append(sum(doubled[start:start + width]))
    spectra.sort()
    return spectra
def GenerateLinearSpectra(peptidemasses):
    """Return the theoretical linear spectrum of a linear peptide.

    peptidemasses -- list of integer fragment masses (minimum 2 fragments).

    The spectrum contains every single fragment, every contiguous run of
    2..L-1 adjacent fragments, and the full peptide mass, sorted ascending.
    The zero mass is not included.
    """
    count = len(peptidemasses)
    spectra = list(peptidemasses)
    spectra.append(sum(peptidemasses))
    for width in range(2, count):
        # Only windows that fit entirely inside the peptide (no wrap-around).
        for start in range(count - width + 1):
            spectra.append(sum(peptidemasses[start:start + width]))
    spectra.sort()
    return spectra
#peptidemasses_in = [71, 113, 129]
#print(GenerateLinearSpectra(peptidemasses_in))
def PeptideCycloScoring(peptideseq, spectrum):
    """Score a cyclic peptide against an experimental spectrum.

    peptideseq -- cyclic peptide as a list of integer fragment masses.
    spectrum -- experimental spectrum as a list of integer masses.

    Returns the number of masses in the theoretical cyclospectrum of
    peptideseq that can be matched one-to-one against masses in spectrum;
    each experimental mass is consumed at most once, so duplicates are
    only credited as often as they occur.
    """
    if not peptideseq:
        return 0
    unmatched = list(spectrum)  # working copy; input is left untouched
    hits = 0
    for mass in GenerateCycloSpectra(peptideseq):
        if mass in unmatched:
            unmatched.remove(mass)
            hits += 1
    return hits
def PeptideLinearScoring(peptideseq, spectrum):
    """Score a linear peptide against an experimental spectrum.

    peptideseq -- linear peptide as a list of integer fragment masses.
    spectrum -- experimental spectrum as a list of integer masses.

    Returns the number of masses in the theoretical linear spectrum of
    peptideseq that can be matched one-to-one against masses in spectrum;
    each experimental mass is consumed at most once, so duplicates are
    only credited as often as they occur.
    """
    if not peptideseq:
        return 0
    unmatched = list(spectrum)  # working copy; input is left untouched
    hits = 0
    for mass in GenerateLinearSpectra(peptideseq):
        if mass in unmatched:
            unmatched.remove(mass)
            hits += 1
    return hits
def TrimPeptideLeaderboard(leaderboard, spectrum, N):
    """Trim a peptide leaderboard to the top N linear scores (ties kept).

    leaderboard -- list of candidate peptides, each a list of integer masses.
    spectrum -- experimental spectrum as a list of integer masses.
    N -- number of top scores to keep; every peptide tying the cutoff score
        is retained, so more than N peptides may be returned.

    Returns the surviving peptides as a list of lists of integer masses.
    NOTE(review): peptides are keyed by their space-joined mass string, so
    duplicate peptides in the input collapse to a single output entry.
    """
#    print(leaderboard)
    toppeptides = []
    scoredict = {}   # space-joined peptide string -> linear score (dedups peptides)
    scorelist = []   # one score per input peptide, duplicates included
    scoreset = []    # raw scores; deduplicated further below
    for peptide in leaderboard:
#        print(peptide)
        peptidejoin = ' '.join(list(map(str, peptide)))
        peptidescore = PeptideLinearScoring(peptide, spectrum)
#        if type(peptide) == list:
##            print(peptide)
        scoredict[peptidejoin] = peptidescore
        scorelist.append(peptidescore)
        scoreset.append(peptidescore)
#    print(scoredict)
    scoredict = collections.OrderedDict(scoredict)
    # (peptide string, score) pairs ordered best score first; stable sort
    # preserves leaderboard order among equal scores.
    scoredict = sorted(scoredict.items(), key=lambda t: t[1], reverse = True)
#    print(scoredict)
    scorelist.sort(reverse= True)
    scoreset = list(set(scoreset))
    scoreset.sort(reverse= True)
#    print(scoreset)
    #Find number of items you need from dictionary (C) for top N scores including ties
    #create dictionary, countdict to count number of each score
    countdict = {}
    for sco in scorelist:
        if sco in countdict:
            countdict[sco] += 1
        else:
            countdict[sco] = 1
#    print(countdict)
    #Use set of scores and countdict to determine the number,C, of topscores to take
    Z = 0
    C = 0
    for sc in scoreset:  # walk score levels from best to worst
        Z += countdict[sc]
        if Z >= N:
            C = Z
            break
    if C != Z:
        # N was never reached: keep every peptide.
        C = Z
#    print(C)
    L = 0
    for pep in scoredict:
        if L < C:
            toppeptide = pep[0]
            # Rebuild the integer mass list from the space-joined key.
            if ' ' in toppeptide:
                toppeptide = list(map(int, toppeptide.split(" ")))
            else:
                toppeptide = [int(toppeptide)]
            toppeptides.append(toppeptide)
            L += 1
#    print(toppeptides)
    return toppeptides
#leaderboard_in = ['LAST', 'ALST', 'TLLT', 'TQAS']
#Spectrum_in = '0 71 87 101 113 158 184 188 259 271 372'
#N_in = 2
#leaderboard_in = 'IWAEQEMNHTEASPRRLTYHWSVAWFWEAFEYTYQADCM APFHLEMAVVDSWKYVTKNTYGEMMRATIIASEDDEMDT RCSIYGKFFVTYHHESMTIFIFEHDIHKYGQMFSIPLGE GDYMQKENDHPHETTQLWSCWWIPCWDCGTCLDKMHQST CTQMSWSTSRLFLSHLDDEWVEFYMKWDEVTVCAGVTPL FQCRAGVVIPVMTFCERGALMQGRAHDPDFLDANMQKEM RNSCKTAYIQHLHKQEKMVQLRNQSDHHHWEHDGRVIKP EINLHGGENMNTTKDTVVRQYTFFKSPIVKFCVGHHCSC DGHYGPNQGLPWRNIRYIKIGFDESMLMFGESEEKIPPK VMGKFMIGARQAPCPLQQPAGGEGLPFRIGQQKIEMNWC WVCIYHIMDQSYRGNPALLKLKWNDWRDRICGKPNKDWL LVAIPYYWQYYWAETYYNTADAWSPEVCISFSPDMNTTD NLFYDGMAQPLGFFCIHRYRRLYMRHNQVYAGNTKQWQN PVQVPHINQCEFQHCNWEDLNQWVDECNEMCLCWTMWHK HRVFWHLEMCKPQTRNIFFHVGNMSSCFQSMETIHKRFG QVCICEFHEYKWCIYHRFKWEKMYDYNWVKEFNFTWCTY PKKHKGHTTATVACKKFEIHKDPEMFEFGEYFDKNYLES KERFHIVVACQLQCFIYMQASYKDAYAMFHNHLHGVRWK'
#Spectrum_in = '0 57 71 71 71 71 87 97 97 97 99 99 99 101 101 103 113 113 114 114 128 128 128 128 129 129 129 129 129 129 129 131 137 137 147 147 163 163 168 170 184 185 185 185 186 186 186 210 211 214 215 218 224 225 226 226 227 228 228 230 236 241 250 256 257 257 258 260 262 264 266 276 276 276 281 282 282 286 289 298 299 300 313 314 314 315 317 323 332 338 339 342 343 347 351 353 354 356 356 357 364 365 373 377 378 385 385 387 391 395 399 399 405 405 410 413 420 429 429 436 439 442 442 443 446 449 451 461 462 465 467 471 476 484 484 484 485 485 486 486 488 490 496 500 502 506 519 524 528 528 528 533 536 541 543 549 550 557 558 562 568 568 571 571 583 587 590 590 598 599 602 603 613 614 615 618 619 621 622 625 627 629 637 646 647 647 649 656 657 659 661 670 671 671 673 686 687 697 698 699 700 701 704 708 712 715 715 716 716 718 720 744 746 747 750 756 758 772 772 774 775 784 784 785 785 786 787 788 789 799 800 800 807 815 827 828 829 829 833 841 844 844 844 847 849 849 871 872 875 883 884 884 885 885 887 900 900 901 901 901 903 913 913 918 935 936 941 943 946 955 957 962 970 970 970 972 973 975 982 986 988 1000 1002 1012 1012 1012 1012 1013 1013 1014 1014 1015 1017 1022 1028 1032 1054 1064 1069 1070 1071 1071 1073 1083 1086 1086 1099 1099 1099 1104 1109 1110 1111 1111 1115 1117 1125 1129 1131 1140 1141 1141 1142 1149 1151 1157 1159 1161 1165 1180 1183 1185 1198 1199 1200 1200 1201 1228 1228 1236 1236 1239 1239 1240 1243 1246 1248 1252 1254 1256 1258 1264 1269 1269 1269 1270 1278 1288 1290 1294 1296 1299 1314 1314 1317 1326 1327 1335 1337 1340 1365 1367 1368 1368 1369 1375 1377 1385 1385 1386 1387 1387 1389 1391 1393 1397 1398 1399 1406 1425 1431 1446 1451 1454 1455 1462 1464 1466 1469 1474 1488 1490 1493 1496 1496 1498 1498 1499 1500 1500 1502 1503 1505 1515 1522 1528 1545 1545 1553 1560 1571 1572 1583 1584 1590 1592 1595 1595 1599 1599 1600 1601 1603 1614 1616 1616 1627 1630 1632 1633 1635 1643 1657 1659 1674 1682 1685 1685 1690 1701 1712 1713 1719 1720 1724 
1727 1728 1728 1729 1729 1730 1731 1732 1742 1755 1756 1763 1772 1777 1784 1786 1788 1798 1802 1802 1811 1819 1826 1827 1827 1842 1850 1856 1856 1857 1859 1861 1871 1871 1873 1883 1889 1895 1897 1898 1906 1913 1915 1916 1930 1940 1948 1949 1954 1955 1958 1970 1972 1984 1986 1987 1994 2000 2008 2013 2024 2026 2029 2035 2036 2041 2041 2043 2044 2052 2069 2083 2084 2085 2086 2095 2098 2101 2114 2123 2123 2137 2139 2140 2141 2143 2154 2155 2166 2169 2170 2171 2173 2182 2198 2211 2212 2213 2214 2214 2226 2238 2240 2251 2252 2253 2269 2270 2272 2276 2280 2282 2284 2299 2300 2310 2311 2339 2340 2342 2345 2351 2355 2367 2375 2377 2379 2380 2399 2399 2400 2400 2401 2413 2416 2436 2439 2439 2446 2448 2464 2468 2471 2479 2484 2500 2503 2508 2514 2517 2527 2528 2530 2535 2560 2565 2565 2568 2570 2576 2586 2597 2613 2627 2629 2631 2631 2631 2642 2655 2656 2657 2689 2694 2694 2698 2699 2702 2705 2712 2728 2739 2741 2742 2744 2745 2756 2757 2760 2784 2799 2815 2816 2823 2836 2838 2841 2841 2841 2842 2859 2868 2870 2870 2885 2885 2912 2913 2913 2929 2942 2967 2967 2970 2970 2970 2984 2988 2998 2998 2999 3022 3026 3026 3041 3055 3071 3095 3096 3097 3098 3099 3117 3123 3125 3126 3155 3183 3184 3185 3188 3194 3200 3223 3224 3227 3246 3252 3280 3284 3288 3302 3312 3313 3317 3323 3347 3352 3355 3387 3399 3409 3409 3413 3418 3431 3441 3460 3470 3483 3516 3528 3531 3532 3538 3538 3540 3541 3588 3599 3629 3644 3645 3659 3667 3669 3669 3685 3700 3726 3742 3756 3772 3773 3798 3813 3814 3829 3855 3870 3870 3885 3941 3945 3958 3967 3984 3999 4016 4038 4087 4096 4130 4131 4167 4202 4227 4234 4298 4305 4316 4413 4419 4484 4516 4587'
#N_in = 6
#
#leaderboard_in = leaderboard_in.split(" ")
#Spectrum_in = list(map(int, Spectrum_in.split(" ")))
##
#print(' '.join(TrimPeptideLeaderboard(leaderboard_in, Spectrum_in, N_in)))
#leaderboard_in = [[57, 57], [71, 57], [87, 57], [97, 57], [99, 57], [101, 57], [103, 57], [113, 57], [114, 57], [115, 57], [128, 57], [129, 57], [131, 57], [137, 57], [147, 57], [156, 57], [163, 57], [186, 57], [57, 71], [71, 71], [87, 71], [97, 71], [99, 71], [101, 71], [103, 71], [113, 71], [114, 71], [115, 71], [128, 71], [129, 71], [131, 71], [137, 71], [147, 71], [156, 71], [163, 71], [186, 71], [57, 87], [71, 87], [87, 87], [97, 87], [99, 87], [101, 87], [103, 87], [113, 87], [114, 87], [115, 87], [128, 87], [129, 87], [131, 87], [137, 87], [147, 87], [156, 87], [163, 87], [186, 87], [57, 97], [71, 97], [87, 97], [97, 97], [99, 97], [101, 97], [103, 97], [113, 97], [114, 97], [115, 97], [128, 97], [129, 97], [131, 97], [137, 97], [147, 97], [156, 97], [163, 97], [186, 97], [57, 99], [71, 99], [87, 99], [97, 99], [99, 99], [101, 99], [103, 99], [113, 99], [114, 99], [115, 99], [128, 99], [129, 99], [131, 99], [137, 99], [147, 99], [156, 99], [163, 99], [186, 99], [57, 101], [71, 101], [87, 101], [97, 101], [99, 101], [101, 101], [103, 101], [113, 101], [114, 101], [115, 101], [128, 101], [129, 101], [131, 101], [137, 101], [147, 101], [156, 101], [163, 101], [186, 101], [57, 103], [71, 103], [87, 103], [97, 103], [99, 103], [101, 103], [103, 103], [113, 103], [114, 103], [115, 103], [128, 103], [129, 103], [131, 103], [137, 103], [147, 103], [156, 103], [163, 103], [186, 103], [57, 113], [71, 113], [87, 113], [97, 113], [99, 113], [101, 113], [103, 113], [113, 113], [114, 113], [115, 113], [128, 113], [129, 113], [131, 113], [137, 113], [147, 113], [156, 113], [163, 113], [186, 113], [57, 114], [71, 114], [87, 114], [97, 114], [99, 114], [101, 114], [103, 114], [113, 114], [114, 114], [115, 114], [128, 114], [129, 114], [131, 114], [137, 114], [147, 114], [156, 114], [163, 114], [186, 114], [57, 115], [71, 115], [87, 115], [97, 115], [99, 115], [101, 115], [103, 115], [113, 115], [114, 115], [115, 115], [128, 115], [129, 115], [131, 115], [137, 115], [147, 
115], [156, 115], [163, 115], [186, 115], [57, 128], [71, 128], [87, 128], [97, 128], [99, 128], [101, 128], [103, 128], [113, 128], [114, 128], [115, 128], [128, 128], [129, 128], [131, 128], [137, 128], [147, 128], [156, 128], [163, 128], [186, 128], [57, 129], [71, 129], [87, 129], [97, 129], [99, 129], [101, 129], [103, 129], [113, 129], [114, 129], [115, 129], [128, 129], [129, 129], [131, 129], [137, 129], [147, 129], [156, 129], [163, 129], [186, 129], [57, 131], [71, 131], [87, 131], [97, 131], [99, 131], [101, 131], [103, 131], [113, 131], [114, 131], [115, 131], [128, 131], [129, 131], [131, 131], [137, 131], [147, 131], [156, 131], [163, 131], [186, 131], [57, 137], [71, 137], [87, 137], [97, 137], [99, 137], [101, 137], [103, 137], [113, 137], [114, 137], [115, 137], [128, 137], [129, 137], [131, 137], [137, 137], [147, 137], [156, 137], [163, 137], [186, 137], [57, 147], [71, 147], [87, 147], [97, 147], [99, 147], [101, 147], [103, 147], [113, 147], [114, 147], [115, 147], [128, 147], [129, 147], [131, 147], [137, 147], [147, 147], [156, 147], [163, 147], [186, 147], [57, 156], [71, 156], [87, 156], [97, 156], [99, 156], [101, 156], [103, 156], [113, 156], [114, 156], [115, 156], [128, 156], [129, 156], [131, 156], [137, 156], [147, 156], [156, 156], [163, 156], [186, 156], [57, 163], [71, 163], [87, 163], [97, 163], [99, 163], [101, 163], [103, 163], [113, 163], [114, 163], [115, 163], [128, 163], [129, 163], [131, 163], [137, 163], [147, 163], [156, 163], [163, 163], [186, 163], [57, 186], [71, 186], [87, 186], [97, 186], [99, 186], [101, 186], [103, 186], [113, 186], [114, 186], [115, 186], [128, 186], [129, 186], [131, 186], [137, 186], [147, 186], [156, 186], [163, 186], [186, 186]]
#Spectrum_in = '0 71 113 129 147 200 218 260 313 331 347 389 460'
#
#N_in = 10
#Spectrum_in = list(map(int, Spectrum_in.split(" ")))
#print(TrimPeptideLeaderboard(leaderboard_in, Spectrum_in, N_in))
# I think this currently isn't working for these example data because I
#likely adapted code to accept peptide spectra as masses not a single code AA peptide sequences as is in the examples.
# it seems to work fine when called within other functions.
def LeaderboardCyclopeptideSequencing(spectrum, N):
    """Sequence a cyclopeptide from its spectrum via a scored leaderboard.

    spectrum -- cyclopeptide mass spectrum as a list of integer masses
        (mutated: a leading 0 is removed if present).
    N -- leaderboard width; after each extension round only peptides with
        the top N linear scores (ties included) are kept.

    Returns a single top-scoring peptide as a list of integer masses.
    Candidate peptides are grown one standard amino-acid mass at a time and
    trimmed each round, so the search stays tractable.
    """
    leaderboard = []
    leaderpeptide = []
#    peptides = []
    parentmass = max(spectrum)  # total peptide mass = largest spectrum entry
    looper = 0
    # The 18 distinct integer masses of the standard amino acids.
    AAmasslist = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
    if 0 in spectrum: #remove 0 from spectrum
        spectrum.remove(0)
    monocount = 0
    for spec in spectrum: # the peptide sequence should be as long as monopeptides present in spectra (unless it is missing from spectra)
        if spec <= 186:
            monocount += 1
    for mass in AAmasslist: #Build monopeptide list and start peptides list uses all AAs in case it is missing from spectra
        leaderboard.append([mass])
#    print(leaderboard)
#    leaderboard = Trim(leaderboard, Spectrum, N)
#    print(leaderboard)
#    b=0
    while looper == 0: #generate strings consistent with spectra, add all possibilitiess from monopeptide/AA list and then remove combinations whose mass isn't consistent with the spectra
#        print(b)
#        b += 1
        duppeps = copy.deepcopy(leaderboard)
        leaderboard = []
        for AA in AAmasslist: # generate extended peptides and add to leaderboard
#            subcopy = copy.deepcopy(Spectrum)
            for i in range(len(duppeps)):
#                for frag in duppeps[i]:
#                    subcopy.remove(frag)
                templist = copy.deepcopy(duppeps[i])
                templist.append(AA)
                leaderboard.append(templist)
#        print(leaderboard)
#        leaderboard = Trim(leaderboard, Spectrum, N)
        leadups = copy.deepcopy(leaderboard)
        for dup in leadups: # check peptide sum, if sum equal parent mass spectra reached. If it is greater than parent mass, remove.
            if sum(dup) == parentmass:
#                print(dup)
#                print(sum(dup))
#                print(parentmass)
                if len(dup) >= monocount: #if number of AA in peptide reached (monocount) looper is increased to break loop.
                    looper += 1
                # Keep the best cyclic-scoring peptide seen so far.
                if PeptideCycloScoring(dup, spectrum) > PeptideCycloScoring(leaderpeptide, spectrum):
                    leaderpeptide = copy.deepcopy(dup)
            elif sum(dup) > parentmass:
                leaderboard.remove(dup)
#        print(leaderboard)
        leaderboard = TrimPeptideLeaderboard(leaderboard, spectrum, N) # Trim list. Keep values of top N scores
#    print(leaderboard)
#    print(looper)
#    print(leaderpeptide)
    return leaderpeptide
#Spectrum_in = '0 71 113 129 147 200 218 260 313 331 347 389 460'
#
#N_in = 10
#Sample out 113-147-71-129 its a cyclopeptide so adjacency is important but order isn't
#
#Spectrum_in = '0 71 71 99 113 113 113 114 114 115 128 128 128 131 131 137 137 137 147 147 156 156 163 184 184 199 213 227 230 234 241 246 251 251 259 260 260 262 265 268 284 284 293 293 297 298 310 312 331 344 350 362 364 367 374 375 381 388 393 396 407 407 411 415 421 421 421 423 435 444 459 475 481 481 494 494 495 503 506 506 509 520 521 530 535 535 548 552 552 558 558 565 574 591 607 609 618 622 622 622 634 634 634 637 648 651 666 667 672 677 678 689 693 695 704 705 719 721 735 737 746 762 765 765 765 769 774 781 785 788 790 792 803 806 806 814 832 833 851 856 858 868 880 882 884 896 902 902 902 902 902 916 918 919 920 927 928 929 953 969 979 987 989 996 997 1011 1014 1015 1015 1016 1027 1030 1033 1033 1039 1041 1055 1057 1058 1066 1068 1085 1086 1100 1116 1126 1128 1129 1140 1142 1143 1148 1152 1153 1158 1161 1169 1170 1180 1181 1186 1195 1199 1199 1200 1213 1213 1240 1247 1256 1266 1268 1271 1276 1279 1289 1295 1295 1299 1309 1312 1313 1317 1317 1323 1326 1327 1337 1339 1341 1346 1350 1399 1403 1408 1410 1412 1422 1423 1426 1432 1432 1436 1437 1440 1450 1454 1454 1460 1470 1473 1478 1481 1483 1493 1502 1509 1536 1536 1549 1550 1550 1554 1563 1568 1569 1579 1580 1588 1591 1596 1597 1601 1606 1607 1609 1620 1621 1623 1633 1649 1663 1664 1681 1683 1691 1692 1694 1708 1710 1716 1716 1719 1722 1733 1734 1734 1735 1738 1752 1753 1760 1762 1770 1780 1796 1820 1821 1822 1829 1830 1831 1833 1847 1847 1847 1847 1847 1853 1865 1867 1869 1881 1891 1893 1898 1916 1917 1935 1943 1943 1946 1957 1959 1961 1964 1968 1975 1980 1984 1984 1984 1987 2003 2012 2014 2028 2030 2044 2045 2054 2056 2060 2071 2072 2077 2082 2083 2098 2101 2112 2115 2115 2115 2127 2127 2127 2131 2140 2142 2158 2175 2184 2191 2191 2197 2197 2201 2214 2214 2219 2228 2229 2240 2243 2243 2246 2254 2255 2255 2268 2268 2274 2290 2305 2314 2326 2328 2328 2328 2334 2338 2342 2342 2353 2356 2361 2368 2374 2375 2382 2385 2387 2399 2405 2418 2437 2439 2451 2452 2456 2456 2465 2465 2481 2484 2487 2489 2489 2490 
2498 2498 2503 2508 2515 2519 2522 2536 2550 2565 2565 2586 2593 2593 2602 2602 2612 2612 2618 2618 2621 2621 2621 2634 2635 2635 2636 2636 2636 2650 2678 2678 2749'
#
#N_in = 331
#
#
#Spectrum_in = list(map(int, Spectrum_in.split(" ")))
##
#print("-".join(list(map(str, LeaderboardCyclopeptideSequencing(Spectrum_in, N_in)))))
def TopLeaderboardCyclopeptideSequencing(spectrum, N):
    """Inputs an integer (N) and a mass spectrum as list of integer
    masses. Outputs all lead peptides of a maximized score as space-separated
    entries where each peptide is '-' separated mass values that represent a
    monopeptide structure. Allows for non-standard amino acids (any integer
    mass 57-200) and takes advantage of both using a leaderboard to keep N
    top scoring peptides and scoring comparing the spectrum to each generated
    spectra of peptides from the leaderboard to generate a leader peptide
    list where each peptide is a maximal achieved score. Can be adjusted to
    generate linear peptide sequences by using different scoring algorithms
    indicated in the code.

    NOTE(review): spectrum is mutated (a leading 0 is removed) and progress
    is printed to stdout each round.
    """
    leaderboard = []
    leaderpeptides = []
#    monopeptides = []
    parentmass = max(spectrum)  # total peptide mass = largest spectrum entry
    looper = 0
#    AAmasslist = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
    AAmasslist = list(range(57,201)) #use to account for non-standard peptides)
    if 0 in spectrum: #remove 0 from spectrum
        spectrum.remove(0)
#    monocount = 0
#    for spec in Spectrum:
#        if spec <= 200:
#            monocount += 1
#            monopeptides.append(spec)
#            leaderboard.append([spec])
#    for mass in AAmasslist:
#        if mass not in monopeptides:
#            monopeptides.append(mass)
#            leaderboard.append([mass])
#    monopeptides.sort()
    monocount = 0
    for spec in spectrum: #Build monopeptide list
        if spec <= 200:
            monocount += 1
    for mass in AAmasslist: #Start leadboard peptide list
        leaderboard.append([mass])
#    print(leaderboard)
    b=0
#    while b < 1:
    while looper < 6: #generate strings consistent with spectra, add all possibilitiess from monopeptide/AA list and then remove combinations whose mass isn't consistent with the spectra
        #the looper adjusts tolerance, increases length of final peptide examined
        print(b)
        b += 1
        duppeps = copy.deepcopy(leaderboard)
        leaderboard = []
        for AA in AAmasslist: # generate extended peptides
#            subcopy = copy.deepcopy(spectrum)
            for i in range(len(duppeps)):
#                for frag in duppeps[i]:
#                    subcopy.remove(frag)
                templist = copy.deepcopy(duppeps[i])
                templist.append(AA)
                leaderboard.append(templist)
        leadups = copy.deepcopy(leaderboard)
        if len(leadups[0]) >= monocount: #if number of AA in peptide reached (monocount) looper is increased to break loop. In this algorithm it is allowed to loop for 5 additional times past the parent mass being reached to allow for further optimization
            looper += 1
        for dup in leadups:
            if sum(dup) == parentmass: #if sum of peptides matches parent mass it is added to leaderpeptides list
                if leaderpeptides == []:
                    leaderpeptides.append(copy.deepcopy(dup))
#                print(dup)
#                print(sum(dup))
#                print(parentmass)
                #Code can be adjusted to generate a linear peptide by altering scoring algorithms
#                dupscore = PeptideLinearScoring(dup, spectrum)
#                lpscore = PeptideLinearScoring(leaderpeptides[0], spectrum)
                dupscore = PeptideCycloScoring(dup, spectrum)
                lpscore = PeptideCycloScoring(leaderpeptides[0], spectrum)
                if dupscore > lpscore: #if the score of the current peptide list is better than the top scoring peptide list, list is started new with the new peptide list.
                    leaderpeptides = []
                    leaderpeptides.append(copy.deepcopy(dup))
                elif dupscore == lpscore: #if the score of the current peptide list is equal to those already in the leaderpeptide list it is appended.
                    leaderpeptides.append(copy.deepcopy(dup))
            elif sum(dup) > parentmass: #if sum of peptides is greater than the parent mass it is removed.
                leaderboard.remove(dup)
#        print(leaderpeptides)
        leaderboard = TrimPeptideLeaderboard(leaderboard, spectrum, N) # Trim list. Keep values of top N scores
#    print(leaderboard)
#    print(looper)
#    print(leaderpeptide)
    outputlist = []
    for lead in leaderpeptides:
        outputlist.append("-".join(list(map(str, lead))))
    print(len(outputlist))
    return ' '.join(outputlist)
#Spectrum_in = '0 97 99 113 114 115 128 128 147 147 163 186 227 241 242 244 244 256 260 261 262 283 291 309 330 333 340 347 385 388 389 390 390 405 435 447 485 487 503 504 518 544 552 575 577 584 599 608 631 632 650 651 653 672 690 691 717 738 745 770 779 804 818 819 827 835 837 875 892 892 917 932 932 933 934 965 982 989 1039 1060 1062 1078 1080 1081 1095 1136 1159 1175 1175 1194 1194 1208 1209 1223 1322'
#Spectrum_in ='0 97 99 114 128 147 147 163 186 227 241 242 244 260 261 262 283 291 333 340 357 385 389 390 390 405 430 430 447 485 487 503 504 518 543 544 552 575 577 584 632 650 651 671 672 690 691 738 745 747 770 778 779 804 818 819 820 835 837 875 892 917 932 932 933 934 965 982 989 1030 1039 1060 1061 1062 1078 1080 1081 1095 1136 1159 1175 1175 1194 1194 1208 1209 1223 1225 1322'
#
#N_in = 1000
#
#
#Spectrum_in = list(map(int, Spectrum_in.split(" ")))
#
#tanswer = TopLeaderboardCyclopeptideSequencing(Spectrum_in, N_in)
#print(tanswer)
def AmendSpectrum(spectrum):
    """Compute the convolution of a mass spectrum.

    spectrum -- list of integer masses; mutated in place (0 is appended if
    absent and the list is sorted in descending order).

    Returns the sorted list of every positive pairwise difference between
    spectrum entries. Helpful for recovering masses missing from spectra.
    """
    if 0 not in spectrum:
        spectrum.append(0)
    spectrum.sort(reverse = True)
    size = len(spectrum)
    diffs = [spectrum[a] - spectrum[b]
             for a in range(size - 1)
             for b in range(a + 1, size)
             if spectrum[a] != spectrum[b]]
    return sorted(diffs)
#Spectrum_in = '0 137 186 323'
#
##Spectrum_in = '0 57 118 179 236 240 301'
#
##Spectrum_in = '843 872 644 244 301 227 257 700 871 415 543 915 516 332 731 216 445 115 584 644 916 446 171 272 372 672 515 858 528 315 356 559 715 229 743 128 99 103 503 443 471 744 571 747 815 129 340 101 973 972 658 755 687 616 618 974 988 387 212 856 1087 860 572 642 114 429 959 1030 113 469 286 400 57 641 344 830 231 215 772 984 172 958 544 959 986 443 343 875 786 128 0 801'
##
#Spectrum_in = list(map(int, Spectrum_in.split(" ")))
##
#print(" ".join(list(map(str, AmendSpectrum(Spectrum_in)))))
def CorrectedAAList(spectrum, M):
    """Infer likely amino-acid masses from a mass spectrum.

    spectrum -- list of integer masses; sorted in place (descending).
    M -- number of top-multiplicity masses to keep; every mass tying the
        cutoff multiplicity is also retained, so more than M masses may be
        returned.

    Builds the spectral convolution (all positive pairwise differences),
    keeps only differences in the amino-acid mass range 57-200, counts how
    often each occurs, and returns -- sorted ascending -- every mass whose
    multiplicity reaches the cutoff level covering the top M masses.
    """
    spectrum.sort(reverse = True)
    # Multiplicity of each candidate amino-acid mass in the convolution.
    multiplicity = {}
    for a in range(len(spectrum) - 1):
        for b in range(a + 1, len(spectrum)):
            diff = spectrum[a] - spectrum[b]
            if 57 <= diff <= 200:
                multiplicity[diff] = multiplicity.get(diff, 0) + 1
    # Walk distinct multiplicity levels from highest to lowest until at
    # least M masses are covered; that level becomes the inclusion cutoff.
    # If M is never reached, every mass is kept.
    levels = sorted(set(multiplicity.values()), reverse = True)
    cutoff = levels[-1] if levels else 0
    all_counts = list(multiplicity.values())
    covered = 0
    for level in levels:
        covered += all_counts.count(level)
        if covered >= M:
            cutoff = level
            break
    return sorted(mass for mass, times in multiplicity.items() if times >= cutoff)
#Spectrum_in = '0 57 118 179 236 240 301'
#
##Spectrum_in = '843 872 644 244 301 227 257 700 871 415 543 915 516 332 731 216 445 115 584 644 916 446 171 272 372 672 515 858 528 315 356 559 715 229 743 128 99 103 503 443 471 744 571 747 815 129 340 101 973 972 658 755 687 616 618 974 988 387 212 856 1087 860 572 642 114 429 959 1030 113 469 286 400 57 641 344 830 231 215 772 984 172 958 544 959 986 443 343 875 786 128 0 801'
##
#Spectrum_in = list(map(int, Spectrum_in.split(" ")))
##
#print(" ".join(list(map(str, CorrectedAAList(Spectrum_in, 20)))))
def CyclopeptideSequencing(spectrum, M, N):
    """Inputs two integers (M and N) and a mass spectrum of an unknown
    cyclopeptide as list of integer masses.
    Outputs all lead peptides of a maximal score using N as a cutoff for the
    Trim function including ties, as a list of lists where each peptide is a
    list of integer masses.
    Uses an AA list generated from the spectral convolution of the spectrum,
    returning M top-scoring masses between 57 - 200 including ties.
    Allows for non-standard amino acids and takes advantage of both a leader
    board that keeps N top scoring peptides and scoring algorithms that compare
    the spectrum to each generated spectra of peptides from the leaderboard to
    generate a leader peptide list where each peptide is maximal achieved score.
    Can be adjusted to generate linear peptide sequences by using different
    scoring algorithms indicated in the code.

    NOTE(review): spectrum is mutated (a leading 0 is removed, and
    CorrectedAAList sorts it descending); progress is printed to stdout.
    The loop runs until over-mass pruning empties the leaderboard.
    """
    leaderboard = []
    leaderpeptides = []
    parentmass = max(spectrum)  # total peptide mass = largest spectrum entry
    looper = 0
    # Candidate amino-acid masses inferred from the spectral convolution.
    AAmasslist = CorrectedAAList(spectrum, M)
    if 0 in spectrum: #remove 0 from spectrum
        spectrum.remove(0)
#    monocount = 0
#    for spec in Spectrum:
#        if spec <= 200:
#            monocount += 1
#            monopeptides.append(spec)
#            leaderboard.append([spec])
#    for mass in AAmasslist:
#        if mass not in monopeptides:
#            monopeptides.append(mass)
#            leaderboard.append([mass])
#    monopeptides.sort()
    monocount = 0
    monopeptides = []
    for spec in spectrum: #Build AA list
        if spec <= 200:
            monocount += 1
            leaderboard.append([spec])
            monopeptides.append(spec)
    for mass in AAmasslist: #Start peptide list
        if mass not in monopeptides:
            leaderboard.append([mass])
#    print(leaderboard)
    b=0
#    while b < 1:
    while leaderboard != []: #generate strings consistent with spectra, add all possibilitiess from monopeptide/AA list and then remove combinations whose mass isn't consistent with the spectra
#    while looper < 6: #the looper adjusts tolerance, increases length of final peptide examined
        print(b)
        b += 1
        duppeps = copy.deepcopy(leaderboard)
        leaderboard = []
        for AA in AAmasslist: # generate extended peptides
#            subcopy = copy.deepcopy(Spectrum)
            for i in range(len(duppeps)):
#                for frag in duppeps[i]:
#                    subcopy.remove(frag)
                templist = copy.deepcopy(duppeps[i])
                templist.append(AA)
                leaderboard.append(templist)
#        print(leaderboard)
        leadups = copy.deepcopy(leaderboard)
        if len(leadups[0]) >= monocount:
            looper += 1
        for dup in leadups:
            if sum(dup) == parentmass:
                if leaderpeptides == []:
                    leaderpeptides.append(copy.deepcopy(dup))
#                print(dup)
#                print(sum(dup))
#                print(parentmass)
                #Code can be adjusted to generate a linear peptide by altering scoring algorithms
#                dupscore = PeptideLinearScoring(dup, Spectrum)
#                lpscore = PeptideLinearScoring(leaderpeptides[0], Spectrum)
                dupscore = PeptideCycloScoring(dup, spectrum)
                lpscore = PeptideCycloScoring(leaderpeptides[0], spectrum)
                if dupscore > lpscore:
                    leaderpeptides = []
                    leaderpeptides.append(copy.deepcopy(dup))
                elif dupscore == lpscore:
                    leaderpeptides.append(copy.deepcopy(dup))
            elif sum(dup) > parentmass: #this will result in the leader board being eventually empty.
                leaderboard.remove(dup)
#        print(leaderpeptides)
        leaderboard = TrimPeptideLeaderboard(leaderboard, spectrum, N) # Trim list. Keep values of top N scores
#    print(leaderboard)
#    print(looper)
#    print(leaderpeptides)
#    outputlist = []
#    for lead in leaderpeptides:
#        outputlist.append("-".join(list(map(str, lead))))
#    print(len(outputlist))
#    return ' '.join(outputlist)
    return leaderpeptides
def ConvertPeptideMassToAA(listofpeptidelists):
    """Inputs a list of peptides where each peptide is a list of integer
    masses.
    Outputs a list of the corresponding peptides written in one-letter
    amino-acid code. Nonstandard or unknown amino acids are rendered as
    their integer mass enclosed in '()'.
    """
    # Bug fix: 113 is isoleucine/leucine -> '(I/L)' (was '(I/K)'; K is 128,
    # already grouped with Q). Matches the mass tables used elsewhere in
    # this file.
    MasstoAAdict = {0: '', 57: 'G', 71: 'A', 87: 'S', 97: 'P', 99: 'V', 101: 'T', 103: 'C', 113: '(I/L)', 114: 'N', 115: 'D', 128: '(K/Q)', 129: 'E', 131: 'M', 137: 'H', 147: 'F', 156: 'R', 163: 'Y', 186: 'W'}
    newpeptides = []
    for lst in listofpeptidelists:
        templist = []
        for mass in lst:
            if mass in MasstoAAdict:
                templist.append(MasstoAAdict[mass])
            else:
                # Unknown mass: show it verbatim in parentheses.
                templist.append('(' + str(mass) + ')')
        temppep = ''.join(templist)
        newpeptides.append(temppep)
    return newpeptides
#Spectrum_in = '57 57 71 99 129 137 170 186 194 208 228 265 285 299 307 323 356 364 394 422 493'
#M_in = 20
#N_in = 60
#Spectrum_in = '328 887 229 540 655 128 584 128 688 360 1143 889 832 532 129 483 1187 872 1013 1129 815 912 370 1116 412 372 559 584 418 172 584 916 1116 1059 1187 660 698 461 115 200 313 603 128 1002 1116 1015 428 469 1044 242 1115 647 1116 332 71 186 57 589 256 1001 1244 1015 556 712 114 532 185 229 315 1173 57 816 1017 303 456 784 826 1072 475 103 788 788 300 431 759 557 1145 460 660 232 101 229 546 227 889 485 227 769 1015 1141 355 641 712 357 813 597 355 874 231 1012 775 685 243 884 931 929 128 357 456 99 1130 660 687 1017 944 941 704 761 783 1058 429 0 887 988'
#
#M_in = 17
#N_in = 359
#Spectrum_in = '0 97 99 113 114 115 128 128 147 147 163 186 227 241 242 244 244 256 260 261 262 283 291 309 330 333 340 347 385 388 389 390 390 405 435 447 485 487 503 504 518 544 552 575 577 584 599 608 631 632 650 651 653 672 690 691 717 738 745 770 779 804 818 819 827 835 837 875 892 892 917 932 932 933 934 965 982 989 1039 1060 1062 1078 1080 1081 1095 1136 1159 1175 1175 1194 1194 1208 1209 1223 1322'
#
#M_in = 20
#N_in = 1000
#
#
#Spectrum_in = list(map(int, Spectrum_in.split(" ")))
#
#petideseq = CyclopeptideSequencing(Spectrum_in, M_in, N_in)
#print(petideseq)
#print(ConvertPeptideMassToAA(petideseq))
"""CHALLENGE: Tyrocidine B1 is just one of many known NRPs produced by Bacillus
brevis. A single bacterial species may produce dozens of different antibiotics,
and even after 70 years of research, there are likely undiscovered antibiotics
produced by Bacillus brevis. Try to sequence the tyrocidine corresponding to
the real experimental spectrum below. Since the fragmentation technology used
for generating the spectrum tends to produce ions with charge +1, you can
safely assume that all charges are +1. Return the peptide as a collection of
space-separated integer masses."""
def ProtonMassSpectrumToIntegerSpectrum1(spectrum):
    """Convert a +1-charged experimental mass spectrum to integer masses.

    Inputs a spectrum (as a list of floats) where ions are assumed to have
    a +1 charge and a mass error of 0.3 Da.
    Outputs a sorted integer mass spectrum with one consensus integer per
    peak (two integers for a peak exactly halfway between integers).

    Fix: the previous consensus loop could reference ``top`` before
    assignment — or silently carry a stale ``top`` over from an earlier
    mass — whenever its candidate-matching bookkeeping fell through.  The
    consensus value is now derived directly and safely.
    """
    oneless = np.array(spectrum) - 1  # strip the assumed +1 proton charge
    rounded = []
    for mass in oneless:
        if mass - int(mass) == 0.5:
            # Exactly halfway between two integers: keep both candidates.
            rounded.append(int(mass) + 1)
            rounded.append(int(mass))
        else:
            # round(mass-0.3) <= round(mass) <= round(mass+0.3) span at
            # most 1, so at least two of the three agree, and the agreeing
            # pair always contains round(mass): the consensus integer mass
            # is simply round(mass).
            rounded.append(int(round(mass)))
    rounded.sort()
    return rounded
def ProtonMassSpectrumToIntegerSpectrum2(spectrum):
    """Convert a +1-charged mass spectrum (floats, 0.3 Da error) into a
    sorted list of candidate integer masses.

    Unlike the single-consensus variant, every distinct rounding of
    mass-0.3 / mass / mass+0.3 is kept for each peak, so one peak may
    contribute two integer candidates."""
    uncharged = np.array(spectrum) - 1  # strip the assumed +1 proton charge
    integer_masses = []
    for mass in uncharged:
        if mass - int(mass) == 0.5:
            # Halfway case: keep the integers on both sides.
            integer_masses.append(int(mass) + 1)
            integer_masses.append(int(mass))
        else:
            # Keep each distinct rounding within the 0.3 Da error window.
            for candidate in {round(mass - 0.3), round(mass), round(mass + 0.3)}:
                integer_masses.append(int(candidate))
    integer_masses.sort()
    return integer_masses
def ProtonMassSpecCyclopeptideSequencing(massspectrum, M, N):
    """Sequence a cyclopeptide from a raw +1-charged float mass spectrum.

    The float spectrum is first reduced to consensus integer masses, then
    handed to the leaderboard cyclopeptide sequencing routine with M
    candidate amino-acid masses and a leaderboard of size N.
    """
    return CyclopeptideSequencing(
        ProtonMassSpectrumToIntegerSpectrum1(massspectrum), M, N)
# Real experimental Tyrocidine spectrum: +1-charged ion masses as floats.
massspec_in = '371.5 375.4 390.4 392.2 409.0 420.2 427.2 443.3 446.4 461.3 471.4 477.4 491.3 505.3 506.4 519.2 536.1 546.5 553.3 562.3 588.2 600.3 616.2 617.4 618.3 633.4 634.4 636.2 651.5 652.4 702.5 703.4 712.5 718.3 721.0 730.3 749.4 762.6 763.4 764.4 779.6 780.4 781.4 782.4 797.3 862.4 876.4 877.4 878.6 879.4 893.4 894.4 895.4 896.5 927.4 944.4 975.5 976.5 977.4 979.4 1005.5 1007.5 1022.5 1023.7 1024.5 1039.5 1040.3 1042.5 1043.4 1057.5 1119.6 1120.6 1137.6 1138.6 1139.5 1156.5 1157.6 1168.6 1171.6 1185.4 1220.6 1222.5 1223.6 1239.6 1240.6 1250.5 1256.5 1266.5 1267.5 1268.6'
massspec_in = list(map(float, massspec_in.split(" ")))
# M: number of candidate amino-acid masses kept; N: leaderboard size.
M_in = 20
N_in = 10000
#print(ProtonMassSpectrumToIntegerSpectrum1(massspec_in))
#print(ProtonMassSpectrumToIntegerSpectrum2(massspec_in))
# Run leaderboard sequencing and print both the mass lists and the
# corresponding one-letter amino-acid translations.
pepseq = ProtonMassSpecCyclopeptideSequencing(massspec_in, M_in, N_in)
print(pepseq)
print(ConvertPeptideMassToAA(pepseq))
#
#""" using proton mass 2 M20, N1000 -> 6 answers
#147-112-130-164-98-113-131-146-145-82 146-113-130-164-98-113-131-146-145-82 130-114-130-131-131-146-96-129-164-97 114-130-131-131-146-96-129-164-97-130 114-129-164-98-113-130-147-145-97-131 114-129-164-98-113-130-128-164-97-131"""
#using ProtonMassSpectrumToIntegerSpectrum1
#[[115, 146, 130, 113, 129, 163, 82, 163, 112, 115], [97, 164, 128, 116, 130, 113, 128, 164, 99, 129], [164, 98, 128, 145, 116, 129, 163, 82, 113, 130]]
#['D(146)(130)(I/K)EY(82)Y(112)D', 'P(164)(K/Q)(116)(130)(I/K)(K/Q)(164)VE', '(164)(98)(K/Q)(145)(116)EY(82)(I/K)(130)']
#using ProtonMassSpectrumToIntegerSpectrum2
#[[147, 112, 130, 164, 98, 113, 131, 146, 145, 82], [146, 113, 130, 164, 98, 113, 131, 146, 145, 82], [130, 114, 130, 131, 131, 146, 96, 129, 164, 97], [114, 130, 131, 131, 146, 96, 129, 164, 97, 130], [114, 129, 164, 98, 113, 130, 147, 145, 97, 131], [114, 129, 164, 98, 113, 130, 128, 164, 97, 131]]
#['F(112)(130)(164)(98)(I/K)M(146)(145)(82)', '(146)(I/K)(130)(164)(98)(I/K)M(146)(145)(82)', '(130)N(130)MM(146)(96)E(164)P', 'N(130)MM(146)(96)E(164)P(130)', 'NE(164)(98)(I/K)(130)F(145)PM', 'NE(164)(98)(I/K)(130)(K/Q)(164)PM']
#
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#def CalcAAFragMassDict(frag):
# """inputs an amino acid fragment and outputs its integer mass
# It uses a combination of a stored dictionary and a calculation"""
# AAtoMassdict = {'': 0, 'A': 71, 'C': 103, 'D': 115, 'E': 129, 'F': 147, 'G': 57, 'H': 137, 'I': 113, 'K': 128, 'L': 113, 'M': 131, 'N': 114, 'P': 97, 'Q': 128, 'R': 156, 'S': 87, 'T': 101, 'V': 99, 'W': 186, 'Y': 163}
# mass = 0
# with open("PeptideMassDict.txt", "r+") as textfile:
# PeptideMassDict = textfile.read().split("\n")
# if frag in PeptideMassDict:
# return PeptideMassDict[frag]
# else:
# for AA in frag:
# mass += AAtoMassdict[AA]
# PeptideMassDict[frag] = mass
# textfile.write(PeptideMassDict)
# return mass
#def CyclopeptideSequencing1(Spectrum):
# """Inputs a Mass spectrum as list of integer masses of fragments.
# Outputs a cyclic peptide consistent with the spectrum.
# Note: Due to duplicated masses I represents both I and L and K
# represents both K and Q in AAs list"""
# AAs = 'GASPVTCINDKEMHFRYW'
# peptides = ['']
# foundmasses = []
## remainingmasses = copy.deepcopy(Spectrum)
# for AA in AAs:
# peptides.append(AA)
# while foundmasses.sort() != Spectrum:
# for peptide in peptides:
# peptidemass = CalcAAFragMass(peptide)
# if peptidemass in Spectrum: #adds mass to masslist if found, removes peptide if no match in spectra
# foundmasses.append(peptidemass)
## remainingmasses.remove(peptidemass)
# else:
# peptides.remove(peptide)
# for pep in peptides:
# if pep != '':
# for AA in AAs:
# peptides.append(pep+AA)
# peptides.append(AA+pep)
## print(peptides)
#Spectrum_in = '0 113 128 186 241 299 314 427'
#Spectrum_in = Spectrum_in.split(" ")
#
#print(CyclopeptideSequencing1(Spectrum_in))
# while max(peptidemasses) != max(Spectrum):
# for ma in peptidemasses:
# for AAmass in AAmasses:
# m = ma + AAmass
# if m in Spectrum and ma != 0:
# peptidemasses.append(m)
# peptidemstr.append(str(ma)+"-" + str(AAmass))
# peptidemstr.append(str(AAmass)+"-" + str(ma))
# if AAmass in peptidemassdict:
# for y in range(len(peptidemassdict[AAmass])):
# if m not in peptidemassdict:
# peptidemassdict[m] = [str(ma)+"-"+ peptidemassdict[AAmass][y], peptidemassdict[AAmass][y] +"-" + str(ma)]
# else:
# peptidemassdict.setdefault(m, []).append(str(ma)+"-"+ peptidemassdict[AAmass][y])
# peptidemassdict.setdefault(m, []).append(peptidemassdict[AAmass][y] +"-" + str(ma))
# #peptidemassdict[m].append(str(ma)+"-"+ peptidemassdict[AAmass][y])
# #peptidemassdict[m].append(peptidemassdict[AAmass][y] +"-" + str(ma))
# else:
# peptidemassdict[m] = [str(ma)+"-" + str(AAmass), str(AAmass)+"-" + str(ma)]
#
## peptidemasses = peptidemasses.sort()
## peptidemstr = peptidemstr.sort(key=int)
# print("peptidemasses")
# print(peptidemasses)
# print("peptidemassdict")
# print(peptidemstr)
# raise SystemExit(0)
#
#def CyclopeptideMassFrags(Spectrum):
# """Inputs a Mass spectrum as list of integer masses of fragments.
# Outputs a cyclic peptide as masses of fragments """
## AAs = 'GASPVTCINDKEMHFRYW'
# AAmasses = [0, 57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
## intactmass = max(Spectrum)
# peptidemstr = []
# peptidemasses = []
# peptidemassdict = {}
## foundmasses = []
# for mass in AAmasses:
# if mass in Spectrum:
# peptidemasses.append(mass)
# peptidemassdict[mass] = [str(mass)]
# peptidemstr.append(str(mass))
# print("peptidemasses")
# print(peptidemasses)
# print("peptidemstr")
# print(peptidemstr)
# print("peptidemassdict")
# print(peptidemassdict)
# while max(peptidemasses) != max(Spectrum):
# for ma in peptidemasses:
# for AAmass in AAmasses:
# m = ma + AAmass
# if m in Spectrum and ma != 0:
# peptidemasses.append(m)
# peptidemstr.append(str(ma)+"-" + str(AAmass))
# peptidemstr.append(str(AAmass)+"-" + str(ma))
# if AAmass in peptidemassdict:
# for y in range(len(peptidemassdict[AAmass])):
# if m not in peptidemassdict:
# peptidemassdict[m] = [str(ma)+"-"+ peptidemassdict[AAmass][y], peptidemassdict[AAmass][y] +"-" + str(ma)]
# else:
# peptidemassdict.setdefault(m, []).append(str(ma)+"-"+ peptidemassdict[AAmass][y])
# peptidemassdict.setdefault(m, []).append(peptidemassdict[AAmass][y] +"-" + str(ma))
# #peptidemassdict[m].append(str(ma)+"-"+ peptidemassdict[AAmass][y])
# #peptidemassdict[m].append(peptidemassdict[AAmass][y] +"-" + str(ma))
# else:
# peptidemassdict[m] = [str(ma)+"-" + str(AAmass), str(AAmass)+"-" + str(ma)]
#
## peptidemasses = peptidemasses.sort()
## peptidemstr = peptidemstr.sort(key=int)
# print("peptidemasses")
# print(peptidemasses)
# print("peptidemassdict")
# print(peptidemstr)
# raise SystemExit(0)
#def GenerateMassAAFragdictionary(k):
# """ Generates a dictionary of all possible permuations of AA up to length k with their
# integer masses. Note: Due to duplicated I represents both I and L, K
# represents both K and Q"""
# AAs = 'GASPVTCINDKEMHFRYW'
# permslist = []
# MassAAFragdictionary = {}
# for AA in AAs:
# permslist.append(AA)
# for i in range(1,k+1): #i is size of fragment
# perms = list(itertools.permutations(AAs,i))
# for item in perms:
# permslist.append(''.join(list(item)))
# for frag in permslist:
# MassAAFragdictionary[frag] = CalcAAFragMass(frag)
#
# return str(MassAAFragdictionary)
#
#with open("PeptideMassDict.txt", "w+") as textfile:
# textfile.write(GenerateMassAAFragdictionary(4))
#def CyclopeptideMassFrags(Spectrum):
# """Inputs a Mass spectrum as list of integer masses of fragments.
# Outputs a cyclic peptide as masses of fragments """
## AAs = 'GASPVTCINDKEMHFRYW'
# MasstoAAdict = {0: '', 57: 'G', 71: 'A', 87: 'S', 97: 'P', 99: 'V', 101: 'T', 103: 'C', 113: 'I', 114: 'N', 115: 'D', 128: 'K', 129: 'E', 131: 'M', 137: 'H', 147: 'F', 156: 'R', 163: 'Y', 186: 'W'}
# AAmasses = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
# intactmass = max(Spectrum)
## peptidemstr = []
# peptidemasses = []
# peptidemassdict = {}
# if 0 in Spectrum: #remove 0 from spectrum
# Spectrum.remove(0)
# for mass in AAmasses:
# if mass in Spectrum:
# peptidemasses.append(mass)
# peptidemassdict[mass] = MasstoAAdict[mass]
## peptidemstr.append(str(mass))
# L = len(peptidemasses)
# for i in range(L,len(Spectrum)):
# print(i)
# specmass = Spectrum[i]
# for AAmass in peptidemasses:
# submass = specmass - AAmass
# if submass in Spectrum:
# peptidemassdict[specmass] = MasstoAAdict[submass] + MasstoAAdict[AAmass]
# MasstoAAdict[specmass] = MasstoAAdict[submass] + MasstoAAdict[AAmass]
#
## print("peptidemasses")
## print(peptidemasses)
## print("peptidemassdict")
## print(peptidemassdict)
## print(peptidemassdict)
## peptidemassdict[specmass] = set(peptidemassdict[specmass])
## peptidemassdict[specmass] = list(peptidemassdict[specmass])
## print(peptidemassdict)
## print("LAST")
## print(peptidemassdict)
# completelist = peptidemassdict[intactmass]
# print(completelist)
# possibilities = [completelist]
# first = completelist[:1]
# firstreverse = first[::-1]
# second = completelist[1]
# third = completelist[2:]
# thirdreverse = third[::-1]
# inverse = thirdreverse + second + firstreverse
# possibilities.append(inverse)
# print(possibilities)
## for j in range(1,len(completelist)):
#
## listdict = {}
## for j in range(len(completelist)):
## if isinstance(completelist[j], int):
## listdict[j] = 1
## else:
## listdict[j] = len(completelist[j])
## print(listdict)
## peptidemassfrags = []
### for x in range(len(completelist)):
### if listdict[x] ==
#
# return peptidemassdict[intactmass]
#def CyclopeptideSequencing(Spectrum):
# peptides = []
## peptideseq = []
## peptidemasses = []
# AAs = []
## AAmasses = []
# parentmass = max(Spectrum)
## AAtoMassdict = {'': 0, 'A': 71, 'C': 103, 'D': 115, 'E': 129, 'F': 147, 'G': 57, 'H': 137, 'I': 113, 'K': 128, 'L': 113, 'M': 131, 'N': 114, 'P': 97, 'Q': 128, 'R': 156, 'S': 87, 'T': 101, 'V': 99, 'W': 186, 'Y': 163}
# AAmasslist = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
## MasstoAAdict = {0: '', 57: 'G', 71: 'A', 87: 'S', 97: 'P', 99: 'V', 101: 'T', 103: 'C', 113: 'I', 114: 'N', 115: 'D', 128: 'K', 129: 'E', 131: 'M', 137: 'H', 147: 'F', 156: 'R', 163: 'Y', 186: 'W'}
# if 0 in Spectrum: #remove 0 from spectrum
# Spectrum.remove(0)
# for mass in Spectrum:
# if mass in AAmasslist:
# if len(str(mass)) < 3:
# peptides.append('0'+str(mass))
# AAs.append('0'+str(mass))
# else:
# peptides.append(str(mass))
# AAs.append(str(mass))
## print("peptides")
## print(peptides)
## print("AAs")
## print(AAs)
# i=1
# looper = 0
# while looper == 0:
# duppeps = copy.deepcopy(peptides)
## print(duppeps)
# print(i)
# i += 1
# for pep in duppeps:
## print(pep)
# if len(pep) > 3:
# pepmass = 0
## print("helll")
# for t in range(0,len(pep),3):
# pepmass += int(pep[t:t+3])
## print(pepmass)
# else:
# pepmass = int(pep)
## print("else")
# for AA in AAs:
## print(pepmass)
## print(AA)
# if pepmass + int(AA) in Spectrum:
## print(pep+AA)
# peptides.append(pep+AA)
# peptides.append(AA+ pep)
# L = len(pep+AA)
##INSTEAD OF USING STRINGS, USE NESTED LISTS IN PEPTIDES, THEN GET RID OF SMALLEST LISTS
## LAST LOOP WILL BE A LOOP OF "-".JOIN() MUCH FASTER.
## print(L)
# if pepmass == parentmass:
## print(pepmass)
## print(parentmass)
# looper += 1
## print(peptides)
# dupdup = copy.deepcopy(peptides)
# for peptide in dupdup:
# if len(peptide) < L:
# peptides.remove(peptide)
#
# peptides = set(peptides)
# peptides = list(peptides)
## print("peptides")
## print(peptidelist)
# outputlist = []
# for p in peptides:
# temp = ""
# for t in range(0,len(p),3):
# if p[t:t+3][0] == '0':
# temp += p[t+1:t+3] + "-"
# else:
# temp += p[t:t+3] + "-"
## print(temp)
# outputlist.append(temp[:-1])
# return " ".join(outputlist)
#def pathchecker(spectrum, path1, path2):
# # Takes two paths, and checks they are connected by a single AA overlap.
# # The paths are represented as integers in lists.
# # Returns the path if they are connected, or an empty list if not.
# # path1 can be any length, path2 should be a dipeptide.
# # Also makes sure that any amino acids are present in the spectrum the
# # correct number of times.
# if path1[-1] != path2[0]:
# return []
# sublist = path1[:-1]
# sublist = sublist + path2
# spec = copy.deepcopy(spectrum)
# if sum(sublist) not in spec:
# return []
# if sum(sublist) in spectrum:
# for x in sublist:
# if x in spec:
# spec.remove(x)
# else:
# return []
# return sublist
#def pathchecker(Spectrum, path1, path2):
# # Takes two paths, and checks they are connected by a single AA overlap.
# # The paths are represented as integers in lists.
# # Returns True if they are connected, False if they are not.
# if path1[-1] != path2[0]:
# return []
# sublist = path1[:-1]
# sublist = sublist + path2
# if sum(sublist) in Spectrum:
# return sublist
# if sum(sublist) not in Spectrum:
# return []
#def CPeptideSequence(Spectrum):
# peptides = []
# dipeptides = []
# AAs = []
# intactpeptide = max(Spectrum)
# AAmasslist = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
# if 0 in Spectrum: #remove 0 from spectrum
# Spectrum.remove(0)
# specmasslist = copy.deepcopy(Spectrum)
# specmasslist.remove(intactpeptide)
# for mass in AAmasslist:
# if mass in Spectrum:
# peptides.append(mass)
# AAs.append(mass)
# looper = 0
# b= 0
# # Generates a list of all dipeptides present in spectrum.
# for pep in peptides:
# subcopy = copy.deepcopy(AAs)
# subcopy.remove(pep)
# for AA in AAs:
# if AA in subcopy and pep + AA in Spectrum:
# dipeptides.append([pep, AA])
# peptides = copy.deepcopy(dipeptides)
#
# while looper == 0:
# print(b)
# b += 1
# paths = []
# for peptide in peptides:
# subcopy = copy.deepcopy(AAs)
# for f in peptide:
# subcopy.remove(f)
# for frag in dipeptides:
# if frag[1] in subcopy:
# fragpath = pathchecker(Spectrum, peptide, frag)
# if fragpath != []:
# paths.append(fragpath)
# if sum(fragpath) == intactpeptide:
# looper = 1
# peptides = copy.deepcopy(paths)
## print(paths)
# outputlist = []
# for path in paths:
# outputlist.append("-".join(list(map(str, path))))
# return " ".join(outputlist)
#def CyclopeptideMassFrags(Spectrum):
# """Inputs a Mass spectrum as list of integer masses of fragments.
# Outputs a cyclic peptide as masses of fragments """
## AAs = 'GASPVTCINDKEMHFRYW'
# AAmasses = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
# intactmass = max(Spectrum)
## peptidemstr = []
# peptidemasses = []
# peptidemassdict = {}
# if 0 in Spectrum: #remove 0 from spectrum
# Spectrum.remove(0)
# for mass in AAmasses:
# if mass in Spectrum:
# peptidemasses.append(mass)
# peptidemassdict[mass] = [str(mass)]
## peptidemstr.append(str(mass))
# L = len(peptidemasses)
# try:
# for i in range(L,len(Spectrum)):
# print(i)
# specmass = Spectrum[i]
## print(specmass)
# for AAmass in peptidemasses:
# submass = specmass - AAmass
# if submass in Spectrum:
# if submass in peptidemassdict:
# for m in peptidemassdict[submass]:
# peptidemassdict.setdefault(specmass, []).append(m+"-"+ peptidemassdict[AAmass][0])
# peptidemassdict.setdefault(specmass, []).append(peptidemassdict[AAmass][0]+"-"+ m)
## peptidemassdict[specmass] = [peptidemassdict[submass][m], peptidemassdict[AAmass]]
# else:
# print("Error: " + str(submass) +" submass not in peptide dictionary")
# except Exception as ex:
# template = "An exception of type {0} occurred. Arguments:\n{1!r}"
# message = template.format(type(ex).__name__, ex.args)
# print(message)
#
#
## peptidemassdict[specmass] = set(peptidemassdict[specmass])
## peptidemassdict[specmass] = list(peptidemassdict[specmass])
#
## print("peptidemasses")
## print(peptidemasses)
## print("peptidemassdict")
## print(peptidemassdict)
## print("peptidemassdict")
## print(peptidemassdict)
#
# return peptidemassdict[intactmass]
#def CyclopeptideSequencing(Spectrum):
# peptides = []
# peptideseq = []
# peptidemasses = []
# AAs = []
# AAmasses = []
# parentmass = max(Spectrum)
# AAtoMassdict = {'': 0, 'A': 71, 'C': 103, 'D': 115, 'E': 129, 'F': 147, 'G': 57, 'H': 137, 'I': 113, 'K': 128, 'L': 113, 'M': 131, 'N': 114, 'P': 97, 'Q': 128, 'R': 156, 'S': 87, 'T': 101, 'V': 99, 'W': 186, 'Y': 163}
# AAmasslist = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
# MasstoAAdict = {0: '', 57: 'G', 71: 'A', 87: 'S', 97: 'P', 99: 'V', 101: 'T', 103: 'C', 113: 'I', 114: 'N', 115: 'D', 128: 'K', 129: 'E', 131: 'M', 137: 'H', 147: 'F', 156: 'R', 163: 'Y', 186: 'W'}
# if 0 in Spectrum: #remove 0 from spectrum
# Spectrum.remove(0)
# for mass in AAmasslist:
# if mass in Spectrum:
# peptides.append(MasstoAAdict[mass])
# peptidemasses.append(str(mass))
# AAmasses.append(mass)
# AAs.append(MasstoAAdict[mass])
## print("peptides")
## print(peptides)
## print("AAs")
## print(AAs)
## L = len(AAs)
# i=1
# while peptideseq == []:
# duppeps = copy.deepcopy(peptides)
# print(i)
# i += 1
# for pep in duppeps:
# for AA in AAs:
# peptides.append(pep+AA)
# peptides.append(AA+pep)
# peptides = set(peptides)
# peptides = list(peptideseq)
## print("peptides")
## print(peptides)
# for p in peptides :
## print(p)
# if p in AAtoMassdict:
# fragmass = AAtoMassdict[p]
# else:
# fragmass = CalcAAFragMass(p)
# AAtoMassdict[p] = fragmass
# if fragmass not in Spectrum:
# peptides.remove(p)
# if fragmass == parentmass:
# peptideseq.append(p)
## peptideseq = set(peptideseq)
## peptideseq = list(peptideseq)
# return peptideseq
#def SequenceCycloPeptide(Spectrum):
# peptides = []
# AAs = []
# intactpeptide = max(Spectrum)
# AAmasslist = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
# if 0 in Spectrum: #remove 0 from spectrum
# Spectrum.remove(0)
# for mass in AAmasslist:
# if mass in Spectrum:
# if len(str(mass)) < 3:
# peptides.append([str(mass)])
# AAs.append(str(mass))
# else:
# peptides.append([str(mass)])
# AAs.append(str(mass))
#
# b=1
# looper = 0
# while looper == 0:
## while i < 3:
# duppeps = copy.deepcopy(peptides)
## print(duppeps)
# print(b)
# b += 1
# for AA in AAs:
#
# for i in range(len(duppeps)):
# # for peplist in duppeps:
# # print(pep)
# pepmass = 0
#
# for pep in duppeps[i]:
# pepmass += int(pep)
# fragmass = pepmass + int(AA)
# if fragmass in Spectrum:
## print(pep+AA)
# reverse = duppeps[i][::-1]
# reverse.insert(0,AA)
# peptides[i].append(AA)
# peptides.append(reverse)
##NEED TO FIND A WAY SO THAT SMALLEST ONE DOESN'T ALWAYS GO FIRST NEED 186-128-113
# L = len(peptides[i])
## print(peptides[i])
# print(peptides)
# num = len(peptides)
# intactcount = 0
# for peptide in peptides:
# sumpeps = sum(list(map(int, peptide)))
# if sumpeps == intactpeptide:
# intactcount += 1
# if intactcount == num:
# looper += 1
#
# dupdup = copy.deepcopy(peptides)
#
# for peptidelist in dupdup:
# if len(peptidelist) < L:
# peptides.remove(peptidelist)
# print(peptides)
# for peptidelist in dupdup:
# peptides.append(peptidelist[::-1])
# dupdup = copy.deepcopy(peptides)
# outputlist = []
# for peppep in peptides:
# outputlist.append("-".join(peppep))
# outputlist = set(outputlist)
# outputlist = list(outputlist)
# return " ".join(outputlist)
# b=1
# looper = 0
# while looper == 0:
## while i < 3:
# duppeps = copy.deepcopy(peptides)
## print(duppeps)
# print(b)
# b += 1
# for i in range(len(duppeps)):
## for peplist in duppeps:
## print(pep)
# pepmass = 0
#
# for pep in duppeps[i]:
# pepmass += int(pep)
# for AA in AAs:
## print(pepmass)
## print(AA)
# fragmass = pepmass + int(AA)
# if fragmass in Spectrum:
## print(pep+AA)
# reverse = duppeps[i][::-1]
# reverse.insert(0,AA)
# peptides[i].append(AA)
# peptides.append(reverse)
##NEED TO FIND A WAY SO THAT SMALLEST ONE DOESN'T ALWAYS GO FIRST NEED 186-128-113
# L = len(peptides[i])
## print(peptides[i])
# sumpeps = sum(list(map(int, peptides[i])))
# if sumpeps == intactpeptide:
## print(pepmass)
## print(parentmass)
# looper += 1
# print(peptides)
# dupdup = copy.deepcopy(peptides)
## for peptidelist in dupdup:
## peptides.append(peptidelist[::-1])
## dupdup = copy.deepcopy(peptides)
# for peptidelist in dupdup:
# if len(peptidelist) < L:
# peptides.remove(peptidelist)
# print(peptides)
## outputlist = []
## for peppep in peptides:
## outputlist.append("-".join(peppep))
## return " ".join(outputlist)
#def CyclopeptideSequencing1(Spectrum):
# peptides = []
# AAs = []
# parentmass = max(Spectrum)
# AAmasslist = [57, 71, 87, 97, 99, 101, 103, 113, 114, 115, 128, 129, 131, 137, 147, 156, 163, 186]
# #remove 0 and parent mass from spectrum
# Spectrum.remove(0)
# Spectrum.remove(parentmass)
# #Build monopeptide list and start peptides list
# for mass in AAmasslist:
# if mass in Spectrum:
# peptides.append([mass])
# AAs.append(mass)
# #generate strings consistent with spectra, add all possibilitiess from
# #monopeptide/AA list and then remove combinations whose mass isn't
# #consistent with the spectra
# b=0
# looper = 0
# duppeps = copy.deepcopy(peptides)
# peptides = []
## while b < 1:
# while looper == 0:
# print(b)
# b += 1
# if peptides != []:
# duppeps = copy.deepcopy(peptides)
# for AA in AAs:
# subcopy = copy.deepcopy(AAs)
# for i in range(len(duppeps)):
## reverse = duppeps[i][::-1]
## reverse.insert(0,AA)
## duppeps[i].append(AA)
# for frag in duppeps[i]:
# subcopy.remove(frag)
# if AA in subcopy:
# templist = copy.deepcopy(duppeps[i])
# templist.append(AA)
# peptides.append(templist)
# subcopy = copy.deepcopy(AAs)
## peptides.append(reverse)
## peptides = copy.deepcopy(duppeps)
## print(peptides)
# peppeps = copy.deepcopy(peptides)
# for j in range(len(peppeps)):
# sumpeps = sum(peppeps[j])
# if sumpeps not in Spectrum and sumpeps != parentmass:
# peptides.remove(peppeps[j])
# if sumpeps == parentmass:
# looper += 1
# L = len(peppeps[j])
#
# print(peptides)
# peptidesolve = []
# for i in range(2, L):
# for j in range(len(peptides)):
# for k in range(len(peptides)):
# if len(peptides[j]) == len(peptides[k]) == i:
# if peptides[j][-1] == peptides[k][0]:
# plist = peptides[j][:]
# plist.extend(peptides[k][1:])
# peptidesolve.append(plist)
# print(peptidesolve)
# peptidetemp = copy.deepcopy(peptidesolve)
# for pep in peptidetemp:
# if pep in peptidesolve and len(pep) < L:
# peptidesolve.remove(pep)
# elif pep in peptidesolve and pep not in peptides:
# peptidesolve.remove(pep)
# print(peptidesolve)
# outputlist = []
# for p in peptidesolve:
# outputlist.append("-".join(list(map(str, p))))
# outputlist = set(outputlist)
# outputlist = list(outputlist)
# return " ".join(outputlist)
## sumpeps = sum(list(map(int, peppep[j])))
|
[
"jennifermichaud@Jennifers-MBP-5.hsd1.ca.comcast.net"
] |
jennifermichaud@Jennifers-MBP-5.hsd1.ca.comcast.net
|
20bd26d012b69d5dfc6723ff2ae9d1f80631982a
|
22cb5739c017767afc8b55d2b3c72d9d92dd25bf
|
/rent/home/migrations/0008_auto_20200427_1928.py
|
16d654a882cabf6f94db105e8ef3431321a414e9
|
[] |
no_license
|
Rochakdh/GetWheels
|
f2be91f688df9d58179adbf9248c663dfa9b044e
|
33dfffbf0c3de6c7c960d30b6f509c22fa07e440
|
refs/heads/master
| 2023-03-21T13:00:47.859683
| 2021-03-06T07:00:03
| 2021-03-06T07:00:03
| 276,622,563
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# Generated by Django 3.0.4 on 2020-04-27 13:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: make VehicleAvailable.slug unique."""
    # Must apply after the migration that added the user field.
    dependencies = [
        ('home', '0007_vehicleavailable_user'),
    ]
    operations = [
        migrations.AlterField(
            model_name='vehicleavailable',
            name='slug',
            field=models.SlugField(unique=True),
        ),
    ]
|
[
"rochakdh@gmail.com"
] |
rochakdh@gmail.com
|
3f459c98571bce7eed53ccbf2f805857aae44171
|
1cdd95a72c9b253fe7f6fc3fe0fd87c5e055a632
|
/apps/operation/__init__.py
|
2a4e6d4d5d8908cb6f40fd6781153b7fe1dbaad5
|
[] |
no_license
|
ShoutangYang/djangoOnline
|
b40d3d601b3c0096ae4e84a62b7ba5bffeeeaf0d
|
31838384d59e8b08561c17236151589276bef015
|
refs/heads/master
| 2020-03-14T22:28:17.491991
| 2018-06-25T16:12:11
| 2018-06-25T16:12:11
| 131,821,866
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
# Set the app config used for the app's user-visible display name.
default_app_config='operation.apps.OperationConfig'
|
[
"to_yst@163.com"
] |
to_yst@163.com
|
219fdfefbdd96987c78fcb73a76f371345edb5ec
|
62745a26c8bb36c4e0e0f36328059aa99201850c
|
/Fix_files.txt.py
|
48865ffb64e46454c34555dc395a84082a00c2d0
|
[] |
no_license
|
shubham1637/Orphan-CRISPR-project
|
134ccb0c01315db2e3b2e9af6269432c3729f7e7
|
549e343c7a0d835ed641c543501cd80af585ed80
|
refs/heads/master
| 2020-04-03T22:53:04.871620
| 2018-05-28T21:37:30
| 2018-05-28T21:37:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,830
|
py
|
import itertools
import re
#
# Labels = ["merge", "labels", "height"]
#
# for uno in Labels:
# with open("//panfs/pan1/orphancrispr/IslandsCluster/wg_output_" + str(uno) + ".txt","w") as File:
# count = 0
# print(uno)
# for line in open("//panfs/pan1/orphancrispr/IslandsCluster/wg_" + str(uno) + ".txt","r"):
# count +=1
# if count == 1:
# continue
# LineValues = line[:-1].split(' ')
# File.write('\t'.join(LineValues[1:]).replace('"', '') + '\n')
# sum = 0
# count = 0
# wg = []
# for line in open('//panfs/pan1/orphancrispr/IslandsCluster/weights2003.output', "r"):
# count +=1
# sum += float(line[:-1].split('\t')[1])
# wg.append(line[:-1].split('\t')[0])
# print(sum, ' ', count)
# with open('/panfs/pan1/orphancrispr/Spacers_from_finalOrphans.txt', "w") as File:
# for line in open('/panfs/pan1/orphancrispr/OrphanArrays_with_Spacers.txt', "r"):
# count = 0
# ID = line[:-1].split('\t')[0]
# Spacers = line[:-1].split('\t')[1].split(',')
# for spacer in Spacers:
# count+=1
# File.write('>' + ID + '_' + str(count) + '_' + "spacer" + '\n' +
# spacer + '\n')
#
# with open("/panfs/pan1/orphancrispr/SpacersFromArraysCluster/IdentifiedSpacerHits_filtered90-90.hits", "w") as FilteredSpacerHitsFile:
# for line in open("/panfs/pan1/orphancrispr/SpacersFromArraysCluster/Spacers_from_Identified_AllagainstAll.hits" ,"r"):
# if line[0] == "#":
# continue
# LineValues = line[:-1].split('\t')
# if float(LineValues[6]) >= 90.0 and float(LineValues[7]) >= 90.0:
# #print('YES' , ID[0], ' ', identity, '% ', coverage, '%')
# FilteredSpacerHitsFile.write(line)
# # Set1 = set()
# Set2 = set()
# List = []
# for line in open("//panfs/pan1/orphancrispr/IslandsCluster/IslandsID_spacers_withNumber.txt", "r"):
# Set1.add(line[:-1].split('\t')[0] )
# for line in open("//panfs/pan1/orphancrispr/IslandsCluster/IslandsID_pfam_withNumber.txt", "r"):
# List.append(line[:-1].split('\t')[0])
# Set2.add(line[:-1].split('\t')[0])
# List = list(Set2 - Set1)
# print(List)
# #
# SpacersBaseFileName = "/panfs/pan1.be-md.ncbi.nlm.nih.gov/prokdata/CRISPRicity/SpacerAnalysis/AllIteration/TmpFolder/Spacers.fna"
#
# with open('//panfs/pan1/orphancrispr/IslandsCluster/Missing31_arraySpacers.txt',"w") as File:
# with open(SpacersBaseFileName, "r") as f:
# for line1, line2 in itertools.zip_longest(*[f] * 2):
# for ID in List:
# Match = re.findall(ID, line1)
# if Match:
# print(Match)
# File.write(line1 + line2)
|
[
"utkinaira@Irinas-MacBook-Pro.local"
] |
utkinaira@Irinas-MacBook-Pro.local
|
e685a0cd0f729a503d834bf8df8e34a47a329fe2
|
8922075b4fd3179cec07f1b0f5dc47fc92533f8d
|
/task1.py
|
6954f1d5b22c93fe90c5d9e89ae5c2b0827adbf6
|
[] |
no_license
|
akbeki21/week4_thursday_task
|
2d647ff72adf9e31347f3d658355a727ed1e471b
|
53a375d82c4704265d09d691b3d00937274c2ec6
|
refs/heads/master
| 2023-01-21T21:21:46.768467
| 2020-11-07T17:26:56
| 2020-11-07T17:26:56
| 310,597,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,451
|
py
|
# Task 1
# def last_one():
# num =input("Enter any number:")
# print(num[-1])
# last_one()
# Task 2
# def isdigit():
# digit = input("Enter any number: ")
# digit = int(digit)
# if digit %2 ==0:
# print("This number is even")
# else:
# print("This number is odd")
# isdigit()
# Task 4
# def palindrom():
# word = input("Enter any word: ")
# if word[::-1] == word:
# print("This word is palindrom")
# else:
# print("This is not palindrom")
# palindrom()
# Task 6
# def sum_ofnumbers():
# num = input ('Введите трехзначное число : ')
# if len(num) < 3 or len(num) > 3 :
# print('Введите трехзначное число!!!')
# else:
# num = str(num)
# print(int(num[0]) + int(num[1]) + int(num[2]))
# sum_ofnumbers()
# Task 7
# def number_rank():
# num = input("Enter any number: ")
# print('Количество разрядов ' + str(len(num)))
# number_rank()
# Task 3
# def square () :
# num = input ('Введите число : ')
# squares = []
# for number in range (int(num)):
# number = number ** 2
# squares.append(str(number))
# print(','.join(squares))
# square()
# Task 5
# import datetime
# def check_date () :
# day = int(input("Введите день : "))
# month = int(input("Введите месяц : "))
# year = int(input("Введите год : "))
# try:
# data = datetime.date(year, month, day)
# print (data)
# print (True)
# except:
# print (False)
# check_date()
# Task 8
# def max_num () :
# nums = []
# bol = -1
# while True:
# try :
# n = input('("exit" чтобы выйти) Введите число: ')
# if n == 'exit':
# print('Максимальное число : ' + bol)
# break
# nums.append(n)
# for x in nums :
# if int(x) > int(bol) :
# bol = x
# except ValueError :
# raise TypeError ('Это программа принимает только числа!!')
# max_num()
# Task 9
def calculator():
    """Interactive four-function console calculator.

    Prompts for an operation and two integers, prints the result.
    Fix: the original ``while`` loop raised TypeError on its very first
    iteration for an invalid operation, so the user was never actually
    re-prompted; now it keeps asking until a valid operator is given.
    ZeroDivisionError / ValueError are still re-raised as TypeError to
    preserve the original error contract.
    """
    try:
        operations = ['+', '-', '*', '/']
        operation = input('| + | - | * | / | ')
        while operation not in operations:
            # re-prompt instead of raising immediately (original bug)
            print('Выбрана неверная операция!!')
            operation = input('| + | - | * | / | ')
        num1 = input('Введите первое число : ')
        num2 = input('Введите второе число : ')
        if operation == '+':
            res = int(num1) + int(num2)
        elif operation == '-':
            res = int(num1) - int(num2)
        elif operation == '*':
            res = int(num1) * int(num2)
        else:
            res = int(num1) / int(num2)
        print(f'Ответ : {res}')
    except ZeroDivisionError:
        raise TypeError('На ноль делить нельзя !!!')
    except ValueError:
        raise TypeError('Это программа принимает только цифры !!!')
    finally:
        # always printed, even when an error is raised
        print('Пока!')
calculator()
|
[
"akmatovabeka21@gmail.com"
] |
akmatovabeka21@gmail.com
|
10984f7273d2e462fc205946946f58747af35492
|
843136a88f04b9eb42dc967ea8cac7625cfaffef
|
/WDStartup/wsgi.py
|
cfd3c946f83b95707aaadb6259ce244f2cc3bb58
|
[] |
no_license
|
skrulling/WDStartup
|
66fdef4a3a791cd7a200127507cea7c5108d91b0
|
ae4c19bdcb23e80c46fea226346ecf5560aa2a29
|
refs/heads/master
| 2020-05-21T07:50:08.872998
| 2018-09-27T18:35:32
| 2018-09-27T18:35:32
| 56,911,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for WDStartup project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WDStartup.settings")
application = get_wsgi_application()
|
[
"mikkeleb@gmail.com"
] |
mikkeleb@gmail.com
|
f501d353de483ac071efcd002afb39410d8a1a5f
|
472d508190fd7167bd6dbd5d1680019dd7af993c
|
/Exercicios/Aula 12/036.py
|
d326128df61b915e8319094a24278c8ae0d7f557
|
[] |
no_license
|
FefAzvdo/Python-Training
|
f3b8cca72d47f641c504b19b60864cde829b1935
|
0e97d64c7c00bc1eae721a18af7bd96c2484ca34
|
refs/heads/master
| 2022-12-25T22:23:49.802465
| 2020-09-30T03:00:47
| 2020-09-30T03:00:47
| 298,328,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# Read two integers and report which is larger (or that they are equal).
n1 = int(input("Digite um número :"))
n2 = int(input("Digite outro número :"))
if n1 == n2:
    print(f"{n1} é igual a {n2}")
elif n1 > n2:
    print(f"{n1} é maior do que {n2}")
else:
    print(f"{n2} é maior do que {n1}")
|
[
"wfernandoaugusto01@gmail.com"
] |
wfernandoaugusto01@gmail.com
|
207e03c01c90dceb13e5e4fcc561d33cce794d0b
|
8439766cd9c4667f9cbd041baab9b6ef74312e1c
|
/BACKEND/main.py
|
61a4fad308bc52abe84cf27bfe4e94c7bdeab676
|
[] |
no_license
|
ifigueroa065/PROYECTO2_201904013
|
3881224d68a5a759ea0191a7181cc351793c4dc1
|
65e947e31c3b55608155837ce042a001e8056539
|
refs/heads/main
| 2023-01-06T11:30:29.351811
| 2020-11-09T09:06:22
| 2020-11-09T09:06:22
| 303,919,819
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,367
|
py
|
from flask import Flask,jsonify, request
from flask_cors import CORS
from Usuario import Us
from Canciones import Cancion
import json
from Comentarios import Comentario
from Playlist import Play
import re
app=Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend
# In-memory storage (no database): all state is lost when the process restarts.
cont_canciones=0    # next id to assign to an accepted song
cont_solicitudes=0  # next id to assign to a pending request
PLAYLIST=[]         # Play entries: (usuario, song id)
COMENTARIOS=[]      # Comentario entries: (text, usuario, song id)
USERS=[]            # Us entries: registered accounts
CANCIONES=[]        # accepted songs (Cancion)
SOLICITUDES=[]      # pending song requests (Cancion)
USERS.append(Us('Usuario','Maestro','admin','admin','0'))  # seed admin account; tipo '0' = administrator
@app.route('/', methods=['GET'])
def Rutainicial():
    """Landing route: plain-text greeting used as a liveness check."""
    return 'PAGINA INICIAL'
#VISTA GENERAL
@app.route('/Usuario', methods=['GET'])
def ObtenerPersonas():
    """List every registered user as a JSON array."""
    global USERS
    listado = [
        {
            'nombre': u.getNombre(),
            'apellido': u.getApellido(),
            'usuario': u.getUsuario(),
            'password': u.getPassword(),
            'tipo': u.getTipo()
        }
        for u in USERS
    ]
    return jsonify(listado)
#VISTA GENERAL CANCIONES
@app.route('/CANCIONES', methods=['GET'])
def MOSTRARCANCIONES():
    """List every accepted song as a JSON array."""
    global CANCIONES, cont_canciones
    catalogo = [
        {
            'nombre': c.getNombre(),
            'artista': c.getArtista(),
            'album': c.getAlbum(),
            'imagen': c.getImagen(),
            'fecha': c.getFecha(),
            'linkS': c.getLinkS(),
            'linkYT': c.getLinkYT(),
            'identificador': c.getID()
        }
        for c in CANCIONES
    ]
    return jsonify(catalogo)
#VISTA GENERAL DE SOLICITUDES
@app.route('/SOLICITUDES', methods=['GET'])
def MOSTRARSOLICITUDES():
    """List every pending song request as a JSON array."""
    global SOLICITUDES
    pendientes = [
        {
            'nombre': s.getNombre(),
            'artista': s.getArtista(),
            'album': s.getAlbum(),
            'imagen': s.getImagen(),
            'fecha': s.getFecha(),
            'linkS': s.getLinkS(),
            'linkYT': s.getLinkYT(),
            'identificador': s.getID()
        }
        for s in SOLICITUDES
    ]
    return jsonify(pendientes)
#VISTA GENERAL COMENTARIOS
@app.route('/COMENTARIOS', methods=['GET'])
def MOSTRARCOMENTS():
    """List every comment (for every song) as a JSON array."""
    global COMENTARIOS
    todos = [
        {
            'comentario': c.getComentario(),
            'usuario': c.getUsuario(),
            'id': c.getID()
        }
        for c in COMENTARIOS
    ]
    return jsonify(todos)
#VISTA GENERAL PLAYLIST
@app.route('/PLAYLIST', methods=['GET'])
def MOSTRARPLAYLIST():
    """List every (user, song id) playlist entry as a JSON array."""
    global PLAYLIST
    entradas = [
        {
            'usuario': p.getUsuario(),
            'id': p.getID()
        }
        for p in PLAYLIST
    ]
    return jsonify(entradas)
#BUSCAR NOMBRE ESPECIFICO
@app.route('/Usuario/<string:usuario>', methods=['GET'])
def ObtenerUsuario(usuario):
    """Return one user's record by username.

    Fix: the original referenced ``Temp2`` before assignment when the
    username did not exist (HTTP 500); an empty JSON object is now
    returned in that case.
    """
    global USERS
    Temp2 = {}  # default when no user matches
    for i in USERS:
        if i.getUsuario() == usuario:
            Temp2 = {
                'nombre': i.getNombre(),
                'apellido': i.getApellido(),
                'usuario': i.getUsuario(),
                'password': i.getPassword(),
                'tipo': i.getTipo()
            }
            break
    return jsonify(Temp2)
#BUSCAR CANCION ESPECIFICA
@app.route('/CANCIONES/<int:id>', methods=['GET'])
def CANCIONESPE(id):
    """Return one song's record by id.

    Fix: the original referenced ``Temp2`` before assignment when the id
    was unknown (HTTP 500); an empty JSON object is now returned instead.
    """
    global CANCIONES
    Temp2 = {}  # default when no song matches
    for i in CANCIONES:
        if i.getID() == id:
            Temp2 = {
                'nombre': i.getNombre(),
                'artista': i.getArtista(),
                'album': i.getAlbum(),
                'imagen': i.getImagen(),
                'fecha': i.getFecha(),
                'linkS': i.getLinkS(),
                'linkYT': i.getLinkYT()
            }
            break
    return jsonify(Temp2)
#MODIFICAR UN DATO ESPECIFICO
@app.route('/Usuario/<string:usuario>', methods=['PUT'])
def ActualizarUsuario(usuario):
    """Update the profile of user `usuario` from the JSON payload.

    Fixes over the original: ``DAT`` could be referenced before
    assignment (new username taken while passwords matched, or target
    user missing), and the per-iteration ``else`` overwrote ``DAT`` with
    the wrong 'EL USUARIO YA ESTÁ EN USO' message while scanning
    non-matching users.
    """
    global USERS
    Muser = request.json['usuario']      # requested (possibly new) username
    confirm = request.json['password']   # password confirmation
    contra = request.json['contraM']     # new password
    if contra != confirm:
        return jsonify({'message': 'Failed', 'reason': 'NO COINCIDEN CONTRASEÑAS'})
    # Reject if the new username already belongs to a *different* account.
    for u in USERS:
        if u.getUsuario() == Muser and Muser != usuario:
            return jsonify({'message': 'Failed', 'reason': 'EL USUARIO YA ESTÁ EN USO'})
    for u in USERS:
        if u.getUsuario() == usuario:
            u.setNombre(request.json['nombre'])
            u.setApellido(request.json['apellido'])
            u.setUsuario(Muser)
            u.setPassword(contra)
            return jsonify({'message': 'Sucess'})
    return jsonify({'message': 'Failed', 'reason': 'USUARIO NO ENCONTRADO'})
#BUSCAR COMENTARIOS ESPECÍFICOS
@app.route('/COMENTARIOS/<int:id>', methods=['GET'])
def COMENT(id):
    """List every comment attached to song `id`."""
    global COMENTARIOS
    resultado = [
        {
            'comentario': c.getComentario(),
            'usuario': c.getUsuario()
        }
        for c in COMENTARIOS
        if c.getID() == id
    ]
    return jsonify(resultado)
#BUSCAR PLAYLIST DE USUARIO ESPECÍFICO
@app.route('/PLAYLIST/<string:usuario>', methods=['GET'])
def VERPLAYLIST(usuario):
    """Return the full song records for every entry in a user's playlist."""
    global PLAYLIST, CANCIONES
    coleccion = []
    for entrada in PLAYLIST:
        if entrada.getUsuario() == usuario:
            buscada = entrada.getID()
            for cancion in CANCIONES:
                if buscada == cancion.getID():
                    coleccion.append({
                        'nombre': cancion.getNombre(),
                        'artista': cancion.getArtista(),
                        'album': cancion.getAlbum(),
                        'imagen': cancion.getImagen(),
                        'fecha': cancion.getFecha(),
                        'linkS': cancion.getLinkS(),
                        'linkYT': cancion.getLinkYT()
                    })
                    # stop at the first song matching this id
                    break
    return jsonify(coleccion)
#MODIFICAR CANCION ESPECÍFICA
@app.route('/CANCIONES/<int:id>', methods=['PUT'])
def ActualizarCANCION(id):
    """Update every field of song `id` from the JSON payload.

    Fixes over the original: the ``cor`` flag was always True so its
    else branch (with a copy-pasted user-related message) was dead code,
    and an unknown id left ``DAT`` unassigned, raising
    UnboundLocalError (HTTP 500).
    """
    global CANCIONES
    datos = request.json
    for c in CANCIONES:
        if c.getID() == id:
            c.setNombre(datos['nombre'])
            c.setArtista(datos['artista'])
            c.setAlbum(datos['album'])
            c.setImagen(datos['imagen'])
            c.setFecha(datos['fecha'])
            c.setLinkS(datos['linkS'])
            c.setLinkYT(datos['linkYT'])
            return jsonify({'message': 'Sucess'})
    return jsonify({'message': 'Failed', 'reason': 'CANCION NO ENCONTRADA'})
#ELIMINAR UN USUARIO ESPECIFICO
@app.route('/Usuario/<string:usuario>', methods=['DELETE'])
def EliminarUsuario(usuario):
    """Delete the first user whose username matches; always reports success."""
    global USERS
    for indice, u in enumerate(USERS):
        if u.getUsuario() == usuario:
            del USERS[indice]
            break
    return jsonify({'message': 'se eliminó usuario correctamente'})
#ELIMINAR UNA CANCION ESPECIFICA
@app.route('/CANCIONES/<int:id>', methods=['DELETE'])
def EliminarCancion(id):
    """Delete the first song whose id matches; always reports success."""
    global CANCIONES
    for indice, c in enumerate(CANCIONES):
        if c.getID() == id:
            del CANCIONES[indice]
            break
    return jsonify({'message': 'se eliminó cancion correctamente'})
#AGREGAR USUARIOS
@app.route('/Usuarios/', methods=['POST'])
def AgregarUsuario():
    """Append a new user built from the JSON payload (no duplicate check)."""
    global USERS
    datos = request.json
    nuevo = Us(datos['nombre'], datos['apellido'], datos['usuario'],
               datos['password'], datos['tipo'])
    USERS.append(nuevo)
    return 'SE AGREGO EL USUARIO'
#MÉTODO PARA LOGEAR USUARIOS
@app.route('/Login/', methods=['POST'])
def Login():
    """Authenticate by exact username/password match.

    Fix: ``Dato`` was only assigned inside the loop, so an empty USERS
    list raised NameError; the failure response is now the default and
    the loop only overwrites it on a match.
    """
    global USERS
    usuario = request.json['usuario']
    password = request.json['password']
    Dato = {
        'message': 'Failed',
        'usuario': ''
    }
    for u in USERS:
        if u.getUsuario() == usuario and u.getPassword() == password:
            Dato = {
                'message': 'Sucess',
                'usuario': u.getUsuario(),
                'tipo': u.getTipo()
            }
            break
    return jsonify(Dato)
#MÉTODO RECUPERAR CONTRASEÑA
@app.route('/Recuperar/', methods=['POST'])
def Recuperar():
    """Password 'recovery': returns the stored password for a username.

    Fix: ``Dato`` was only assigned inside the loop, so an empty USERS
    list raised NameError; the failure response is now the default.
    NOTE(review): this returns the plaintext password to the caller —
    insecure by design in this toy backend; flagged, not changed.
    """
    global USERS
    usuario = request.json['usuarioR']
    Dato = {
        'message': 'Failed',
        'lol': 'no, no hay'
    }
    for u in USERS:
        if u.getUsuario() == usuario:
            Dato = {
                'message': 'Sucess',
                'lol': u.getPassword()
            }
            break
    return jsonify(Dato)
#REGISTRAR TIPO USUARIO
@app.route('/Registrar/', methods=['POST'])
def SIGNUP():
    """Register a regular user (tipo "1").

    Fix: in the original, when the two passwords did NOT match, the
    duplicate-check loop was skipped but ``condicion`` stayed True, so
    the user was registered anyway and 'Sucess' was returned — the
    'NO COINCIDEN CONTRASEÑAS' branch was unreachable.
    """
    global USERS
    nombreR = request.json['nombreR']
    apellidoR = request.json['apellidoR']
    usuarioR = request.json['usuarioR']
    contraR = request.json['contraR']
    contrasR = request.json['contraRR']
    tipo = "1"
    if contraR != contrasR:
        Dat = {'message': 'Failed', 'motivo': 'NO COINCIDEN CONTRASEÑAS'}
    elif any(u.getUsuario() == usuarioR for u in USERS):
        Dat = {'message': 'Failed', 'motivo': 'EL USUARIO YA EXISTE'}
    else:
        USERS.append(Us(nombreR, apellidoR, usuarioR, contraR, tipo))
        Dat = {'message': 'Sucess'}
    return jsonify(Dat)
#REGISTRAR TIPO ADMINISTRADOR
@app.route('/RegistrarADMIN/', methods=['POST'])
def SIGNUPADMIN():
    """Register an administrator account (tipo "0").

    Fix: same defect as SIGNUP — a password mismatch skipped the
    duplicate check but still registered the user and returned
    'Sucess'; the mismatch branch was unreachable.
    """
    global USERS
    nombreR = request.json['nombreR']
    apellidoR = request.json['apellidoR']
    usuarioR = request.json['usuarioR']
    contraR = request.json['contraR']
    contrasR = request.json['contraRR']
    tipo = "0"
    if contraR != contrasR:
        Dat = {'message': 'Failed', 'motivo': 'NO COINCIDEN CONTRASEÑAS'}
    elif any(u.getUsuario() == usuarioR for u in USERS):
        Dat = {'message': 'Failed', 'motivo': 'EL USUARIO YA EXISTE'}
    else:
        USERS.append(Us(nombreR, apellidoR, usuarioR, contraR, tipo))
        Dat = {'message': 'Sucess'}
    return jsonify(Dat)
#SOLICITAR UNA CANCION
@app.route('/SOLICITAR/', methods=['POST'])
def CrearSolicitud():
    """Queue a new song request built from the JSON payload."""
    global SOLICITUDES, cont_solicitudes
    datos = request.json
    solicitud = Cancion(datos['NombreS'], datos['ArtistaS'], datos['AlbumS'],
                        datos['ImagenS'], datos['FechaS'], datos['LinkSS'],
                        datos['LinkYTS'], cont_solicitudes)
    SOLICITUDES.append(solicitud)
    cont_solicitudes += 1
    return jsonify({'message': 'SE CREÓ SOLICITUD'})
#AGREGAR UNA CANCION
@app.route('/CANCIONES/', methods=['POST'])
def AGREGARCANCION():
    """Add a song directly to the catalogue from the JSON payload."""
    global CANCIONES, cont_canciones
    datos = request.json
    cancion = Cancion(datos['NombreS'], datos['ArtistaS'], datos['AlbumS'],
                      datos['ImagenS'], datos['FechaS'], datos['LinkSS'],
                      datos['LinkYTS'], cont_canciones)
    CANCIONES.append(cancion)
    cont_canciones += 1
    return jsonify({'message': 'SE CARGÓ UNA CANCION'})
#AGREGAR UNA CANCION DE SOLICITUDES
@app.route('/SOLICITUDES/<int:id>', methods=['POST'])
def AGREGARSOLICITUD(id):
    """Promote a pending request to the song catalogue and drop it from the queue.

    Fix: the original raised NameError (``nombre`` etc. unassigned) when
    the id did not match any pending request; it now returns a failure
    message instead of HTTP 500.
    """
    global CANCIONES, cont_canciones
    global SOLICITUDES, cont_solicitudes
    solicitud = None
    for s in SOLICITUDES:
        if s.getID() == id:
            solicitud = s
            break
    if solicitud is None:
        return jsonify({'message': 'Failed', 'reason': 'NO EXISTE LA SOLICITUD'})
    nueva = Cancion(solicitud.getNombre(), solicitud.getArtista(),
                    solicitud.getAlbum(), solicitud.getImagen(),
                    solicitud.getFecha(), solicitud.getLinkS(),
                    solicitud.getLinkYT(), cont_canciones)
    CANCIONES.append(nueva)
    cont_canciones += 1
    SOLICITUDES.remove(solicitud)
    return jsonify({'message': 'SE ACEPTÓ UNA CANCION'})
#RECHAZAR UNA SOLICITUD
@app.route('/SOLICITUDES/<int:id>', methods=['DELETE'])
def RECHAZARSOLICITUD(id):
    """Remove a pending request without promoting it; always reports success."""
    global SOLICITUDES, cont_solicitudes
    for indice, s in enumerate(SOLICITUDES):
        if s.getID() == id:
            del SOLICITUDES[indice]
            break
    return jsonify({'message': 'SE RECHAZÓ LA CANCION'})
#AGREGAR COMENTARIOS
@app.route('/COMENTARIO/<int:id>', methods=['POST'])
def SAVECOMENT(id):
    """Attach a comment (text + author) to song `id`."""
    global COMENTARIOS
    nuevo = Comentario(request.json['comentario'], request.json['usuario'], id)
    COMENTARIOS.append(nuevo)
    return jsonify({'message': 'SE AGREGÓ EL COMENTARIO'})
#AGREGAR CANCION A PLAYLIST
@app.route('/PLAYLIST/<int:id>', methods=['POST'])
def SAVECANCION(id):
    """Add song `id` to the requesting user's playlist, rejecting duplicates."""
    global PLAYLIST, CANCIONES
    usuario = request.json['usuario']
    duplicada = False
    for entrada in PLAYLIST:
        if entrada.getUsuario() == usuario:
            if entrada.getID() == id:
                duplicada = True
                break
    if duplicada:
        DATO = {
            'message': 'Failed'
        }
    else:
        PLAYLIST.append(Play(usuario, id))
        DATO = {
            'message': 'Sucess'
        }
    return jsonify(DATO)
#BUSCADOR
@app.route('/BUSCAR/<string:word>', methods=['GET'])
def BUSCADOR(word):
    """Case-sensitive substring search over song names."""
    global CANCIONES
    encontradas = []
    for cancion in CANCIONES:
        # substring containment — same result as str.find(...) >= 0
        if word in cancion.getNombre():
            encontradas.append({
                'nombre': cancion.getNombre(),
                'artista': cancion.getArtista(),
                'album': cancion.getAlbum(),
                'imagen': cancion.getImagen(),
                'fecha': cancion.getFecha(),
                'linkS': cancion.getLinkS(),
                'linkYT': cancion.getLinkYT(),
                'identificador': cancion.getID()
            })
    return jsonify(encontradas)
if __name__ == "__main__":
app.run(port=3000,debug=True)
|
[
"isaiimiff13@gmail.com"
] |
isaiimiff13@gmail.com
|
a6fb847c0765aa03676507099ad81c8f60fdb6a0
|
fff64930b1cf4f1d99062903cc5bea941bc5eb95
|
/viikko2/verkkokauppa-1/src/varasto.py
|
cfe16e83bd0b556e93537999743a9fbdde64f659
|
[] |
no_license
|
jpiiroin/ohtu-tehtavat
|
a2879053e9a42fea47ad289a273c37ef9234b0c1
|
2b205d7cc9d593ed73cf4a25c048bf942bb34e60
|
refs/heads/main
| 2023-04-02T00:30:53.535820
| 2021-04-09T15:26:27
| 2021-04-09T15:26:27
| 337,110,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,285
|
py
|
from tuote import Tuote
from kirjanpito import kirjanpito as default_kirjanpito
class Varasto:
    """In-memory warehouse: tracks per-product stock and logs every
    take/return through the injected `kirjanpito` (bookkeeping) object."""

    def __init__(self, kirjanpito=default_kirjanpito):
        self._kirjanpito = kirjanpito
        self._saldot = {}
        self._alusta_tuotteet()

    def hae_tuote(self, id):
        """Return the stored product with the given id, or None."""
        for tuote in self._saldot:
            if tuote.id == id:
                return tuote
        return None

    def saldo(self, id):
        """Return the current stock level of the product with this id."""
        return self._saldot[self.hae_tuote(id)]

    def ota_varastosta(self, tuote):
        """Take one unit of `tuote` out of stock and log the event."""
        self._saldot[tuote] = self.saldo(tuote.id) - 1
        self._kirjanpito.lisaa_tapahtuma(f"otettiin varastosta {tuote}")

    def palauta_varastoon(self, tuote):
        """Put one unit of `tuote` back into stock and log the event."""
        self._saldot[tuote] = self.saldo(tuote.id) + 1
        self._kirjanpito.lisaa_tapahtuma(f"palautettiin varastoon {tuote}")

    def _alusta_tuotteet(self):
        """Seed the warehouse with its initial product catalogue."""
        alkuvarasto = (
            (Tuote(1, "Koff Portteri", 3), 100),
            (Tuote(2, "Fink Bräu I", 1), 25),
            (Tuote(3, "Sierra Nevada Pale Ale", 5), 30),
            (Tuote(4, "Mikkeller not just another Wit", 7), 40),
            (Tuote(5, "Weihenstephaner Hefeweisse", 4), 15),
        )
        for tuote, maara in alkuvarasto:
            self._saldot[tuote] = maara
varasto = Varasto()
|
[
"jpiiroin@hotmail.com"
] |
jpiiroin@hotmail.com
|
0275fc13ee1c1285d19946c7a42126d679e48c48
|
adb0dd445fe9dc72f7f940be12909769a422f0bb
|
/sample.py
|
731c0848870fcc51bf47f38ad503bc73f7762c37
|
[
"MIT"
] |
permissive
|
yng87/Gasyori100knock
|
40fd5c20043dd3822b183b6a4fb9d33c5c497be2
|
cdf5e375e8f882420ceb54d170d9c015dcd50722
|
refs/heads/master
| 2020-12-27T04:30:33.106121
| 2020-02-05T02:55:55
| 2020-02-05T02:55:55
| 237,765,829
| 0
| 0
|
MIT
| 2020-02-02T12:11:23
| 2020-02-02T12:11:22
| null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
import cv2
import numpy as np
# Load the sample image and work on a float32 copy so pixel values may exceed 255.
img = cv2.imread("assets/imori.jpg")
img2= img.copy().astype(np.float32)
# Paint the blue channel of a square region with 260 — deliberately above uint8 max.
img2[60:100, 60:100, 0] = 260
# First preview: astype(np.uint8) wraps 260 -> 4, producing a visible overflow artifact.
cv2.imshow("imori", img2.astype(np.uint8))
cv2.waitKey(0)
cv2.destroyAllWindows()
# Clamp out-of-range values to 255, then preview again without the artifact.
img2[np.where(img2 > 255)] = 255
cv2.imshow("imori", img2.astype(np.uint8))
cv2.waitKey(0)
cv2.destroyAllWindows()
# NOTE(review): imwrite receives the float32 array — confirm OpenCV's implicit
# conversion on save is the intended behavior.
cv2.imwrite("sample.jpg", img2)
|
[
"k.yanagi07@gmail.com"
] |
k.yanagi07@gmail.com
|
a99571385b1152a602ec8571c137d01f376a98f9
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_212/ch11_2020_04_12_13_52_22_999006.py
|
63db066fd32828470eb88e06314bcd1d455f5303
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
def distancia_euclidiana(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2).

    Fix: the original subtracted mismatched coordinates (x1 - y1 and
    x2 - y2) and returned that raw pair instead of computing a distance.
    """
    return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
|
[
"you@example.com"
] |
you@example.com
|
b8d4164de6616526a5aee2128aa4ece67648f082
|
1b66406251b3480403e452352c17a0cccd009bf6
|
/PAT (Advanced Level) Practice/1003Emergency.py
|
59f8849c52d5184f2c54580c1cd72da47654b0eb
|
[] |
no_license
|
NeoFantom/PAT
|
eb997166382e01006fb73b070e458bb6ead40a29
|
bd2fd76c40f7e1779240168dc87989685c295e8e
|
refs/heads/master
| 2023-03-14T10:39:09.010227
| 2021-03-03T15:49:36
| 2021-03-03T15:49:36
| 293,193,209
| 1
| 0
| null | 2020-09-06T03:05:16
| 2020-09-06T03:05:15
| null |
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
from queue import PriorityQueue
def inputs():
    """Read one stdin line of space-separated integers into a list."""
    return list(map(int, input().split(' ')))
def dijkstra(graph, rescues, s, d):
    """Dijkstra from city `s` to `d` over adjacency-dict `graph`.

    Returns (number of distinct shortest paths, maximum total rescue
    teams gatherable along a shortest path), or None if `d` is
    unreachable.

    Fix: the original set ``visited[s] = True`` inside the loop instead
    of ``visited[city]``, so non-source cities were never marked visited
    and stale queue entries could be relaxed again, corrupting the path
    and rescue counts.
    """
    lenG = len(graph)
    pathCount = [0] * lenG
    rescueCount = [0] * lenG
    dist = [0x4fffffff] * lenG   # effectively infinity
    visited = [False] * lenG
    q = PriorityQueue(lenG ** 2)
    pathCount[s] = 1
    rescueCount[s] = rescues[s]
    dist[s] = 0
    q.put((0, s))
    while not q.empty():
        _, city = q.get()
        if not visited[city]:
            visited[city] = True  # fix: was visited[s]
            if city == d:
                return pathCount[d], rescueCount[d]
            for tocity, distance in graph[city].items():
                if dist[city] + distance < dist[tocity]:
                    # strictly shorter route found: inherit counts
                    pathCount[tocity] = pathCount[city]
                    rescueCount[tocity] = rescueCount[city] + rescues[tocity]
                    dist[tocity] = dist[city] + distance
                    q.put((dist[tocity], tocity))
                elif dist[city] + distance == dist[tocity]:
                    # equally short route: keep the better rescue total,
                    # accumulate the path count
                    if rescueCount[city] + rescues[tocity] > rescueCount[tocity]:
                        rescueCount[tocity] = rescueCount[city] + rescues[tocity]
                    pathCount[tocity] += pathCount[city]
# =========================main=========================
N, M, C1, C2 = inputs()
numberOfRrescues = inputs()
assert len(numberOfRrescues) == N
graph = [{} for i in range(N)]
for i in range(M):
c1, c2, L = inputs()
graph[c1][c2] = L
graph[c2][c1] = L
print(*dijkstra(graph, numberOfRrescues, C1, C2))
|
[
"neohuiqixue@foxmail.com"
] |
neohuiqixue@foxmail.com
|
3c95a2c8629a2f7a82ff4e1e0db977aee608c498
|
681c7bd995adb41cf8400c4848f4be3c182ced47
|
/l2tteBE/chat/consumers.py
|
ccd22e6393b7258330a9016dc7bea075cbedbd1b
|
[] |
no_license
|
imdangodaane/l2tte
|
fa201df4375bbe85fe2153f0989851f0d6b8dd3e
|
c582519e95492af57e4cf612b377967bb36e152a
|
refs/heads/master
| 2022-06-28T05:32:52.346823
| 2019-12-01T16:02:18
| 2019-12-01T16:02:18
| 225,188,690
| 0
| 0
| null | 2022-05-26T21:26:24
| 2019-12-01T16:01:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,572
|
py
|
from channels.generic.websocket import AsyncWebsocketConsumer
from api.account.models import Login, Token
from asgiref.sync import async_to_sync
from .models import ChatBoxMessageModel
import json
class ChatConsumer(AsyncWebsocketConsumer):
    """Channels websocket consumer that relays chat messages.

    Each connection joins a channel-layer group named after the
    ``chat_room_type`` URL kwarg; a message received on one socket is
    broadcast to every consumer in the same group.
    """

    async def connect(self):
        """Join the room group derived from the URL and accept the socket."""
        # a = self.scope['url_route']['kwargs']['chat_room_type']
        # print('chat_room_type: ', a)
        # self.room_name = 'latte-chat-room'
        self.room_name = self.scope['url_route']['kwargs']['chat_room_type']
        self.room_group_name = 'chat_%s' % self.room_name

        # Join room group
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )

        await self.accept()

    async def disconnect(self, close_code):
        """Leave the room group when the socket closes."""
        # Leave room group
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )

    async def receive(self, text_data):
        """Parse an incoming JSON frame and broadcast its 'message' to the group."""
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        print('Server received:', message)
        # Send message to room group; 'type' routes to chat_message() below.
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'chat_message',
                'message': message
            }
        )

    # Receive message from room group
    async def chat_message(self, event):
        """Group-event handler: forward the broadcast message to this websocket."""
        message = event['message']
        # Send message to WebSocket
        await self.send(text_data=json.dumps({
            'message': message
        }))
|
[
"imdangodaane@gmail.com"
] |
imdangodaane@gmail.com
|
bf6ddec1223087c1b1cf8e30b49b6d980d81ac61
|
71e831b3cd97d027c3961ef735a141aaf6266be8
|
/setup.py
|
fdddc00670362ca2cd025105b920186d45c9ffee
|
[] |
no_license
|
sokolovdp/translator
|
3b38ba9236923340f59bd2c6d17f14b1fbb41ff3
|
5d8a8a009bb0e11d40391367fbe1d95d45babc3b
|
refs/heads/master
| 2021-07-08T15:02:57.462824
| 2017-10-06T10:21:06
| 2017-10-06T10:21:06
| 105,991,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
#!/usr/bin/env python
# encoding: utf-8
# from distutils.core import setup
from setuptools import setup
# Blurb shown as the long_description on package indexes.
long_description = 'This a simple text translator, which can use Yandex or MyMemory translation services'
setup(
    name='translator',
    version="1.0.1",
    author='Dmitrii Sokolov',
    author_email='sokolovdp@gmail.com',
    description="Simple text translator",
    long_description=long_description,
    # NOTE(review): url=None — setuptools expects a string here; consider the repo URL.
    url=None,
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Intended Audience :: Education',
                 'Intended Audience :: End Users/Desktop',
                 'License :: Freeware',
                 'Operating System :: POSIX',
                 'Operating System :: Microsoft :: Windows',
                 'Operating System :: MacOS :: MacOS X',
                 'Topic :: Education',
                 'Programming Language :: Python',
                 'Programming Language :: Python :: 3.2',
                 'Programming Language :: Python :: 3.3',
                 'Programming Language :: Python :: 3.4',
                 'Programming Language :: Python :: 3.5',
                 'Programming Language :: Python :: 3.6'],
    # Runtime dependency for the HTTP calls to the translation services.
    install_requires=['requests',],
    # Installed as a plain script rather than a console_scripts entry point.
    scripts=['translator.py'],
)
|
[
"sokolovdp@gmail.com"
] |
sokolovdp@gmail.com
|
e6ce796941dee8ab5b65a845430c95a51b4abf11
|
048abdd7e0676cf5ed9134f23858e462bf954901
|
/day03_Matplotlib数据可视化/venv/bin/tensorboard
|
35118ca358abebaf3d2071b69f889cc41bc55508
|
[] |
no_license
|
MirduSandy/Tersonflow2
|
0f6886021e95b85a04c7c592ee3f5a0b20c8dc75
|
7d380a9ae72d2bd5dac720ae4e6a28c409013560
|
refs/heads/master
| 2022-11-28T02:18:58.879018
| 2020-08-08T17:51:05
| 2020-08-08T17:51:05
| 273,542,070
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
#!/Users/a1/Documents/Projects/Python/day03_Matplotlib数据可视化/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for the `tensorboard` entry point.
import re
import sys
from tensorboard.main import run_main
if __name__ == '__main__':
    # Normalize argv[0]: strip a trailing '-script.py'/'-script.pyw'/'.exe'
    # suffix so tensorboard reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_main())
|
[
"2367738403@qq.com"
] |
2367738403@qq.com
|
|
b025683518d73f680071522ffc13d707ebcf92cc
|
0518d2845240436fac9b34aa068dead635cede31
|
/test/test_add_project.py
|
2b36e924151c7b163e25e805beb08976e9da6a55
|
[] |
no_license
|
bogdanova1/python_training_mantis
|
3e01a61d0a23b5e74479c4a28851053228b31483
|
508a71a49cebf6bc7fe9d7e861480a51e9548e82
|
refs/heads/master
| 2021-03-12T14:20:47.279070
| 2020-03-22T14:16:11
| 2020-03-22T14:16:11
| 246,628,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
from model.project import Project
def test_add_project(app, db):
    """Create a project with a random name and verify it appears in the list.

    Pre-cleans any project that already has the generated name, then
    compares the SOAP project list before and after creation.
    """
    username = app.config["webadmin"]["username"]  #administrator"
    password = app.config["webadmin"]["password"]  #"root"
    app.session.login(username, password)
    name = app.random_string("Name", 10)
    existing = db.get_id_project_by_name(name)
    if existing is not None:
        app.project.delete_project_by_id(existing[0])
    project = Project(name=name)
    before = app.soap.get_project_list_from_soap(username, password)  #db.get_project_list()
    app.project.create(project)
    after = app.soap.get_project_list_from_soap(username, password)  #db.get_project_list()
    before.append(project)
    assert sorted(before, key=Project.id_or_max) == sorted(after, key=Project.id_or_max)
|
[
"Natalia.Bogdanova@firstlinesoftware.com"
] |
Natalia.Bogdanova@firstlinesoftware.com
|
c0f4f9ad2aea75b19e5d011df5a178bc5d9e350d
|
79dab81736be472e94485ddc7c4a21c0aae07c07
|
/Backend/minProject/blog/serializers.py
|
39166b517bc9c83da3afb3756f6da7b45c3168b1
|
[] |
no_license
|
Pjambhale01/LibraryManagementSystem
|
9076cdaa447cf1fde9b15918329ba9c4cd05cc2b
|
1a42b7b3f09b4d8f0e509a66412b60a7a9517e2c
|
refs/heads/master
| 2023-08-22T04:19:32.968782
| 2021-10-29T19:16:40
| 2021-10-29T19:16:40
| 422,687,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
from django.contrib.auth import models
from django.db.models import fields
from rest_framework import serializers
from django.contrib.auth.models import User
from rest_framework.validators import UniqueValidator
from .models import Blogs
class UserSerializers(serializers.ModelSerializer):
    """Serializer for Django auth Users.

    Enforces unique username and email at validation time; the password
    is accepted on input but never echoed back (write_only).
    """
    username = serializers.CharField(required=True,validators=[UniqueValidator(queryset=User.objects.all())])
    email = serializers.EmailField(required=True,validators=[UniqueValidator(queryset=User.objects.all())])
    password = serializers.CharField(required=True, write_only=True)
    class Meta:
        model = User
        fields = ['id','username','email','password']
    # def get_first_name(self,obj):
    #     return obj.username.capitalize()
class BlogSerializers(serializers.ModelSerializer):
    """Serializer exposing the Blogs model's core fields, including its owning user."""
    class Meta:
        model = Blogs
        fields = ["id","title","discription","created_at","user"]
|
[
"jamble085@gmail.com"
] |
jamble085@gmail.com
|
a9052cfede6cd3dfe1bef8ee2bdef09dd268cd4c
|
62dd63d1c0fab618575d00882122004b301d978d
|
/RTS/applyAlembicOnSelected_v03.py
|
178d7828e55c9a49d152ac5db1de6075a772a37c
|
[] |
no_license
|
benjamintomad/modo-scripts
|
4cb241ce9b7614a597581d3f6bef9e8ad55f9c36
|
a1b27eb7c3f7e5b2671ab4e8e871dea0a5835abc
|
refs/heads/master
| 2021-01-16T19:16:51.897211
| 2016-01-13T16:33:36
| 2016-01-13T16:33:36
| 21,005,240
| 0
| 0
| null | 2015-03-24T16:27:02
| 2014-06-19T15:21:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
#python
# Apply the latest published Alembic point-cache (.abc) to the meshes under
# each selected locator, adding an ABCstreamDeform deformer where missing.
# (Python 2 / modo scripting environment: `lx` is injected, dict.iteritems.)
import modo
import os
import re
scene = modo.scene.current()
basepath = r'W:\RTS\Caches\tch'
endpath = r'publish\maya\pcache'
seq = scene.name.split('_')[0]
shot = seq + '_' + scene.name.split('_')[1]
for i in scene.selected:
    cacheForMesh = {}
    # Fix: `versions` must restart empty for every selected locator; the
    # original accumulated version numbers across the whole selection, so a
    # later asset could resolve to an earlier asset's (higher) version.
    versions = []
    # checks the caches on the drive
    if i.type == 'locator' or i.type == 'wtdloc':
        asset = i.name.split('_')[1]
        folder = os.path.join(basepath, seq, shot, endpath)
        for file in os.listdir(folder):
            if file.endswith(".abc") and asset in file:
                versions.append(int(file.replace('.abc', '').split('_')[-1].replace('v', '')))
        lastversion = 'v' + str(max(versions)).zfill(3)
        for file in os.listdir(folder):
            if file.endswith(".abc") and asset in file and lastversion in file:
                abcfile = os.path.join(folder, file)
        # applies the cache
        abclist = lx.eval('alembicinfo "%s" ?' % abcfile)
        meshes = i.children(recursive=True, itemType="mesh")
        for alembic in abclist:
            # map each alembic path to the scene mesh whose name matches
            shortname = alembic.replace('Deformed', "").replace('Shape', "").split('/')[-1].replace('_MSH', "")
            for mesh in meshes:
                if re.search(shortname, mesh.name, re.IGNORECASE) or shortname in mesh.name:
                    cacheForMesh[mesh] = alembic
        for key, value in cacheForMesh.iteritems():
            scene.select(key)
            if len(key.deformers) == 0:
                lx.eval('item.addDeformer ABCstreamDeform')
        for mesh in meshes:
            if len(mesh.deformers) == 1:
                abc = mesh.deformers[0]
                abc.channel('filePath').set(abcfile)
                abc.channel('scale').set(0.01)
                for key, value in cacheForMesh.iteritems():
                    if key == mesh:
                        abc.channel('itemPath').set(value)
|
[
"benjamin.tomad@gmail.com"
] |
benjamin.tomad@gmail.com
|
0509e5bf1e76eaa4213818e9501d2ba02ed8c58f
|
d9fa1e903ab6057b8841268a35c76fd2955b57f0
|
/no_live/utils_no_live.py
|
ad0b45a0493f69015b3f12d3f1152eb82ac41f49
|
[
"MIT"
] |
permissive
|
mepearson/Dash
|
d4c770209d231218c9cc6a9d245718133fe8c21e
|
30257d8c6298bee32105f46207bf3dbfbda178c3
|
refs/heads/master
| 2020-07-20T06:52:15.512682
| 2020-04-06T19:26:42
| 2020-04-06T19:26:42
| 206,593,624
| 0
| 1
|
MIT
| 2019-09-10T13:20:02
| 2019-09-05T15:12:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,544
|
py
|
from urllib.parse import urlparse, parse_qs
import os
from sqlalchemy import create_engine
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import dash_table as dt
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
##New Libraries
from sqlalchemy import create_engine
def parse_search(search, key):
    """Extract the first value of query parameter `key` from a URL.

    Returns the value as a string, or None when the parameter is absent.
    """
    params = parse_qs(urlparse(search).query)
    if key not in params:
        return None
    value = params[key][0]
    print("returing {}".format(value))
    return value
# Connection settings come from the environment; importing this module raises
# KeyError (fails fast) if any DATABASE_* variable is missing.
DATABASES = {
    'production':{
        'NAME': os.environ['DATABASE_NAME'],
        'USER': os.environ['DATABASE_USER'],
        'PASSWORD': os.environ['DATABASE_PASSWORD'],
        'HOST': os.environ['DATABASE_HOSTNAME'],
        'PORT': 5432,
    },
}
# DATABASES = {
#     'production':{
#         'NAME': 'publicingestion',
#         'USER': 'testing',
#         'PASSWORD': '',
#         'HOST': 'aws1.mint.isi.edu',
#         'PORT': 5432,
#     },
# }
# choose the database to use
db = DATABASES['production']
# construct an engine connection string
engine_string = "postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}".format(
    user = db['USER'],
    password = db['PASSWORD'],
    host = db['HOST'],
    port = db['PORT'],
    database = db['NAME'],
)
# Module-level SQLAlchemy engine shared by the importing application.
con = create_engine(engine_string)
|
[
"maxiosorio@gmail.com"
] |
maxiosorio@gmail.com
|
57b4f9c19b39642979779df10ecd081cde515fd3
|
edd75f46663df54ad9f73ea69119ef1717f40497
|
/Tuples.py
|
81e2bed0c931f67c1ade7f89bdc02f81826a16cf
|
[] |
no_license
|
Titchy15/HackerRankPython
|
14de49ed632e0e81d23d56ba057d462a9c91612b
|
629ddfe8e68a1774dc67b79685311eafda425744
|
refs/heads/master
| 2022-10-26T22:16:20.653317
| 2020-06-18T07:36:56
| 2020-06-18T07:36:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
if __name__ == '__main__':
    # The first stdin line (element count) is consumed but not needed.
    n = int(input())
    values = input().split()
    # Hash the integers as an immutable tuple.
    print(hash(tuple(map(int, values))))
|
[
"noreply@github.com"
] |
Titchy15.noreply@github.com
|
7dc02c16f177c0e093640dd68dd9b794b75d7b47
|
35a0480f0562a966daca155ac36b481fc2b6396f
|
/venv/Lib/site-packages/pipenv/patched/notpip/_vendor/urllib3/util/ssl_.py
|
ef320b35ac44f22a9cff36488e2b18337bf6ddc8
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"Apache-2.0"
] |
permissive
|
ryantan-sk/mudkip-discordbot
|
14cae2cdf8496bc610541f81b82def353ab8f7b3
|
2b7e9fd238636ec624996f6c54293e54795e26b4
|
refs/heads/master
| 2020-08-29T05:19:48.597640
| 2019-11-19T15:14:01
| 2019-11-19T15:14:01
| 217,940,688
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,055
|
py
|
from __future__ import absolute_import
import errno
import warnings
import hmac
import socket
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
from ..packages import six
# Placeholders; overwritten below when the running interpreter's ssl module
# (or an alternate TLS backend) provides the corresponding feature.
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
IS_SECURETRANSPORT = False

# Maps the length of a digest to a possible hash function producing this digest
# (lengths are of the *hex* fingerprint string: 32 -> MD5, 40 -> SHA-1,
# 64 -> SHA-256).
HASHFUNC_MAP = {
    32: md5,
    40: sha1,
    64: sha256,
}
def _const_compare_digest_backport(a, b):
"""
Compare two digests of equal length in constant time.
The digests must be of type str/bytes.
Returns True if the digests match, and False otherwise.
"""
result = abs(len(a) - len(b))
for l, r in zip(bytearray(a), bytearray(b)):
result |= l ^ r
return result == 0
# Prefer the stdlib constant-time comparison (hmac.compare_digest, available
# on 2.7.7+/3.3+); fall back to the pure-Python backport above.
_const_compare_digest = getattr(hmac, 'compare_digest',
                                _const_compare_digest_backport)

try:  # Test for SSL features
    import ssl
    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
    from ssl import HAS_SNI  # Has SNI?
except ImportError:
    pass

try:
    from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
    # Older Pythons lack these names; fall back to the raw OpenSSL option
    # bit values.
    OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
    OP_NO_COMPRESSION = 0x20000

# Python 2.7 and earlier didn't have inet_pton on non-Linux
# so we fallback on inet_aton in those cases. This means that
# we can only detect IPv4 addresses in this case.
if hasattr(socket, 'inet_pton'):
    inet_pton = socket.inet_pton
else:
    # Maybe we can use ipaddress if the user has urllib3[secure]?
    try:
        from pipenv.patched.notpip._vendor import ipaddress

        def inet_pton(_, host):
            # ipaddress wants text; IDN A-label bytes are ASCII-compatible.
            if isinstance(host, six.binary_type):
                host = host.decode('ascii')
            return ipaddress.ip_address(host)
    except ImportError:  # Platform-specific: Non-Linux
        def inet_pton(_, host):
            # Last resort: inet_aton only understands IPv4 literals.
            return socket.inet_aton(host)
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The base intent is:
# - Prefer TLS 1.3 cipher suites
# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
#   security,
# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
#
# Joined with ':' to form a single OpenSSL cipher-list string for
# SSLContext.set_ciphers().
DEFAULT_CIPHERS = ':'.join([
    'TLS13-AES-256-GCM-SHA384',
    'TLS13-CHACHA20-POLY1305-SHA256',
    'TLS13-AES-128-GCM-SHA256',
    'ECDH+AESGCM',
    'ECDH+CHACHA20',
    'DH+AESGCM',
    'DH+CHACHA20',
    'ECDH+AES256',
    'DH+AES256',
    'ECDH+AES128',
    'DH+AES',
    'RSA+AESGCM',
    'RSA+AES',
    '!aNULL',
    '!eNULL',
    '!MD5',
])
try:
    from ssl import SSLContext  # Modern SSL?
except ImportError:
    import sys

    # Minimal shim for interpreters whose ssl module predates SSLContext
    # (Python 2.6–2.7.8, 3.0–3.1): records configuration and replays it via
    # the legacy ssl.wrap_socket() call.
    class SSLContext(object):  # Platform-specific: Python 2 & 3.1
        # set_ciphers() only works where ssl.wrap_socket accepts a `ciphers`
        # argument (2.7+ / 3.2+).
        supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
                                (3, 2) <= sys.version_info)

        def __init__(self, protocol_version):
            self.protocol = protocol_version
            # Use default values from a real SSLContext
            self.check_hostname = False
            self.verify_mode = ssl.CERT_NONE
            self.ca_certs = None
            self.options = 0
            self.certfile = None
            self.keyfile = None
            self.ciphers = None

        def load_cert_chain(self, certfile, keyfile):
            # Only stored; passed to wrap_socket() later.
            self.certfile = certfile
            self.keyfile = keyfile

        def load_verify_locations(self, cafile=None, capath=None):
            self.ca_certs = cafile

            if capath is not None:
                raise SSLError("CA directories not supported in older Pythons")

        def set_ciphers(self, cipher_suite):
            if not self.supports_set_ciphers:
                raise TypeError(
                    'Your version of Python does not support setting '
                    'a custom cipher suite. Please upgrade to Python '
                    '2.7, 3.2, or later if you need this functionality.'
                )
            self.ciphers = cipher_suite

        def wrap_socket(self, socket, server_hostname=None, server_side=False):
            # server_hostname is accepted for interface compatibility but
            # cannot be honoured: legacy wrap_socket has no SNI support.
            warnings.warn(
                'A true SSLContext object is not available. This prevents '
                'urllib3 from configuring SSL appropriately and may cause '
                'certain SSL connections to fail. You can upgrade to a newer '
                'version of Python to solve this. For more information, see '
                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
                '#ssl-warnings',
                InsecurePlatformWarning
            )
            kwargs = {
                'keyfile': self.keyfile,
                'certfile': self.certfile,
                'ca_certs': self.ca_certs,
                'cert_reqs': self.verify_mode,
                'ssl_version': self.protocol,
                'server_side': server_side,
            }
            if self.supports_set_ciphers:  # Platform-specific: Python 2.7+
                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
            else:  # Platform-specific: Python 2.6
                return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
    """
    Checks if given fingerprint matches the supplied certificate.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    """
    # Normalise "AB:CD:..." to lowercase hex without separators.
    expected_hex = fingerprint.replace(':', '').lower()

    # Select the hash algorithm from the hex-digest length (md5/sha1/sha256).
    hashfunc = HASHFUNC_MAP.get(len(expected_hex))
    if hashfunc is None:
        raise SSLError(
            'Fingerprint of invalid length: {0}'.format(expected_hex))

    # We need encode() here for py32; works on py2 and p33.
    expected_bytes = unhexlify(expected_hex.encode())
    actual_digest = hashfunc(cert).digest()

    # Constant-time comparison to avoid leaking timing information.
    if not _const_compare_digest(actual_digest, expected_bytes):
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(expected_hex, hexlify(actual_digest)))
def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_NONE

    if not isinstance(candidate, str):
        # Already a numeric constant: pass it through untouched.
        return candidate

    # Try the name verbatim first, then with the 'CERT_' prefix so the
    # abbreviated form ('REQUIRED') also resolves.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'CERT_' + candidate)
    return resolved
def resolve_ssl_version(candidate):
    """
    like resolve_cert_reqs
    """
    if candidate is None:
        return PROTOCOL_SSLv23

    if not isinstance(candidate, str):
        # Already a numeric protocol constant: return as-is.
        return candidate

    # Accept either the full constant name or its 'PROTOCOL_'-less form.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'PROTOCOL_' + candidate)
    return resolved
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from pipenv.patched.notpip._vendor.urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None, ciphers=None, ssl_context=None,
                    ca_cert_dir=None):
    """
    All arguments except for server_hostname, ssl_context, and ca_cert_dir have
    the same meaning as they do when using :func:`ssl.wrap_socket`.

    :param server_hostname:
        When SNI is supported, the expected hostname of the certificate
    :param ssl_context:
        A pre-made :class:`SSLContext` object. If none is provided, one will
        be created using :func:`create_urllib3_context`.
    :param ciphers:
        A string of ciphers we wish the client to support. This is not
        supported on Python 2.6 as the ssl module does not support it.
    :param ca_cert_dir:
        A directory containing CA certificates in multiple separate files, as
        supported by OpenSSL's -CApath flag or the capath argument to
        SSLContext.load_verify_locations().
    :returns: the wrapped SSL socket produced by ``context.wrap_socket``.
    :raises SSLError: if the CA bundle/directory cannot be loaded.
    """
    context = ssl_context
    if context is None:
        # Note: This branch of code and all the variables in it are no longer
        # used by urllib3 itself. We should consider deprecating and removing
        # this code.
        context = create_urllib3_context(ssl_version, cert_reqs,
                                         ciphers=ciphers)

    if ca_certs or ca_cert_dir:
        try:
            context.load_verify_locations(ca_certs, ca_cert_dir)
        except IOError as e:  # Platform-specific: Python 2.6, 2.7, 3.2
            raise SSLError(e)
        # Py33 raises FileNotFoundError which subclasses OSError
        # These are not equivalent unless we check the errno attribute
        except OSError as e:  # Platform-specific: Python 3.3 and beyond
            if e.errno == errno.ENOENT:
                # Missing CA file/dir -> wrap as SSLError like the IOError
                # path; any other OSError propagates unchanged.
                raise SSLError(e)
            raise
    elif getattr(context, 'load_default_certs', None) is not None:
        # try to load OS default certs; works well on Windows (require Python3.4+)
        context.load_default_certs()

    if certfile:
        context.load_cert_chain(certfile, keyfile)

    # If we detect server_hostname is an IP address then the SNI
    # extension should not be used according to RFC3546 Section 3.1
    # We shouldn't warn the user if SNI isn't available but we would
    # not be using SNI anyways due to IP address for server_hostname.
    if ((server_hostname is not None and not is_ipaddress(server_hostname))
            or IS_SECURETRANSPORT):
        if HAS_SNI and server_hostname is not None:
            return context.wrap_socket(sock, server_hostname=server_hostname)

        # SNI is needed but unavailable on this platform: warn, then fall
        # through and wrap without it.
        warnings.warn(
            'An HTTPS request has been made, but the SNI (Server Name '
            'Indication) extension to TLS is not available on this platform. '
            'This may cause the server to present an incorrect TLS '
            'certificate, which can cause validation failures. You can upgrade to '
            'a newer version of Python to solve this. For more information, see '
            'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
            '#ssl-warnings',
            SNIMissingWarning
        )

    return context.wrap_socket(sock)
def is_ipaddress(hostname):
    """Detects whether the hostname given is an IP address.

    :param str hostname: Hostname to examine.
    :return: True if the hostname is an IP address, False otherwise.
    """
    if six.PY3 and isinstance(hostname, six.binary_type):
        # IDN A-label bytes are ASCII compatible.
        hostname = hostname.decode('ascii')

    # Probe IPv4 first, then IPv6 where the platform supports it.
    candidate_families = [socket.AF_INET]
    if hasattr(socket, 'AF_INET6'):
        candidate_families.append(socket.AF_INET6)

    for family in candidate_families:
        try:
            inet_pton(family, hostname)
        except (socket.error, ValueError, OSError):
            continue
        return True
    return False
|
[
"32567080+Ryantansk@users.noreply.github.com"
] |
32567080+Ryantansk@users.noreply.github.com
|
17e697b542e82af40f6684793faaf8f231bddf86
|
2ab2993c8695af6ecad97428b8f703c1af4708bf
|
/pages/migrations/0004_cart.py
|
5d5794d38aea5452a783f274636df3bb7a84592a
|
[] |
no_license
|
gfdperez/LBYCPD2_Group5
|
fcaeba7bd17073448d49b92bcef306ad13f4a5ec
|
df627378e7e175c4849f8af6fd8811c4c197d947
|
refs/heads/main
| 2023-05-12T06:55:38.454682
| 2021-06-03T12:50:49
| 2021-06-03T12:50:49
| 350,992,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
# Generated by Django 3.2 on 2021-05-20 08:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the Cart model, one row per item a user
    # has placed in their shopping cart.

    dependencies = [
        ('pages', '0003_drink_classname'),
    ]

    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): username is stored as plain text rather than a
                # ForeignKey to the user model — confirm this is intentional.
                ('username', models.CharField(max_length=30)),
                ('item_name', models.CharField(max_length=50)),
                ('quantity', models.PositiveIntegerField()),
                ('size', models.CharField(max_length=10)),
                ('price', models.PositiveIntegerField()),
            ],
        ),
    ]
|
[
"brendon_medrano@dlsu.edu.ph"
] |
brendon_medrano@dlsu.edu.ph
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.