| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6168e486610759f5d2d1fc74d45ace0aa72be9aa
|
7b4e414b4d6823417807ad57c20a5ec6d7c4a9b7
|
/tree_serialization.py
|
025644e138c8cddc3a2bdc5887824da0fd235000
|
[] |
no_license
|
alvinkaiser/DailyInterviewQuestion
|
6effe8ad3b41fe56ba96afce767c9f791315a6ad
|
859a8d77c014181fe353a88c3e530d0aa676bc5e
|
refs/heads/master
| 2023-03-18T08:08:19.016763
| 2020-03-12T06:39:48
| 2020-03-12T06:39:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
def rabin_karp(s1,s2):
assert len(s1) >= len(s2)
    current_hash = target_hash = 0
    same = True
    x = 53
    for i in range(len(s2)):
        if same and s1[i] != s2[i]:
            same = False
        current_hash = current_hash * x + ord(s1[i])
        target_hash = target_hash * x + ord(s2[i])
    if same:  # the first window matches exactly, a case the original computed but never returned
        return 0
    power = x**(len(s2) - 1)
for i in range(len(s2),len(s1)):
letter_to_remove,letter_to_add = s1[i - len(s2)],s1[i]
current_hash = (current_hash - power * ord(letter_to_remove)) * x + ord(letter_to_add)
if current_hash == target_hash and s1[i - len(s2) + 1:i + 1] == s2:
return i - len(s2) + 1
return -1
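# Illustrative behaviour (my addition, not in the original file): rabin_karp
# returns the first index of s2 inside s1, or -1 when absent, e.g.
#   rabin_karp("hello world", "world")  ->  6
#   rabin_karp("abcdef", "xyz")         -> -1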
class Node:
def __init__(self,value):
self.value = value
self.left = self.right = None
    def __repr__(self):
return f"Node({self.value})"
def serialize(root):
if not root:
return '#'
s = str(root.value)
s += ' ' + serialize(root.left)
s += ' ' + serialize(root.right)
return s
def deserialize(data):
def helper():
value = next(values)
if value == '#':
return None
node = Node(int(value))
node.left = helper()
node.right = helper()
return node
values = iter(data.split())
    return helper()  # the original returned the undefined name 'node'
if __name__ == "__main__":
n1 = Node(1)
n2 = Node(2)
n3 = Node(3)
n4 = Node(4)
n5 = Node(5)
n7 = Node(7)
n1.left = n3
n1.right = n4
n3.left = n2
n3.right = n5
n4.right = n7
print(serialize(n1))
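    # Round-trip check (my addition, not part of the original file):
    # deserializing the serialized tree should reproduce the same string.
    assert serialize(deserialize(serialize(n1))) == serialize(n1)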
|
[
"ctabatab@gmail.com"
] |
ctabatab@gmail.com
|
4635f911b8a061b79945d181271ee84668e3a7f8
|
d56f4fe901208960ea7328dc63ba455f7664d0a9
|
/top/routes.py
|
3bb02280218e2d6e79843fc78b8c5fc1a10bfd88
|
[] |
no_license
|
bivshaya/tops
|
53e4a77207329f8dd51b001fc5b1c85b8393bc04
|
7dd18c954e36403949736eedea1bb486bdb1e287
|
refs/heads/master
| 2022-12-18T07:38:49.669155
| 2019-09-13T11:07:57
| 2019-09-13T11:07:57
| 208,220,011
| 0
| 0
| null | 2022-12-08T06:09:32
| 2019-09-13T07:52:20
|
CSS
|
UTF-8
|
Python
| false
| false
| 277
|
py
|
from flask import render_template, Markup, request, jsonify
from top.utils import log_me
from top import offers
from top import top
@top.route('/')
@log_me  # route decorator outermost, so Flask registers the logged view rather than the bare function
def index():
products = offers.read_offers()
return render_template("app-top.html", product_dict=products)
|
[
"oleksii.mits@gmail.com"
] |
oleksii.mits@gmail.com
|
adb2f67e9c44084ad1f9ff53b27bcf8bb14e13b6
|
8abcbaf205a707382facbbaa6633943a11dec997
|
/binary_tree/inorderTraversal.py
|
f4f7cd36b5d29523be9df8220dbbfd4f49cd3129
|
[] |
no_license
|
sepulworld/leet
|
09b1ba378452cd53ee86a58179bd61a2b4bbbdbd
|
4801a4f14181e956c0698b3bc8f06d662cba89a0
|
refs/heads/master
| 2020-04-26T07:36:53.716238
| 2019-04-09T22:16:20
| 2019-04-09T22:16:20
| 173,398,608
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
from typing import List  # needed for the List[int] annotation below

class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def __init__(self):
self.output = []
def inorderTraversal(self, root: TreeNode) -> List[int]:
if root:
self.inorderTraversal(root.left)
self.output.append(root.val)
self.inorderTraversal(root.right)
return self.output
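# Illustrative usage (my addition, not in the original file):
#   root = TreeNode(1); root.right = TreeNode(2); root.right.left = TreeNode(3)
#   Solution().inorderTraversal(root)  ->  [1, 3, 2]
# Note that self.output persists across calls on the same Solution instance.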
|
[
"zane@ugh.cloud"
] |
zane@ugh.cloud
|
c0b3cf431a445b2bc0eeb4c15daff6cc571037d5
|
cc9a653f937d5f49f1c21088e84bb11a7bd2be03
|
/exoensewebsite/urls.py
|
71dfee63473064ad9d2ea93aa00479309c8b6c2a
|
[] |
no_license
|
Mahmoud-m-bahget/expenses-website
|
20cd9108fabf3b5ce0f552f63ccb2ed112b74410
|
f534592a821fa6dfe167f283a734a7c743d07a45
|
refs/heads/main
| 2023-02-20T17:07:47.018322
| 2021-01-26T15:26:07
| 2021-01-26T15:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
"""exoensewebsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('',include('expenses.urls')),
path('authentication/',include('authentication.urls')),
path('preferences/',include('userpreferences.urls')),
path('income/',include('userincome.urls')),
path('admin/', admin.site.urls),
]
|
[
"69098676+Mahmoud-m-bahget@users.noreply.github.com"
] |
69098676+Mahmoud-m-bahget@users.noreply.github.com
|
1c66ee7590418088986353d5bd24aba38b3ae6a5
|
239cb750a0d5fe2bdd0907a2933fc2e6f0894db4
|
/sightseq/__init__.py
|
3c87b075599a3f81c83ef774aa7e497e3cde2024
|
[
"Apache-2.0"
] |
permissive
|
qiongxiao/image-captioning
|
b2ff9d18983c5983b68bdd10961dc79a8cb76cba
|
16a4884ef816245577d106b82b0cbe785fd083a4
|
refs/heads/master
| 2020-06-19T23:38:53.360549
| 2019-07-14T13:08:41
| 2019-07-14T13:08:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
# Copyright (c) 2019-present, Zhiqiang Wang.
# All rights reserved.
import sightseq.criterions
import sightseq.models
import sightseq.modules
import sightseq.tasks
import sightseq.data
|
[
"zhiqwang@outlook.com"
] |
zhiqwang@outlook.com
|
759650dfdc4173a40d5c61b65fc2a0492f304817
|
1ee6f5aed273f0ef7a96fa42fc52e65b32ccaf38
|
/shop/views/category_views.py
|
279d77596d0688b279c5a39a8bb24d29387bf100
|
[] |
no_license
|
ianastewart/guestandgray
|
e9892711ff1e09a4a94b9889027ab00606d525a9
|
337d8d017f1f2fece4a641ed415b9837cf278b47
|
refs/heads/master
| 2023-08-31T02:53:21.623469
| 2023-08-30T10:08:10
| 2023-08-30T10:08:10
| 202,700,063
| 0
| 0
| null | 2021-01-07T23:08:56
| 2019-08-16T09:32:24
|
Python
|
UTF-8
|
Python
| false
| false
| 5,686
|
py
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import JsonResponse
from django.shortcuts import redirect, reverse
from django.urls import reverse_lazy
from django.views.generic import CreateView, DetailView, TemplateView, UpdateView, View
from django.templatetags.static import static
from shop.cat_tree import tree_json, tree_move
from shop.forms import CategoryForm
from shop.models import Category, Item
from shop.tables import CategoryTable
from table_manager.views import FilteredTableView
from table_manager.mixins import StackMixin
class CategoryCreateView(LoginRequiredMixin, StackMixin, CreateView):
model = Category
form_class = CategoryForm
template_name = "shop/category_form.html"
title = "Create category"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["images_url"] = reverse("category_images")
return context
def form_valid(self, form):
d = form.cleaned_data
parent = Category.objects.get(id=d["parent_category"])
node = parent.add_child(name=d["name"], description=d["description"])
node.post_save()
ref = d.get("category_ref", None)
if ref:
item = Item.objects.get(ref=ref)
node.image = item.image
ref = d.get("archive_ref", None)
if ref:
item = Item.objects.get(ref=ref)
node.archive_image = item.image
node.save()
return redirect(self.get_success_url())
class CategoryUpdateView(LoginRequiredMixin, StackMixin, UpdateView):
model = Category
form_class = CategoryForm
template_name = "shop/category_form.html"
title = "Edit category"
def get_initial(self):
initial = super().get_initial()
cat = self.object
self.parent = cat.get_parent()
if self.parent:
initial["parent_category"] = self.parent.pk
if cat.image:
initial["category_ref"] = cat.image.item.ref
if cat.archive_image:
initial["archive_ref"] = cat.archive_image.item.ref
return initial
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["images_url"] = reverse("category_images")
context["is_root"] = self.parent is None
return context
def form_valid(self, form):
d = form.cleaned_data
ref = d.get("category_ref", None)
if ref:
item = Item.objects.get(ref=ref)
self.object.image = item.image
ref = d.get("archive_ref", None)
if ref:
item = Item.objects.get(ref=ref)
self.object.archive_image = item.image
old_parent = self.object.get_parent()
new_parent = Category.objects.get(id=d["parent_category"])
response = super().form_valid(form)
if old_parent and old_parent.id != new_parent.id:
self.object.move(new_parent, "sorted-child")
new_parent.post_save()
return response
class CategoryTreeView(LoginRequiredMixin, StackMixin, TemplateView):
template_name = "shop/category_tree.html"
def get(self, request):
if request.META.get("HTTP_X_REQUESTED_WITH") == "XMLHttpRequest":
return JsonResponse(tree_json(), safe=False)
else:
self.clear_stack(request)
return super().get(request)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
a, b, c, d, e = Category.find_problems()
context["errors"] = a or b or c or d or e
return context
def post(self, request):
if "fix" in request.POST:
Category.fix_tree()
return redirect("category_tree")
# Ajax response to move node
p = request.POST
tree_move(p["node"], p["target"], p["previous"], p["position"] == "inside")
return JsonResponse("OK", safe=False)
class CategoryListView(LoginRequiredMixin, FilteredTableView):
model = Category
table_class = CategoryTable
table_pagination = {"per_page": 100}
heading = "Categories"
def get_queryset(self):
root = Category.objects.get(name="Catalogue")
return root.get_descendants().order_by("name")
class CategoryDetailView(LoginRequiredMixin, StackMixin, DetailView):
model = Category
template_name = "shop/category_detail.html"
context_object_name = "category"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["shop_items"] = self.object.shop_items()
context["archive_items"] = self.object.archive_items()
context["images_url"] = reverse("category_images")
return context
def post(self, request, **kwargs):
if "return" in request.POST:
pass
elif "delete" in request.POST:
self.get_object().delete()
return redirect(self.get_success_url())
class CategoryImagesView(View):
def get(self, request, *args, **kwargs):
        ref = (request.GET.get("ref") or "").strip()  # avoid AttributeError when "ref" is absent
target = request.GET.get("target", None)
data = {}
if ref and target:
try:
item = Item.objects.get(ref=ref)
if item.image is not None:
data["image"] = item.image.file.url
else:
data["error"] = f"Item {ref} has no image"
except Item.DoesNotExist:
data["error"] = f"There is no item with reference {ref}"
else:
data["image"] = static("/shop/images/no_image.png")
return JsonResponse(data)
|
[
"is@ktconsultants.co.uk"
] |
is@ktconsultants.co.uk
|
7b2c2de23fb780350f4b5685fefa6d4dcf750f2d
|
9b3edf55beca0176fac208efdf2e2bb2ef8f9600
|
/libcloudforensics/providers/aws/internal/account.py
|
cbd4461653afbd001926391883a68686dcfe8edf
|
[
"Apache-2.0"
] |
permissive
|
aarontp/cloud-forensics-utils
|
cc81530dcb1f32b906cf53c911f8c49b46d17fa4
|
efc65b526bb551595353f0012d46381792a3684e
|
refs/heads/master
| 2023-01-16T01:18:33.503447
| 2020-10-29T16:34:35
| 2020-10-29T16:34:35
| 275,072,971
| 1
| 0
|
Apache-2.0
| 2020-06-26T04:27:36
| 2020-06-26T04:27:35
| null |
UTF-8
|
Python
| false
| false
| 6,019
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for incident response operations on AWS EC2.
Library to make forensic images of Amazon Elastic Block Store devices and create
analysis virtual machine to be used in incident response.
"""
from typing import Optional, TYPE_CHECKING
import boto3
from libcloudforensics.providers.aws.internal import ec2, ebs, kms
if TYPE_CHECKING:
import botocore
class AWSAccount:
"""Class representing an AWS account.
Attributes:
default_availability_zone (str): Default zone within the region to create
new resources in.
default_region (str): The default region to create new resources in.
aws_profile (str): The AWS profile defined in the AWS
credentials file to use.
session (boto3.session.Session): A boto3 session object.
_ec2 (AWSEC2): An AWS EC2 client object.
_ebs (AWSEBS): An AWS EBS client object.
_kms (AWSKMS): An AWS KMS client object.
"""
def __init__(self,
default_availability_zone: str,
aws_profile: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None) -> None:
"""Initialize the AWS account.
Args:
default_availability_zone (str): Default zone within the region to create
new resources in.
aws_profile (str): Optional. The AWS profile defined in the AWS
credentials file to use.
aws_access_key_id (str): Optional. If provided together with
aws_secret_access_key and aws_session_token, authenticate to AWS
using these parameters instead of the credential file.
aws_secret_access_key (str): Optional. If provided together with
aws_access_key_id and aws_session_token, authenticate to AWS
using these parameters instead of the credential file.
aws_session_token (str): Optional. If provided together with
aws_access_key_id and aws_secret_access_key, authenticate to AWS
using these parameters instead of the credential file.
"""
self.aws_profile = aws_profile or 'default'
self.default_availability_zone = default_availability_zone
# The region is given by the zone minus the last letter
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#using-regions-availability-zones-describe # pylint: disable=line-too-long
self.default_region = self.default_availability_zone[:-1]
if aws_access_key_id and aws_secret_access_key and aws_session_token:
self.session = boto3.session.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token)
else:
self.session = boto3.session.Session(profile_name=self.aws_profile)
self._ec2 = None # type: Optional[ec2.EC2]
self._ebs = None # type: Optional[ebs.EBS]
self._kms = None # type: Optional[kms.KMS]
@property
def ec2(self) -> ec2.EC2:
"""Get an AWS ec2 object for the account.
Returns:
AWSEC2: Object that represents AWS EC2 services.
"""
if self._ec2:
return self._ec2
self._ec2 = ec2.EC2(self)
return self._ec2
@property
def ebs(self) -> ebs.EBS:
"""Get an AWS ebs object for the account.
Returns:
AWSEBS: Object that represents AWS EBS services.
"""
if self._ebs:
return self._ebs
self._ebs = ebs.EBS(self)
return self._ebs
@property
def kms(self) -> kms.KMS:
"""Get an AWS kms object for the account.
Returns:
AWSKMS: Object that represents AWS KMS services.
"""
if self._kms:
return self._kms
self._kms = kms.KMS(self)
return self._kms
def ClientApi(self,
service: str,
region: Optional[str] = None) -> 'botocore.client.EC2': # pylint: disable=no-member
"""Create an AWS client object.
Args:
service (str): The AWS service to use.
region (str): Optional. The region in which to create new resources. If
none provided, the default_region associated to the AWSAccount
object will be used.
Returns:
botocore.client.EC2: An AWS EC2 client object.
"""
if region:
return self.session.client(service_name=service, region_name=region)
return self.session.client(
service_name=service, region_name=self.default_region)
def ResourceApi(self,
service: str,
# The return type doesn't exist until Runtime, therefore we
# need to ignore the type hint
# pylint: disable=line-too-long
region: Optional[str] = None) -> 'boto3.resources.factory.ec2.ServiceResource': # type: ignore
# pylint: enable=line-too-long
"""Create an AWS resource object.
Args:
service (str): The AWS service to use.
region (str): Optional. The region in which to create new resources. If
none provided, the default_region associated to the AWSAccount
object will be used.
Returns:
boto3.resources.factory.ec2.ServiceResource: An AWS EC2 resource object.
"""
if region:
return self.session.resource(service_name=service, region_name=region)
return self.session.resource(
service_name=service, region_name=self.default_region)
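# Hypothetical usage sketch (my addition, not part of the library; assumes
# valid AWS credentials and a real availability zone):
#   account = AWSAccount(default_availability_zone='us-east-1a')
#   ec2_client = account.ClientApi('ec2')        # low-level botocore client
#   ec2_resource = account.ResourceApi('ec2')    # high-level boto3 resource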
|
[
"noreply@github.com"
] |
aarontp.noreply@github.com
|
2f01d7850de360df662f532c1ecb1c04d04f2bcb
|
3cef0aae15f155239a1bde7de9ada79e90d4d6a1
|
/keygen-practice/crack.py
|
89e4d7b72db6f68ad13fa40881d90c5a5026603e
|
[] |
no_license
|
neelpatel05/crackmes
|
edb3a8954df8892b21f5487bf2551adf4ca02b80
|
dc0fe1233b1d02060f7960f6406d41fae5a8db53
|
refs/heads/master
| 2022-12-05T13:12:08.901471
| 2020-08-18T19:29:19
| 2020-08-18T19:29:19
| 276,696,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
final = 52650
divisible = []
for i in range(65, 90):  # ASCII 'A' (65) through 'Y' (89); note 'Z' (90) is excluded
if final % i == 0:
divisible.append(i)
quotient = []
for i in divisible:
    x = final // i  # integer division: chr(i) * j below needs an int repeat count
quotient.append(x)
print(quotient)
for i,j in zip(divisible, quotient):
print(chr(i)*j)
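# Expected result (my addition): the divisors of 52650 in [65, 89] are
# 65, 75, 78 and 81, so the script prints 'A'*810, 'K'*702, 'N'*675 and 'Q'*650.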
|
[
"neel.patel6573@gmail.com"
] |
neel.patel6573@gmail.com
|
0a8b93c86f1f59ac957d675eef30b726dc06c777
|
52a4d869976a97498bdf56a8d0ff92cac138a136
|
/Algorithmic Heights/rosalind_3_degarray.py
|
1ed82de8791f3769afe522fe22c1bee1abb2a87e
|
[] |
no_license
|
aakibinesar/Rosalind
|
d726369a787d848cc378976b886189978a60a3a5
|
375bbdbfb16bf11b2f980701bbd0ba74a1605cdb
|
refs/heads/master
| 2022-08-18T09:36:00.941080
| 2020-05-24T18:49:38
| 2020-05-24T18:49:38
| 264,722,651
| 0
| 0
| null | 2020-05-17T17:51:03
| 2020-05-17T17:40:59
| null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
file = open('rosalind_deg.txt','r').readlines()
vertices, edges = (int(val) for val in file[0].split())
my_data = [[int(val) for val in line.split()] for line in file[1:]]
L = []
for k in range(1,vertices+1):
count = 0
for i in range(2):
for j in range(0,edges):
if my_data[j][i] == k:
count+=1
L.append(count)
print(' '.join(str(num) for num in L))
|
[
"noreply@github.com"
] |
aakibinesar.noreply@github.com
|
6be4192f3fc52227e191ab2489000cf461ddea87
|
d43a182f9fa65dae58f87b0a9aae2f831c587b6e
|
/mysite/polls/admin.py
|
dc8f548a0a106b264d07391b283eae6f50408327
|
[] |
no_license
|
ahmedosamataha/first_steps_in_django
|
946058f1fa03a1fcd3146ca1e8b47fa48615b032
|
54f97ef4cef46f4a09cb205c262cf6df39821f8a
|
refs/heads/master
| 2020-03-30T11:19:05.249736
| 2018-10-01T22:10:14
| 2018-10-01T22:10:14
| 151,167,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
from django.contrib import admin
from .models import Question, Choice
# Register your models here.
admin.site.register(Question)
admin.site.register(Choice)
|
[
"ahmedosama.t123@gmail.com"
] |
ahmedosama.t123@gmail.com
|
3e677c83fd12cc5c2661147aa8b3dca9d0b689e4
|
15c4278a1a70ad3c842b72cba344f96fca43f991
|
/newpro/newapp/admin.py
|
37dac0ac21c8bfb1c6e0d008a060f3977faa28a0
|
[] |
no_license
|
nivyashri05/Task1
|
d9914cf5bb8947ef00e54f77480c6f5f375c76ad
|
9e9b03961eb1144d1b1a936159082ad80d32ce31
|
refs/heads/master
| 2023-01-06T01:04:17.321503
| 2020-11-10T15:31:02
| 2020-11-10T15:31:02
| 311,691,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from newapp.models import User
class UserAdmin(BaseUserAdmin):
list_display = ('email','username','phone','is_admin','is_staff','timestamp')
search_fields = ('email','username',)
readonly_fields=('date_joined', 'last_login')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
admin.site.register(User, UserAdmin)  # register the customised admin defined above, not the base class
|
[
"nivyainventateq@gmail.com"
] |
nivyainventateq@gmail.com
|
4ff946b307a86955672b905e0806efb85572c652
|
198f759dc334df0431cbc25ed4243e86b93571eb
|
/drop/wsgi.py
|
0dbf4b2caf4a9cd90fadce8e2d1d88950fcb3cfe
|
[] |
no_license
|
miladhzz/django-muliple-db
|
ec2074b14dd67a547c982f20b2586f435e7e0d6c
|
56ff2555e498d9105cad215daf4c3d4da59d7d9a
|
refs/heads/master
| 2022-12-25T08:08:05.761226
| 2020-10-06T06:38:30
| 2020-10-06T06:38:30
| 301,636,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
WSGI config for drop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drop.settings')
application = get_wsgi_application()
|
[
"miladhzz@gmail.com"
] |
miladhzz@gmail.com
|
21ae88cb38d2277d10ef58534ab938812f72fd97
|
a0b857f7cd610ae077138dbc69b3abf7b08e9e31
|
/api/models/permission.py
|
64872b23374aa99bdd07660cd9a671946d8163c0
|
[] |
no_license
|
CalvinHuynh/project-olympic
|
3122f6b9d9cb1532494bb2aa5443337efac8f519
|
f73de5dd356b680ee8efe1d1914266d5523084d2
|
refs/heads/master
| 2022-12-13T09:03:14.881268
| 2020-02-06T14:05:04
| 2020-02-06T14:05:04
| 207,567,536
| 0
| 0
| null | 2022-12-08T06:44:16
| 2019-09-10T13:30:27
|
Python
|
UTF-8
|
Python
| false
| false
| 242
|
py
|
# from peewee import CharField, DateTimeField, PrimaryKeyField
# from .base import Base
# class Permission(Base):
# id = PrimaryKeyField()
# permission_name = CharField(unique=True, null=False)
# created_date = DateTimeField()
|
[
"huynhck001@gmail.com"
] |
huynhck001@gmail.com
|
427feb09933b065ae020efa261753f3a2df525d7
|
be3f9044ca524bd507bf8f38a5753f5a72d8f086
|
/12-computing-simple-interest/computing-simple-interest.py
|
9f2c83d072816b4dd6e8aa4209e21af9d270c516
|
[] |
no_license
|
ville6000/EFP-Python
|
41a5ccf307ff0b96d60b56e2814489503dd8806b
|
0076222203f4a09afe9f7954905b36355171dccb
|
refs/heads/master
| 2021-04-15T06:38:12.982733
| 2018-09-24T07:07:26
| 2018-09-24T07:07:26
| 126,884,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
import locale
locale.setlocale( locale.LC_ALL, 'en_CA.UTF-8' )
def calculate_simple_interest(principal, rate_of_interest, years):
    # Returns the total future value (principal plus simple interest), not the interest alone.
    return int(principal) * (1 + (float(rate_of_interest) / 100) * int(years))
principal = input('Enter the principal: ')
rate_of_interest = input('Enter the rate of interest: ')
years = input('Enter the number of years: ')
worth = calculate_simple_interest(principal, rate_of_interest, years)
print(locale.currency(worth))
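# Worked example (my addition): principal=1000, rate=5, years=2
# -> 1000 * (1 + 0.05 * 2) = 1100.0, formatted as local currency.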
|
[
"ville.viklund@morgan.fi"
] |
ville.viklund@morgan.fi
|
17122f65e0d8729a00a8bd125c9cd4e3087399da
|
53cb878e54f08d8cf59118e313f773fd99a690fc
|
/FirstWindow.py
|
415be3585b73c72f4af53b0ab528eee47290eb53
|
[] |
no_license
|
Dudoserovich/Coursework-1
|
17340f08a68e9072f1b7962195674a04d843a533
|
f41225494ddfeb0e52ff60f79ada9ba8bd63e390
|
refs/heads/main
| 2023-07-28T03:30:32.417528
| 2021-09-14T06:56:11
| 2021-09-14T06:56:11
| 406,258,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,717
|
py
|
import BaseCreateWindow as BaseWin
import tkinter.messagebox as mb
import tkinter as tk
import os
import random
from tkinter import filedialog, DISABLED, NORMAL, LEFT, BOTH
import ExWindow as ExWin
class WorkFile:
def _fill_up(self, file, count_num: int, min: int, max: int) -> None:
new_max = int(max / count_num)
rand = random.randint(min, new_max)
file.write(str(rand) + ' ')
for i in range(count_num - 1):
new_max += int(count_num)
rand = random.randint((new_max - int(count_num)), new_max)
if i != count_num - 2:
file.write(str(rand) + ' ')
else:
file.write(str(rand))
def _fill_down(self, file, count_num: int, max: int) -> None:
new_max = max - int(count_num)
new_min = new_max - int(count_num)
rand = random.randint(new_min, new_max)
file.write(str(rand) + ' ')
for i in range(count_num - 1):
new_max -= int(count_num) // 100000
new_min -= int(count_num) // 100000
rand = random.randint(new_min, new_max)
if i != count_num - 2:
file.write(str(rand) + ' ')
else:
file.write(str(rand))
def _fill_file(self, file_extension: str, file) -> None:
min = 1
max = 50000000
check = False
if file_extension == ".up":
self._fill_up(file, int(self.spin.get()), min, max)
elif file_extension == ".down":
self._fill_down(file, int(self.spin.get()), max)
elif file_extension == ".rand": # случайная последовательность
for i in range(int(self.spin.get())):
file.write(str(random.randint(min, max)) + ' ')
file.truncate(file.tell() - 1)
elif file_extension == ".sim": # одно и то же число
num = random.randint(min, max)
for i in range(int(self.spin.get()) - 1):
file.write(str(num) + ' ')
file.write(str(num))
elif file_extension == ".up_down": # убывающие и возрастающие подпоследовательности
# print("up_down")
num_up = int(self.spin1.get())
num_down = int(self.spin2.get())
countUpDown = num_up + num_down
            count = int(int(self.spin.get()) / countUpDown)  # number of elements in each subsequence
if int(self.spin.get()) % countUpDown != 0:
check = True
if num_up != 0:
self._fill_up(file, count + int(self.spin.get()) % countUpDown, min, max)
num_up -= 1
                if num_up != 0 and num_down != 0:  # was '&', which binds tighter than '!=' and always evaluated False
file.write(' ')
elif num_down != 0:
self._fill_down(file, count + int(self.spin.get()) % countUpDown, min)
num_down -= 1
                if num_up != 0 and num_down != 0:
file.write(' ')
            while num_up != 0 and num_down != 0:  # '&' again made this loop unreachable in the original
num_up -= 1
file.write(' ')
self._fill_down(file, count, max)
num_down -= 1
file.write(' ')
while num_up != 0:
if check:
file.write(' ')
check = True
self._fill_up(file, count, min, max)
num_up -= 1
while num_down != 0:
if int(self.spin.get()) % countUpDown != 0 or (not check):
file.write(' ')
self._fill_down(file, count, max)
num_down -= 1
file.write(' ')
def _create_file(self, name: str, file_extension: str) -> None:
directory = r'C:/MyPrograms/Kursach/generation_files'
files = os.listdir(directory)
this = name + file_extension
count = 1
while this in files:
count += 1
this = name + file_extension + '(' + str(count - 1) + ')'
if count == 1:
file = open("./generation_files/" + name + file_extension, "w")
else:
file = open("./generation_files/" + name + file_extension + '(' + str(count - 1) + ')', "w")
        # the actual generation into the file
self._fill_file(file_extension, file)
file.close()
# the initial window class
class FirstWindow(BaseWin.BaseCreateWindow, WorkFile):
def __init__(self):
super().__init__()
self.iconbitmap(r'./icon.ico')
        # number of files created
self.count = 0
self.check_gen = False
self.title("Экспериментальное исследование сортировок Merge sort")
self.geometry('500x400')
        # create the heading labels
self._create_label("Параметры генерации", 0, 0, 4, 2, "w", "Arial Bold", 14, self)
self._create_label("Кол-во элементов последовательности:", 0, 1, 12, 2, "w", "Arial Bold", 10, self)
self.spin = tk.Spinbox(self, from_=2, to=10000000, width=10)
self.spin.grid(column=0, row=2, padx=16, pady=5, sticky="w")
self._create_label("Укажите тип последовательности:", 0, 3, 12, 2, "w", "Arial Bold", 10, self)
chk_state1 = tk.BooleanVar()
self._create_check_button(chk_state1, 'Отсортированная по возрастанию', 0, 5, 12, 2, "w")
chk_state2 = tk.BooleanVar()
self._create_check_button(chk_state2, 'Отсортированная по убыванию', 0, 5, 12, 2, "e")
chk_state3 = tk.BooleanVar()
self._create_check_button(chk_state3, 'Случайная', 0, 6, 12, 2, "w")
chk_state4 = tk.BooleanVar()
self._create_check_button(chk_state4, 'С многократноповторяющимся \nодним элементом', 0, 6, 7, 2, "e")
# canvas = tk.Canvas(self)
# canvas.create_line(16, 25, 400, 25)
# canvas.grid(column=0, row=7, padx=0, pady=0, columnspan=3, rowspan=5, sticky="n")
chk_state5 = tk.BooleanVar()
chk_state5.set(False)
frame3 = tk.Frame(master=self,
relief=tk.SUNKEN,
borderwidth=1)
frame3.grid(column=0, row=8, padx=5, pady=5)
self.chk = tk.Checkbutton(master=frame3, text='Состоящая из k возрастающих \n'
'и j убывающих подпоследовательностей', var=chk_state5, state=DISABLED, justify=LEFT)
self.chk.grid(column=0, row=8, padx=12, pady=2, sticky="w")
self._create_label("Количество возрастающих \nподпоследовательностей:", 0, 9, 12, 2, "w", "Arial Regular", 10, frame3)
self.spin1 = tk.Spinbox(master=frame3, from_=0, to=5000000, width=10)
self.spin1['state'] = tk.DISABLED
self.spin1.grid(column=0, row=10, padx=16, pady=5, sticky="wn")
self._create_label("Количество убывающих \nподпоследовательностей:", 1, 9, 12, 2, "w", "Arial Regular", 10, frame3)
self.spin2 = tk.Spinbox(master=frame3, from_=0, to=5000000, width=10)
self.spin2['state'] = tk.DISABLED
self.spin2.grid(column=1, row=10, padx=16, pady=5, sticky="wn")
        # array of checkbox states
self.chk_arr = [chk_state1, chk_state2, chk_state3, chk_state4, chk_state5]
frame4 = tk.Frame(master=self,
relief=tk.FLAT,
borderwidth=1)
frame4.grid(column=0, row=11, padx=5, pady=5)
self._create_button(frame4, "Сгенерировать \nпоследовательность", "blue",
"white", self.__click_generation, 0, 11, 0, 5, "ne", 2, 0, 1)
self._create_button(frame4, "Эксперимент", "green", "white", self.__click_ex, 1, 11, 10, 5, "nw")
def __generation(self, num: int) -> None:
if num == 0:
self._create_file(self.spin.get(), ".up")
elif num == 1:
self._create_file(self.spin.get(), ".down")
elif num == 2:
self._create_file(self.spin.get(), ".rand")
elif num == 3:
self._create_file(self.spin.get(), ".sim")
elif num == 4:
self._create_file(self.spin.get() + '_' + self.spin1.get() + '_' + self.spin2.get(), ".up_down")
def __click_generation(self) -> None:
check = False
check1 = False
if 1 < int(self.spin.get()) <= 1000000:
            # check the subsequence counts for the last checkbox
for i in range(5):
if self.chk_arr[i].get():
if i != 4:
check = True
                        self.__generation(i)  # generate a file without subsequences
                    elif (self.chk_arr[4].get()
                            and int(self.spin1.get()) + int(self.spin2.get()) <= int(self.spin.get()) / 2
                            and int(self.spin1.get()) + int(self.spin2.get()) != 0):
                        self.__generation(i)  # generate a file with subsequences
                        check = True
elif self.chk_arr[4].get():
check1 = True
if check1:
mb.showerror("Ошибка", "Неправильное количество подпоследовательностей у последнего чек-бокса!")
elif not check:
mb.showerror("Ошибка", "Не было выбрано ни одного чек-бокса!")
else:
mb.showinfo("Информация", "Последовательность успешно сгенерирована")
self.check_gen = True
else:
mb.showerror("Ошибка", "Неправильное количество элементов последовательности!")
def __click_ex(self) -> None:
        # choose the files for the experiment
tk.Tk().withdraw()
        # show only generated files
        files_name = filedialog.askopenfilenames(initialdir=r"C:\MyPrograms\Kursach\generation_files",
title="Выбор файлов для эксперемента",
filetypes=(("Files", "*.up *.up*) *.down *.down*) "
"*.rand *.rand*) "
"*.sim *.sim*) *.up_down *.up_down*)"),))
        # move on to the experiment window
if files_name != "":
            # self.withdraw()  # hides the window instead of destroying it
            self.destroy()  # close this window
ex_window = ExWin.ExWindow(files_name)
ex_window.resizable(False, False)
ex_window.mainloop()
else:
mb.showwarning("Предупреждение", "Файлы не были выбраны")
|
[
"mr.egoryk@mail.ru"
] |
mr.egoryk@mail.ru
|
16d4ac62c0efe8567434b83a272a3035cd8c8990
|
d75371f629cf881de3c49b53533879a5b862da2e
|
/python/search-a-2d-matrix.py
|
3ce6ce1d52b91816fccec4a1e5592f5c548b2cf5
|
[] |
no_license
|
michaelrbock/leet-code
|
7352a1e56429bb03842b588ba6bda2a90315a2f4
|
070db59d4e0ded3fb168c89c3d73cb09b3c4fe86
|
refs/heads/master
| 2020-04-01T05:40:49.262575
| 2019-10-10T22:03:10
| 2019-10-10T22:03:10
| 152,914,631
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,614
|
py
|
def binary_row(rows, target):
if len(rows) == 1:
return 0, None
if len(rows) == 2:
return (1, None) if target >= rows[1] else (0, None)
lo = 0
hi = len(rows)
while lo < hi:
mid = (lo + hi) // 2
if rows[mid] == target:
return mid, True
if mid == len(rows) - 1:
return len(rows) - 1, None
if rows[mid] < target and rows[mid + 1] > target:
return mid, None
elif target > rows[mid]:
lo = mid
else:
hi = mid
return len(rows) - 1, None
def binary_search(lst, target):
    # Standard half-open-interval binary search. The original moved the bounds
    # with `lo = mid` / `hi = mid`, which could spin forever on some inputs
    # (e.g. lst=[3, 5, 7], target=2).
    lo = 0
    hi = len(lst)
    while lo < hi:
        mid = (lo + hi) // 2
        if lst[mid] == target:
            return True
        elif target > lst[mid]:
            lo = mid + 1
        else:
            hi = mid
    return False
class Solution1:
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or not matrix[0] or matrix[0][0] > target:
return False
row, result = binary_row([row[0] for row in matrix], target)
if result is not None:
return result
return binary_search(matrix[row], target)
def _translate(index, rows, cols):
"""Returns (row, col) for overall index."""
row = index // cols
col = index % cols
return row, col
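# e.g. (my addition) with rows=3, cols=4: _translate(7, 3, 4) -> (1, 3),
# i.e. overall index 7 lands in row 1, column 3.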
class Solution:
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix or not matrix[0]:
return False
# Strategy: binary search, but treat the matrix as if
# it was one long array. Translate overall index into
# row/col indices.
m, n = len(matrix), len(matrix[0]) # num row, num cols
start = 0 # indices as if matrix was one long list
        end = m * n - 1  # inclusive
while start <= end and start >= 0 and end < m * n:
mid = (start + end) // 2
row, col = _translate(mid, m, n)
if target == matrix[row][col]:
return True
elif target > matrix[row][col]:
start = mid + 1
else: # target < matrix[row][col]
end = mid - 1
return False
s = Solution()
assert not s.searchMatrix([[-10,-8,-8,-8],[-5,-4,-2,0]], 7)
assert s.searchMatrix([[1, 3, 5, 7],[10, 11, 16, 20],[23, 30, 34, 50]], 3)
assert not s.searchMatrix([[1, 3, 5, 7],[10, 11, 16, 20],[23, 30, 34, 50]], 13)
assert not s.searchMatrix([[1, 1]], 0)
assert not s.searchMatrix([[1, 1]], 2)
assert not s.searchMatrix([[-10,-8,-8,-8],[-5,-4,-2,0]], 7)
print('All tests passed!')
|
[
"mykel.bock@gmail.com"
] |
mykel.bock@gmail.com
|
d7a94e8e68011d579a02bc724c942353b0af1cc0
|
2ec1d45341efe23b85019c7596df5fbcfcdcef3e
|
/model/cnn.py
|
c36025930684f64e703a2c17b443755d7d6cb5e1
|
[] |
no_license
|
mostafaalishahi/Genetic_engineering_attribution_challenge_2020
|
9d6201e716d932a429d62ca242be5bb04dae6a6c
|
c5bc830d311f15cc1468fb308dbacd5d6678b7ce
|
refs/heads/master
| 2023-01-03T14:06:32.210148
| 2020-10-23T18:16:06
| 2020-10-23T18:16:06
| 306,711,461
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,041
|
py
|
import torch
from torch import nn
class MODEL(nn.Module):
def __init__(self, target_size, n_filters=512, rv_comp=True, metadata=True, padding_idx=0):
super().__init__()
self.rv_comp = rv_comp
self.metadata = metadata
self.filters = n_filters
self.cnn1 = nn.Conv1d(4, self.filters, kernel_size=12, stride=1)
self.maxpool = nn.AdaptiveMaxPool1d(1)
if self.rv_comp:
self.batchnorm1 = nn.BatchNorm1d(self.filters*2)
if self.metadata:
self.dense1 = nn.Linear((self.filters*2)+39, self.filters)
else:
self.dense1 = nn.Linear(self.filters*2, self.filters)
else:
self.batchnorm1 = nn.BatchNorm1d(self.filters)
if self.metadata:
self.dense1 = nn.Linear(self.filters+39, self.filters)
else:
self.dense1 = nn.Linear(self.filters, self.filters)
self.activation = nn.ReLU()
self.batchnorm2 = nn.BatchNorm1d(self.filters)
self.hidden2tag = nn.Linear(self.filters, target_size)
self.dropout = nn.Dropout(0.3)
self.inp_dropout = nn.Dropout(0.05)
def forward(self, sequence, sequence_rc, ft):
sequence = self.inp_dropout(sequence)
cnn1 = self.cnn1(sequence)
maxpool = self.maxpool(cnn1).squeeze(-1)
if self.rv_comp:
sequence_rc = self.inp_dropout(sequence_rc)
cnn1_rc = self.cnn1(sequence_rc)
maxpool_rc = self.maxpool(cnn1_rc).squeeze(-1)
bn1 = self.batchnorm1(torch.cat([maxpool, maxpool_rc], axis=-1))
else:
bn1 = self.batchnorm1(maxpool)
dp1 = self.dropout(bn1)
if self.metadata:
dense1 = self.dense1(torch.cat([dp1, ft],axis=-1))
else:
dense1 = self.dense1(dp1)
activation = self.activation(dense1)
bn2 = self.batchnorm2(activation)
dp2 = self.dropout(bn2)
tag_scores = self.hidden2tag(dp2)
return tag_scores
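# Minimal shape check (my addition, not in the original file; the batch size,
# sequence length and 39 metadata features are assumptions read off the layer
# sizes above):
#   model = MODEL(target_size=10)
#   seq = torch.randn(8, 4, 1000)                 # (batch, channels, length)
#   scores = model(seq, seq, torch.randn(8, 39))
#   scores.shape  ->  torch.Size([8, 10])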
|
[
"mostafa.alishahi@gmail.com"
] |
mostafa.alishahi@gmail.com
|
3609259e320c1b9024140e43446e259fc8811f1b
|
c5adccfb62906c2b7ed75388f964e94111782598
|
/Python/venv/lib/python3.8/site-packages/doqu/validation/__init__.py
|
42fedd829f0054a6d382ce38b330f1c6fd1f3e16
|
[] |
no_license
|
FranciscoMaya20/MobileWebScraping
|
531e9dfc77c1d7ddd90dea5874a4c7f5316d608d
|
979d310a7751ad27b96679316494023b6d5bd1b8
|
refs/heads/master
| 2023-01-28T01:34:33.312236
| 2020-11-30T16:39:11
| 2020-11-30T16:39:11
| 317,284,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
# -*- coding: utf-8 -*-
#
# Doqu is a lightweight schema/query framework for document databases.
# Copyright © 2009—2010 Andrey Mikhaylenko
#
# This file is part of Doqu.
#
# Doqu is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Doqu is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Doqu. If not, see <http://gnu.org/licenses/>.
#import validators
#class ValidatedDictMixin(object):
# # TODO
# pass
|
[
"fjm1001@sru.edu"
] |
fjm1001@sru.edu
|
16573c15b3817ed9f64b13f466428536b50da9d6
|
5b4312ddc24f29538dce0444b7be81e17191c005
|
/autoware.ai/1.12.0_cuda/build/waypoint_follower/catkin_generated/generate_cached_setup.py
|
01cfc657005e5167ef4e8abd08b42b76f522be17
|
[
"MIT"
] |
permissive
|
muyangren907/autoware
|
b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2
|
5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38
|
refs/heads/master
| 2020-09-22T13:08:14.237380
| 2019-12-03T07:12:49
| 2019-12-03T07:12:49
| 225,167,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_health_checker;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/amathutils_lib;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/tablet_socket_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_system_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_config_msgs;/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/install/autoware_build_flags;/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/build/waypoint_follower/devel/env.sh')
output_filename = '/home/muyangren907/autoware/autoware.ai/1.12.0_cuda/build/waypoint_follower/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"907097904@qq.com"
] |
907097904@qq.com
|
efaec29f5e6d47e75369e75f63b91d4a4bcd3b8a
|
589d36690594f8ddfc1b47bd3a3781abcff2bb6d
|
/test2.py
|
04f69123eac59477f23a64729e8b13ca222673a3
|
[] |
no_license
|
dwij2812/Robotics-makeathon
|
4275beb660dff128a6aef06dfc0025c9e980a57d
|
fd12aba84374a6320dfe4b2bced40e89d5dc2388
|
refs/heads/master
| 2023-07-25T01:27:38.379290
| 2018-06-05T08:01:00
| 2018-06-05T08:01:00
| 106,436,126
| 1
| 1
| null | 2022-03-07T08:42:14
| 2017-10-10T15:31:23
|
Java
|
UTF-8
|
Python
| false
| false
| 1,330
|
py
|
import serial
import time
import requests
import json
import re
firebase_url = 'https://iot-data-d6cb8.firebaseio.com/'
#Connect to Serial Port for communication
ser = serial.Serial('COM11', 250000, timeout=0)
#Setup a loop to send Temperature values at fixed intervals
#in seconds
fixed_interval = 60
while 1:
x = []
try:
#temperature value obtained from Arduino + DH11 Temp Sensor
sensor_c = ser.readline().decode('utf-8')
x=re.split(" ",sensor_c)
#current time and date
time_hhmmss = time.strftime('%H:%M:%S')
date_mmddyyyy = time.strftime('%d/%m/%Y')
#current location name
#print (temperature_c + ',' + time_hhmmss + ',' + date_mmddyyyy + ',' + temperature_location)
#insert record
if(len(x)==5):
data = {'date':date_mmddyyyy,'time':time_hhmmss,'Temperature':x[0],'Humidity':x[1],'HeartRate_constant':x[2],'Shock Switch':x[3],'Alert Button Press Status':x[4]}
result = requests.post(firebase_url + '/sensor.json', data=json.dumps(data))
print ('Record inserted. Result Code = ' + str(result.status_code) + ',' + result.text)
time.sleep(fixed_interval)
else:
print("Please Wait Initializing......")
except IOError:
print('Error! Something went wrong.')
time.sleep(fixed_interval)
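# Assumed serial line format (my inference from the len(x) == 5 check above):
# "<temperature> <humidity> <heart_rate> <shock_switch> <alert_button>"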
|
[
"noreply@github.com"
] |
dwij2812.noreply@github.com
|
8f9842cabc131fddc1025c2ab9121b0af86a3297
|
d9a65120e6b8d20d3b568acde8ceb66f908d1ffc
|
/django1/src/vote/urls.py
|
68755a03d624470c3b5e239836982709943bda16
|
[] |
no_license
|
omniverse186/django1
|
aba57d705bd7b3a142f627e566853811038d6d6c
|
f257c34c9d09467170a5f3bd24598d97dcf64f4f
|
refs/heads/master
| 2020-04-21T23:11:17.677609
| 2019-02-10T03:20:47
| 2019-02-10T03:20:47
| 169,938,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
'''
Created on 2019. 1. 20.
@author: user
'''
# Sub (app-level) URLConf
# app_name: namespace for the URLs registered in this sub-URLConf file
# urlpatterns: list registering URL patterns together with their view functions
from django.urls import path
from .views import *
app_name = 'vote'
urlpatterns = [
    # name: alias for this URL/view registration (used for reversing)
path('', index, name= 'index'),
path('<int:q_id>/', detail, name='detail'),
path('vote/', vote, name='vote'),
path('result/<int:q_id>',result, name='result'),
path('qr/', qregister, name='qr' ),
path('qu/<int:q_id>/', qupdate, name = 'qu'),
path('qd/<int:q_id>/', qdelete, name='qd'),
path('cr/', cregister, name='cr'),
path('cu/<int:c_id>/', cupdate, name='cu'),
path('cd/<int:c_id>/', cdelete, name='cd')
]
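# Illustrative reversal (my addition, assuming this URLConf is included at the
# project root): reverse('vote:detail', args=[3]) -> '/3/'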
|
[
"user@DESKTOP-37GULAI"
] |
user@DESKTOP-37GULAI
|
eb34fbed2c2a2dc76d32eeb205b7cf2a7f14c188
|
139641b0f4b047a6e980e2acc817f013ca5b487f
|
/Assignment 1/Question3.py
|
d60a6fd69032b2fb326124bad2a9a21bca226a89
|
[] |
no_license
|
JagjitUvic/MIR
|
7094b131443e82c9fb8825f5b1f03011a8770c7e
|
675f1fcc8b99cb899efff5864104251a4fc3e602
|
refs/heads/master
| 2020-05-23T10:16:02.855874
| 2017-02-16T00:22:05
| 2017-02-16T00:22:05
| 80,409,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.fftpack
import math
import mir
from mir import Sinusoid
sin1 = Sinusoid(Fs=256, amp=5, freq=20, phase=5)
N = len(sin1.data)
print(N)  # Python 3 call syntax; the original used the Python 2 print statement
signal = []
bin = 4
k = bin
cosa = []
sina = []
for n in range(N - 1):
theta = 2*math.pi*k*n/N
cosa.append(math.cos(theta))
sina.append(math.sin(theta))
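# cosa and sina now hold the real (cosine) and imaginary (sine) parts of the
# N-point DFT basis vector for bin k = 4 (descriptive note, my addition).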
plt.plot(cosa)
plt.show()
plt.plot(sina)
plt.show()
|
[
"jagjitsingh.mail@gmail.com"
] |
jagjitsingh.mail@gmail.com
|
dab84d76a501ac997c5b01d258ae27a60538ddb6
|
4bf7b9ef50a28be3a2ea327ab3d89464b5c8fe9c
|
/Atividades - Python/Exerc.010 - Pedra Papel e Tesoura.py
|
0b197f29447e93c1191de2ec3766d97e0614da60
|
[
"MIT"
] |
permissive
|
mfre1re/Diversas-atividades-em-Python
|
072428d1a7fe400afd27b82005209999ea9f42bf
|
b3ca38dd71a6b333c4016838df0a822782b51619
|
refs/heads/main
| 2023-08-15T12:30:42.703641
| 2021-09-30T20:53:01
| 2021-09-30T20:53:01
| 408,258,204
| 0
| 0
|
MIT
| 2021-09-30T20:53:02
| 2021-09-19T23:17:18
|
Python
|
UTF-8
|
Python
| false
| false
| 689
|
py
|
from random import choice
print('Vamos jogar Pedra, Papel ou Tesoura?')
jg = str(input('Escolha sua mão: ')).strip()
jg = jg.lower()
lista = ['pedra', 'papel', 'tesoura']
cpu = choice(lista)
print('O computador escolheu {}.'.format(cpu))
if cpu == jg:
print('Empate')
elif cpu == 'pedra' and jg == 'tesoura':
print('Vitória cpu.')
elif cpu == 'pedra' and jg == 'papel':
print('Vitória jogador.')
elif cpu == 'papel' and jg == 'tesoura':
print('Vitória jogador.')
elif cpu == 'papel' and jg == 'pedra':
print('Vitória cpu.')
elif cpu == 'tesoura' and jg == 'papel':
print('Vitória cpu.')
elif cpu == 'tesoura' and jg == 'pedra':
print('Vitória jogador.')
|
[
"msfreire52@gmail.com"
] |
msfreire52@gmail.com
|
42602d680129030ee14c9f88495ddc649c91fac2
|
fc3a18196a9fa7a3e49700ce913350fe1a6e5d55
|
/siteplay/siteplay/urls.py
|
006dfc0851a8e93285b35e4980c84eb37f384cdc
|
[] |
no_license
|
luizxx/Projeto-de-exercicios
|
81e6ef9c874b4543de6a4726655292bc9f18cd9d
|
6d369b2d8037400662822d0a659c96c098c7e81b
|
refs/heads/main
| 2023-06-29T22:33:30.649421
| 2021-08-02T01:29:42
| 2021-08-02T01:29:42
| 390,143,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
"""siteplay URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
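# Note (my addition): Django's static() helper only serves files when
# settings.DEBUG is true; in production, media should be served by the web server.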
|
[
"luizotavio308@gmail.com"
] |
luizotavio308@gmail.com
|
c5314ca04cf676a9eb3a9ed3337967a8e8814205
|
13b91cc2cf33b79230b971d7496abaaeca05a760
|
/Au plus proche dernière version.py
|
2dda3e6af08940dfc9ca17d99dfb75e9cc76ee10
|
[] |
no_license
|
DevauxRobin/Projet-Libre
|
5657a5e2fa6c2f63a87afa10845a44e6e4837bed
|
0de22a51eee2378bc664cee71ffb27559a2a1e2e
|
refs/heads/main
| 2023-04-28T21:40:28.232230
| 2021-05-27T12:27:16
| 2021-05-27T12:27:16
| 369,522,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,737
|
py
|
### Import the random module for random numbers and tkinter for the interface
import random
import tkinter as tk
def test(event=None):
global enter_number, random_number, infos
    ### Retrieve the entered number
number = enter_number.get()
    ### Check the input is a decimal string and convert it to an integer
if number.isdigit():
number_proposition = int(number)
        ### Result
if number_proposition < random_number:
infos.set("Le nombre est plus haut.")
elif number_proposition > random_number:
infos.set("Le nombre est plus bas.")
else:
infos.set("Gagné !")
window.destroy()
quit()
    ### Prompt again
else:
infos.set("Entre un nombre entre 1 et 75 :")
### Pick the secret number and the Tkinter window colour
random_number = random.randint(1, 75)
color = "#B22222"
### Tkinter window
window = tk.Tk()
window.geometry("1200x600")
window.title("Au plus proche")
window.resizable(width=False, height=False)
window.config(bg=color)
frame = tk.Frame(window)
frame.pack(expand=True)
### Entry widget for the guess and the Validate button
enter_number = tk.Entry(frame)
enter_number.bind('<Return>', test)
enter_number.focus()
enter_number.pack()
button = tk.Button(frame, text="Valider", command=test)
button.pack()
### Tkinter information text
infos = tk.StringVar()
infos.set("Bonne chance ^^!")
information = tk.Label(window, textvariable=infos, bg=color)
information.place(x=550, y=220)
### Run the event loop to display everything
window.mainloop()
|
[
"noreply@github.com"
] |
DevauxRobin.noreply@github.com
|
83f41dac89a9225fb4404d1d8cc88b32b064ac52
|
8a17fa08f56746ec9dfc1a7043f525b60661477d
|
/递归/P77:Combinations.py
|
4743940f0e1d3c88022de0a063683425a7bf636d
|
[] |
no_license
|
BlueSky23/MyLeetCode
|
9b59ae2a5523c996755c3c56ba84943864fc7eab
|
454f804902428e410d1aafec13c1fcde2435f8d0
|
refs/heads/master
| 2021-07-10T11:53:16.830113
| 2020-12-22T02:28:58
| 2020-12-22T02:28:58
| 223,888,744
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
# Combinatorial identity: c(k,m) = c(k,m-1) + c(k-1,m-1)  (choose k from m)
# Mind the boundary conditions
class Solution:
def combine(self, n: int, k: int):
if n <= 0 or k <= 0:
return
if n < k:
return
if k == 1:
return [[i] for i in range(1, n + 1)]
if k == n:
return [[i for i in range(1, n + 1)]]
tmp1 = self.combine(n - 1, k)
tmp2 = self.combine(n - 1, k - 1)
if tmp1 and tmp2:
for ls in tmp2:
ls.append(n)
tmp1.extend(tmp2)
return tmp1
s = Solution()
print(s.combine(4,2))
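# Sanity check (my addition, not in the original file): the count should match
# the binomial coefficient from the standard library (Python 3.8+).
from math import comb
assert len(s.combine(4, 2)) == comb(4, 2) == 6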
|
[
"liupeipeng@gmail.com"
] |
liupeipeng@gmail.com
|
c1b7e51aa3e1c1daed4fdf1a97394c3827b40141
|
8ec7a3e598571902d60c76fe4063401fd44c518a
|
/DataScience-Python3/venv/python/Scripts/pip3.7-script.py
|
ff4b1bc083d0e7dbc906957a27986d99ab9c403b
|
[] |
no_license
|
MinerNJ/PythonProjects
|
6af1e37c07f23cc8e81a128c3f0ab66f42733689
|
86dba59b3c1b254697c979e6115b1bd60a980cdf
|
refs/heads/master
| 2020-05-18T14:52:09.953738
| 2020-02-06T22:11:03
| 2020-02-06T22:11:03
| 184,481,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
#!C:\Users\nickm\PycharmProjects\DataScience\venv\python\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
|
[
"Nick.Miner18@gmail.com"
] |
Nick.Miner18@gmail.com
|
b59fa9ec2f490e6e84a85e73f44ac57ac0aa19e8
|
3fa2b0ea8b6893b4c2b54337f75fdafce03ef924
|
/BeginnerSnippets.py
|
7ea547733df49a622a60a681f6f9d89d4f8b34ad
|
[] |
no_license
|
KUHOO-S/Scripts-and-DA
|
0ab88f8fc9be1df458ef958cfa996205919bdb93
|
cd92360c354a7ecf6d491a9ec90add07ee90e5a9
|
refs/heads/master
| 2022-12-19T14:46:47.878377
| 2020-09-30T18:38:35
| 2020-09-30T18:38:35
| 298,450,996
| 0
| 6
| null | 2020-10-01T09:10:35
| 2020-09-25T02:50:29
|
Python
|
UTF-8
|
Python
| false
| false
| 247
|
py
|
def foo(x):
def retfun(y):
return y * x
return retfun
def bar(f1, f2):
def newfun(y):
return f1(y) / f2(y)
return newfun
def a(z):
return z * 10
b = foo(2)
c = bar(a, b)
print(a(5))
print(b(5))
print(c(5))
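# Expected output (my addition): a(5) -> 50, b(5) -> 5 * 2 = 10,
# c(5) -> a(5) / b(5) = 50 / 10 = 5.0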
|
[
"kuhoo1999@rediffmail.com"
] |
kuhoo1999@rediffmail.com
|
ee93f67a0d3be9541f6e49464b2c681f66d57dcb
|
3b5b3ab1a5e1cfead4d09671333378995eadbc01
|
/Week_08_model_comparison/utils.py
|
fabad819cc066526228fc5907caaef99c3106961
|
[] |
no_license
|
piecesofmindlab/UNR_PSY_763
|
01ca6638fb6a2be956d8e697b444e781ad47ef80
|
d27c9007a79b90ee595021ab170ec6a510042143
|
refs/heads/master
| 2022-04-10T09:01:11.794455
| 2020-03-13T12:19:56
| 2020-03-13T12:19:56
| 118,689,922
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,924
|
py
|
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import itertools as itools
def column_corr(A, B, dof=0):
"""Efficiently compute correlations between columns of two matrices
Does NOT compute full correlation matrix btw `A` and `B`; returns a
vector of correlation coefficients. """
zs = lambda x: (x-np.nanmean(x, axis=0))/np.nanstd(x, axis=0, ddof=dof)
rTmp = np.nansum(zs(A)*zs(B), axis=0)
n = A.shape[0]
# make sure not to count nans
nNaN = np.sum(np.logical_or(np.isnan(zs(A)), np.isnan(zs(B))), 0)
n = n - nNaN
r = rTmp/n
return r
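# Quick demo of column_corr (my addition, not in the original file): correlating
# a matrix with itself yields ones for every column:
#   A = np.random.randn(100, 3)
#   column_corr(A, A)  ->  array([1., 1., 1.])  (up to floating-point error)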
def compute_noise_ceil(data):
"""Computes noise ceiling as mean pairwise correlation between repeats
Parameters
----------
data : array-like
repeated data; should be (repeats x time x samples [voxels])
Returns
-------
cc : vector
correlation per sample (voxel)
TO DO
-----
Make this (optionally) more memory-efficient, with correlations
computed in chunks
"""
n_rpts, n_t, n_samples = data.shape
# Get all pairs of data
pairs = [p for p in itools.combinations(np.arange(n_rpts), 2)]
# Preallocate
r = np.nan*np.zeros((n_samples, len(pairs)))
for p, (a, b) in enumerate(pairs):
r[:, p] = column_corr(data[a], data[b])
    cc = np.nanmean(r, 1)
return cc
def find_squarish_dimensions(n):
    '''Get row, column dimensions for n elements
Returns (nearly) sqrt dimensions for a given number. e.g. for 23, will
return [5, 5] and for 26 it will return [6, 5]. For creating displays of
sets of images, mostly. Always sets x greater than y if they are not
equal.
Returns
-------
x : int
larger dimension (if not equal)
y : int
smaller dimension (if not equal)
'''
sq = np.sqrt(n)
if round(sq)==sq:
# if this is a whole number - i.e. a perfect square
return sq, sq
# One: next larger square
x = [np.ceil(sq)]
y = [np.ceil(sq)]
opt = [x[0]*y[0]]
# Two: immediately surrounding numbers
x += [np.ceil(sq)]
y += [np.floor(sq)]
opt += [x[1]*y[1]]
test = np.array([o-n for o in opt])
# Make sure negative values will not be chosen as the minimum
test[test < 0] = np.inf
idx = np.argmin(test)
x = x[idx]
y = y[idx]
return x, y
def slice_3d_array(volume, axis=2, fig=None, vmin=None, vmax=None, cmap=plt.cm.gray, nr=None, nc=None,
figsize=None):
'''Slices 3D matrix along arbitrary axis
Parameters
----------
volume : array (3D)
data to be sliced
axis : int | 0, 1, [2] (optional)
axis along which to divide the matrix into slices
Other Parameters
----------------
vmin : float [max(volume)] (optional)
color axis minimum
vmax : float [min(volume)] (optional)
color axis maximum
cmap : matplotlib colormap instance [plt.cm.gray] (optional)
nr : int (optional)
number of rows
nc : int (optional)
number of columns
'''
if nr is None or nc is None:
nc, nr = find_squarish_dimensions(volume.shape[axis])
if figsize is None:
figsize = (10, nr/nc * 10)
if fig is None:
fig = plt.figure(figsize=figsize)
if vmin is None:
vmin = volume.min()
if vmax is None:
vmax = volume.max()
ledges = np.linspace(0, 1, nc+1)[:-1]
bedges = np.linspace(1, 0, nr+1)[1:]
width = 1/float(nc)
height = 1/float(nr)
bottoms, lefts = zip(*list(itools.product(bedges, ledges)))
for ni, sl in enumerate(np.split(volume, volume.shape[axis], axis=axis)):
#ax = fig.add_subplot(nr, nc, ni+1)
ax = fig.add_axes((lefts[ni], bottoms[ni], width, height))
ax.imshow(sl.squeeze(), vmin=vmin, vmax=vmax, interpolation="nearest", cmap=cmap)
ax.set_xticks([])
ax.set_yticks([])
return fig
|
[
"mark.lescroart@gmail.com"
] |
mark.lescroart@gmail.com
|
a688ca2e222977722e0df277f47979059d2e8e1b
|
99eb4013a12ddac44042d3305a16edac1c9e2d67
|
/test/test_raw_shape_map.py
|
1a6b72fc298a5b35beaa25426e64cdf336fc34fa
|
[
"Apache-2.0"
] |
permissive
|
DaniFdezAlvarez/shexer
|
cd4816991ec630a81fd9dd58a291a78af7aee491
|
7ab457b6fa4b30f9e0e8b0aaf25f9b4f4fcbf6d9
|
refs/heads/master
| 2023-05-24T18:46:26.209094
| 2023-05-09T18:25:27
| 2023-05-09T18:25:27
| 132,451,334
| 24
| 2
|
Apache-2.0
| 2023-05-03T18:39:57
| 2018-05-07T11:32:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,212
|
py
|
import unittest
from shexer.shaper import Shaper
from test.const import G1, BASE_FILES, default_namespaces
from test.t_utils import file_vs_str_tunned_comparison
import os.path as pth
from shexer.consts import TURTLE
_BASE_DIR = BASE_FILES + "shape_map" + pth.sep
class TestRawShapeMap(unittest.TestCase):
def test_node(self):
shape_map = "<http://example.org/Jimmy>@<Person>"
shaper = Shaper(graph_file_input=G1,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True,
shape_map_raw=shape_map
)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "a_node.shex",
str_target=str_result))
def test_prefixed_node(self):
shape_map = "ex:Jimmy@<Person>"
shaper = Shaper(graph_file_input=G1,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True,
shape_map_raw=shape_map
)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "a_node.shex",
str_target=str_result))
def test_focus(self):
shape_map = "{FOCUS a foaf:Person}@<Person>"
shaper = Shaper(graph_file_input=G1,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True,
shape_map_raw=shape_map
)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "focus_nodes.shex",
str_target=str_result))
def test_focus_wildcard(self):
shape_map = "{FOCUS foaf:name _}@<WithName>"
shaper = Shaper(graph_file_input=G1,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True,
shape_map_raw=shape_map
)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "focus_and_wildcard.shex",
str_target=str_result))
def test_sparql_selector(self):
shape_map = "SPARQL \"select ?p where { ?p a foaf:Person }\"@<Person>"
shaper = Shaper(graph_file_input=G1,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True,
shape_map_raw=shape_map
)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "focus_nodes.shex",
str_target=str_result))
def test_several_shapemap_items(self):
shape_map = "{FOCUS a foaf:Person}@<Person>\n{FOCUS a foaf:Document}@<Document>"
shaper = Shaper(graph_file_input=G1,
namespaces_dict=default_namespaces(),
all_classes_mode=False,
input_format=TURTLE,
disable_comments=True,
shape_map_raw=shape_map
)
str_result = shaper.shex_graph(string_output=True)
self.assertTrue(file_vs_str_tunned_comparison(file_path=_BASE_DIR + "several_shm_items.shex",
str_target=str_result))
|
[
"danifdezalvarez@gmail.com"
] |
danifdezalvarez@gmail.com
|
ed15b992f0c49401af146f3197cca0d0290ff145
|
ffde7327de8f5d3428ff7359ada54bdfcaf3a936
|
/tango_with_django_project/rango/migrations/0002_auto_20141118_0312.py
|
5f6bbb602a621d1aadffff8d920325a85f0a5489
|
[] |
no_license
|
kittozheng/Learning_projects
|
f21eaeadcddfae9a66f5e91b0cfebff1a0fcd52e
|
4a7aacaff28f8d9335614bac6479314ccdb8a4e6
|
refs/heads/master
| 2021-01-01T18:37:07.701932
| 2014-12-20T06:44:36
| 2014-12-20T06:44:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('rango', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='page',
name='category',
),
migrations.DeleteModel(
name='Category',
),
migrations.DeleteModel(
name='Page',
),
]
|
[
"zhengzhijie@acttao.com"
] |
zhengzhijie@acttao.com
|
abd2f183813a26b39b95507feb4bb61cfdad6e52
|
56c5bd20684a87ba522099ac0c2d22032c172662
|
/menu_service/migrations/0005_auto_20210519_1103.py
|
6f46237e29642fbb325f33ae9ac10b1759a08e9d
|
[] |
no_license
|
germanTM/Backend-Test-Torres-Molina
|
3fa30c0c1aed8525ef355f41fd00152e57126cd3
|
330de56a79d87875568b166f2bb17cbc2e72a060
|
refs/heads/main
| 2023-05-10T15:12:05.936666
| 2021-05-26T02:58:53
| 2021-05-26T02:58:53
| 370,290,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
# Generated by Django 3.0.8 on 2021-05-19 11:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('menu_service', '0004_auto_20210519_1102'),
]
operations = [
migrations.RenameField(
model_name='dish_ingredient',
old_name='dish_Id',
new_name='dish',
),
migrations.RenameField(
model_name='dish_ingredient',
old_name='ingredient_Id',
new_name='ingredient',
),
]
|
[
"german211221@gmail.com"
] |
german211221@gmail.com
|
a1d32b5401517d39248b3b290e7fb43137f0b596
|
7670ea280776c304c03702c434bc572a9baf9f6d
|
/btc.py
|
bafd2aa031f870d404d12625e665eab5cc0377dc
|
[] |
no_license
|
Aldobareto/bot3tele
|
4aad6a49e93bf5d9f604b740095ea968d5200951
|
d1c38af43f602ffd80ed5777ded50878ad5874cf
|
refs/heads/master
| 2020-06-22T03:42:36.197566
| 2019-06-29T18:33:22
| 2019-06-29T18:33:22
| 197,623,621
| 1
| 0
| null | 2019-07-18T16:41:33
| 2019-07-18T16:41:33
| null |
UTF-8
|
Python
| false
| false
| 12,007
|
py
|
import marshal
# AW 2000
# donate doge D5YsVdHvKqq2apzQPGeTcS9SNjuskuM8HM
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00@\x00\x00\x00sx\x06\x00\x00d\x00d\x01l\x00m\x01Z\x01m\x02Z\x02m\x03Z\x03\x01\x00d\x00d\x02l\x04m\x05Z\x05m\x06Z\x06\x01\x00d\x00d\x03l\x07m\x08Z\x08\x01\x00d\x00d\x04l\x07m\tZ\t\x01\x00d\x00d\x05l\nm\x0bZ\x0b\x01\x00d\x00d\x06l\x0cZ\x0cd\x00d\x06l\rZ\rd\x00d\x06l\x0eZ\x0ed\x00d\x06l\x0fZ\x0fy\x18d\x00d\x06l\x10Z\x10d\x00d\x07l\x11m\x12Z\x12\x01\x00W\x00n\x1c\x01\x00\x01\x00\x01\x00e\x13d\x08\x83\x01\x01\x00e\x0e\xa0\x14\xa1\x00\x01\x00Y\x00n\x02X\x00d\tZ\x15e\x13e\x15\x83\x01\x01\x00e\x16e\x0ej\x17\x83\x01d\nk\x00r\xcae\x13d\x0b\x83\x01\x01\x00e\x0e\xa0\x14d\x0c\xa1\x01\x01\x00d\rd\x0e\x84\x00Z\x18e\x10\xa0\x19\xa1\x00Z\x1ad\x0fd\x10i\x01Z\x1bd\x11Z\x1cd\x12Z\x1de\x0ej\x17d\x0c\x19\x00Z\x1ee\x01d\x13e\x1e\x17\x00e\x1ce\x1d\x83\x03Z\x1fe\x1f\xa0 \xa1\x00\x01\x00e\x1f\xa0!\xa1\x00\x90\x01s`y\x1ee\x1f\xa0"e\x1e\xa1\x01\x01\x00e\x1f\xa0#e\x1ee$d\x14\x83\x01\xa1\x02Z%W\x00n*\x04\x00e\x08k\n\x90\x01r^\x01\x00\x01\x00\x01\x00e$d\x15\x83\x01Z&e\x1f\xa0\'e\x1ee&\xa1\x02Z%Y\x00n\x02X\x00e\x1f\xa0(\xa1\x00Z)e\x0f\xa0*d\x16\xa1\x01\x01\x00e\x13e\x15\x83\x01\x01\x00e\x13d\x17\x83\x01\x01\x00e\x13d\x18e)j+d\x19\x83\x03\x01\x00e\x13d\x1a\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d\x1b\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d\x1c\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d\x1d\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d\x1e\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d\x1f\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d \x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d!\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d"\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d#\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d$\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x13d%\x83\x01\x01\x00\x90\x04z\x16e\x1f\xa0,d&\xa1\x01Z-d&Z.\x90\x04x\x00e/d\'\x83\x01D\x00\x90\x03]\xf2Z0e\x0ej1\xa02d(\xa1\x01\x01\x00e\x0ej1\xa02d)\xa1\x01\x01\x00e\x0ej1\xa02d(\xa1\x01\x01\x00e\x0ej1\xa02d*\xa1\x01\x01\x00e\x0ej1\xa03\xa1\x00\x01\x00e\x1fj4e-d+d,\x8d\x02\x01\x00e\x0bd-\x83\x01\x01\x00e\x1fe\x05e-d\x0cd\x06d\x00d\x00d\x00d\x00d\x00d.\x8d\x08\x83\x01Z5e5j6d\x00\x19\x00j7\xa08d/\xa1\x01d0k\x03\x90\x03r\x04e\x13d1\x83\x01\x01\x00e\x0e\xa0\x14\xa1\x00\x01\x00\x90\x02qf\x90\x02yze5j6d\x00\x19\x00j9j:d\x00\x19\x00j;d\x00\x19\x00j<Z<e\x0ej1\xa02d(\xa1\x01\x01\x00e\x0ej1\xa02d2e<\x17\x00\xa1\x01\x01\x00e\x0ej1\xa03\xa1\x00\x01\x00e5j6d\x00\x19\x00j=Z=e\x1aj>e<e\x1bd3d4d5\x8d\x04Z?e\x12e?j@d6\x83\x02ZAeAj8d7d8d9\x8d\x02d\x06k\x08\x90\x04rheAj8d7d:d;\x8d\x02d\x06k\x08\x90\x04rhe\x0bd\n\x83\x01\x01\x00e\x1fe\x05e-d\x0cd\x06d\x00d\x00d\x00d\x00d\x00d.\x8d\x08\x83\x01Z5e5j6d\x00\x19\x00j7Z7e5j6d\x00\x19\x00j7\xa08d<\xa1\x01d0k\x03\x90\x03s\xfae5j6d\x00\x19\x00j7\xa08d=\xa1\x01d0k\x03\x90\x05r~e\r\xa0Bd>e7\xa1\x02ZCe\x18eDeCd\x00\x19\x00\x83\x01\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x1fe\x05e-d\nd\x06d\x00d\x00d\x00d\x00d\x00d.\x8d\x08\x83\x01Z5e5j6d\x0c\x19\x00j7ZEe\x0bd\x0c\x83\x01\x01\x00e\x0ej1\xa02d?eE\x17\x00d@\x17\x00\xa1\x01\x01\x00n\x00\x90\x01n\x16eAj8d7d:d;\x8d\x02d\x06k\t\x90\x05r\x00x\x80eAjFd7dAd9\x8d\x02D\x00]nZGeG\xa0>dB\xa1\x01ZHeG\xa0>dC\xa1\x01ZIeG\xa0>dD\xa1\x01ZJe\x18eDeI\x83\x01\x83\x01\x01\x00e\x1ajKdEeHeJdF\x9c\x02e\x1bd3d4dG\x8d\x05Z?e\x0c\xa0Le?jM\xa1\x01ZNe\x0ej1\xa02dHeNdI\x19\x00\x17\x00dJ\x17\x00\xa1\x01\x01\x00\x90\x04q\x8cW\x00n~e\x0ej1\xa02d(\xa1\x01\x01\x00e\x0ej1\xa02dK\xa1\x01\x01\x00e\x0ej1\xa02d(\xa1\x01\x01\x00e\x0ej1\xa02dL\xa1\x01\x01\x00e\x0ej1\xa03\xa1\x00\x01\x00e\x0bd\n\x83\x01\x01\x00e\x1fe\
x06e.e=e5j6d\x00\x19\x00j9j:d\x00\x19\x00j;d\x0c\x19\x00jOdM\x8d\x03\x83\x01\x01\x00e\x0ej1\xa02dN\xa1\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00W\x00n\xd6\x01\x00\x01\x00\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x1fe\x05e-d\x0cd\x06d\x00d\x00d\x00d\x00d\x00d.\x8d\x08\x83\x01Z5e5j6d\x00\x19\x00j7Z7e5j6d\x00\x19\x00j7\xa08d<\xa1\x01d0k\x03\x90\x05s\xe8e5j6d\x00\x19\x00j7\xa08d=\xa1\x01d0k\x03\x90\x06rRe\r\xa0Bd>e7\xa1\x02ZCe\x18eDeCd\x00\x19\x00\x83\x01\x83\x01\x01\x00e\x0bd\x0c\x83\x01\x01\x00e\x1fe\x05e-d\nd\x06d\x00d\x00d\x00d\x00d\x00d.\x8d\x08\x83\x01Z5e5j6d\x0c\x19\x00j7ZEe\x0bd\n\x83\x01\x01\x00e\x0ej1\xa02d?eE\x17\x00d@\x17\x00\xa1\x01\x01\x00n\x00Y\x00n\x02X\x00\x90\x02qfW\x00W\x00d\x06e\x13dO\x83\x01\x01\x00e\x1f\xa0P\xa1\x00\x01\x00X\x00d\x06S\x00)P\xe9\x00\x00\x00\x00)\x03\xda\x0eTelegramClient\xda\x04sync\xda\x06events)\x02\xda\x11GetHistoryRequest\xda\x1bGetBotCallbackAnswerRequest)\x01\xda\x1aSessionPasswordNeededError)\x01\xda\x0eFloodWaitError)\x01\xda\x05sleepN)\x01\xda\rBeautifulSoupz\x80\x1b[1;32m# \x1b[1;31mModul Requests Dan Bs4 Belum Terinstall\n\x1b[1;30m# \x1b[1;31minstall modul : pip install requests and pip install bs4ug\x01\x00\x00\x1b[0;35m\n ___ __ ____ ___ ___ ___\n / \\ \\ / / |___ \\ / _ \\ / _ \\ / _ \\ \n / _ \\ \\ /\\ / / __) | | | | | | | | | |\n / ___ \\ V V / / __/| |_| | |_| | |_| |\n/_/ \\_\\_/\\_/ |_____|\\___/ \\___/ \\___/\n===============================================\n[\xe2\x80\xa2] Bot Btc click telegram auto skip captcha [\xe2\x80\xa2]\n[\xe2\x80\xa2] Subscribe yt AW 2000 [\xe2\x80\xa2]\n\xe9\x02\x00\x00\x00z$\n\n\n\x1b[1;32mUsage : python main.py +62\xe9\x01\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00sb\x00\x00\x00t\x00j\x01\xa0\x02d\x01\xa1\x01\x01\x00t\x00j\x01\xa0\x02d\x02\xa1\x01\x01\x00xDt\x03|\x00d\x03d\x04\x83\x03D\x00]4}\x01t\x00j\x01\xa0\x02d\x01\xa1\x01\x01\x00t\x00j\x01\xa0\x02d\x05\xa0\x04|\x01\xa1\x01\xa1\x01\x01\x00t\x00j\x01\xa0\x05\xa1\x00\x01\x00t\x06d\x06\x83\x01\x01\x00q&W\x00d\x00S\x00)\x07N\xfa\x01\rz? 
r\x01\x00\x00\x00\xe9\xff\xff\xff\xffz,\x1b[1;30m#\x1b[1;0m{:2d} \x1b[1;32mseconds remainingr\x0c\x00\x00\x00)\x07\xda\x03sys\xda\x06stdout\xda\x05write\xda\x05range\xda\x06format\xda\x05flushr\t\x00\x00\x00)\x02\xda\x01xZ\tremaining\xa9\x00r\x16\x00\x00\x00\xda\x00\xda\x06tunggu\x1d\x00\x00\x00s\x0e\x00\x00\x00\x00\x01\x0c\x01\x0c\x01\x12\x01\x0c\x01\x12\x01\n\x01r\x18\x00\x00\x00z\nUser-Agentz\x95Mozilla/5.0 (Linux; Android 5.1; A1603 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/43.0.2357.121 Mobile Safari/537.36iq\xf2\n\x00Z 322526d2c3350b1d3530de327cf08c07z\x08session/z \n\n\n\x1b[1;0mEnter Yout Code Code : z\x1a\x1b[1;0mYour 2fa Password : \xda\x05cleara\x80\x01\x00\x00\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=\x1b[1;35m=u\x1c\x00\x00\x00\x1b[1;32m[\xe2\x80\xa2] Your name is =>u(\x00\x00\x00\n\x1b[1;32m[\xe2\x80\xa2] Enjoy And Happy miner !!\n\nz\nproses....z\x10\x1b[1;33m+\x1b[1;33m+z \x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+z0\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+z@\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+zP\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+z`\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+zp\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+z\x80\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+z\x90\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+z\xa0\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+\x1b[1;33m+u!\x00\x00\x00\x1b[1;37m[\xe2\x80\xa2]Sending First commandz\x11@BitcoinClick_boti@KL\x00r\r\x00\x00\x00z> u\x1c\x00\x00\x00\x1b[1;35m[\xe2\x80\xa2] \x1b[1;33mopen URLu\x10\x00\x00\x00\xf0\x9f\x96\xa5 Visit sites)\x02Z\x06entity\xda\x07message\xe9\x03\x00\x00\x00)\x08Z\x04peerZ\x05limitZ\x0boffset_dateZ\toffset_idZ\x06max_idZ\x06min_idZ\nadd_offset\xda\x04hashz%Sorry, there are no new ads availabler\x0e\x00\x00\x00u"\x00\x00\x00\n\x1b[1;30m[\xe2\x80\xa2] \x1b[1;31miklan habis!\nu\x1a\x00\x00\x00\x1b[1;35m[\xe2\x80\xa2] \x1b[1;33mVisit \xe9\x0f\x00\x00\x00T)\x03\xda\x07headers\xda\x07timeout\xda\x0fallow_redirectsz\x0bhtml.parserZ\x03divz\x0bg-recaptcha)\x01Z\x06class_Z\x07headbar)\x01\xda\x02idz\rYou must stayz\x0ePlease stay onz\x0b([\\d.]*\\d+)u\x15\x00\x00\x00\r\x1b[1;35m[\xe2\x80\xa2] 
\x1b[1;32m\xda\x01\nz\x0fcontainer-fluidz\tdata-codez\ndata-timerz\ndata-tokenz\x1chttps://dogeclick.com/reward)\x02\xda\x04codeZ\x05token)\x04\xda\x04datar\x1e\x00\x00\x00r\x1f\x00\x00\x00r \x00\x00\x00u \x00\x00\x00\r\x1b[1;30m[\xe2\x80\xa2] \x1b[1;32mYou earned Z\x06rewardz\x1a BTC for visiting a site!\nz@ u$\x00\x00\x00\x1b[1;35m[\xe2\x80\xa2] \x1b[1;31mCaptcha Detected)\x01r$\x00\x00\x00u,\x00\x00\x00\r\x1b[1;35m[\xe2\x80\xa2] \x1b[1;31mSkipping chapta! \nZ\x04done)QZ\x08telethonr\x02\x00\x00\x00r\x03\x00\x00\x00r\x04\x00\x00\x00Z\x1etelethon.tl.functions.messagesr\x05\x00\x00\x00r\x06\x00\x00\x00Z\x0ftelethon.errorsr\x07\x00\x00\x00r\x08\x00\x00\x00Z\x04timer\t\x00\x00\x00Z\x04json\xda\x02rer\x0f\x00\x00\x00\xda\x02osZ\x08requestsZ\x03bs4r\n\x00\x00\x00\xda\x05print\xda\x04exitZ\x06banner\xda\x03len\xda\x04argvr\x18\x00\x00\x00Z\x07Session\xda\x01cZ\x02uaZ\x06api_idZ\x08api_hashZ\x0cphone_numberZ\x06clientZ\x07connectZ\x12is_user_authorizedZ\x11send_code_requestZ\x07sign_in\xda\x05input\xda\x02meZ\x05passw\xda\x05startZ\x06get_meZ\x06myself\xda\x06systemZ\nfirst_nameZ\nget_entityZ\x0echannel_entityZ\x10channel_usernamer\x12\x00\x00\x00\xda\x01ir\x10\x00\x00\x00r\x11\x00\x00\x00r\x14\x00\x00\x00Z\x0csend_messageZ\x05postsZ\x08messagesr\x1a\x00\x00\x00\xda\x04findZ\x0creply_markupZ\x04rowsZ\x07buttonsZ\x03urlr!\x00\x00\x00\xda\x03get\xda\x01rZ\x07contentZ\x04soupZ\x07findallZ\x03sec\xda\x03intZ\nmessageresZ\x08find_allZ\x03datr#\x00\x00\x00Z\x05timerZ\x06tokenaZ\x04post\xda\x05loads\xda\x04textZ\x02jsr$\x00\x00\x00Z\ndisconnectr\x16\x00\x00\x00r\x16\x00\x00\x00r\x16\x00\x00\x00r\x17\x00\x00\x00\xda\x08<module>\x01\x00\x00\x00s\x06\x01\x00\x00\x14\x01\x10\x01\x0c\x01\x0c\x01\x0c\x01 \x01\x02\x01\x08\x01\x10\x01\x06\x01\x08\x01\x0e\x0b\x04\x01\x08\x01\x0e\x01\x08\x01\n\x02\x08\n\x08\x01\x08\x04\x04\x01\x04\x01\n\x02\x10\x01\x08\x01\n\x01\x02\x01\n\x01\x14\x01\x10\x01\x08\x01\x12\x01\x08\x01\n\x01\x08\x01\x08\x01\x0e\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x02\x08\x01\x04\x01\n\x01\x04\x01\x12\x01\x0c\x01\x0c\x01\x0c\x01\x0c\x01\n\x01\x0e\x01\x08\x01\x1c\x01\x18\x01\x08\x01\x0c\x02\x04\x01\x1a\x01\x0c\x01\x10\x01\n\x01\x0c\x01\x12\x01\x0c\x01(\x01\x08\x01\x1c\x01\x0c\x010\x01\x0c\x01\x10\x01\x08\x01\x1c\x01\x0c\x01\x08\x01\x16\x02\x04\x03\x14\x01\x14\x01\n\x01\n\x01\n\x01\x0c\x01\x1a\x01\x0c\x01 \x02\x0c\x01\x0c\x01\x0c\x01\x0c\x01\n\x01\x08\x01\x04\x01\x02\x01\x02\x01 \x02\x0c\x01\x0c\x01\x06\x01\x08\x01\x1c\x01\x0c\x010\x01\x0c\x01\x10\x01\x08\x01\x1c\x01\x0c\x01\x08\x01\x16\x02\x10\x03\x08\x01'))
|
[
"noreply@github.com"
] |
Aldobareto.noreply@github.com
|
9a0b11d34c65a69702e5f2c0816a02e44ba81f6c
|
88395edcba1d35da9a39272bc94e7fc35c576b1b
|
/InitialFiltering.py
|
c219b53a0a235705d7bfc53f02b999469127e08e
|
[] |
no_license
|
vijendhervijendher/Box-office-prediction-using-different-models
|
1027b53a65f4141f52a8643d54c79f40173f007c
|
23653f12f06b3b175c89de77ce0bf55b66e50fb4
|
refs/heads/master
| 2020-03-16T04:13:41.029688
| 2018-05-07T19:58:42
| 2018-05-07T19:58:42
| 132,506,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,039
|
py
|
import pandas as pd
import json
import csv
import matplotlib.pyplot as plt
import numpy as np
movies_data = pd.read_csv('tmdb_5000_movies.csv')
credits_data = pd.read_csv('tmdb_5000_credits.csv')
# Combining the movie database data into a single csv file
output = pd.merge(movies_data,credits_data)
output.to_csv('InitialDataSet.csv')
df = pd.read_csv('InitialDataSet.csv')
# Dataset with json columns
json_columns = ['cast','crew','genres','production_companies']
for column in json_columns:
df[column] = df[column].apply(json.loads)
df_1 = df[['movie_id','title','budget','runtime','popularity','revenue','vote_average','vote_count','production_companies']].reset_index(drop=True)
df_1['runtime'] = df_1['runtime'].fillna(df_1['runtime'].mean())
def flatten_names(keywords):
return '|'.join([x['name'] for x in keywords])
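# For example, flatten_names([{'name': 'Action'}, {'name': 'Drama'}]) returns 'Action|Drama'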
# Extracting all the genres of a movie
df['genres'] = df['genres'].apply(flatten_names)
liste_genres = set()
for s in df['genres'].str.split('|'):
liste_genres = set().union(s, liste_genres)
liste_genres = list(liste_genres)
liste_genres.remove('')
# Splitting the genres into separate columns
for genre in liste_genres:
df_1[genre] = df['genres'].str.contains(genre).apply(lambda x:1 if x else 0)
def retreive_data(data, positions):
result = data
try:
for id in positions:
result = result[id]
return result
    except (IndexError, KeyError):
        return np.nan
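# Illustrative behavior (hypothetical values): retreive_data([{'id': 7}], [0, 'id'])
# follows each position in turn and returns 7; a missing position yields NaN instead.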
# Extracting the actors(cast) data from the json columns
df_1['Actor 1'] = df['cast'].apply(lambda x: retreive_data(x, [0, 'id']))
df_1['Actor 2'] = df['cast'].apply(lambda x: retreive_data(x, [1, 'id']))
df_1['Actor 3'] = df['cast'].apply(lambda x: retreive_data(x, [2, 'id']))
df_1['Actor 4'] = df['cast'].apply(lambda x: retreive_data(x, [3, 'id']))
# Filling the missing values in the columns
df_1['Actor 1'] = df_1['Actor 1'].fillna('0')
df_1['Actor 2'] = df_1['Actor 2'].fillna('0')
df_1['Actor 3'] = df_1['Actor 3'].fillna('0')
df_1['Actor 4'] = df_1['Actor 4'].fillna('0')
# Extracting the names of directors from the crew column and filling the missing values in the column
def director_name(crew_data):
directors = [x['id'] for x in crew_data if x['job'] == 'Director']
return retreive_data(directors, [0])
df_1['Director'] = df['crew'].apply(director_name)
df_1['Director'] = df_1['Director'].fillna('0')
# Extracting the names of director of photography from the crew column and filling the missing values in the column
def dop_name(crew_data):
dop = [x['id'] for x in crew_data if x['job'] == 'Director of Photography']
return retreive_data(dop, [0])
df_1['DOP'] = df['crew'].apply(dop_name)
df_1['DOP'] = df_1['DOP'].fillna('0')
# Extracting the name of writer from the crew column and filling the missing values in the column
def writer_name(crew_data):
writer = [x['id'] for x in crew_data if x['job'] == 'Writer']
return retreive_data(writer, [0])
df_1['Writer'] = df['crew'].apply(writer_name)
df_1['Writer'] = df_1['Writer'].fillna('0')
# Extracting the names of screenplay head from the crew column and filling the missing values in the column
def screenplay(crew_data):
screenplay = [x['id'] for x in crew_data if x['job'] == 'Screenplay']
return retreive_data(screenplay, [0])
df_1['Screenplay'] = df['crew'].apply(screenplay)
df_1['Screenplay'] = df_1['Screenplay'].fillna('0')
# Extracting the names of music composers from the crew column and filling the missing values in the column
def music_composer_name(crew_data):
music_composer = [x['id'] for x in crew_data if x['job'] == 'Original Music Composer']
return retreive_data(music_composer, [0])
df_1['Music Composer'] = df['crew'].apply(music_composer_name)
df_1['Music Composer'] = df_1['Music Composer'].fillna('0')
# Extracting the names of stuntman from the crew column and filling the missing values in the column
def stunts_name(crew_data):
stunts = [x['id'] for x in crew_data if x['job'] == 'Stunts']
return retreive_data(stunts, [0])
df_1['Stunts Director'] = df['crew'].apply(stunts_name)
df_1['Stunts Director'] = df_1['Stunts Director'].fillna('0')
# Extracting the names of producers from the crew column and filling the missing values in the column
def producer_name(crew_data):
producer = [x['id'] for x in crew_data if x['job'] == 'Producer']
return retreive_data(producer, [0])
df_1['Producer'] = df['crew'].apply(producer_name)
df_1['Producer'] = df_1['Producer'].fillna('0')
# Extracting the names of production companies from the crew column and filling the missing values in the column
def production_company_name(production_data):
pro = [x['id'] for x in production_data]
return retreive_data(pro, [0])
df_1['production_companies'] = df['production_companies'].apply(production_company_name)
df_1['production_companies'] = df_1['production_companies'].fillna('0')
# Extracting the release year and month of a movie
from datetime import datetime
dt = df['release_date']
data = pd.to_datetime(dt)
month = data.dt.month
df_1['Release_month'] = month
year = data.dt.year
df_1['Release_year'] = year
df1 = pd.DataFrame(df_1)
df1.to_csv("output.csv",sep=',',index=False)
# Retrieving the popularity information of the cast and crew data from the TMDB API
import tmdbsimple as tmdb
import json
tmdb.API_KEY = '02d4d7373cb76210bc18a4a0912c0f31'
popularity_actor1 = []
popularity_actor2 = []
popularity_actor3 = []
popularity_actor4 = []
director = []
dop = []
screenplay = []
music_composer = []
producer = []
for i in df_1['Actor 1']:
try:
movie = tmdb.People(i)
response = movie.info()
popularity_actor1.append(response['popularity'] )
except:
popularity_actor1.append('0')
df_1['Popularity_Actor 1'] = popularity_actor1
for j in df_1['Actor 2']:
try:
movie = tmdb.People(j)
response = movie.info()
popularity_actor2.append(response['popularity'])
except:
popularity_actor2.append('0')
df_1['Popularity_Actor 2'] = popularity_actor2
for k in df_1['Actor 3']:
try:
movie = tmdb.People(k)
response = movie.info()
popularity_actor3.append(response['popularity'] )
except:
popularity_actor3.append('0')
df_1['Popularity_Actor 3'] = popularity_actor3
for m in df_1['Actor 4']:
try:
movie = tmdb.People(m)
response = movie.info()
popularity_actor4.append(response['popularity'] )
except:
popularity_actor4.append('0')
df_1['Popularity_Actor 4'] = popularity_actor4
for n in df_1['Director']:
try:
movie = tmdb.People(n)
response = movie.info()
director.append(response['popularity'] )
except:
director.append('0')
df_1['Popularity_Director'] = director
for o in df_1['DOP']:
try:
movie = tmdb.People(o)
response = movie.info()
dop.append(response['popularity'] )
except:
dop.append('0')
df_1['Popularity_DOP'] = dop
for p in df_1['Screenplay']:
try:
movie = tmdb.People(p)
response = movie.info()
screenplay.append(response['popularity'] )
except:
screenplay.append('0')
df_1['Popularity_Screenplay'] = screenplay
for q in df_1['Music Composer']:
try:
movie = tmdb.People(q)
response = movie.info()
music_composer.append(response['popularity'] )
except:
music_composer.append('0')
df_1['Popularity_MusicComposer'] = music_composer
for r in df_1['Producer']:
try:
movie = tmdb.People(r)
response = movie.info()
producer.append(response['popularity'] )
except:
producer.append('0')
df_1['Popularity_Producer'] = producer
df_1.to_csv('InitialDataSet.csv',index=False)
|
[
"noreply@github.com"
] |
vijendhervijendher.noreply@github.com
|
1c98f010be779b0df3ae626d838b4e5e5e86525c
|
d24e06a9fb04ada28de067be1b6be50a7a92f294
|
/Assignment1/svm_test.py
|
c916d079fcf86ddccff119130ecb3486e4f6dee4
|
[] |
no_license
|
sentientmachine/CS7641
|
3960b3e216f1eddc9a782318a9bf3ae38fed1959
|
a9a1369acfdd3e846e311c64498a38c8afd8fcc2
|
refs/heads/master
| 2020-12-25T03:11:46.621886
| 2017-12-24T12:24:14
| 2017-12-24T12:24:14
| 51,779,034
| 0
| 0
| null | 2016-02-15T19:17:10
| 2016-02-15T19:17:10
| null |
UTF-8
|
Python
| false
| false
| 4,649
|
py
|
import io
import pydotplus
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder, Imputer
#from sklearn.metrics import accuracy_score
from plot_curves import *
class rb_svm_test:
def __init__(self, x_train, x_test, y_train, y_test, x_col_names, data_label, cv):
self.x_train = x_train
self.x_test = x_test
self.y_train = y_train
self.y_test = y_test
self.x_col_names = x_col_names
self.data_label = data_label
self.cv = cv
def run_cv_model(self, C=1.0, degree=3, cache_size=200, do_plot=True):
# use k-fold cross validation
# we need to standardize the data for the KNN learner
pipe_clf = Pipeline([ ('scl', StandardScaler() ),
('clf', SVC(C=C, degree=degree, cache_size=cache_size))])
        # Resample the test data without replacement so that each data point is part of a test and
        # training set only once (paraphrased from Raschka p.176). In StratifiedKFold, the class labels are
        # evenly distributed so that each test and training set is an accurate representation of the whole.
# this is the 0.17 version
#kfold = StratifiedKFold(y=self.y_train, n_folds=self.cv, random_state=0)
# this is the 0.18dev version
        skf = StratifiedKFold(n_splits=self.cv, random_state=0)
# do the cross validation
train_scores = []
test_scores = []
#for k, (train, test) in enumerate(kfold):
for k, (train, test) in enumerate(skf.split(X=self.x_train, y=self.y_train)):
# run the learning algorithm
pipe_clf.fit(self.x_train[train], self.y_train[train])
train_score = pipe_clf.score(self.x_train[test], self.y_train[test])
train_scores.append(train_score)
test_score = pipe_clf.score(self.x_test, self.y_test)
test_scores.append(test_score)
print('Fold:', k+1, ', Training score:', train_score, ', Test score:', test_score)
train_score = np.mean(train_scores)
print('Training score is', train_score)
test_score = np.mean(test_scores)
print('Test score is', test_score)
if do_plot:
self.__plot_learning_curve(pipe_clf)
return train_score, test_score
def run_model(self, C=1.0, degree=3, cache_size=200, do_plot=True):
# we need to standardize the data for the learner
pipe_clf = Pipeline([ ('scl', StandardScaler() ),
('clf', SVC(C=C, degree=degree, cache_size=cache_size))])
# test it: this should match the non-pipelined call
pipe_clf.fit(self.x_train, self.y_train)
# check model accuracy
train_score = pipe_clf.score(self.x_train, self.y_train)
print('Training score is', train_score)
test_score = pipe_clf.score(self.x_test, self.y_test)
print('Test score is', test_score)
if do_plot:
self.__plot_learning_curve(pipe_clf)
self.__plot_decision_boundaries(pipe_clf)
return train_score, test_score
def __plot_learning_curve(self, estimator):
plc = rb_plot_curves()
plc.plot_learning_curve(estimator, self.x_train, self.y_train, self.cv, self.data_label)
def plot_validation_curve(self, C=1.0, degree=3, cache_size=200):
estimator = Pipeline([ ('scl', StandardScaler() ),
('clf', SVC(C=C, degree=degree, cache_size=cache_size))])
param_names = ['clf__C']
param_ranges = [np.arange(1.0,10.0,1.)]
data_label = self.data_label
plc = rb_plot_curves()
for i in range(len(param_names)):
param_name = param_names[i]
param_range = param_ranges[i]
plc.plot_validation_curve(estimator, self.x_train, self.y_train,
self.cv, data_label,
param_range, param_name)
def __plot_decision_boundaries(self, estimator):
plc = rb_plot_curves()
features = pd.DataFrame(self.x_train)
features.columns = self.x_col_names
plc.plot_decision_boundaries(estimator, features, self.y_train, self.data_label)
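# Illustrative usage sketch (not part of the original file); assumes plot_curves.py
# from this repo is importable and an sklearn-style dataset split:
# from sklearn.datasets import load_iris
# from sklearn.model_selection import train_test_split
# iris = load_iris()
# x_tr, x_te, y_tr, y_te = train_test_split(iris.data, iris.target,
#                                           test_size=0.3, random_state=0)
# tester = rb_svm_test(x_tr, x_te, y_tr, y_te, iris.feature_names, 'iris', cv=5)
# tester.run_cv_model(C=1.0, do_plot=False)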
|
[
"="
] |
=
|
9c400a046050ad984f7ea8eadc6cd7593e5b051d
|
7ab43f39d04d89d7799b264bf6b2b8cdd5d12a72
|
/store/migrations/0015_order_list_total.py
|
8d50824654c493f86b3f4ae238299292cd2e8ad4
|
[] |
no_license
|
Facfac5000-git/manam_store
|
7d168b6f2ce98a9cdfa06f3419021c878954c805
|
e774c82f198f6c8c3aeb9bd71a3bbbebd42f91cb
|
refs/heads/master
| 2023-05-28T06:20:46.590075
| 2021-06-14T21:49:31
| 2021-06-14T21:49:31
| 375,697,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
# Generated by Django 3.0.8 on 2021-05-05 02:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0014_product_list_price'),
]
operations = [
migrations.AddField(
model_name='order',
name='list_total',
field=models.FloatField(default=0.0),
preserve_default=False,
),
]
|
[
"facfac4000@hotmail.com"
] |
facfac4000@hotmail.com
|
4dabb4dcf7e5b6d492023be48a616755cb5d9b08
|
3f8bb7a9adab1ad545e1d1b7a35333b64165c145
|
/all_archives/archived_py/manly_recent.py
|
e0fe6ef9ec366ce7f5117e7f9efa5e14cbfc5885
|
[] |
no_license
|
cyntiamk/weather_prediction_1
|
3168660a439e9cee1a9462b390b0f4c828b40dca
|
283025d4305f9773684efb498702a3c2cd9fb5ff
|
refs/heads/master
| 2020-04-17T01:18:13.579954
| 2019-01-27T14:52:21
| 2019-01-27T14:52:21
| 166,086,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,533
|
py
|
# Dependencies
import csv
import openweathermapy as ow
import pandas as pd
import requests
import pprint
import time
from datetime import datetime
import os
# import api_key from config file
from config import api_key
def grab_manly():
data = []
url = "http://api.openweathermap.org/data/2.5/weather?"
units = "metric"
manly = url + "appid=" + api_key + "&q=" + 'Manly'+"&units="+ units
weather_response = requests.get(manly)
data.append(weather_response.json())
date_obj = []
temp = []
max_temp = []
min_temp = []
humidity = []
pressure = []
wind_speed = []
clouds = []
description = []
for measure in data:
date_obj.append(measure['dt'])
temp.append(measure['main']['temp'])
max_temp.append(measure['main']['temp_max'])
min_temp.append(measure['main']['temp_min'])
pressure.append(measure['main']['pressure'])
humidity.append(measure['main']['humidity'])
wind_speed.append(measure['wind']['speed'])
clouds.append(measure['clouds']['all'])
description.append(measure['weather'][0]['main'])
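    # Simple dew point approximation (assumed intent): dp = T - (100 - H) / 5,
    # a rule of thumb that is reasonable for relative humidity above roughly 50%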
def calculate_dp(T, H):
return T - ((100 - H) / 5)
dew_point = []
for T ,H in zip(temp, humidity):
dp = calculate_dp(T,H)
dew_point.append(dp)
max_dew = []
for T ,H in zip(max_temp, humidity):
dp = calculate_dp(T,H)
max_dew.append(dp)
min_dew = []
for T ,H in zip(min_temp, humidity):
dp = calculate_dp(T,H)
min_dew.append(dp)
date = []
for seconds in date_obj:
timestamp = datetime.utcfromtimestamp(seconds)
day = datetime.strftime(timestamp,'%Y-%m-%d %H:%M:%S')
date.append(day)
manly_weather = {
"Date": date,
"Mean_temp": temp,
"Max_temp": max_temp,
"Min_temp": min_temp,
"Mean_dwp": dew_point,
"Max_dwp": max_dew,
"Min_dwp": min_dew,
"Pressure": pressure,
"Humidity": humidity,
"Wind": wind_speed,
"Clouds": clouds,
"Description": description
}
manly_recent = pd.DataFrame(manly_weather)
# if file does not exist write header
if not os.path.isfile('manly_recent.csv'):
manly_recent.to_csv('manly_recent.csv', header='column_names')
else: # else it exists so append without writing the header
manly_recent.to_csv('manly_recent.csv', mode='a', header=False)
while(True):
    print("Retrieving data...")
    grab_manly()
    time.sleep(3600)
|
[
"cyntiamk@gmail.com"
] |
cyntiamk@gmail.com
|
63fca56bd0c0663f2b1060f16e06b9340e3f7ee7
|
62a6299282f65919e0a0f4241882a574e8ebb26b
|
/fifty_shades/example.py
|
a7fbf2642486869aa6d7297cf726f9aa7e7831c7
|
[
"MIT"
] |
permissive
|
a-ro/fifty-shades-of-miaw
|
53603c1935842925d27a6d139394f8f29789c490
|
0af7029673619bef76c4cbfb38bf904f4f1c4dc0
|
refs/heads/master
| 2023-02-20T02:17:25.303257
| 2021-07-16T17:04:31
| 2021-07-16T17:04:31
| 229,767,682
| 4
| 1
|
MIT
| 2023-02-16T04:14:54
| 2019-12-23T14:26:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,897
|
py
|
from os.path import join
from fifty_shades.image_processing import load_image_tensor
from fifty_shades.loader import (
ImageType,
generate_project_images,
get_result_path,
get_hero_one_file_path,
get_drawing_one_file_path,
get_cat_bromance_file_path,
)
from fifty_shades.model import NeuralStyleTransfer
def show_stylized_cat_example():
cat_tensor = load_image_tensor(get_cat_bromance_file_path())
style_tensor = load_image_tensor(get_drawing_one_file_path())
model = NeuralStyleTransfer()
predicted_image = model.predict(cat_tensor, style_tensor)
predicted_image.show()
def transform_all_cat_images(style_name: str, save_directory: str) -> None:
cat_generator = generate_project_images(ImageType.CAT)
style_generator = generate_project_images(style_name)
model = NeuralStyleTransfer()
model.predict_and_save_all(cat_generator, style_generator, save_directory)
def transform_all_cat_images_with_single_style(style_image_path: str, save_directory: str) -> None:
cat_generator = generate_project_images(ImageType.CAT)
style_tensor = load_image_tensor(style_image_path)
model = NeuralStyleTransfer()
model.predict_single_style_and_save_all(cat_generator, style_tensor, save_directory)
def transform_single_cat_image_with_all_styles(cat_image_path: str, style_name: str, save_directory: str) -> None:
cat_tensor = load_image_tensor(cat_image_path)
style_generator = generate_project_images(style_name)
model = NeuralStyleTransfer()
model.predict_single_content_and_save_all(cat_tensor, style_generator, save_directory)
def transform_canvas_images(save_directory: str) -> None:
cat_generator = generate_project_images(ImageType.CANVAS_CAT)
style_generator = generate_project_images(ImageType.CANVAS_HERO)
model = NeuralStyleTransfer()
model.predict_and_save_all(cat_generator, style_generator, save_directory)
if __name__ == "__main__":
show_stylized_cat_example()
transform_all_cat_images(ImageType.HERO, save_directory=join(get_result_path(), "all_heroes"))
transform_all_cat_images(ImageType.DRAWING, save_directory=join(get_result_path(), "all_drawings"))
transform_all_cat_images_with_single_style(
get_hero_one_file_path(), save_directory=join(get_result_path(), "single_hero")
)
transform_all_cat_images_with_single_style(
get_drawing_one_file_path(), save_directory=join(get_result_path(), "single_drawing")
)
transform_single_cat_image_with_all_styles(
get_cat_bromance_file_path(), ImageType.HERO, save_directory=join(get_result_path(), "bromance_heroes")
)
transform_single_cat_image_with_all_styles(
get_cat_bromance_file_path(), ImageType.DRAWING, save_directory=join(get_result_path(), "bromance_drawings")
)
transform_canvas_images(save_directory=join(get_result_path(), "canvas"))
|
[
"amelie.rolland.1@gmail.com"
] |
amelie.rolland.1@gmail.com
|
015b25ad507b89bc4320e695f062c1754e42a687
|
c4ae14ae4cade9cececfc1a373406ca02bff00e8
|
/pysem.py
|
7053c74e100074f1924c0dbf91f9493d9dff27ae
|
[] |
no_license
|
mawilson1234/PySem
|
33405fd90487e35b869c18f6cf2da090a9651599
|
bac65041bbfb3ada774e520e410584315be35136
|
refs/heads/master
| 2022-12-05T21:57:25.913169
| 2020-08-20T02:18:51
| 2020-08-20T02:18:51
| 288,038,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,737
|
py
|
from inspect import getsource
import re
import string
# Convert indices to subscripts for printing
SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
# Get a new variable name in order so we don't repeat ourselves when doing nested predicate abstractions
def var():
x = string.ascii_lowercase.index('x')
while True:
yield string.ascii_lowercase[x]
x = x + 1 if x <= 24 else 0
# Format the assignment function
def format_g(g_local, n):
#breakpoint()
g_local_set = ['/'.join([str(i), g_local(i)]) for i in c]
g_global_set = ['/'.join([str(i), g(i)]) for i in c]
if g_local_set == g_global_set:
return f'g'
else:
mods = ')('.join([mod for mod in g_local_set if not mod in g_global_set])
return f'g({mods})'
# Format the lambda functions to a string so we can print them out in a readable way
def format_den_str(f):
if not isinstance(f, str):
formatted = getsource(f).rstrip().lstrip().rstrip(',').lstrip("'denotation' : ")
if formatted.startswith('('):
formatted = formatted.lstrip('(').rstrip(')')
formatted = re.sub(r'\n', ' ', formatted)
formatted = re.sub(r'\t', '', formatted)
formatted = re.sub(r'lambda (.*?)[:,] ', r'λ\1.', formatted)
if re.findall(r"\['set'\]", formatted):
formatted = re.sub(r"\[['\"]set['\"]\]", '', formatted)
formatted = re.sub(r' if.*$', '', formatted)
else:
formatted = re.sub('if', 'iff', formatted)
formatted = re.sub('==', '=', formatted)
formatted = re.sub(r'\[(.*?)\]', r'(\g<1>)', formatted)
else:
formatted = re.sub(r'_', ' ', f)
return formatted
# Stringify the output of function application since python lambda functions aren't output as strings
def format_application(*, f, arg):
if isinstance(f, dict):
formatted = f['den_str']
else:
formatted = f
if not isinstance(arg, str):
if not isinstance(arg['denotation'], str):
arg = arg['den_str']
# For 'the'
if not 'the unique' in formatted:
if re.findall(r'^λ.*?\.', arg):
arg = re.sub(r'^λ.*?\.', '', arg)
arg = re.sub(r'\(.*?\)', '', arg)
else:
if formatted.endswith(')'):
arg_to_apply = formatted[formatted.index('('):][1:-1]
formatted = re.sub(fr'\({arg_to_apply}\)', '', formatted)
arg = format_application(f = arg, arg = arg_to_apply)
else:
arg = arg['den_str']
# Get the label for the argument
if re.match(r'^λ(.*?)\.', formatted):
# Get the variable names used by any other lambda functions
existing_vars = re.findall(r'λ(.*?)\.', formatted)
arg_label = re.match(r'^λ(.*?)\.', formatted).groups()[0]
# Strip off that label
formatted = re.sub(r'^λ.*?\.', '', formatted)
# If the argument is also the name of a variable
if (arg in existing_vars or 'the unique' in arg) and len(existing_vars) > 1:
# Get a variable name that is not already being used
variable = var()
while (var_name := next(variable)) in existing_vars:
continue
# Replace the variable name in the formatted string with the new variable name so we don't incorrectly bind it
formatted = re.sub(fr'(^|[^A-Za-z0-9]){arg}($|[^A-Za-z0-9])', fr'\g<1>{var_name}\g<2>', formatted)
if 'the unique' in arg:
the_label = re.findall(r'the unique (.*?) s\.t\.', arg)[0]
if the_label in existing_vars:
arg = re.sub(fr'(^|[^A-Za-z0-9]){the_label}($|[^A-Za-z0-9])', fr'\g<1>{var_name}\g<2>', arg)
# Format assignment functions
#if re.findall(r'g\(.*?\/.*?\)', formatted):
# breakpoint()
# g_arg = re.findall(r'g\(.*?\/.*?\)\((.*?)\)', formatted)[0]
# g_modification_index = re.findall(r'g\((.*?)\/', formatted)[0]
# if g_arg == g_modification_index:
# g_modification_label = re.findall(r'g\(.*?\/(.*?)\)', formatted)[0]
# formatted = re.sub(fr'g\({g_modification_index}\/{g_modification_label}\)\({g_arg}\)', g_modification_label, formatted)
# Replace the argument's label with its value
formatted = re.sub(fr'(^|[^A-Za-z0-9]){arg_label}($|[^A-Za-z0-9])', fr'\g<1>{arg}\g<2>', formatted)
return formatted
# Stringify the output of predicate modification since python lambda functions aren't output as strings
def format_modification(f1, f2):
f1 = f1['den_str']
f2 = f2['den_str']
# Get the label for the argument from the first function
arg_label1 = re.match(r'^λ(.*?)\.', f1).groups()[0]
# Get the label for the argument from the second function
arg_label2 = re.match(r'^λ(.*?)\.', f2).groups()[0]
# Strip off that label for the second one
formatted2 = re.sub(r'^λ.*?\.', '', f2)
# Replace the argument's label in f2 with the label from f1
formatted2 = re.sub(fr'(^|[^A-Za-z0-9]+){arg_label2}($|[^A-Za-z0-9]+)', fr'\g<1>{arg_label1}\g<2>', formatted2)
formatted = f1 + ' & ' + formatted2
return formatted
# Semantic types
e = 'e'
t = 't'
et = [e,t]
# Define a list of words
# A lexical entry has four parts:
# A PF (string corresponding to what we want to print it as)
# A semantic type, consisting of an order list of e and t
# A denotation, which is a function that takes an argument of the specified type
# A set, which defines the results of applying the function to the argument
# (The set would probably be sufficient here, but it's nice to have a function so it looks more like your traditional lambda semantics)
jumping = {'PF' : 'jumping',
'type' : et,
'denotation' : lambda x: jumping['set'][x] if x in jumping['set'].keys() else 0,
'set' : {'John' : 1, 'Mary' : 1}}
# Note that the arguments need to be specified in the same order in the function and the set
love = {'PF' : 'love',
'type' : [e, et],
'denotation' : lambda x: lambda y: love['set'][x][y] if x in love['set'].keys() and y in love['set'][x].keys() else 0,
'set' : {'Bill' : {'Mary' : 1},
'John' : {'Susan' : 1},
'the_hat' : {'Mary' : 1}}}
# This assumes recipient theme order (when using a right-branch structure)
give = {'PF' : 'give',
'type' : [e, [e, et]],
'denotation' : (lambda x: lambda y: lambda z: give['set'][x][y][z] if
x in give['set'].keys() and
y in give['set'][x].keys() and
z in give['set'][x][y].keys() else 0),
'set' : {'the_hat' : {'Bill' : {'Mary' : 1,
'Susan' : 1},
'John' : {'Bill' : 1}},
'the_dress' : {'Susan' : {'Mary' : 1}}}}
blue = {'PF' : 'blue',
'type' : et,
'denotation' : lambda x: blue['set'][x] if x in blue['set'].keys() else 0,
'set' : {'the_hat' : 1, 'the_dress' : 1}}
hat = {'PF' : 'hat',
'type' : et,
'denotation' : lambda x: hat['set'][x] if x in hat['set'].keys() else 0,
'set' : {'the_hat' : 1,
'the_second_hat' : 1}}
dress = {'PF' : 'dress',
'type' : et,
'denotation' : lambda x: dress['set'][x] if x in dress['set'].keys() else 0,
'set' : {'the_dress' : 1}}
the_hat = {'PF' : 'the hat',
'type' : e,
'denotation' : 'the_hat'}
the_dress = {'PF' : 'the dress',
'type' : e,
'denotation' : 'the_dress'}
John = {'PF' : 'John',
'type': e,
'denotation' : 'John'}
Bill = {'PF' : 'Bill',
'type' : e,
'denotation' : 'Bill'}
Susan = {'PF' : 'Susan',
'type' : e,
'denotation' : 'Susan'}
Mary = {'PF' : 'Mary',
'type' : e,
'denotation' : 'Mary'}
word_list = [jumping, love, blue, hat, dress, the_hat, Bill, Susan, Mary, John, the_dress]
IS_PRED = {'PF' : 'is',
'type' : [et, et],
'denotation' : lambda P: lambda x: P(x),
'set' : {word['denotation'] : word['set'] for word in word_list if word['type'] == et}}
IS_IDENT = {'PF' : 'is',
'type' : [e, et],
'denotation' : lambda x: lambda y: 1 if x == y else 0,
'set' : {word['denotation'] : {word['denotation'] : 1} for word in word_list if word['type'] == e}}
word_list.extend([IS_PRED, IS_IDENT])
# Shifts IS_IDENT to IS_PRED
SHIFT = {'PF' : '(SHIFT)',
'type' : [[e, et], [et, et]],
'denotation' : lambda P: lambda Q: lambda x: Q(x),
'set' : {word1['denotation'] : word2['set']
for word2 in word_list if word2['type'] == [et, et]
for word1 in word_list if word1['type'] == [e, et]}}
word_list.extend([SHIFT])
# Definite determiner (Russellian semantics rather than presupposition)
# Note that entities must be included as words in the word list for this to work properly
# Since 'the' returns an entity, we do not define a set for it, as entities do not have
# characteristic sets in our model
the = {'PF' : 'the',
'type' : [et, e],
'den_str' : 'λP.the unique x s.t. P(x)',
'denotation' : lambda P: [word['denotation'] for word in word_list if P(word['denotation']) == 1][0] if sum([P(word['denotation']) for word in word_list]) == 1 else '#'}
that_comp = {'PF' : 'that',
'type' : [t, t],
'denotation' : lambda P: P,
'set' : {0 : 0, 1 : 1}}
word_list.extend([the, that_comp])
# Logical connectives. Have to use uppercase because lowercase are reserved Python keywords
AND = {'PF' : 'and',
'type' : [t, [t, t]],
'den_str' : 'λP.λQ.P & Q',
'denotation' : lambda P: lambda Q: 1 if P == 1 and Q == 1 else 0,
'set' : {1 : {1 : 1},
1 : {0 : 0},
0 : {1 : 0},
0 : {0 : 0}}}
OR = {'PF' : 'or',
'type' : [t, [t, t]],
'den_str' : 'λP.λQ.P \\/ Q',
'denotation' : lambda P: lambda Q: 1 if P == 1 or Q == 1 else 0,
'set' : {1 : {1 : 1},
1 : {0 : 1},
0 : {1 : 1},
0 : {0 : 0}}}
NOT = {'PF' : 'not',
'type' : [t, t],
'den_str' : 'λP.¬(P)',
'denotation' : lambda P: 0 if P == 1 else 1,
'set' : {1 : 0,
0 : 1}}
word_list.extend([AND, OR, NOT])
# Context for pronoun resolution
c = {1 : John['denotation'], 2: Mary['denotation'], 3: Bill['denotation']}
# Get a modified version of the assignment just like the one passed to it, except that it respects the mapping specified in the mod argument
def g_mod(g, mod):
# Get the index from the string
index = int(re.findall('^[0-9]*', mod)[0])
# Get the new output for that index
modified_output = re.findall('/(.*)$', mod)[0]
c_local = c.copy()
c_local.update({index : modified_output})
return lambda n: g(n) if n != index else c_local[n]
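# For example, with c = {1: 'John', ...}, g_mod(g, '1/Mary')(1) returns 'Mary',
# while g_mod(g, '1/Mary')(2) falls back to the unmodified assignment g(2).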
# Assignment function that maps an index to an entity in the context
def g(n):
try:
if n in c.keys():
return c[n]
else:
raise Exception
except:
print(f'{n} not in domain of assignment function g.')
pronouns = []
# Pronouns and traces are functions that return lexical entries given an index
def he(i):
he_i = {'PF' : f'he{i}'.translate(SUB),
'index' : i,
'type' : e,
'denotation' : f'g({i})'}
if not he_i in word_list:
word_list.extend([he_i])
if not he_i in pronouns:
pronouns.extend([he_i])
return he_i
def t(i):
t_i = {'PF' : f't{i}'.translate(SUB),
'index' : i,
'type' : e,
'denotation' : f'g({i})'}
if not t_i in word_list:
word_list.extend([t_i])
if not t_i in pronouns:
pronouns.extend([t_i])
return t_i
# One final thing each word has: a version of its denotation function formatted as a string
# This is just so we can print out the results of each semantic composition step in a readable way, since Python lambda functions are not output as strings
for word in word_list:
if not 'den_str' in word.keys():
word.update({'den_str' : format_den_str(word['denotation'])})
def function_application(*, f, arg):
# Return the result of function application
# PF is just concatenation of the strings
PF = f'{f["PF"]} {arg["PF"]}'.rstrip()
# Den_str is handled by the formatting function above
den_str = format_application(f = f, arg = arg)
# The type is the result of getting rid of the first type in f
ty = f['type'][1:][0]
# The denotation is the result of applying the function's denotation to the argument's denotation
presupposition_failure = arg['denotation'] == '#' or f['denotation'] == '#'
if not presupposition_failure:
denotation = f['denotation'](arg['denotation'])
else:
denotation = '#'
presupposition_failure = denotation == '#' or presupposition_failure
if presupposition_failure:
den_str = '#'
denotation = '#'
# Some special logic for the identity function, since its characteristic set is not a function of the word list but a function of any derivable et function
#if f['denotation'](arg['denotation']) == arg['denotation']:
# return {'PF' : f'{f["PF"]} {arg["PF"]}'.rstrip(),
# 'den_str' : arg['den_str'],
# 'type' : arg['type'],
# 'denotation' : arg['denotation'],
# 'set' : arg['set']}
if 'set' in f.keys():
if f['set'] == 0:
s = 0
else:
s = f['set'][arg['denotation']] if arg['denotation'] in f['set'].keys() else 0
if 's' in locals():
return {'PF' : PF, 'den_str': den_str, 'type' : ty, 'denotation' : denotation, 'set' : s}
else:
return {'PF' : PF, 'den_str': den_str, 'type' : ty, 'denotation' : denotation}
#'set' : {t[1:][0] for t in Y['set'] if X['denotation'] == t[0] and len(t) > 0}}
#'set' : {t[1:] for t in Y['set'] if X['denotation'] == t[0] and len(t) > 0}}
def predicate_modification(*, f1, f2):
# Return the result of predicate modification
    # PF is concatenation of the strings
PF = f'{f1["PF"]} {f2["PF"]}'
# Den_str is handled by the formatting function above
den_str = format_modification(f1, f2)
# Since this is only called when f1 and f2 have the same type, the type is equal to their type (either f1['type'] or f2['type'] would work, since the types are identical)
ty = f1['type']
# The denotation is True iff f1(x) and f2(x)
# The set is the set of all items in both f1 and f2 (e.g., every item in f1 that is also in f2)
presupposition_failure = f1['denotation'] == '#' or f2['denotation'] == '#'
if not presupposition_failure:
return {'PF' : PF, 'den_str' : den_str, 'type' : ty,
'denotation' : lambda x: 1 if f1['denotation'](x) and f2['denotation'](x) else 0,
'set' : [item for item in f1['set'] if item in f2['set']]}
else:
return {'PF' : PF, 'den_str' : '#', 'type' : ty, 'denotation' : '#'}
def predicate_abstraction(*, index, pred, g_local, verbose = False):
# Predicate abstraction
# PF-ified semantics is the index + the PF of the predicate
# Den_str is the abstracted version of the predicate, with the value given by the usual assignment function replaced by the modified assignment function applied to the argument
# Type is [e, pred['type']]
# The denotation is the recursive interpretation of the structure where index is mapped to x
# The set is the mapping of a word's denotation to true if it's type e and if it's in the set of the interpretation of the predicate wrt the modified assignment function
# We do this so that we only print out the results of interpreting things once
# Get the next label for a variable so we don't repeat ourselves
x = next(v)
if verbose:
interpret_sentence_r(pred, g_local = g_mod(g_local, f'{index}/{x}'), verbose = verbose)
#print(interpret_sentence_r(pred, g_local = g_local)['den_str'])
#if index == 1:
return {'PF' : f'{index} ' + re.sub(f'^{index} ', '', interpret_sentence_r(pred, g_local = g_local)['PF']),
'den_str' : f'λ{x}.' + re.sub(g_local(index), (g_mod(g_local, f"{index}/{x}"))(index), interpret_sentence_r(pred, g_local = g_local)['den_str']),
'type' : [e, interpret_sentence_r(pred, g_local = g_local)['type']],
'denotation' : lambda x: interpret_sentence_r(pred, g_local = g_mod(g_local, f'{index}/{x}'))['denotation'],
'set' : {word['denotation'] : 1
for word in [word for word in word_list if word['type'] == e]
if (interpret_sentence_r(pred, g_local = g_mod(g_local, f'{index}/{word["denotation"]}')))['set'] == 1}}
# Interpretation function
def i(X, Y = '', /, *, g_local, verbose = False):
# Set up local copies of the variables so we don't override the global ones.
# We define these names first in case they are ints, in which case copying wouldn't work
X_local = X
Y_local = Y
# If X is a pronoun, update its denotation and den_str relative to any modified assignment function
if X in pronouns:
# Make local copies so we don't mess with the global ones
X_local = X.copy()
X_local.update({'denotation' : re.sub('g', 'g_local', X_local['denotation'])})
if verbose:
print(f"{X_local['PF']} = {format_g(g_local, X_local['index'])}({X_local['index']}) = {eval(X_local['denotation'])}")
X_local['denotation'] = eval(X_local['denotation'])
X_local.update({'den_str' : format_den_str(X_local['denotation'])})
# If there are two arguments, figure out what semantic composition rule to apply
if Y:
# If Y is a pronoun, update its denotation and den_str relative to any modified assignment function
if Y in pronouns:
Y_local = Y.copy()
Y_local.update({'denotation' : re.sub('g', 'g_local', Y_local['denotation'])})
if verbose:
print(f"{Y_local['PF']} = {format_g(g_local, Y_local['index'])}({Y_local['index']}) = {eval(Y_local['denotation'])}")
Y_local['denotation'] = eval(Y_local['denotation'])
Y_local.update({'den_str' : format_den_str(Y_local['denotation'])})
# Predicate abstraction when X or Y is an index
if isinstance(X_local, int):
interpretation = predicate_abstraction(index = X_local, pred = Y_local, g_local = g_local, verbose = verbose)
if verbose:
print(f"[[{X_local} {interpret_sentence_r(Y_local, g_local = g_local)['PF']}]] = {interpretation['den_str']} by PA")
return interpretation
elif isinstance(Y_local, int):
interpretation = predicate_abstraction(index = Y_local, pred = X_local, g_local = g_local, verbose = verbose)
if verbose:
print(f"[[{Y_local} {interpret_sentence_r(X_local, g_local = g_local)['PF']}]] = {interpretation['den_str']} by PA")
return interpretation
# Function application when either X_local or Y_local is in the domain of the other
elif Y_local['type'] == X_local['type'][0]:
if verbose:
print(f"[{X_local['den_str']}]({Y_local['den_str']}) = {function_application(f = X_local, arg = Y_local)['den_str']} by FA([[{X_local['PF']}]], [[{Y_local['PF']}]])")
return function_application(f = X_local, arg = Y_local)
elif X_local['type'] == Y_local['type'][0]:
if verbose:
print(f"[{Y_local['den_str']}]({X_local['den_str']}) = {function_application(f = Y_local, arg = X_local)['den_str']} by FA([[{Y_local['PF']}]], [[{X_local['PF']}]])")
return function_application(f = Y_local, arg = X_local)
# Predicate modification when X_local and Y_local have the same domain of application
elif X_local['type'] == Y_local['type']:
if verbose:
print(f"PM({X_local['den_str']}, {Y_local['den_str']}) = {predicate_modification(f1 = X_local, f2 = Y_local)['den_str']} by PM([[{X_local['PF']}]], [[{Y_local['PF']}]])")
return predicate_modification(f1 = X_local, f2 = Y_local)
else:
print(f'Type mismatch: type {X_local["type"]} cannot compose with type {Y_local["type"]}.')
# Otherwise, return the single argument
else:
# If X is a pronoun, update its denotation and den_str relative to any modified assignment function
if X in pronouns:
# Make local copies so we don't mess with the global ones
X_local = X.copy()
X_local.update({'denotation' : re.sub('g', 'g_local', X_local['denotation'])})
if verbose:
print(f"{X_local['PF']} = {re.sub('_local', '', X_local['denotation'])} = {eval(X_local['denotation'])}")
X_local['denotation'] = eval(X_local['denotation'])
X_local.update({'den_str' : format_den_str(X_local['denotation'])})
return X_local
# Interpret a sentence helper (binary branching only!)
def interpret_sentence_r(sentence, /, *, g_local, verbose = False):
#try:
if len(sentence) > 2:
raise Exception
if len(sentence) == 2 and not isinstance(sentence, dict):
branch1 = sentence[0]
branch2 = sentence[1]
if not isinstance(branch1, dict):
if isinstance(branch1, int):
return i(branch1, branch2, g_local = g_local, verbose = verbose)
else:
branch1 = interpret_sentence_r(branch1, g_local = g_local, verbose = verbose)
if not isinstance(branch2, dict):
if isinstance(branch2, int):
                return i(branch1, branch2, g_local = g_local, verbose = verbose)
else:
branch2 = interpret_sentence_r(branch2, g_local = g_local, verbose = verbose)
return i(branch1, branch2, g_local = g_local, verbose = verbose)
elif isinstance(sentence, dict):
return i(sentence, g_local = g_local, verbose = verbose)
#except:
# print(f'Error: only binary branching! {sentence} has too many branches!')
# Interpret a sentence (allows for printing the full sentence only once)
def interpret_sentence(sentence, /, *, g_local = g, verbose = True):
# Reinitialize the lambda variable name generator function
global v
v = var()
if verbose:
print(f'\nInterpretation of sentence "{sentence["PF"]}":')
interpretation = interpret_sentence_r(sentence['LF'], g_local = g_local, verbose = verbose)
if verbose:
print(f'{interpretation["denotation"]}\n')
return interpretation
# Some test sentences
# Type shifter and predication
sentence1 = {'PF' : "The hat is blue", 'LF' : [the_hat, [[IS_IDENT, SHIFT], blue]]}
# Identity
sentence2 = {'PF' : 'The hat is the dress', 'LF' : [the_hat, [IS_IDENT, the_dress]]}
# Pronoun
sentence3 = {'PF' : 'He1 is jumping'.translate(SUB), 'LF' : [he(1), [IS_PRED, jumping]]}
# Topicalization
sentence4 = {'PF' : 'Bill, Mary loves', 'LF' : [Bill, [1, [Mary, [love, t(1)]]]]}
sentence5 = {'PF' : 'John, Mary loves', 'LF' : [John, [1, [Mary, [love, t(1)]]]]}
# This is not a good English sentence because English doesn't allow multiple topicalization, but it shows that nested PA works correctly
sentence6 = {'PF' : 'Mary1, Bill2, t1 loves t2', 'LF' : [Mary, [1, [Bill, [2, [t(1), [love, t(2)]]]]]]}
# Relative clauses
sentence7 = {'PF' : 'the hat that Mary loves', 'LF' : [the, [hat, [1, [that_comp, [Mary, [love, t(1)]]]]]]}
sentence8 = {'PF' : 'the dress that Mary loves', 'LF' : [the, [dress, [1, [that_comp, [Mary, [love, t(1)]]]]]]}
# Full sentences with relative clauses
sentence9 = {'PF' : 'the hat that Mary loves is blue', 'LF' : [[the, [hat, [1, [that_comp, [Mary, [love, t(1)]]]]]], [[IS_IDENT, SHIFT], blue]]}
sentence10 = {'PF' : 'Mary loves the hat that is blue', 'LF' : [Mary, [love, [the, [hat, [1, [that_comp, [t(1), [[IS_IDENT, SHIFT], blue]]]]]]]]}
# Logical connectives
sentence11 = {'PF' : 'Mary is jumping or Bill is jumping' , 'LF' : [[Mary, [[IS_IDENT, SHIFT], jumping]], [OR, [Bill, [[IS_IDENT, SHIFT], jumping]]]]}
sentence12 = {'PF' : 'Mary loves Bill and loves the blue hat', 'LF' : [Mary, [1, [[t(1), [love, Bill]], [AND, [t(1), [love, [the, [blue, hat]]]]]]]]}
sentence13 = {'PF' : "Mary doesn't love John", 'LF' : [NOT, [Mary, [love, John]]]}
# I'm not sure I'm happy with exactly how the output for predicate abstraction is displayed, but it gets the correct results. The issue is that it won't display nested modifications to the assignment function correctly, because of how the strings for those are constructed. But the interpretations themselves are correct.
|
[
"mawilson@linguist.umass.edu"
] |
mawilson@linguist.umass.edu
|
0f829d8e2b610f7cefeced061d569b1d0b6f399d
|
4a42d03ad01818b6cc04444057dd66300a725d0a
|
/medium/DetectingCycles6.py
|
fd3ef98d11805a968634bdc9da8c1317c56416aa
|
[] |
no_license
|
giy/code-eval
|
1a97b6e0a19c96cd45b7da848ab47ea7eb9a444c
|
748b1e59bb9223ebe44b40b89a960d0b213a93a4
|
refs/heads/master
| 2016-09-05T09:58:32.898874
| 2015-02-28T05:41:00
| 2015-02-28T05:41:00
| 31,239,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 789
|
py
|
import sys
def detectCycle(nos, i):
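    # Walk two indices in lockstep, one from the start of the sequence and one
    # from the suspected cycle start, until their values match; that position
    # marks where the cycle begins. Then collect values until the first repeat.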
cycleBeginsAt = i
tortoise = 0
hare = cycleBeginsAt
for i in range(len(nos)):
if nos[hare] == nos[tortoise]:
break
tortoise += 1
hare = (hare + 1)%(len(nos))
cycleBeginsAt = tortoise
result = []
for i in range(tortoise, len(nos)):
if nos[i] not in result:
result.append(nos[i])
else:
break
return result
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
nos = [int(i) for i in test.strip().split()]
if len(nos) == 1:
        print(nos[0])
continue
for i in range(1, len(nos)):
tortoise = nos[i]
hare = nos[(2*i)%len(nos)]
if hare == tortoise:
            print(' '.join([str(x) for x in detectCycle(nos, i)]))
break
test_cases.close()
|
[
"gautamiyer@GAUTAMs-MacBook-Air.local"
] |
gautamiyer@GAUTAMs-MacBook-Air.local
|
93117ac33ad6602c755054bba6d85d4308a19d77
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/L7NK7McEQ9yEpTXRE_16.py
|
d1817c7d7db4293f1356e203513e2932567c3783
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68
|
py
|
def XOR(a, b):
a = a ^ b
b = a ^ b
a = a ^ b
return [a,b]
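# Minimal check of the XOR swap above: three XORs exchange two values
# without a temporary, since (a ^ b) ^ b == a.
# print(XOR(3, 5))  # -> [5, 3]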
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
dde1ad2c4ae0622c1f48801305dc2b8ddcce57a5
|
a3f062eb210f05e22f90b5eecfda6195a8ba0744
|
/LaneDetect.py
|
0d0937bc0b216fe789900bf28359d9441c280576
|
[] |
no_license
|
Menglingyu2333/RaspberryDesktopFile
|
30b8df562b6e7082700f491a9baaf7f4cefe607e
|
687f7a700014cf7c9d338d2200318d0ea578a4f7
|
refs/heads/master
| 2023-01-21T07:26:51.842296
| 2020-12-07T11:36:59
| 2020-12-07T11:36:59
| 319,299,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
import cv2
import numpy as np
# Use the Hough line transform for line detection; prerequisite: edge detection has already been done
__author__ = "boboa"
# Standard Hough line transform
def line_detection_demo(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)  # apertureSize must be an odd value >= 3 (1 is invalid)
    lines = cv2.HoughLines(edges, 1, np.pi/180, 200)  # searches all candidate lines with radius step 1 and angle step pi/180
    for line in lines:
        rho, theta = line[0]  # line[0] holds the line's polar radius and polar angle; the angle is in radians
        a = np.cos(theta)  # theta is in radians
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))  # x coordinate of the line's start point
        y1 = int(y0 + 1000 * (a))  # y coordinate of the line's start point
        x2 = int(x0 - 1000 * (-b))  # x coordinate of the line's end point
        y2 = int(y0 - 1000 * (a))  # y coordinate of the line's end point
        cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv2.imshow("image_lines", image)
# Probabilistic Hough line transform
def line_detect_possible_demo(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # input from imread is 3-channel BGR, not BGRA
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    # Searches all candidate lines with radius step 1 and angle step pi/180
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, minLineLength=50, maxLineGap=10)
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
    cv2.imshow("line_detect_possible_demo", image)
if __name__ == "__main__":
img = cv2.imread("/home/pi/Pictures/road/test2.jpg")
cv2.namedWindow("input image", cv2.WINDOW_AUTOSIZE)
cv2.imshow("input image", img)
line_detect_possible_demo(img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"1171766563@qq.com"
] |
1171766563@qq.com
|
78736e48e0a8d4cc7189fbf0b4c5142cc83a935d
|
5349b773a61d681ee7903391e22dae1b1c9f1ac4
|
/___sb3analyzer_bak20200401/sb3analyzer/sb3analyzer.py
|
9bb0be82d31fca4fda7c32905ef42310afde57c8
|
[
"MIT"
] |
permissive
|
gnoeykeCG4001/sb3analyzer_references
|
05acafe085c5e8083eacf98c7f67a08faada50ea
|
078740a86cc8c11d0731f7e1138cdbd856c778b1
|
refs/heads/master
| 2023-02-04T14:33:34.027908
| 2020-04-03T07:26:01
| 2020-04-03T07:26:01
| 252,245,854
| 0
| 0
| null | 2023-01-25T14:22:36
| 2020-04-01T17:43:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 939
|
py
|
# Comments regarding module
#
#
## Imports
#from sb3objects import sb3block_inputNfield
import sys
sys.path.append('/__SB3Analyzer/sb3objects')
from sb3objects import sb3project
## Globals
## Define
## Helper functions
## Class declaration
class SB3Analyzer:
# Constructor
# score variables
deadCode_BlockList = []
def __init__(self, Proj):
self.proj = Proj
# Class Methods
def get_self(self):
return self
def getDeadCode_BlockList(self):
self.mark_print()
tList = self.proj.getTargetList()
for t in tList:
for indivBlock in t.get_blockList():
if not indivBlock.isReachable():
self.deadCode_BlockList.append([t.get_name(),indivBlock])
print("\n\n\ndead blocks = " + str(len(self.deadCode_BlockList)))
def mark_print(self):
self.proj.printProj()
|
[
"gnoeyke.code@gmail.com"
] |
gnoeyke.code@gmail.com
|
8db36025f323262af3ea01791f185c0f0290c259
|
46ef284df436bc84cbd62b45dcc909a955e10934
|
/turtle02.py
|
46c978aaba1f6e85a718d4fdf5b9a06b16cc419f
|
[] |
no_license
|
zmscgck/pythonexercise
|
188e2d3175a137ff3eb79fc2d412e932879e794c
|
eca7575b62d65cef6d580ea4261dc3e08f5389ff
|
refs/heads/master
| 2020-08-13T10:13:43.007857
| 2019-10-14T04:58:10
| 2019-10-14T04:58:10
| 214,952,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 15 19:42:05 2019
@author: LZM
"""
import turtle
hg=turtle.Turtle()
def square(t,length):
for i in range(4):
t.fd(length)
t.lt(90)
print('Please enter the side length:')
length=input()
length=float(length)
square(hg,length)
|
[
"lgfl@sina.com"
] |
lgfl@sina.com
|
d02ea8b20701f5938a589e9eadbb08bbac40c1a4
|
9f0c27df4ab43ad6f3fca2fec4850b9a7c043241
|
/Scripts/utils.py
|
2e791efa4f4266eb72d6b879d782d363042fc134
|
[] |
no_license
|
ICESAT-2HackWeek/Floes-are-Swell
|
e2911414b35f9dda8fe94ea9a3e5d26aee1fe039
|
a43047de450912a2656bd05881726d83b1542cfc
|
refs/heads/master
| 2020-06-06T00:00:24.386553
| 2020-05-30T20:29:31
| 2020-05-30T20:29:31
| 192,580,732
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,235
|
py
|
# Import necessary modules
# Use shorter names (np, pd, plt) instead of full (numpy, pandas, matplotlib.pyplot) for convenience
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import h5py
import xarray as xr
import pdb
import numpy.ma as ma
from glob import glob
# Several helpers below use numpy names unqualified (reshape, fromfile, size,
# array, sqrt, where) and a parallel-style range; import them explicitly and
# alias prange so the module runs as written.
from numpy import reshape, fromfile, size, array, sqrt, where
prange = range
def getSnowandConverttoThickness(dF, snowDepthVar='snowDepth',
snowDensityVar='snowDensity',
outVar='iceThickness'):
""" Grid using nearest neighbour the NESOSIM snow depths to the
high-res ICESat-1 freeboard locations
"""
# Convert freeboard to thickness
# Need to copy arrays or it will overwrite the pandas column!
freeboardT=np.copy(dF['freeboard'].values)
snowDepthT=np.copy(dF[snowDepthVar].values)
snowDensityT=np.copy(dF[snowDensityVar].values)
ice_thickness = freeboard_to_thickness(freeboardT, snowDepthT, snowDensityT)
#print(ice_thickness)
dF[outVar] = pd.Series(np.array(ice_thickness), index=dF.index)
return dF
def freeboard_to_thickness(freeboardT, snow_depthT, snow_densityT):
"""
Hydrostatic equilibrium equation to calculate sea ice thickness
from freeboard and snow depth/density data
Args:
freeboardT (var): ice freeboard
snow_depthT (var): snow depth
snow_densityT (var): final snow density
Returns:
        ice_thicknessT (var): ice thickness derived using hydrostatic equilibrium
"""
# Define density values
rho_w=1024.
rho_i=925.
#rho_s=300.
# set snow to freeboard where it's bigger than freeboard.
snow_depthT[snow_depthT>freeboardT]=freeboardT[snow_depthT>freeboardT]
ice_thicknessT = (rho_w/(rho_w-rho_i))*freeboardT - ((rho_w-snow_densityT)/(rho_w-rho_i))*snow_depthT
return ice_thicknessT
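# Quick sanity check of the hydrostatic relation above (hypothetical values):
# 0.4 m of freeboard with 0.1 m of 320 kg/m^3 snow gives roughly 3.4 m of ice.
# freeboard_to_thickness(np.array([0.4]), np.array([0.1]), np.array([320.]))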
def getWarrenData(dF, outSnowVar, outDensityVar='None'):
"""
    Assign the Warren 1999 snow depth/density climatology to the dataframe
Args:
dF (data frame): Pandas dataframe
outSnowVar (string): name of Warren snow depth variable
outDensityVar (string): name of Warren snow density variable
Returns:
dF (data frame): Pandas dataframe updated to include colocated Warren snow depth and density
"""
# Generate empty lists
snowDepthW99s=ma.masked_all(np.size(dF['freeboard'].values))
if (outDensityVar!='None'):
snowDensityW99s=ma.masked_all(np.size(dF['freeboard'].values))
# Loop over all freeboard values (rows)
for x in range(np.size(dF['freeboard'].values)):
#print(x, dF['lon'].iloc[x], dF['lat'].iloc[x], dF['month'].iloc[x]-1)
        # Subtract 1 from month as the Warren index in the function starts at 0
snowDepthDayW99T, snowDensityW99T=WarrenClimatology(dF['lon'].iloc[x], dF['lat'].iloc[x], dF['month'].iloc[x]-1)
# Append values to list
snowDepthW99s[x]=snowDepthDayW99T
if (outDensityVar!='None'):
snowDensityW99s[x]=snowDensityW99T
# Assign list to dataframe as a series
dF[outSnowVar] = pd.Series(snowDepthW99s, index=dF.index)
if (outDensityVar!='None'):
dF[outDensityVar] = pd.Series(snowDensityW99s, index=dF.index)
return dF
def WarrenClimatology(lonT, latT, monthT):
"""
Get Warren1999 snow depth climatology
Args:
lonT (var): longitude
latT (var): latitude
monthT (var): month with the index starting at 0
Returns:
Hs (var): Snow depth (m)
rho_s (var): Snow density (kg/m^3)
"""
H_0 = [28.01, 30.28, 33.89, 36.8, 36.93, 36.59, 11.02, 4.64, 15.81, 22.66, 25.57, 26.67]
a = [.127, .1056, .5486, .4046, .0214, .7021, .3008, .31, .2119, .3594, .1496, -0.1876]
b = [-1.1833, -0.5908, -0.1996, -0.4005, -1.1795, -1.4819, -1.2591, -0.635, -1.0292, -1.3483, -1.4643, -1.4229]
c = [-0.1164, -0.0263, 0.0280, 0.0256, -0.1076, -0.1195, -0.0811, -0.0655, -0.0868, -0.1063, -0.1409, -0.1413]
d = [-0.0051, -0.0049, 0.0216, 0.0024, -0.0244, -0.0009, -0.0043, 0.0059, -0.0177, 0.0051, -0.0079, -0.0316]
e = [0.0243, 0.0044, -0.0176, -0.0641, -0.0142, -0.0603, -0.0959, -0.0005, -0.0723, -0.0577, -0.0258, -0.0029]
# Convert lat and lon into degrees of arc, +x axis along 0 degrees longitude and +y axis along 90E longitude
x = (90.0 - latT)*np.cos(lonT * np.pi/180.0)
y = (90.0 - latT)*np.sin(lonT*np.pi/180.0)
Hs = H_0[monthT] + a[monthT]*x + b[monthT]*y + c[monthT]*x*y + (d[monthT]*x*x) + (e[monthT]*y*y)
# Now get SWE, although this is not returned by the function
H_0swe = [8.37, 9.43,10.74,11.67,11.8,12.48,4.01,1.08,3.84,6.24,7.54,8.0]
aswe = [-0.027,0.0058,0.1618,0.0841,-0.0043,0.2084,0.097,0.0712,0.0393,0.1158,0.0567,-0.054]
bswe = [-0.34,-0.1309,0.0276,-0.1328,-0.4284,-0.5739,-0.493,-0.145,-0.2107,-0.2803,-0.3201,-0.365]
cswe = [-0.0319,0.0017,0.0213,0.0081,-0.038,-0.0468,-0.0333,-0.0155,-0.0182,-0.0215,-0.0284,-0.0362]
dswe = [-0.0056,-0.0021,0.0076,-0.0003,-0.0071,-0.0023,-0.0026,0.0014,-0.0053,0.0015,-0.0032,-0.0112]
eswe = [-0.0005,-0.0072,-0.0125,-0.0301,-0.0063,-0.0253,-0.0343,0,-0.019,-0.0176,-0.0129,-0.0035]
swe = H_0swe[monthT] + aswe[monthT]*x + bswe[monthT]*y + cswe[monthT]*x*y + dswe[monthT]*x*x + eswe[monthT]*y*y
# Density in kg/m^3
rho_s = 1000.*(swe/Hs)
#print(ma.mean(rho_s))
# Could mask out bad regions (i.e. land) here if desired.
# Hsw[where(region_maskG<9.6)]=np.nan
# Hsw[where(region_maskG==14)]=np.nan
# Hsw[where(region_maskG>15.5)]=np.nan
# Could mask out bad regions (i.e. land) here if desired.
#rho_s[where(region_maskG<9.6)]=np.nan
#rho_s[where(region_maskG==14)]=np.nan
#rho_s[where(region_maskG>15.5)]=np.nan
# Convert snow depth to meters
Hs=Hs/100.
return Hs, rho_s
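# Example: at the North Pole (latT = 90, so x = y = 0) the January fit
# (monthT = 0) reduces to the intercepts: Hs = 28.01 cm of snow and
# swe = 8.37 cm, i.e. a density of 1000 * 8.37 / 28.01 ~ 299 kg/m^3.
# WarrenClimatology(0., 90., 0)  # -> (0.2801, ~299)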
def get_psnlatslons(data_path, res=25):
# Get NSIDC polar stereographic grid data
if (res==25):
# 25 km grid
mask_latf = open(data_path+'/psn25lats_v3.dat', 'rb')
mask_lonf = open(data_path+'/psn25lons_v3.dat', 'rb')
lats_mask = reshape(fromfile(file=mask_latf, dtype='<i4')/100000., [448, 304])
lons_mask = reshape(fromfile(file=mask_lonf, dtype='<i4')/100000., [448, 304])
elif (res==12):
# 12.5 km grid
mask_latf = open(data_path+'/psn12lats_v3.dat', 'rb')
mask_lonf = open(data_path+'/psn12lons_v3.dat', 'rb')
lats_mask = reshape(fromfile(file=mask_latf, dtype='<i4')/100000., [896, 608])
lons_mask = reshape(fromfile(file=mask_lonf, dtype='<i4')/100000., [896, 608])
elif (res==6):
        # 6.25 km grid
mask_latf = open(data_path+'/psn06lats_v3.dat', 'rb')
mask_lonf = open(data_path+'/psn06lons_v3.dat', 'rb')
lats_mask = reshape(fromfile(file=mask_latf, dtype='<i4')/100000., [1792, 1216])
lons_mask = reshape(fromfile(file=mask_lonf, dtype='<i4')/100000., [1792, 1216])
return lats_mask, lons_mask
def getNESOSIM(fileSnowT, dateStrT):
""" Grab the NESOSIM data and pick the day from a given date string.
Uses the xarray package (files were generated using xarray so works nicely)..
Returns an xarray Dataset
"""
dN = xr.open_dataset(fileSnowT)
# Get NESOSIM snow depth and density data for that date
dNday = dN.sel(day=int(dateStrT))
# Provide additional mask variable
mask = np.ones((dNday['longitude'].values.shape)).astype('int')
mask[np.where((dNday['snowDepth']>0.02)&(dNday['snowDepth']<1)&(dNday['iceConc']>0.15)&np.isfinite(dNday['density']))]=0
dNday['mask'] = (('x', 'y'), mask)
return dNday
def assignRegionMask(dF, mapProj, ancDataPath='../Data/'):
"""
Grab the NSIDC region mask and add to dataframe as a new column
# 1 non-region oceans
# 2 Sea of Okhotsk and Japan
# 3 Bering Sea
# 4 Hudson Bay
# 5 Gulf of St. Lawrence
# 6 Baffin Bay/Davis Strait/Labrador Sea
# 7 Greenland Sea
# 8 Barents Seas
# 9 Kara
# 10 Laptev
# 11 E. Siberian
# 12 Chukchi
# 13 Beaufort
# 14 Canadian Archipelago
# 15 Arctic Ocean
# 20 Land
# 21 Coast
Args:
dF (data frame): original data frame
mapProj (basemap instance): basemap map projection
Returns:
dF (data frame): data frame including ice type column (1 = multiyear ice, 0 = everything else)
"""
region_mask, xptsI, yptsI = get_region_mask_sect(ancDataPath, mapProj, xypts_return=1)
xptsI=xptsI.flatten()
yptsI=yptsI.flatten()
region_mask=region_mask.flatten()
#iceTypeGs=[]
regionFlags=ma.masked_all((size(dF['freeboard'].values)))
for x in range(size(dF['freeboard'].values)):
# Find nearest ice type
dist=sqrt((xptsI-dF['xpts'].iloc[x])**2+(yptsI-dF['ypts'].iloc[x])**2)
index_min = np.argmin(dist)
regionFlags[x]=int(region_mask[index_min])
# This is what I sometimes do but it appears slower in this case..
# I checked and they gave the same answers
# iceTypeG2 = griddata((xpts_type, ypts_type), ice_typeT2, (dF['xpts'].iloc[x], dF['ypts'].iloc[x]), method='nearest')
# print(iceTypeG)
# iceTypeGs.append(iceTypeG)
dF['region_flag'] = pd.Series(regionFlags, index=dF.index)
return dF
def get_region_mask_sect(datapath, mplot, xypts_return=0):
""" Get NSIDC section mask data """
datatype='uint8'
file_mask = datapath+'/sect_fixed_n.msk'
# 1 non-region oceans
# 2 Sea of Okhotsk and Japan
# 3 Bering Sea
# 4 Hudson Bay
# 5 Gulf of St. Lawrence
# 6 Baffin Bay/Davis Strait/Labrador Sea
# 7 Greenland Sea
# 8 Barents Seas
# 9 Kara
# 10 Laptev
# 11 E. Siberian
# 12 Chukchi
# 13 Beaufort
# 14 Canadian Archipelago
# 15 Arctic Ocean
# 20 Land
# 21 Coast
fd = open(file_mask, 'rb')
region_mask = fromfile(file=fd, dtype=datatype)
region_mask = reshape(region_mask, [448, 304])
#xpts, ypts = mplot(lons_mask, lats_mask)
if (xypts_return==1):
mask_latf = open(datapath+'/psn25lats_v3.dat', 'rb')
mask_lonf = open(datapath+'/psn25lons_v3.dat', 'rb')
lats_mask = reshape(fromfile(file=mask_latf, dtype='<i4')/100000., [448, 304])
lons_mask = reshape(fromfile(file=mask_lonf, dtype='<i4')/100000., [448, 304])
xpts, ypts = mplot(lons_mask, lats_mask)
return region_mask, xpts, ypts
else:
return region_mask
def getProcessedATL10ShotdataNCDF(dataPathT, yearStr='2018', monStr='*', dayStr='*',
fNum=-1, beamStr='gt1r', vars=[], smoothingWindow=0):
"""
Load ICESat-2 thickness data produced from the raw ATL10 segment data
By Alek Petty (June 2019)
"""
print(dataPathT+'IS2ATL10*'+yearStr+monStr+dayStr+'*'+'_'+beamStr+'.nc')
files=glob(dataPathT+'IS2ATL10*'+yearStr+monStr+dayStr+'*'+'_'+beamStr+'.nc')
print('Number of files:', size(files))
#testFile = Dataset(files[0])
#print(testFile.variables.keys())
if (fNum>-0.5):
if (size(vars)>0):
IS2dataAll= xr.open_dataset(files[fNum], engine='h5netcdf', data_vars=vars)
else:
IS2dataAll= xr.open_dataset(files[fNum], engine='h5netcdf')
else:
# apparently autoclose assumed so no longer need to include the True flag
if (size(vars)>0):
IS2dataAll= xr.open_mfdataset(dataPathT+'/IS2ATL10*'+yearStr+monStr+dayStr+'*'+'_'+beamStr+'.nc', engine='h5netcdf', data_vars=vars, parallel=True)
else:
IS2dataAll= xr.open_mfdataset(dataPathT+'/IS2ATL10*'+yearStr+monStr+dayStr+'*'+'_'+beamStr+'.nc', engine='h5netcdf', parallel=True)
#IS2dataAll = pd.read_pickle(files[0])
print(IS2dataAll.info)
#IS2dataAll=IS2dataAll[vars]
#print(IS2dataAll.info)
if (smoothingWindow>0):
# If we want to smooth the datasets
seg_length=IS2dataAll['seg_length']
seg_weightedvarR=seg_length.rolling(index=smoothingWindow, center=True).mean()
seg_weightedvar=seg_weightedvarR[int(smoothingWindow/2):-int(smoothingWindow/2):smoothingWindow]
# print(seg_weightedvar)
seg_weightedvars=[]
ds = seg_weightedvar.to_dataset(name = 'seg_length')
#seg_weightedvars.append(seg_weightedvar)
# Skip the first one as that's always (should be) the seg_length
for var in vars[1:]:
            print('Coarsening '+var+'...')
varIS2=IS2dataAll[var]
            seg_weightedvarR=(varIS2*seg_length).rolling(index=smoothingWindow, center=True).sum()/seg_length.rolling(index=smoothingWindow, center=True).sum()  # weight by segment length (as written before, the sums cancelled out)
seg_weightedvar=seg_weightedvarR[int(smoothingWindow/2):-int(smoothingWindow/2):smoothingWindow]
#print(seg_weightedvar)
ds[var] = seg_weightedvar
#seg_weightedvars.append(seg_weightedvar)
print('Coarsened var')
#Merge the coarsened arrays
#seg_weightedvarsM=xr.merge(seg_weightedvars)
ds=ds.reset_index('index', drop=True)
#print('Rechunking...')
#ds=ds.chunk(2000)
#print('Rechunked')
print(ds)
return ds
else:
return IS2dataAll
def getNesosimDates(dF, snowPathT):
""" Get dates from NESOSIM files"""
# This will come from the distinct rows of the IS-1 data eventually,
# but for now the data only span a day or two, so not much change in snow depth..
dayS=dF['day'].iloc[0]
monthS=dF['month'].iloc[0]
monthF=dF['month'].iloc[-1]
yearS=dF['year'].iloc[0]
dateStr= getDate(dF['year'].iloc[0], dF['month'].iloc[0], dF['day'].iloc[0])
print ('Date:', yearS, monthS, dayS)
#print (dateStr)
#print (dF['year'].iloc[-1], dF['month'].iloc[-1], dF['day'].iloc[-1])
# Find the right NESOSIM data file based on the freeboard dates
fileNESOSIM = glob(snowPathT+'*'+str(yearS)+'-*'+'.nc')[0]
#if (monthS>8):
#fileNESOSIM = glob(snowPathT+'*'+str(yearS)+'-*'+'.nc')[0]
#else:
# fileNESOSIM = glob(snowPathT+'*'+str(yearS-1)+'-*'+'.nc')[0]
    if (monthS > 5) and (monthF == 5):  # '&' binds tighter than '>', so the unparenthesized check was wrong
print ('WARNING! LACK OF SNOW DATA')
return fileNESOSIM, dateStr
def getDate(year, month, day):
""" Get date string from year month and day"""
return str(year)+'%02d' %month+'%02d' %day
def gridNESOSIMtoFreeboard(dF, mapProj, fileSnow, dateStr, outSnowVar='snowDepthN', outDensityVar='snowDensityN', returnMap=0):
"""
Load relevant NESOSIM snow data file and assign to freeboard values
Args:
dF (data frame): Pandas dataframe
mapProj (basemap instance): Basemap map projection
fileSnow (string): NESOSIM file path
dateStr (string): date string
outSnowVar (string): Name of snow depth column
outDensityVar (string): Name of snow density column
Returns:
        dF (data frame): dataframe updated to include colocated NESOSIM (and distributed) snow data
"""
dN = xr.open_dataset(fileSnow)
# Get NESOSIM snow depth and density data for that date
    # Should move this into the loop if there is a significant date change in the freeboard data.
    # Not doing that here, to keep processing speed up.
dNday = dN.sel(day=int(dateStr))
lonsN = array(dNday.longitude)
latsN = array(dNday.latitude)
xptsN, yptsN = mapProj(lonsN, latsN)
# Get dates at start and end of freeboard file
dateStrStart= getDate(dF['year'].iloc[0], dF['month'].iloc[0], dF['day'].iloc[0])
dateStrEnd= getDate(dF['year'].iloc[-1], dF['month'].iloc[-1], dF['day'].iloc[-1])
print('Check dates (should be within a day):', dateStr, dateStrStart, dateStrEnd)
snowDepthNDay = array(dNday.snowDepth)
snowDensityNDay = array(dNday.density)
iceConcNDay = array(dNday.iceConc)
    # Remove data where snow depths are less than 0 (masked).
    # Might need to check whether any other masks should be applied here.
mask=where((snowDepthNDay>0.01)&(snowDepthNDay<1)&(iceConcNDay>0.01)&np.isfinite(snowDensityNDay))
snowDepthNDay = snowDepthNDay[mask]
snowDensityNDay = snowDensityNDay[mask]
xptsNDay = xptsN[mask]
yptsNDay = yptsN[mask]
    # Load into arrays; speeds up later computation and may aid parallelization
freeboardsT=dF['freeboard'].values
xptsT=dF['xpts'].values
yptsT=dF['ypts'].values
# I think it's better to declare array now so memory is allocated before the loop?
snowDepthGISs=ma.masked_all(size(freeboardsT))
snowDensityGISs=ma.masked_all(size(freeboardsT))
#snowDepthDists=ma.masked_all(size(freeboardsT))
#for x in prange(size(freeboardsT)):
for x in range(size(freeboardsT)):
# Could embed the NESOSIM dates here
# Use nearest neighbor to find snow depth at IS2 point
#snowDepthGISs[x] = griddata((xptsDay, yptsDay), snowDepthDay, (dF['xpts'].iloc[x], dF['ypts'].iloc[x]), method='nearest')
#snowDensityGISs[x] = griddata((xptsDay, yptsDay), densityDay, (dF['xpts'].iloc[x], dF['ypts'].iloc[x]), method='nearest')
# Think this is the much faster way to find nearest neighbor!
dist = sqrt((xptsNDay-xptsT[x])**2+(yptsNDay-yptsT[x])**2)
index_min = np.argmin(dist)
snowDepthGISs[x]=snowDepthNDay[index_min]
snowDensityGISs[x]=snowDensityNDay[index_min]
#print(snowDepthNDay[index_min], densityNDay[index_min])
dF[outSnowVar] = pd.Series(snowDepthGISs, index=dF.index)
dF[outDensityVar] = pd.Series(snowDensityGISs, index=dF.index)
# SNOW REDISTRIBUTION
#for x in range(size(freeboardsT)):
    # Find the mean freeboard in this vicinity
# ICESat-1 has a shot every 172 m, so around 600 shots = 100 km
# meanFreeboard = ma.mean(freeboardsT[x-300:x+300])
# snowDepthDists[x] = snowDistribution(snowDepthGISs[x], freeboardsT[x], meanFreeboard)
#dF[outSnowVar+'dist'] = pd.Series(snowDepthDists, index=dF.index)
#print ('Snow depth (m): ', snowDepthGIS)
#print ('Snow density (kg/m3): ', snowDensityGIS)
#print ('Snow depth (m): ', snowDepthDists)
if (returnMap==1):
return dF, xptsN, yptsN, dNday,
else:
return dF
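# Example call (a sketch; the file path and date string are hypothetical):
# dF = gridNESOSIMtoFreeboard(dF, mapProj, '/path/to/NESOSIM_2018.nc', '20181115')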
def bindataSegment(x, y, z, seg, xG, yG, binsize=0.01, retbin=True, retloc=True):
"""
Place unevenly spaced 2D data on a grid by 2D binning (nearest
neighbor interpolation) and weight using the IS2 segment lengths.
Parameters
----------
x : ndarray (1D)
        The independent data x-axis of the grid.
    y : ndarray (1D)
        The independent data y-axis of the grid.
z : ndarray (1D)
The dependent data in the form z = f(x,y).
seg : ndarray (1D)
The segment length of the data points in the form z = seg(x,y).
binsize : scalar, optional
The full width and height of each bin on the grid. If each
bin is a cube, then this is the x and y dimension. This is
the step in both directions, x and y. Defaults to 0.01.
retbin : boolean, optional
Function returns `bins` variable (see below for description)
if set to True. Defaults to True.
retloc : boolean, optional
Function returns `wherebins` variable (see below for description)
if set to True. Defaults to True.
Returns
-------
grid : ndarray (2D)
The evenly gridded data. The value of each cell is the median
value of the contents of the bin.
bins : ndarray (2D)
A grid the same shape as `grid`, except the value of each cell
is the number of points in that bin. Returns only if
`retbin` is set to True.
wherebin : list (2D)
A 2D list the same shape as `grid` and `bins` where each cell
        contains the indices of `z` which contain the values stored
in the particular bin.
Revisions
---------
2010-07-11 ccampo Initial version
"""
# get extrema values.
xmin, xmax = xG.min(), xG.max()
ymin, ymax = yG.min(), yG.max()
# make coordinate arrays.
xi = xG[0]
yi = yG[:, 0] #np.arange(ymin, ymax+binsize, binsize)
xi, yi = np.meshgrid(xi,yi)
# make the grid.
grid = np.zeros(xi.shape, dtype=x.dtype)
nrow, ncol = grid.shape
if retbin: bins = np.copy(grid)
# create list in same shape as grid to store indices
if retloc:
wherebin = np.copy(grid)
wherebin = wherebin.tolist()
# fill in the grid.
for row in prange(nrow):
for col in prange(ncol):
xc = xi[row, col] # x coordinate.
yc = yi[row, col] # y coordinate.
# find the position that xc and yc correspond to.
posx = np.abs(x - xc)
posy = np.abs(y - yc)
ibin = np.logical_and(posx < binsize/2., posy < binsize/2.)
ind = np.where(ibin == True)[0]
# fill the bin.
bin = z[ibin]
segbin = seg[ibin]
if retloc: wherebin[row][col] = ind
if retbin: bins[row, col] = bin.size
if bin.size != 0:
binvalseg = np.sum(bin*segbin)/np.sum(segbin)
grid[row, col] = binvalseg
else:
grid[row, col] = np.nan # fill empty bins with nans.
# return the grid
if retbin:
if retloc:
return grid, bins, wherebin
else:
return grid, bins
else:
if retloc:
return grid, wherebin
else:
return grid
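# Example use of bindataSegment (a sketch with made-up 1D arrays x, y, z, seg):
# xG, yG = np.meshgrid(np.arange(0., 1., 0.1), np.arange(0., 1., 0.1))
# grid, bins, wherebin = bindataSegment(x, y, z, seg, xG, yG, binsize=0.1)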
def convert_GPS_time(GPS_Time, OFFSET=0.0):
"""
convert_GPS_time.py (10/2017)
Return the calendar date and time for given GPS time.
Written by Tyler Sutterley
Based on Tiffany Summerscales's PHP conversion algorithm
https://www.andrews.edu/~tzs/timeconv/timealgorithm.html
INPUTS:
GPS_Time: GPS time (standard = seconds since January 6, 1980 at 00:00)
OUTPUTS:
month: Number of the desired month (1 = January, ..., 12 = December).
day: Number of day of the month.
year: Number of the desired year.
hour: hour of the day
minute: minute of the hour
second: second (and fractions of a second) of the minute.
OPTIONS:
OFFSET: number of seconds to offset each GPS time
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (http://www.numpy.org)
PROGRAM DEPENDENCIES:
convert_julian.py: convert Julian dates into calendar dates
UPDATE HISTORY:
Updated 10/2017: added leap second from midnight 2016-12-31
Written 04/2016
"""
#-- PURPOSE: convert from GPS time to calendar dates
#-- convert from standard GPS time to UNIX time accounting for leap seconds
#-- and adding the specified offset to GPS_Time
UNIX_Time = convert_GPS_to_UNIX(np.array(GPS_Time) + OFFSET)
#-- calculate Julian date from UNIX time and convert into calendar dates
#-- UNIX time: seconds from 1970-01-01 00:00:00 UTC
julian_date = (UNIX_Time/86400.0) + 2440587.500000
cal_date = convert_julian(julian_date)
#-- include UNIX times in output
cal_date['UNIX'] = UNIX_Time
#-- return the calendar dates and UNIX time
return cal_date
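#-- Example: GPS time zero is the GPS epoch,
#-- convert_GPS_time(0) -> year 1980, month 1, day 6, 00:00:00 (UNIX 315964800)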
def convert_julian(JD, ASTYPE=None, FORMAT='dict'):
#-- convert to array if only a single value was imported
# Written and provided by Tyler Sutterley
if (np.ndim(JD) == 0):
JD = np.array([JD])
SINGLE_VALUE = True
else:
SINGLE_VALUE = False
JDO = np.floor(JD + 0.5)
C = np.zeros_like(JD)
#-- calculate C for dates before and after the switch to Gregorian
IGREG = 2299161.0
ind1, = np.nonzero(JDO < IGREG)
C[ind1] = JDO[ind1] + 1524.0
ind2, = np.nonzero(JDO >= IGREG)
B = np.floor((JDO[ind2] - 1867216.25)/36524.25)
C[ind2] = JDO[ind2] + B - np.floor(B/4.0) + 1525.0
#-- calculate coefficients for date conversion
D = np.floor((C - 122.1)/365.25)
E = np.floor((365.0 * D) + np.floor(D/4.0))
F = np.floor((C - E)/30.6001)
#-- calculate day, month, year and hour
DAY = np.floor(C - E + 0.5) - np.floor(30.6001*F)
MONTH = F - 1.0 - 12.0*np.floor(F/14.0)
YEAR = D - 4715.0 - np.floor((7.0+MONTH)/10.0)
HOUR = np.floor(24.0*(JD + 0.5 - JDO))
#-- calculate minute and second
G = (JD + 0.5 - JDO) - HOUR/24.0
MINUTE = np.floor(G*1440.0)
SECOND = (G - MINUTE/1440.0) * 86400.0
#-- convert all variables to output type (from float)
if ASTYPE is not None:
YEAR = YEAR.astype(ASTYPE)
MONTH = MONTH.astype(ASTYPE)
DAY = DAY.astype(ASTYPE)
HOUR = HOUR.astype(ASTYPE)
MINUTE = MINUTE.astype(ASTYPE)
SECOND = SECOND.astype(ASTYPE)
#-- if only a single value was imported initially: remove singleton dims
if SINGLE_VALUE:
YEAR = YEAR.item(0)
MONTH = MONTH.item(0)
DAY = DAY.item(0)
HOUR = HOUR.item(0)
MINUTE = MINUTE.item(0)
SECOND = SECOND.item(0)
#-- return date variables in output format (default python dictionary)
if (FORMAT == 'dict'):
return dict(year=YEAR, month=MONTH, day=DAY,
hour=HOUR, minute=MINUTE, second=SECOND)
elif (FORMAT == 'tuple'):
return (YEAR, MONTH, DAY, HOUR, MINUTE, SECOND)
elif (FORMAT == 'zip'):
return zip(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND)
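#-- Example: Julian date 2451545.0 is the J2000 epoch,
#-- convert_julian(2451545.0) -> year 2000, month 1, day 1, hour 12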
def get_leaps():
#-- PURPOSE: Define GPS leap seconds
# Written and provided by Tyler Sutterley
leaps = [46828800, 78364801, 109900802, 173059203, 252028804, 315187205,
346723206, 393984007, 425520008, 457056009, 504489610, 551750411,
599184012, 820108813, 914803214, 1025136015, 1119744016, 1167264017]
return leaps
def is_leap(GPS_Time):
#-- PURPOSE: Test to see if any GPS seconds are leap seconds
# Written and provided by Tyler Sutterley
leaps = get_leaps()
Flag = np.zeros_like(GPS_Time, dtype=np.bool)
for leap in leaps:
count = np.count_nonzero(np.floor(GPS_Time) == leap)
if (count > 0):
indices, = np.nonzero(np.floor(GPS_Time) == leap)
Flag[indices] = True
return Flag
def count_leaps(GPS_Time):
#-- PURPOSE: Count number of leap seconds that have passed for each GPS time
# Written and provided by Tyler Sutterley
leaps = get_leaps()
#-- number of leap seconds prior to GPS_Time
n_leaps = np.zeros_like(GPS_Time, dtype=np.uint)
for i,leap in enumerate(leaps):
count = np.count_nonzero(GPS_Time >= leap)
if (count > 0):
indices, = np.nonzero(GPS_Time >= leap)
# print(indices)
# pdb.set_trace()
n_leaps[indices] += 1
return n_leaps
def convert_UNIX_to_GPS(UNIX_Time):
#-- PURPOSE: Convert UNIX Time to GPS Time
#-- calculate offsets for UNIX times that occur during leap seconds
offset = np.zeros_like(UNIX_Time)
count = np.count_nonzero((UNIX_Time % 1) != 0)
if (count > 0):
indices, = np.nonzero((UNIX_Time % 1) != 0)
UNIX_Time[indices] -= 0.5
offset[indices] = 1.0
#-- convert UNIX_Time to GPS without taking into account leap seconds
#-- (UNIX epoch: Jan 1, 1970 00:00:00, GPS epoch: Jan 6, 1980 00:00:00)
GPS_Time = UNIX_Time - 315964800
leaps = get_leaps()
#-- calculate number of leap seconds prior to GPS_Time
n_leaps = np.zeros_like(GPS_Time, dtype=np.uint)
for i,leap in enumerate(leaps):
count = np.count_nonzero(GPS_Time >= (leap - i))
if (count > 0):
indices, = np.nonzero(GPS_Time >= (leap - i))
n_leaps[indices] += 1
#-- take into account leap seconds and offsets
GPS_Time += n_leaps + offset
return GPS_Time
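#-- Example: the GPS epoch expressed in UNIX time maps back to GPS time zero,
#-- convert_UNIX_to_GPS(np.array([315964800.0])) -> array([0.])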
def convert_GPS_to_UNIX(GPS_Time):
#-- PURPOSE: Convert GPS Time to UNIX Time
#-- convert GPS_Time to UNIX without taking into account leap seconds
#-- (UNIX epoch: Jan 1, 1970 00:00:00, GPS epoch: Jan 6, 1980 00:00:00)
UNIX_Time = GPS_Time + 315964800
#-- number of leap seconds prior to GPS_Time
n_leaps = count_leaps(GPS_Time)
UNIX_Time -= n_leaps
#-- check if GPS Time is leap second
Flag = is_leap(GPS_Time)
if Flag.any():
#-- for leap seconds: add a half second offset
indices, = np.nonzero(Flag)
UNIX_Time[indices] += 0.5
return UNIX_Time
|
[
"ayumif@umich.edu"
] |
ayumif@umich.edu
|
3e034a11bde11aa6a40bca38c774c9dba4dc8ef4
|
9b422078f4ae22fe16610f2ebc54b8c7d905ccad
|
/xlsxwriter/test/comparison/test_chart_format07.py
|
45e9369b2bac7462c137134173b1cda4559f1696
|
[
"BSD-2-Clause-Views"
] |
permissive
|
projectsmahendra/XlsxWriter
|
73d8c73ea648a911deea63cb46b9069fb4116b60
|
9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45
|
refs/heads/master
| 2023-07-21T19:40:41.103336
| 2023-07-08T16:54:37
| 2023-07-08T16:54:37
| 353,636,960
| 0
| 0
|
NOASSERTION
| 2021-04-01T08:57:21
| 2021-04-01T08:57:20
| null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_format07.xlsx')
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
chart.axis_ids = [46163840, 46175360]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
'marker': {
'type': 'square',
'size': 5,
'line': {'color': 'yellow'},
'fill': {'color': 'red'},
},
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
bd0aee949be51e9122bd5c53c9a3f1bed2200067
|
1865a8508bed279961abaef324b434c0e3caa815
|
/setup.py
|
261fb583f89174f98ea47d3f5b9b3cadf5e81b6b
|
[
"MIT"
] |
permissive
|
zidarsk8/simple_wbd
|
de68cbefe94fda52ed5330ff55b97b4a73aedfb4
|
6c2d1611ffd70d3bf4468862b0b569131ef12d94
|
refs/heads/master
| 2021-01-19T10:54:38.824763
| 2016-08-16T03:58:42
| 2016-08-16T03:58:42
| 59,942,658
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,866
|
py
|
#!/usr/bin/env python3
"""Simplo wbd setup file.
This is the main setup for simple wbd. To manually install this module run:
$ pip install .
For development, to keep track of changes in the module and to include
development and test dependencies, run:
$ pip install --editable .[dev,test]
"""
from setuptools import setup
def get_description():
with open("README.rst") as f:
return f.read()
if __name__ == "__main__":
setup(
name="simple_wbd",
version="0.5.1",
license="MIT",
author="Miha Zidar",
author_email="zidarsk8@gmail.com",
description=("A simple python interface for World Bank Data Indicator "
"and Climate APIs"),
long_description=get_description(),
url="https://github.com/zidarsk8/simple_wbd",
download_url="https://github.com/zidarsk8/simple_wbd/tarball/0.5.1",
packages=["simple_wbd"],
provides=["simple_wbd"],
install_requires=[
"pycountry"
],
extras_require={
"dev": [
"pylint"
],
"test": [
"codecov",
"coverage",
"mock",
"nose",
"vcrpy",
],
},
test_suite="tests",
keywords = [
"World Bank Data",
"indicator api",
"climate api",
],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering",
],
)
|
[
"zidarsk8@gmail.com"
] |
zidarsk8@gmail.com
|
6408e85162d2fbe559aaf0b909b1f053f0c6e12d
|
e9355a2363a831ea9e06cd843b563ffbaf852b26
|
/lists/models.py
|
c2973956da997cf57e7c62b16f4c8f8748b241c6
|
[] |
no_license
|
odisei369/superlists
|
06abb6947b49becb1b2f674fbe2469ee40680322
|
a1013bbf33cedf8016b214c615d326a93eba102a
|
refs/heads/master
| 2021-08-14T12:11:22.704245
| 2017-11-12T11:34:33
| 2017-11-12T11:34:33
| 108,184,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
from django.db import models
class List(models.Model):
pass
class Item(models.Model):
text = models.TextField(default='')
list = models.ForeignKey(List, default=None)
# Create your models here.
|
[
"petrenko.ilia@gmail.com"
] |
petrenko.ilia@gmail.com
|
23a0169704ce8fb2f2e17a376be93d9ea3025544
|
de0341aef4a487d1be271fc2bc3b3b60258ef6b0
|
/programmers/Level 3/표 편집/solve.py
|
c993128c2119260cac35cc583c67f3b1dffbce15
|
[] |
no_license
|
aver1001/github-practice
|
485d8695cd4b9aa374c6b069832b3c0999fc4b6c
|
62ab6de80e8246b627b880a7aff5d668b0fea889
|
refs/heads/main
| 2023-08-24T09:49:35.498578
| 2021-10-13T23:57:18
| 2021-10-13T23:57:18
| 379,813,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
def solution(n, k, cmd):
    ## n: number of rows
    ## k: position of the initially selected row
    ## cmd: array of command strings
    ## U X => move up X rows
    ## D X => move down X rows
    ## C => delete the selected row, then select the row just below it; if it was the last row, select the row above instead
    ## Z => restore the most recently deleted row; the selection stays where it is
    answer = ['O' for _ in range (n)]
    pt = k-1
    backup = []
    for command in cmd:
        ## U command
        if command[0] == 'U':
            move = int(command.split(' ')[-1])
            while(move != 0 and pt != -1) :
                if answer[pt] == 'O':
                    move -=1
                    pt -= 1
                else:
                    pt -=1
            ## Clamp to 0 if we ran past the top
            if pt < 0:
                pt = 0
        ## D command
        elif command[0] == 'D':
            move = int(command.split(' ')[-1])
            while(move != 0 and pt !=n) :
                if answer[pt] == 'O':
                    move -=1
                    pt += 1
                else:
                    pt +=1
            ## Clamp to n-1 if we ran past the bottom
            if pt > n-1:
                pt = n-1
        ## C command
        elif command[0] == 'C':
            backup.append(pt)
            answer[pt] = 'X'
            ## Select the next remaining row below; if none is left, take the nearest one above
            while pt < n and answer[pt] != 'O':
                pt += 1
            if pt == n:
                pt -= 1
                while answer[pt] != 'O':
                    pt -= 1
        elif command[0] == 'Z':
            answer[backup.pop()] = 'O'
        print(command, answer, sep = '\n', end = '\n\n')  # debug output
    return ''.join(answer)
print(solution(8,2,["D 2","C","U 3","C","D 4","C","U 2","Z","Z","U 1","C"]))
|
[
"69618305+aver1001@users.noreply.github.com"
] |
69618305+aver1001@users.noreply.github.com
|
e6595a2de890bde16ecaf021779fa9b65bd318be
|
063bf24d3fc3f4a582ba31bb99a0b31eee336ad7
|
/SphinxTrain/python/cmusphinx/s3model.py
|
9bbc0b3c30456a49c2b127079c724336d8e70ac0
|
[
"BSD-2-Clause"
] |
permissive
|
khcqcn/cmusphinx
|
6f2bd35593ff40208340543bb43f56e60bf0a71a
|
45179a75546b396a218435a40eaf07be68707f59
|
refs/heads/master
| 2020-03-19T13:41:39.147382
| 2018-06-08T11:36:22
| 2018-06-08T11:36:22
| 136,590,836
| 0
| 0
| null | 2018-06-08T08:33:52
| 2018-06-08T08:33:51
| null |
UTF-8
|
Python
| false
| false
| 3,737
|
py
|
# Copyright (c) 2006 Carnegie Mellon University
#
# You may copy and modify this freely under the same terms as
# Sphinx-III
"""Sphinx-III acoustic models.
This module provides a class which wraps a set of acoustic models, as
used by SphinxTrain, Sphinx-III, and PocketSphinx. It provides
functions for computing Gaussian mixture densities for acoustic
feature vectors.
"""
__author__ = "David Huggins-Daines <dhuggins@cs.cmu.edu>"
__version__ = "$Revision$"
import s3gau
import s3mixw
import s3tmat
import s3mdef
import s3file
import sys
import os
import numpy
WORSTSCORE = -100000
class S3Model(object):
def __init__(self, path=None, topn=4):
self.topn = topn
self.mwfloor = 1e-5
self.varfloor = 1e-5
if path != None:
self.read(path)
def read(self, path):
self.mdef = s3mdef.open(os.path.join(path, "mdef"))
self.mean = s3gau.open(os.path.join(path, "means"))
self.var = s3gau.open(os.path.join(path, "variances"))
self.mixw = s3mixw.open(os.path.join(path, "mixture_weights"))
self.tmat = s3tmat.open(os.path.join(path, "transition_matrices"))
# Normalize transition matrices and mixture weights
for t in range(0, len(self.tmat)):
self.tmat[t] = (self.tmat[t].T / self.tmat[t].sum(1)).T
for t in range(0, len(self.mixw)):
self.mixw[t] = (self.mixw[t].T / self.mixw[t].sum(1)).T.clip(self.mwfloor, 1.0)
# Floor variances and precompute normalizing and inverse variance terms
self.norm = numpy.empty((len(self.var),
len(self.var[0]),
len(self.var[0][0])),'d')
for m,mgau in enumerate(self.var):
for f,feat in enumerate(mgau):
fvar = feat.clip(self.varfloor, numpy.inf)
# log of 1/sqrt(2*pi**N * det(var))
det = numpy.log(fvar).sum(1)
                lrd = -0.5 * (det + feat.shape[1] * numpy.log(2 * numpy.pi))  # matches the normalizer stated above
self.norm[m,f] = lrd
# "Invert" variances
feat[:] = (1 / (fvar * 2))
# Construct senone to codebook mapping
if os.access(os.path.join(path, "senmgau"), os.F_OK):
self.senmgau = s3file.S3File(os.path.join(path, "senmgau")).read1d()
elif len(self.mean) == 1:
self.senmgau = numpy.ones(len(self.mixw))
else:
self.senmgau = numpy.arange(0, len(self.mixw))
self.senscr = numpy.ones(len(self.mixw)) * WORSTSCORE
def cb_compute(self, mgau, feat, obs):
"Compute codebook #mgau feature #feat for obs"
mean = self.mean[mgau][feat]
ivar = self.var[mgau][feat]
norm = self.norm[mgau][feat]
diff = obs - mean
dist = (diff * ivar * diff).sum(1)
return norm - dist
def senone_compute(self, senones, *features):
"""Compute senone scores for given list of senones and a
frame of acoustic features"""
cbs = {}
self.senscr[:] = WORSTSCORE
for s in senones:
m = self.senmgau[s]
if not m in cbs:
cbs[m] = [self.cb_compute(m, f, features[f])
for f in range(0,len(self.mean[m]))]
score = 0
for f, vec in enumerate(features):
# Compute densities and scale by mixture weights
d = cbs[m][f] + numpy.log(self.mixw[s,f])
# Take top-N densities
d = d.take(d.argsort()[-self.topn:])
# Multiply into output score
score += numpy.log(numpy.exp(d).sum())
self.senscr[s] = score
return numpy.exp(self.senscr - self.senscr.max())
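# Example usage (a sketch; the model path and feature frames are assumptions):
# model = S3Model('/path/to/acoustic/model', topn=4)
# posteriors = model.senone_compute(range(len(model.mixw)), mfc_frame)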
|
[
"dhdfu@c60273ff-4a12-0410-8d58-82ceea6d4170"
] |
dhdfu@c60273ff-4a12-0410-8d58-82ceea6d4170
|
caea01eb1c844499ed64d53c25b5a3fde5d6597b
|
38368dfaef53c2e3213e2b6d2ea55ab38fbe3eab
|
/setup.py
|
406e9fcbfe28fbf16b5bec6d2092ebb0ac7b42c5
|
[
"MIT"
] |
permissive
|
DonCharlesLambert/tkCharacter
|
5ab9cd5d74123749bc61cbf0747d43ae855b4bcf
|
eb877e3e40a71f44321f09207a7c5bf955779799
|
refs/heads/master
| 2022-04-22T19:56:26.554151
| 2020-04-24T22:23:34
| 2020-04-24T22:23:34
| 257,748,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
from distutils.core import setup
setup(
name = 'tkCharacter',
packages = ['tkCharacter'],
version = '0.1',
license='MIT',
description = 'Allows for effortless creation of playable and AI 2d characters in python games',
long_description= "",
author = 'Don Charles - Lambert',
author_email = 'your.email@domain.com',
url = 'https://github.com/DonCharlesLambert/tkCharacter',
  download_url = 'https://github.com/DonCharlesLambert/tkCharacter/archive/0.1.tar.gz',
  keywords = ['tkinter', 'games', 'characters'],
  # tkinter ships with the CPython standard library and cannot be installed
  # from PyPI, so it is not listed as a requirement.
  install_requires=[],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
|
[
"zcabdc0@ucl.ac.uk"
] |
zcabdc0@ucl.ac.uk
|
37eb3dcee7882c9b0d490c7ac8434257256685dd
|
d9ab421ab08c34179c9337400d34031af409c03e
|
/firstweek/ideabank.py
|
83ec6c8e86f408386530a0bd611942a95950a29e
|
[] |
no_license
|
grosuclaudia/dojos
|
b79caada2a89ab015d79deb21cda780ef892c55d
|
27958e14fe0e67e90dc811441a36fa7f4425a6ae
|
refs/heads/master
| 2020-08-31T16:20:24.737074
| 2019-10-31T09:53:20
| 2019-10-31T09:53:20
| 218,731,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# idea bank
idea = input("What is your new idea: ")
nr = 0
with open("ideabank.txt", "a") as file:
    file.write(idea + "\n")
print("Your ideabank:")
with open("ideabank.txt", "r") as file:
    for line in file:
        nr = nr + 1
        print(str(nr) + ". " + line)
|
[
"chvaly@gmail.com"
] |
chvaly@gmail.com
|
cd45e3c8488bd30b895a3c056efaec59d2095aca
|
3d786ca81bae70c59c62572764b7c774e00160ca
|
/01. jump to python/chapter_03/122_4.py
|
7b7c6810e008bbded43d4993c28bc44d3b4be4e8
|
[] |
no_license
|
youngholee0226/bigdata2019
|
7b504853a4a353d6a844155d438a72fef4f9d7f6
|
cb17b6b67cdcc8b723ada62cc574f50d85368d62
|
refs/heads/master
| 2020-04-21T10:56:38.180438
| 2019-02-07T02:32:23
| 2019-02-07T02:32:23
| 169,503,627
| 0
| 0
| null | null | null | null |
UHC
|
Python
| false
| false
| 443
|
py
|
# coding: cp949
feel = "호감"
# feel = ""
hit_on_count = 0
while feel and hit_on_count <10 :
hit_on_count = hit_on_count + 1
print ("%d번 데이트 신청합니다, " %hit_on_count)
if(hit_on_count == 10):
print("고백할 때가 다가 왔네요, ")
continue
feel= input("현재 그녀에 대한 당신의 감정은 어떤가요?")
if(feel == "비호감"):
print("그럼 단념하세요")
break
|
[
"01076777066@daum.net"
] |
01076777066@daum.net
|
67112ebad7aac4774ca09bdc5da34187288d3838
|
38c0d8bce5aff8e2ee1911884d1f93b38a78635c
|
/backend/cryptbox/jwtutils/jwterrors.py
|
75f2e1b679f2c27d97d1eca0ab42c2e53ad7948f
|
[] |
no_license
|
Riolku/cryptbox
|
a1998932169d82efe7e48c726776b965efd0dc48
|
ecd91a69082791cff8d935c271f6331250dd8421
|
refs/heads/master
| 2023-07-31T21:51:04.905500
| 2021-09-17T15:33:20
| 2021-09-17T15:33:20
| 330,080,827
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78
|
py
|
class ExpiredJWT(RuntimeError):
pass
class InvalidJWT(RuntimeError):
pass
|
[
"alex.yj.liao@gmail.com"
] |
alex.yj.liao@gmail.com
|
a7a66869b09e6c69a5e03ada7e513a55feeb1d98
|
68d7cf6eb326d5e1f19d19033e9c852c9ebcfc5f
|
/filter.py
|
14323e843e34d823e306fc738dfec0e9ad862ea4
|
[] |
no_license
|
Sergiogd112/internalphisycs
|
bdf484596dec73a28802bc36bcf1c77f41c0ee3b
|
a8ec188a10c51db33b41a471c6201af4188d7dee
|
refs/heads/master
| 2020-08-13T01:30:10.417530
| 2019-10-18T14:24:36
| 2019-10-18T14:24:36
| 214,879,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
import astropy
import astroplan
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import os
import time
import pickle as pk
from time import gmtime
from astropy.coordinates import solar_system_ephemeris
from astropy.time import Time as tm
from astropy.coordinates import EarthLocation as eloc
from astroplan import Observer
from mylib import *
from multiprocessing import Pool
ln = 30
t = tm(time.time(), format='unix')
location = eloc.from_geodetic(lon=2.055923, lat=41.385728)  # lon/lat were swapped; these coordinates are near Barcelona
basedir = 'Analema/'
deldir = 'delAnalema/'
obs = Observer(longitude=2.055923, latitude=41.385728)
rise = obs.moon_rise_time(time=t)
println(ln)
print(rise, rise.to_datetime())
println(ln)
photopath = [basedir + x for x in os.listdir(basedir)]
println(ln)
print(photopath[:10])
println(ln)
println(ln)
bigarr = [[x, ln, deldir, basedir, obs] for x in photopath]
pool = Pool(3)
pool.map(main, bigarr)
|
[
"sergio.gomezdamas@gmail.com"
] |
sergio.gomezdamas@gmail.com
|
d22df926e0e5abdc854f92d3d1f38844c15bf8eb
|
26bd8294c64974d809198ce62238747340a775dc
|
/src/graffiau/overview.py
|
9ad596805da800156e532db3fe8bddd13f0e8095
|
[] |
no_license
|
skuzzymiglet/ffair-gwyddoniaeth-2019
|
af4a3024a01b76931f49f1dff8282b860fbd2228
|
31ef108de4aac978ce53cb8c823847f3730736d2
|
refs/heads/master
| 2021-08-19T13:08:00.063451
| 2020-01-31T14:18:02
| 2020-01-31T14:18:02
| 186,683,440
| 0
| 1
| null | 2020-05-25T12:30:13
| 2019-05-14T19:04:43
|
Python
|
UTF-8
|
Python
| false
| false
| 880
|
py
|
#! /usr/bin/env python3
# Ffair Gwyddoniaeth (Science Fair) 2019
# Rafik Harrington's experiment
# Measuring CO2 inside a scarf
# More at https://github.com/skuzzymiglet/ffair-gwyddoniaeth-2019
# A small program to get an overview of your data
from bokeh.plotting import figure, output_file, show
import csv
data = open("DATA.CSV", "r")  # Data from the sensor
reader = csv.reader(data, delimiter=',')  # For reading the data
# Coordinates of the points on the line
x = []  # Time
y = []  # CO2
# Add all the data to the axes, as numbers (the CSV yields strings, and max() on strings compares lexicographically)
for row in reader:
    y.append(float(row[0]))
    x.append(float(row[1]))
data.close()  # Close the file
output_file("overview.html")  # The file to plot to
p = figure(title="CO2 ppm", x_axis_label="Time", y_axis_label="CO2 ppm", y_range=[0, int(max(y))])  # Create the graph
p.line(x, y, line_width=1, color="blue")  # Plot the line
show(p)  # Show the graph
|
[
"noreply@github.com"
] |
skuzzymiglet.noreply@github.com
|
bc35e25ddcb973b77a29166ac83c929676dbc149
|
c1be89ddd435016c4907be6d4238f49312bb22b4
|
/use_python_do_something/crawler_ex1.py
|
ee453d7de36b129633e1e9690bfe97616e4095fd
|
[] |
no_license
|
gdhe55555/learn_python
|
1fb6b0f3b3fc62c94290a2803cd037ee0860dd86
|
3027c8bbbfbdd266dcdd02091376d375273790b7
|
refs/heads/master
| 2021-01-10T11:18:06.204381
| 2016-03-09T15:58:08
| 2016-03-09T15:58:08
| 53,510,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
#!/usr/bin/env python3
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
website = 'http://www.heibanke.com/lesson/crawler_ex00/'
html = urlopen(website)
bs_obj = BeautifulSoup(html, "html.parser")
#a_list = bs_obj.findAll("a")
#a_list = bs_obj.findAll("a", href=re.compile("baike\.baidu\.com\w?"))
#for aa in a_list:
#    if not aa.find("img"):
#        if aa.attrs.get('href'):
#            print(aa.text, aa.attrs['href'])
a_list = bs_obj.findAll("h3")
#print(a_list)
text = a_list[0].text  # already a str in Python 3, no need to encode
print(text)
ma = re.match(u"([\u4e00-\u9fa5]+)", text)
print(ma.groups())
|
[
"gdhe55555@qq.com"
] |
gdhe55555@qq.com
|
35eada1e6e31e47d1156a2dd8c85c2aada530ebe
|
4fbd844113ec9d8c526d5f186274b40ad5502aa3
|
/algorithms/python3/pacific_atlantic_water_flow.py
|
6a5e0384ee2afe8a2dd84a801719431deeaa3b09
|
[] |
no_license
|
capric8416/leetcode
|
51f9bdc3fa26b010e8a1e8203a7e1bcd70ace9e1
|
503b2e303b10a455be9596c31975ee7973819a3c
|
refs/heads/master
| 2022-07-16T21:41:07.492706
| 2020-04-22T06:18:16
| 2020-04-22T06:18:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given an m x n matrix of non-negative integers representing the height of each unit cell in a continent, the "Pacific ocean" touches the left and top edges of the matrix and the "Atlantic ocean" touches the right and bottom edges.
Water can only flow in four directions (up, down, left, or right) from a cell to another one with height equal or lower.
Find the list of grid coordinates where water can flow to both the Pacific and Atlantic ocean.
Note:
The order of returned grid coordinates does not matter.
Both m and n are less than 150.
Example:
Given the following 5x5 matrix:
Pacific ~ ~ ~ ~ ~
~ 1 2 2 3 (5) *
~ 3 2 3 (4) (4) *
~ 2 4 (5) 3 1 *
~ (6) (7) 1 4 5 *
~ (5) 1 1 2 4 *
* * * * * Atlantic
Return:
[[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]] (positions with parentheses in above matrix).
"""
""" ==================== body ==================== """
class Solution:
def pacificAtlantic(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
""" ==================== body ==================== """
|
[
"capric8416@gmail.com"
] |
capric8416@gmail.com
|
0d4686f787a951cd5fc7bc0bbdf519fe1c900f03
|
30ce589ccea8afec70ffc340834872316ef8ca5b
|
/Exercise folder/ex61.py
|
7e04f511fa3135670cfea235de675180689ff676
|
[] |
no_license
|
gideonseedboy/Python-Programming-with-Data-Science-and-AI
|
4c152f2cf14600320c6efe79368056e4205ed8a3
|
8563a20d66584413219ed7ae45d3cec5e9e017c0
|
refs/heads/master
| 2022-12-22T20:02:50.539504
| 2020-10-06T07:30:56
| 2020-10-06T07:30:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
#Loops
members = ["Irene", "Emmanuel", "Abraham", "Micheal", "Abraham"]
counter = 0
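# Count occurrences of "Abraham" by hand; members.count("Abraham") is the one-line equivalent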
for i in members:
if i=="Abraham":
counter = counter + 1
print(counter)
|
[
"gideonseedboy@gmail.com"
] |
gideonseedboy@gmail.com
|
a7f27f1b272c8edef7cc2f92a290977e2e30a352
|
64913d7f6803f2cdaa5fbd38ae75437e2e0d936a
|
/migrations/versions/fcd07d45ca01_.py
|
06d5fc59e7cb0907f8856e7b002f61e75a5576d0
|
[] |
no_license
|
peekwez/trader_api
|
251887c52462b66f28cd9035325619d2aeaa94fe
|
fda7357fb9b00f44314420ea960cb34479a8ec7e
|
refs/heads/master
| 2023-05-26T04:20:40.994372
| 2019-02-25T10:18:12
| 2019-02-25T10:18:12
| 172,482,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
"""empty message
Revision ID: fcd07d45ca01
Revises: 2fd5a06e09b0
Create Date: 2019-01-11 05:39:20.505585
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'fcd07d45ca01'
down_revision = '2fd5a06e09b0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('updates_log', sa.Column('task_id', sa.String(length=250), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('updates_log', 'task_id')
# ### end Alembic commands ###
|
[
"kwesi@oddjob.utias.utoronto.ca"
] |
kwesi@oddjob.utias.utoronto.ca
|
e84fa3eaad48b2bc87fd8ac42d0bc6aeb80ed0de
|
b4ec04d6a2a4ba57d11b577326086c14d9b9408b
|
/freshontheboat/resources/getForumProfileLikes.py
|
49cf00e752c1e50d704424cdeb416f91d39d9988
|
[] |
no_license
|
petergzli/FreshOnTheBoat
|
91268d43f91c85da0bacafa268b42e2f1e3dfe6c
|
6320bcd798ad23d6ed936fddeb51a040a28853b2
|
refs/heads/master
| 2021-01-20T10:06:13.318571
| 2015-11-25T18:52:36
| 2015-11-25T18:52:36
| 41,778,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
from flask_restful import Resource, reqparse
from freshontheboat.models.users import User
from freshontheboat.models.forumpostlikes import ForumPostLikes
class GetNewForumLikes(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('forum_profile_id', type = int, required=True)
super(GetNewForumLikes, self).__init__()
def get(self):
args = self.reqparse.parse_args()
results = ForumPostLikes.query.filter_by(forum_profile_id = args['forum_profile_id']).all()
jsonDictionary = []
for result in results:
username = User.query.get(result.user_who_liked).username
dictionaryResult = {'id' : result.id, 'user_who_liked': username, 'forum_profile_id': result.forum_profile_id, 'likes': result.likes, 'dislikes': result.dislikes}
jsonDictionary.append(dictionaryResult)
response = {'status': 'successful', 'results' : jsonDictionary}
return response
|
[
"petergzli@gmail.com"
] |
petergzli@gmail.com
|
7b46b02f2a3f1012c5334e5b8ceecdffd9284e99
|
04bf59cfe698cc5fd9142ab0a503f1ce9cc24ead
|
/tensorflow-serving/docker/serving_client.py
|
664e7477200244098e7e822827746e9f27b7adff
|
[
"MIT"
] |
permissive
|
gingeleski/tensorflow-object-detection
|
9a3e19a55deccb94f4cf42c11fa545d2f4fc191a
|
03d1e418abf6a7bd09894169288b5ad1a4544c93
|
refs/heads/master
| 2020-03-18T00:53:00.448560
| 2018-06-03T00:50:13
| 2018-06-03T00:50:13
| 134,117,218
| 0
| 0
| null | 2018-05-20T03:39:48
| 2018-05-20T03:39:47
| null |
UTF-8
|
Python
| false
| false
| 3,083
|
py
|
#!/usr/bin/env python2.7
"""Send JPEG image to tensorflow_model_server
"""
from __future__ import print_function
from grpc.beta import implementations
import tensorflow as tf
import numpy as np
import os
from io import BytesIO
import requests
from PIL import Image
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import label_map_util
tf.app.flags.DEFINE_string('server', 'localhost:9000',
'PredictionService host:port')
tf.app.flags.DEFINE_string('image', '', 'url to image in JPEG format')
tf.app.flags.DEFINE_string('label_map_path', './pascal_label_map.pbtxt', 'path to label map path')
tf.app.flags.DEFINE_string('save_path', './', 'save path for output image')
tf.app.flags.DEFINE_string('model_name', 'serving', 'model name')
tf.app.flags.DEFINE_string('signature_name', 'serving_default', 'signature name')
tf.app.flags.DEFINE_integer('num_classes', 1, 'num classes')  # integer, since it feeds max_num_classes below
FLAGS = tf.app.flags.FLAGS
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def main(_):
host, port = FLAGS.server.split(':')
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# Send request
response = requests.get(FLAGS.image, stream=True)
if response.status_code == 200:
request = predict_pb2.PredictRequest()
request.model_spec.name = FLAGS.model_name
request.model_spec.signature_name = FLAGS.signature_name
request.inputs['inputs'].CopyFrom(
tf.contrib.util.make_tensor_proto(response.content, shape=[1]))
result = stub.Predict(request, 10.0) # 10 secs timeout
image = Image.open(BytesIO(response.content))
image_np = load_image_into_numpy_array(image)
boxes = np.array(result.outputs['detection_boxes'].float_val).reshape(
result.outputs['detection_boxes'].tensor_shape.dim[0].size,
result.outputs['detection_boxes'].tensor_shape.dim[1].size,
result.outputs['detection_boxes'].tensor_shape.dim[2].size
)
classes = np.array(result.outputs['detection_classes'].float_val)
scores = np.array(result.outputs['detection_scores'].float_val)
label_map = label_map_util.load_labelmap(FLAGS.label_map_path)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=FLAGS.num_classes, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
vis_util.save_image_array_as_png(image_np, FLAGS.save_path+"/output-"+FLAGS.image.split('/')[-1])
if __name__ == '__main__':
tf.app.run()
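# All flags used above are defined at the top of this script; a typical
# invocation might look like the following (the host, image URL, and paths
# are placeholders, not values from the original repo):
#
#   python serving_client.py \
#     --server=localhost:9000 \
#     --image=http://example.com/dog.jpg \
#     --label_map_path=./pascal_label_map.pbtxt \
#     --save_path=./out \
#     --model_name=serving \
#     --num_classes=1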
|
[
"sozercan@gmail.com"
] |
sozercan@gmail.com
|
0ad8ae2c0e8ce0b9a519c70b38fdc6cc8e3ca6c9
|
2b9fe2dbe8681224b1ca385e74ea58e0fb929ac7
|
/blog/models.py
|
b957b7407f8a926dd3ba1497a65df7e4f0eb493f
|
[] |
no_license
|
lpkyrius/py-blog-django
|
1594c5bedeb7c788b9594aab08f5e312aaee5b3d
|
41ca5f22bbc45cefe38f51b043ce071ea09ef88f
|
refs/heads/master
| 2022-11-29T06:59:02.812659
| 2020-08-14T13:43:36
| 2020-08-14T13:43:36
| 287,529,955
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
'''
-------------------------------------------------------
Django itself handles all interaction with the database,
so the database can be swapped without changing the code.
Tables are treated as classes
and their fields as attributes of those classes.
-------------------------------------------------------
'''
class Post(models.Model):
title = models.CharField(max_length=100)
    content = models.TextField()  # unlimited-length text
    '''
    automatic-date options, which do not allow later edits
    -------------------------------------------------------
    date_posted = models.DateTimeField(auto_now=True)      # creation/update date
    date_posted = models.DateTimeField(auto_now_add=True)  # creation date
    '''
    date_posted = models.DateTimeField(default=timezone.now)  # now, without blocking later edits
    # author is a one-to-many foreign key: ForeignKey takes two arguments here,
    # User (the related table) and on_delete for referential integrity
    # (deleting a user deletes that user's posts)
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
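# get_absolute_url reverses a route named 'post-detail'; a minimal URL
# pattern that satisfies it might look like this in urls.py
# (PostDetailView is an assumed class-based view, not defined here):
#
# from django.urls import path
# from .views import PostDetailView
# urlpatterns = [
#     path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
# ]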
|
[
"lpkyrius@gmail.com"
] |
lpkyrius@gmail.com
|
88ae046695faa97023e4952c85ae2915a6475290
|
b1480f77540258ec08c9c35866138bfec839d7d0
|
/src/drostedraw/__init__.py
|
926f411cf29c3ff970f86b4bedac15a10a608353
|
[
"MIT"
] |
permissive
|
asweigart/drostedraw
|
1fd69cac3659eaeebf8179f8015989b5d572c55b
|
d2a3620a4d2bda6fb76321883a3c9587abf6cec4
|
refs/heads/main
| 2023-08-04T18:23:21.586862
| 2021-09-14T17:35:00
| 2021-09-14T17:35:00
| 397,367,988
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,584
|
py
|
"""Droste Draw
By Al Sweigart al@inventwithpython.com
A Python module for making recursive drawings (aka Droste effect) with the built-in turtle module."""
__version__ = '0.2.1'
import turtle, math
MAX_FUNCTION_CALLS = 10000 # Stop recursion after this many function calls.
MAX_ITERATION = 400 # Stop recursion after this iteration.
MIN_SIZE = 1 # Stop recursion if size is less than this.
# NOTE: In general, don't use absolute coordinate functions (like turtle.goto(), turtle.xcor(), turtle.ycor(),
# turtle.setheading()) in your draw functions because they might not work when the heading angle is not 0.
def drawSquare(size, extraData=None):
"""Draw a square where `size` is the length of each side."""
# Move the turtle to the top-right corner before drawing:
turtle.penup()
turtle.forward(size // 2) # Move to the right edge.
turtle.left(90) # Turn to face upwards.
turtle.forward(size // 2) # Move to the top-right corner.
turtle.left(180) # Turn around to face downwards.
turtle.pendown()
# Draw the four sides of a square:
for i in range(4):
turtle.forward(size)
turtle.right(90)
def drawTriangle(size, extraData=None):
"""Draw an equilateral triangle where `size` is the length of
each side."""
# Move the turtle to the top of the equilateral triangle:
height = (size * math.sqrt(3)) / 2
turtle.penup()
turtle.left(90) # Turn to face upwards.
turtle.forward(height * (2/3)) # Move to the top corner.
turtle.right(150) # Turn to face the bottom-right corner.
turtle.pendown()
# Draw the three sides of the triangle:
for i in range(3):
turtle.forward(size)
turtle.right(120)
def drawFilledSquare(size, extraData=None):
"""Draw a solid, filled-in square where `size` is the length of each
side. The extraData dictionary can have a key 'colors' whose value
is a list of "color strings" that the turtle module recognizes, e.g.
'red', 'black', etc. The first color string in the list is used
for the first iteration, the second for the second, and so on. When
you run out of colors for later iterations, the first color is used
again."""
# Move the turtle to the top-right corner before drawing:
turtle.penup()
turtle.forward(size // 2) # Move to the right edge.
turtle.left(90) # Turn to face upwards.
turtle.forward(size // 2) # Move to the top-right corner.
turtle.left(180) # Turn around to face downwards.
turtle.pendown()
# The extra data is a tuple of (fillcolor, pencolor) values:
if extraData is not None:
iteration = extraData['_iteration'] - 1 # -1 because iteration starts at 1, not 0.
turtle.fillcolor(extraData['colors'][iteration % len(extraData['colors'])])
turtle.pencolor(extraData['colors'][iteration % len(extraData['colors'])])
# Draw the four sides of a square:
turtle.begin_fill()
for i in range(4):
turtle.forward(size)
turtle.right(90)
turtle.end_fill()
def drawFilledDiamond(size, extraData=None):
# Move to the right corner before drawing:
turtle.penup()
turtle.forward(math.sqrt(size ** 2 / 2))
turtle.right(135)
turtle.pendown()
# The extra data is a tuple of (fillcolor, pencolor) values:
if extraData is not None:
iteration = extraData['_iteration'] - 1 # -1 because iteration starts at 1, not 0.
turtle.fillcolor(extraData['colors'][iteration % len(extraData['colors'])])
turtle.pencolor(extraData['colors'][iteration % len(extraData['colors'])])
# Draw a square:
turtle.begin_fill()
for i in range(4):
turtle.forward(size)
turtle.right(90)
turtle.end_fill()
def drosteDraw(drawFunction, size, recursiveDrawings, extraData=None):
# NOTE: The current heading of the turtle is considered to be the
# rightward or positive-x direction.
# Provide default values for extraData:
if extraData is None:
extraData = {}
if '_iteration' not in extraData:
extraData['_iteration'] = 1 # The first iteration is 1, not 0.
if '_maxIteration' not in extraData:
extraData['_maxIteration'] = MAX_ITERATION
if '_maxFunctionCalls' not in extraData:
extraData['_maxFunctionCalls'] = MAX_FUNCTION_CALLS
if '_minSize' not in extraData:
extraData['_minSize'] = MIN_SIZE
requiredNumCalls = len(recursiveDrawings) ** extraData['_iteration']
if extraData['_iteration'] > extraData['_maxIteration'] or \
requiredNumCalls > extraData['_maxFunctionCalls'] or \
size < extraData['_minSize']:
return # BASE CASE
# Remember the original starting coordinates and heading.
origX = turtle.xcor()
origY = turtle.ycor()
origHeading = turtle.heading()
turtle.pendown()
drawFunction(size, extraData)
turtle.penup()
# RECURSIVE CASE
# Do each of the recursive drawings:
for i, recursiveDrawing in enumerate(recursiveDrawings):
# Provide default values for the recursiveDrawing dictionary:
if 'x' not in recursiveDrawing:
recursiveDrawing['x'] = 0
if 'y' not in recursiveDrawing:
recursiveDrawing['y'] = 0
if 'size' not in recursiveDrawing:
recursiveDrawing['size'] = 1.0
if 'angle' not in recursiveDrawing:
recursiveDrawing['angle'] = 0
# Move the turtle into position for the next recursive drawing:
turtle.goto(origX, origY)
turtle.setheading(origHeading + recursiveDrawing['angle'])
turtle.forward(size * recursiveDrawing['x'])
turtle.left(90)
turtle.forward(size * recursiveDrawing['y'])
turtle.right(90)
# Increment the iteration count for the next level of recursion:
extraData['_iteration'] += 1
drosteDraw(drawFunction, int(size * recursiveDrawing['size']), recursiveDrawings, extraData)
# Decrement the iteration count when done with that recursion:
extraData['_iteration'] -= 1
# Display any buffered drawing commands on the screen:
if extraData['_iteration'] == 1:
turtle.update()
_DEMO_NUM = 0
def demo(x=None, y=None):
global _DEMO_NUM
turtle.reset()
turtle.tracer(20000, 0) # Increase the first argument to speed up the drawing.
turtle.hideturtle()
if _DEMO_NUM == 0:
# Recursively draw smaller squares in the center:
drosteDraw(drawSquare, 350, [{'size': 0.8}])
elif _DEMO_NUM == 1:
# Recursively draw smaller squares going off to the right:
drosteDraw(drawSquare, 350, [{'size': 0.8, 'x': 0.20}])
elif _DEMO_NUM == 2:
# Recursively draw smaller squares that go up at an angle:
drosteDraw(drawSquare, 350, [{'size': 0.8, 'y': 0.20, 'angle': 15}])
elif _DEMO_NUM == 3:
        # Recursively draw smaller triangles in the center:
drosteDraw(drawTriangle, 350, [{'size': 0.8}])
elif _DEMO_NUM == 4:
        # Recursively draw smaller triangles going off to the right:
drosteDraw(drawTriangle, 350, [{'size': 0.8, 'x': 0.20}])
elif _DEMO_NUM == 5:
        # Recursively draw smaller triangles that go up at an angle:
drosteDraw(drawTriangle, 350, [{'size': 0.8, 'y': 0.20, 'angle': 15}])
elif _DEMO_NUM == 6:
# Recursively draw a spirograph of squares:
drosteDraw(drawSquare, 150, [{'angle': 10, 'x': 0.1}])
elif _DEMO_NUM == 7:
# Recursively draw a smaller square in each of the four corners of the parent square:
drosteDraw(drawSquare, 350, [{'size': 0.5, 'x': -0.5, 'y': 0.5},
{'size': 0.5, 'x': 0.5, 'y': 0.5},
{'size': 0.5, 'x': -0.5, 'y': -0.5},
{'size': 0.5, 'x': 0.5, 'y': -0.5}])
elif _DEMO_NUM == 8:
# Recursively draw smaller filled squares in the center, alternating red and black:
drosteDraw(drawFilledSquare, 350, [{'size': 0.8}], {'colors': ['red', 'black']})
elif _DEMO_NUM == 9:
# Recursively draw a smaller filled square in each of the four corners of the parent square with red and black:
drosteDraw(drawFilledSquare, 350, [{'size': 0.5, 'x': -0.5, 'y': 0.5},
{'size': 0.5, 'x': 0.5, 'y': 0.5},
{'size': 0.5, 'x': -0.5, 'y': -0.5},
{'size': 0.5, 'x': 0.5, 'y': -0.5}], {'colors': ['red', 'black']})
elif _DEMO_NUM == 10:
# Recursively draw a smaller filled square in each of the four corners of the parent square with white and black:
drosteDraw(drawFilledSquare, 350, [{'size': 0.5, 'x': -0.5, 'y': 0.5},
{'size': 0.5, 'x': 0.5, 'y': 0.5},
{'size': 0.5, 'x': -0.5, 'y': -0.5},
{'size': 0.5, 'x': 0.5, 'y': -0.5}], {'colors': ['white', 'black']})
elif _DEMO_NUM == 11:
        # Recursively draw a smaller filled diamond in each of the four corners of the parent diamond:
drosteDraw(drawFilledDiamond, 350, [{'size': 0.5, 'x': -0.45, 'y': 0.45},
{'size': 0.5, 'x': 0.45, 'y': 0.45},
{'size': 0.5, 'x': -0.45, 'y': -0.45},
{'size': 0.5, 'x': 0.45, 'y': -0.45}], {'colors': ['green', 'yellow']})
elif _DEMO_NUM == 12:
        # Draw the Sierpinski triangle:
drosteDraw(drawTriangle, 600, [{'size': 0.5, 'x': 0, 'y': math.sqrt(3) / 6, 'angle': 0},
{'size': 0.5, 'x': 0, 'y': math.sqrt(3) / 6, 'angle': 120},
{'size': 0.5, 'x': 0, 'y': math.sqrt(3) / 6, 'angle': 240}])
elif _DEMO_NUM == 13:
# Draw a recursive "glider" shape from Conway's Game of Life:
drosteDraw(drawSquare, 600, [{'size': 0.333, 'x': 0, 'y': 0.333},
{'size': 0.333, 'x': 0.333, 'y': 0},
{'size': 0.333, 'x': 0.333, 'y': -0.333},
{'size': 0.333, 'x': 0, 'y': -0.333},
{'size': 0.333, 'x': -0.333, 'y': -0.333}])
turtle.exitonclick()
_DEMO_NUM += 1
def main():
# Start the demo:
turtle.onscreenclick(demo)
demo()
turtle.mainloop()
if __name__ == '__main__':
main()
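# Direct use of the API above from another script, as a hedged sketch
# (assumes the package is importable as drostedraw):
#
# import turtle, drostedraw
# turtle.tracer(20000, 0)   # buffer drawing commands for speed
# turtle.hideturtle()
# # each level is 70% of its parent's size, rotated 20 degrees
# drostedraw.drosteDraw(drostedraw.drawSquare, 300, [{'size': 0.7, 'angle': 20}])
# turtle.mainloop()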
|
[
"asweigart@gmail.com"
] |
asweigart@gmail.com
|
3cae8ac244b500fe128a0b1630bc2a07a0b73780
|
781f4f6aff07d69751025f4cc7419c3571567618
|
/seq2seq_htr.py
|
99062755bcfbd35548933eaa534ff10e7205a057
|
[] |
no_license
|
hiqmatNisa/Sequence-to-Sequence-Model
|
1837c598c9d476c10407908490968817093372ab
|
28dc41fc38c28e9d8daaa6ba07d45df0e5354aa2
|
refs/heads/main
| 2023-04-30T07:48:24.571191
| 2021-05-23T14:24:09
| 2021-05-23T14:24:09
| 364,155,813
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,678
|
py
|
import torch
from torch import nn
from torch.autograd import Variable
import random
class Seq2Seq(nn.Module):
def __init__(self, encoder, decoder, output_max_len, vocab_size):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.vocab_size = vocab_size
self.output_max_len=output_max_len
def forward(self, src, trg, train_in_len, teacher_rate, train=True):
        # arguments: train_in, train_out, train_in_len, teacher_rate=0.50, train=True
        # src = [batch size, src len]  (batch_size is read from src.shape[0] below)
        # trg = [batch size, trg len], permuted below to [trg len, batch size]
        # teacher_rate is the probability of using teacher forcing, e.g. a rate
        # of 0.75 means ground-truth tokens are fed as input 75% of the time
batch_size = src.shape[0]
trg = trg.permute(1, 0)
trg_len = trg.shape[0]
#outputs = Variable(torch.zeros(self.output_max_len-1, batch_size, self.vocab_size), requires_grad=True)
#tensor to store decoder outputs
outputs = torch.zeros(self.output_max_len-1, batch_size, self.vocab_size).cuda()#.to(torch.float64)
#encoder_outputs is all hidden states of the input sequence, back and forwards
#hidden is the final forward and backward hidden states, passed through a linear layer
encoder_outputs, hidden, cell = self.encoder(src, train_in_len, train)
#first input to the decoder is the <sos> tokens
input = Variable(self.one_hot(trg[0].data)).to(torch.int64)
prev_c = Variable(torch.zeros(encoder_outputs.shape[1], encoder_outputs.shape[2]), requires_grad=True).cuda() #b,f
for t in range(0, self.output_max_len-1):
#insert input token embedding, previous hidden state and all encoder hidden states
#receive output tensor (predictions) and new hidden state
output, hidden, cell, prev_att_weights= self.decoder(input, hidden, cell, encoder_outputs, train, prev_c)
#place predictions in a tensor holding predictions for each token
outputs[t] = output
#decide if we are going to use teacher forcing or not
teacher_force = random.random() < teacher_rate
#if teacher forcing, use actual next token as next input
#if not, use predicted token
input = Variable(self.one_hot(trg[t+1].data)).to(torch.int64) if train and teacher_force else output.data
return outputs
def one_hot(self, src): # src: torch.cuda.LongTensor
ones = torch.eye(self.vocab_size).cuda()
return ones.index_select(0, src)
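# Shape-level usage sketch (hedged): the encoder and decoder are
# user-supplied modules not defined in this file, so every name below is
# an assumption.
#
# model = Seq2Seq(encoder, decoder, output_max_len, vocab_size).cuda()
# # src: [batch size, src len]; trg: [batch size, trg len]
# outputs = model(src, trg, train_in_len, teacher_rate=0.5, train=True)
# # outputs: [output_max_len - 1, batch size, vocab_size]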
|
[
"noreply@github.com"
] |
hiqmatNisa.noreply@github.com
|
1f03b32254f1d0ce03b2a51b6b6eb983daeac7b5
|
8fa8ab78770861fe2cee964b381d3b9af0e9d867
|
/SlatechainCore/qa/rpc-tests/util.py
|
bcd2bc5115dbfadad1e8deed754c45b25fc1b50a
|
[
"MIT"
] |
permissive
|
npq7721/coproject
|
37eb2725d737b46ac2fea20363ee159a6e8b8550
|
a8110e40bac798f25654e2492ef35ae92eae3bbf
|
refs/heads/master
| 2020-03-22T16:31:30.101967
| 2018-07-12T06:08:26
| 2018-07-12T06:08:26
| 140,332,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,452
|
py
|
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The Corallium developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "corallium.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
coralliumd and corallium-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run coralliumd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "coralliumd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "corallium-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in corallium.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a coralliumd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "coralliumd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "corallium-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple coralliumds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
[
"tri282@gmail.com"
] |
tri282@gmail.com
|
c8e7deb719db05703d93fe764028941e8baac0db
|
25768f25147068914d7a88db6765411e33a9f708
|
/django_itchat-master/wechat/app/views_bak.py
|
02461fd22dc2129827659afb23ab1ed59a21adf4
|
[] |
no_license
|
QzeroQ/python
|
d00739ca4630b2e84c65d726c9e0ad09a60f8a83
|
23e7fd4b3867bcced17febfadaf6094965707420
|
refs/heads/master
| 2022-04-15T11:15:53.468501
| 2020-03-26T16:24:21
| 2020-03-26T16:24:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,542
|
py
|
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.urls import reverse
from wxpy import *
import uuid
from wechat.settings import BASE_DIR, MEDIA_ROOT
import os, time, base64, json, random
from datetime import datetime  # used by upload(); previously relied on the models star import
from .models import *
from django.contrib.auth.models import User as uuu
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
# Create your views here.
qr_path = os.path.join(BASE_DIR, 'wechat/')
def path_check(qr_path):
if os.path.exists(qr_path):
os.remove(qr_path)
class WXBot(object):
def __init__(self, request):
self.qr_s = ''
self.qr_path = qr_path + request.user.username + '.png'
def callback(self, uuid, status, qrcode):
f = open(self.qr_path, 'wb')
f.write(qrcode)
f.close()
def get_qr(self):
bot = Bot(cache_path=False, qr_callback=self.callback)
return bot
# Fetch the login QR code
@login_required
def qr_img(request):
if request.method == 'POST':
for i in range(5):
time.sleep(2)
if os.path.exists(qr_path + request.user.username + '.png'):
res = {
'img': str(base64.b64encode(open(qr_path + request.user.username + '.png', 'rb').read()),
encoding='utf8')
}
return JsonResponse(res)
continue
# Fetch the group list and the grouping (set) list from the database
@login_required
def get_group_list(request):
uuid_str = str(uuid.uuid4())
group_list = groups_name.objects.all()
set_list = grouping.objects.all()
set_res = []
for i in set_list:
set_name = []
obj = i.group.all()
for j in obj:
set_name.append(j.name)
d = {
'id': i.id,
'set_name': i.group_name,
'group_names': ','.join(set_name),
'num': len(obj)
}
set_res.append(d)
return render(request, 'app/group_list.html', {'data': group_list, 'set': set_res, 'uuid': uuid_str})
# Add groups
@login_required
def create_group(request):
if request.method == 'POST':
d = {
'count': 0,
'name_list': []
}
a = WXBot(request)
bot = a.get_qr()
groups = bot.groups()
print(groups)
d['count'] = len(groups)
for i in groups:
d['name_list'].append(i.nick_name)
groups_name.objects.get_or_create(
name=i.nick_name, users_num=len(i.members), owner=i.owner.name
)
bot.logout()
path_check(qr_path=qr_path + request.user.username + '.png')
return JsonResponse(d)
else:
return render(request, 'app/create_group.html')
# Delete a group
@login_required
def delete_group(request):
if request.method == 'POST':
id = request.POST.get('id', '')
        typ = request.POST.get('typ', 'one')  # defaults to deleting a single group
if id:
if typ == 'one':
u = groups_name.objects.filter(id=int(id))
if u:
u[0].delete()
return JsonResponse(
{
'msg': '删除成功!',
'status': True
}
)
else:
return JsonResponse(
{
'msg': '该群不存在!',
'status': False
}
)
else:
id_list = json.loads(id)
for i in id_list:
u = groups_name.objects.filter(id=int(i))
u[0].delete()
return JsonResponse(
{
'msg': '删除成功!',
'status': True
}
)
else:
return JsonResponse(
{
'msg': '参数错误!',
'status': False
}
)
# Add a grouping (set)
def add_set(request):
if request.method == 'POST':
set_name = request.POST.get('set_name', '')
group_id_list = request.POST.get('group_name', '')
if set_name and group_id_list:
g_l = [int(x) for x in json.loads(group_id_list)]
if grouping.objects.filter(group_name=set_name):
return JsonResponse(
{
'status': False,
'msg': '群名重复,请检查!'
}
)
grouping_obj = grouping.objects.create(group_name=set_name)
grouping_obj.group.set(groups_name.objects.filter(id__in=g_l))
return JsonResponse(
{
'status': True,
'msg': '添加成功!'
}
)
else:
return JsonResponse(
{
'status': False,
'msg': '参数有误!'
}
)
group_list = groups_name.objects.all()
return render(request, 'app/add_set.html', {'data': group_list})
# Delete a grouping
def delete_set(request):
if request.method == 'POST':
id = request.POST.get('id', '')
typ = request.POST.get('typ', 'one')
if id:
if typ == 'one':
obj = grouping.objects.filter(id=int(id))
if obj:
obj[0].delete()
return JsonResponse(
{
'status': True,
'msg': '删除成功!'
}
)
else:
return JsonResponse(
{
'status': False,
'msg': '组不存在!'
}
)
else:
id_list = json.loads(id)
for i in id_list:
obj = grouping.objects.filter(id=int(i))
obj[0].delete()
return JsonResponse(
{
'status': True,
'msg': '删除成功!'
}
)
else:
return JsonResponse(
{
'status': False,
'msg': '参数有误!'
}
)
# Edit a grouping
def edit_set(request):
if request.method == 'POST':
set_id = request.POST.get('set_id', '')
set_name = request.POST.get('set_name', '')
group_id_list = request.POST.get('group_name', '')
if set_name and group_id_list and set_id:
g_l = [int(x) for x in json.loads(group_id_list)]
if grouping.objects.exclude(id=int(set_id)).filter(group_name=set_name):
return JsonResponse(
{
'status': False,
'msg': '群名重复,请检查!'
}
)
grouping_obj = grouping.objects.get(id=int(set_id))
grouping_obj.group.set(groups_name.objects.filter(id__in=g_l))
return JsonResponse(
{
'status': True,
'msg': '添加成功!'
}
)
else:
return JsonResponse(
{
'status': False,
'msg': '参数有误!'
}
)
set_id = request.GET.get('set_id', '')
group_name = []
weixuan = []
grouping_name = ''
if set_id:
obj = grouping.objects.filter(id=int(set_id))
if obj:
grouping_name = obj[0].group_name
group_name = obj[0].group.all()
id_list = []
for i in group_name:
id_list.append(i.id)
weixuan = groups_name.objects.exclude(id__in=id_list)
return render(request, 'app/edit_set.html',
{'group_name': group_name, 'weixuan': weixuan, 'grouping_name': grouping_name, 'set_id': set_id})
# Send by grouping
def send_set(request):
uuid_str = str(uuid.uuid4())
set_list = grouping.objects.all()
set_res = []
for i in set_list:
set_name = []
obj = i.group.all()
for j in obj:
set_name.append(j.name)
d = {
'id': i.id,
'set_name': i.group_name,
'group_names': ','.join(set_name),
'num': len(obj)
}
set_res.append(d)
return render(request, 'app/set_send.html', {'set': set_res, 'uuid': uuid_str})
# Submit for review: add a task
@login_required
def add_task(request):
if request.method == 'POST':
        t = request.POST.get('type', 'group')  # 'group' broadcasts to groups (default); 'set' sends by grouping
content = request.POST.get('content', '')
group_name = request.POST.get('group_name', '')
        send_type = request.POST.get('send_type', 'text')  # defaults to text
        uuid = request.POST.get('uuid')  # distinguishes tasks
if send_type == 'text':
if content and groups_name:
if t == 'group':
task.objects.create(
message=content, group_name=group_name, typ='群发'
)
return JsonResponse({'status': True, 'msg': '添加审核成功,请等待管理员审核!'})
else:
set_id = json.loads(group_name)
l = []
for i in set_id:
for j in grouping.objects.get(id=int(i)).group.all():
d = {"gname": j.name, "owner": j.owner}
l.append(d)
print(l)
task.objects.create(
message=content, group_name=json.dumps(l, ensure_ascii=False), typ='组发'
)
return JsonResponse({'status': True, 'msg': '添加审核成功,请等待管理员审核!'})
else:
return JsonResponse({'status': False, 'msg': '参数错误!'})
else:
if groups_name:
if send_type == 'file':
message_type = '文件'
elif send_type == 'video':
message_type = '视频'
else:
message_type = '图片'
f_obj = file.objects.filter(uuid_str=uuid)
if f_obj:
if t == 'group':
task.objects.create(
message=uuid, uuid_str=uuid, group_name=group_name, typ='群发', message_type=message_type
)
return JsonResponse({'status': True, 'msg': '添加审核成功,请等待管理员审核!'})
else:
set_id = json.loads(group_name)
l = []
for i in set_id:
for j in grouping.objects.get(id=int(i)).group.all():
d = {"gname": j.name, "owner": j.owner}
l.append(d)
print(l)
task.objects.create(
message=uuid, uuid_str=uuid, group_name=json.dumps(l, ensure_ascii=False), typ='组发',
message_type=message_type
)
return JsonResponse({'status': True, 'msg': '添加审核成功,请等待管理员审核!'})
else:
return JsonResponse({'status': False, 'msg': '尚未上传附件,请先上传附件后再提交!'})
else:
return JsonResponse({'status': False, 'msg': '参数错误!'})
# Review list
@login_required
def check_list(request):
if request.method == 'POST':
data = {
'msg': '',
'data': [],
'count': '',
'code': 0,
}
task_list = task.objects.all()
for i in task_list:
if i.message_type != '文字':
file_path = file.objects.get(uuid_str=i.uuid_str).file_path
f = file_path.split('==')[-1]
fie =file_path.split('/')[-1]
d = {
'id': i.id,
'message': f,
'message_type': i.message_type,
'group': i.group_name,
'status': i.status,
'typ': i.typ,
'date': i.date.strftime('%Y-%m-%d %H:%M:%S'),
'uuid': i.uuid_str,
'fie_yuanben': fie
}
else:
d = {
'id': i.id,
'message': i.message,
'message_type': i.message_type,
'group': i.group_name,
'status': i.status,
'typ': i.typ,
'date': i.date.strftime('%Y-%m-%d %H:%M:%S'),
'uuid': i.uuid_str,
}
data['data'].append(d)
return JsonResponse(data)
return render(request, 'app/check_list.html')
# Delete a task
@login_required
def delete_task(request):
if request.method == 'POST':
id = request.POST.get('id', '')
typ = request.POST.get('typ', 'one')
if id:
if typ == 'one':
s = task.objects.filter(id=int(id))
if s:
s[0].delete()
return JsonResponse(
{
'status': True,
'msg': '成功!'
}
)
else:
return JsonResponse(
{
'status': False,
'msg': '参数有误!'
}
)
else:
id_list = json.loads(id)
for i in id_list:
s = task.objects.filter(id=int(i))
s[0].delete()
return JsonResponse(
{
'status': True,
'msg': '成功!'
}
)
else:
return JsonResponse(
{
'status': False,
'msg': '参数有误!'
}
)
# Review
@login_required
def check(request):
if request.method == 'POST':
group_name = request.POST.get('group_name', '')
message = request.POST.get('message', '')
id = request.POST.get('id', '')
if group_name and message and id:
group_name = json.loads(group_name)
t = task.objects.get(id=int(id))
t.message = message
t.status = '审核通过'
t.save()
return JsonResponse(
{
'status': True,
'msg': '审核通过!'
}
)
else:
return JsonResponse(
{
'status': False,
'msg': '参数有误!'
}
)
return render(request, 'app/check.html')
# Send
@login_required
def send(request):
if request.method == 'POST':
group_name = request.POST.get('group_name', '')
message = request.POST.get('message', '')
message_type = request.POST.get('message_type', '')
id = request.POST.get('id', '')
uuid = request.POST.get('uuid', '')
if group_name and message and id:
group_name = json.loads(group_name)
a = WXBot(request)
bot = a.get_qr()
groups = bot.groups()
for i in group_name:
print(i)
print(message)
try:
if message_type == '文字': # todo
groups.search(i['gname'])[0].send(message)
elif message_type == '文件':
path = file.objects.get(uuid_str=uuid).file_path
groups.search(i['gname'])[0].send_file(path)
elif message_type == '视频':
path = file.objects.get(uuid_str=uuid).file_path
groups.search(i['gname'])[0].send_video(path)
elif message_type == '图片':
path = file.objects.get(uuid_str=uuid).file_path
groups.search(i['gname'])[0].send_image(path)
else:
pass
except:
with open('error.log', 'a+') as f:
f.write('发送失败:{0} >>>{1}\n'.format(i['gname'], message))
pass
time.sleep(random.random())
bot.logout()
path_check(qr_path=qr_path + request.user.username + '.png')
t = task.objects.get(id=int(id))
t.status = '已发送'
t.save()
return JsonResponse(
{
'status': True,
'msg': '发送成功!'
}
)
else:
return JsonResponse(
{
'status': False,
'msg': '参数有误!'
}
)
return render(request, 'app/send.html')
# Log in
def do_login(request):
if request.method == "POST":
username = request.POST.get("username", "")
password = request.POST.get("password", "")
user = authenticate(username=username, password=password)
if user and user.is_active:
try:
session_urls = []
role_name = []
role_list = User.objects.get(username=username).myuser.role.all()
for role in role_list:
role_name.append(role.role_name)
permission_list = role.permission.all()
for permission in permission_list:
session_urls.append(permission.url)
request.session['urls'] = json.dumps(session_urls)
request.session['role_name'] = json.dumps(role_name)
except:
                # reached when this user has no role configured
return render(request, 'login.html', {"err": "该账号异常,请联系管理员!"})
login(request, user)
return redirect(reverse('get_group_list'))
else:
return render(request, 'login.html', {"err": "账号密码错误或被冻结,请联系管理员!"})
return render(request, 'login.html')
def do_logout(request):
logout(request)
return redirect(reverse('do_login'))
def forbidden(request):
return render(request, 'forbidden.html')
# Upload
def upload(request):
res = {
"code": 0,
"msg": "上传成功",
"data": {
"src": "http://cdn.layui.com/123.jpg"
}
}
now = str(datetime.now().strftime('%Y%m%d%H%M%S'))
if request.method == 'POST':
uuid = request.GET.get('uuid')
f = request.FILES.get('file')
file_name = now + '==' + f.name
video_list = [
'rm','rmvb','mpeg1-4','mov','mtv','dat','wmv','avi','3gp','amv','dmv','flv','mp3','mp4'
]
for i in video_list:
if i in file_name.lower():
res['code'] = -1
res['msg'] = '暂不支持视频文件!'
return JsonResponse(res)
path = os.path.join(MEDIA_ROOT, file_name)
        destination = open(path, 'wb+')  # open the target file for binary writing
        for chunk in f.chunks():  # write the upload to disk in chunks
destination.write(chunk)
destination.close()
f_obj = file.objects.get_or_create(
uuid_str=uuid
)
f_obj[0].file_path = path
f_obj[0].save()
res['msg'] = f.name + ',上传成功!'
return JsonResponse(res)
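# The views above are reversed by name; a hedged sketch of the matching
# urls.py (only 'get_group_list' and 'do_login' are confirmed by reverse()
# calls in this file, the remaining paths are assumptions):
#
# from django.urls import path
# from . import views
# urlpatterns = [
#     path('login/', views.do_login, name='do_login'),
#     path('groups/', views.get_group_list, name='get_group_list'),
#     path('upload/', views.upload, name='upload'),
# ]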
|
[
"1223609881@qq.com"
] |
1223609881@qq.com
|
c376c39e37435a1e79e90afc296107078fdf4713
|
64f726483db2bae0c418026c47acc41501118e2f
|
/chat.py
|
961687dd7fa0c748143eafc4a8fd42b070097222
|
[] |
no_license
|
aouataf-djillani/Simple-chatbot-with-python-and-flask-
|
ad91d6db25a657a243674d8874706f6738daab86
|
50d365e536c341a43d5bc9eca2dff1874955ff69
|
refs/heads/master
| 2023-09-03T00:46:55.234719
| 2021-11-22T07:52:24
| 2021-11-22T07:52:24
| 430,612,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
from flask import Flask, render_template, request
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
app = Flask(__name__)
myBot=ChatBot(name='Aouataf', storage_adapter="chatterbot.storage.SQLStorageAdapter")
""" greetings=["hi there!","hi","how are you doing?","fine","good", "great", "what's your name?","aouataf"]
math1=["pythagorean theorem","a squared plus b squared equals c squared"]
math2=["law of cosine","c**2= a**2+b**2-2*a*b*cos(gamma)"]
list_trainer=ListTrainer(myBot)
for item in (greetings,math1, math2):
list_trainer.train(item) """
corpus_trainer=ChatterBotCorpusTrainer(myBot)
corpus_trainer.train('chatterbot.corpus.english')
@app.route("/")
def home():
return render_template("index.html")
@app.route("/get")
def get_bot_response():
userText = request.args.get('msg')
return str(myBot.get_response(userText))
if __name__ == "__main__":
app.run()
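# Client-side sketch (hedged): exercising the /get route above, assuming
# Flask's default host and port.
#
# import requests
# reply = requests.get('http://127.0.0.1:5000/get', params={'msg': 'Hello'})
# print(reply.text)   # the chatbot's response as plain text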
|
[
"aouatefd@yahoo.com"
] |
aouatefd@yahoo.com
|
e0d852a289aa3a8e3aca62072d98ba4f2cf26939
|
33524b5c049f934ce27fbf046db95799ac003385
|
/2018/Other/Urok_10_0_классы_объекты/teoriya_class_0.py
|
68f7a2ba67558f66e7e39854b191bc7d8ef21224
|
[] |
no_license
|
mgbo/My_Exercise
|
07b5f696d383b3b160262c5978ad645b46244b70
|
53fb175836717493e2c813ecb45c5d5e9d28dd23
|
refs/heads/master
| 2022-12-24T14:11:02.271443
| 2020-10-04T04:44:38
| 2020-10-04T04:44:38
| 291,413,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
from math import pi
class Circle:
def __init__(self, x=0, y=0, r=0):
self.x = x
self.y = y
self.r = r
def __str__(self):
return "({},{},{})".format(self.x,self.y,self.r)
def read(self):
self.x,self.y,self.r = map(int,input().split())
def area(self):
a = pi*self.r * self.r
return a
def perimetr(self):
return 2*pi*self.r
def zoom(self, k):
self.r *=k
    def is_crossed(self, c):  # does this circle intersect circle c?
d2 = (self.x - c.x)**2 + (self.y - c.y)**2
r2 =(self.r + c.r)**2
return d2 <=r2
c1 = Circle()
c2 = Circle()
'''
c1.r = 3
c2.r = 5
c2.x = 1
c2.y = 1
'''
c1.read()
c2.read()
print (c1)
print (c2)
'''
ans = c1.area()
print (ans)
'''
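# Worked check of is_crossed without console input: for circles (0, 0, r=3)
# and (1, 1, r=5), d2 = 1 + 1 = 2 and r2 = (3 + 5)**2 = 64, so 2 <= 64 -> True.
#
# c_a = Circle(0, 0, 3)
# c_b = Circle(1, 1, 5)
# print(c_a.is_crossed(c_b))  # True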
|
[
"mgbo433@gmail.com"
] |
mgbo433@gmail.com
|
e6a1d15cf99580bb3bb6d61442e94f09c15cfdd4
|
ef7c6a90ec6b09477d49a8719a12c45368d59619
|
/venv/lib/python2.7/site-packages/faker/name.py
|
089e225d7e62f6c5a3536f8c5a2f5b4ad70a3725
|
[] |
no_license
|
GavrilyukAG/TP_WEB
|
a2d612dcdfede55c8775f45373c66a9b730cda49
|
8c114153a004179ae6b944571d102f66a6c8a474
|
refs/heads/master
| 2020-03-27T08:27:55.646237
| 2018-08-27T06:46:28
| 2018-08-27T06:46:28
| 146,257,338
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
from faker import frandom
from faker import helper
import random
def first_name():
    """Return a random first name."""
    return frandom.first_name()
def last_name():
    """Return a random last name."""
    return frandom.last_name()
def find_name():
    """Return a random full name, occasionally with a prefix or suffix."""
    r = random.randint(0, 10)
    if r == 0:
        return frandom.name_prefix() + " " + first_name() + " " + last_name()
    elif r == 1:
        return first_name() + " " + last_name() + " " + frandom.name_suffix()
    return first_name() + " " + last_name()
|
[
"sashagavrilyuk97@gmail.com"
] |
sashagavrilyuk97@gmail.com
|
bbf5068fcd5c3270cf2448fddc69044e5fb04048
|
ddac7346ca9f1c1d61dfd7b3c70dc6cd076a9b49
|
/tests/test_calculators.py
|
ea4ae7c9ee767f607d8382ac221cc57272a8fee0
|
[
"MIT"
] |
permissive
|
gvenus/dftfit
|
f8cf5e9bef5a173ff0aa7202bacbfee0df61bd14
|
a00354f8f0d611bf57c6925f920c749d8628cf98
|
refs/heads/master
| 2023-03-17T18:58:52.287217
| 2019-10-20T04:07:44
| 2019-10-20T04:07:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,979
|
py
|
import asyncio
import shutil
import pymatgen as pmg
import numpy as np
import pytest
from dftfit.io.lammps import LammpsLocalDFTFITCalculator
from dftfit.io.lammps_cython import LammpsCythonDFTFITCalculator
from dftfit.cli.utils import load_filename
from dftfit.potential import Potential
@pytest.mark.pymatgen_lammps
@pytest.mark.lammps_cython
@pytest.mark.calculator
def test_calculator_equivalency(structure):
target_a = 4.1990858
s = structure('test_files/structure/MgO.cif')
lattice = pmg.Lattice.from_parameters(target_a, target_a, target_a, 90, 90, 90)
s.modify_lattice(lattice)
assert np.all(np.isclose(s.lattice.abc, (target_a, target_a, target_a)))
s = s * (2, 2, 2)
assert len(s) == 64
base_directory = 'test_files/dftfit_calculators/'
potential_schema = load_filename(base_directory + 'potential.yaml')
potential_schema['spec']['charge']['Mg']['initial'] = 1.4
potential_schema['spec']['charge']['O']['initial'] = -1.4
potential = Potential(potential_schema)
command = None
if shutil.which('lammps'): command = 'lammps'
elif shutil.which('lmp_serial'): command = 'lmp_serial'
calculators = [
LammpsLocalDFTFITCalculator(structures=[s], potential=potential, command=command, num_workers=1),
LammpsCythonDFTFITCalculator(structures=[s], potential=potential)
]
loop = asyncio.get_event_loop()
results = []
async def run(calc, potential):
await calc.create()
return await calc.submit(potential)
for calc in calculators:
results.append(loop.run_until_complete(run(calc, potential)))
assert len(results) == 2
assert len(results[0]) == 1
assert len(results[1]) == 1
for r1, r2 in zip(*results):
assert r1.structure == r2.structure
assert abs(r1.energy - r2.energy) < 1e-4
assert np.all(np.isclose(r1.forces, r2.forces, atol=1e-8))
assert np.all(np.isclose(r1.stress, r2.stress, atol=1e-8))
|
[
"chris.ostrouchov@gmail.com"
] |
chris.ostrouchov@gmail.com
|
58f10fc505bb8f71ebb268ab6cc5c8bb7932e7cb
|
f26f95837f579186a52eb7f4f385d938fb534333
|
/api1.py
|
fcec904ab6ff0fd0c2d20410e426d638b9d608a1
|
[] |
no_license
|
aishuo07/Flask-Bearer-authentication
|
70767f561f1bd39ef2669758d2edb630f4a4a53d
|
af7d29e957de5842c565c90b696fd3b2d3bb9c90
|
refs/heads/master
| 2022-11-28T16:40:51.257310
| 2020-08-07T14:02:50
| 2020-08-07T14:02:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,839
|
py
|
from flask import Flask,request,make_response,jsonify
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash,check_password_hash
import jwt
import datetime
from functools import wraps
application = app = Flask(__name__)
app.config['SECRET_KEY']='secret'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///Users2.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
class Users(db.Model):
id = db.Column(db.Integer,primary_key=True)
public_id = db.Column(db.String(50),unique=True)
Full_name = db.Column(db.String(80))
User_password = db.Column(db.String(80))
bearer_token = db.Column(db.String(80), unique=True)
def Token(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        # Read the token from the custom "Bearer-token" request header.
        # (The original attribute lookups auth.Token / auth.Bearer_Token do
        # not exist on Werkzeug's Authorization object and always raised,
        # so the header path below was the code path actually taken.)
        token = request.headers.get('Bearer-token')
        if not token:
            return jsonify({'message': 'token missing'}), 401
        # Look the token up against the stored bearer tokens.
        current_user = Users.query.filter_by(bearer_token=token).first()
        if current_user is None:
            return jsonify({'message': 'not a valid token'}), 401
        return f(current_user, *args, **kwargs)
    return decorated
@app.route('/',methods=['GET','POST'])
@Token
def Hello_world(current_user):
return("Hello World")
#
#
# @app.route('/create',methods=['POST'])
# def Create_user():
# data = request.get_json()
# hashed_password = generate_password_hash(data['password'],method='sha256')
# new_user = Users(public_id= str(uuid.uuid4()),Full_name=data['name'],User_password =hashed_password,bearer_token=str(uuid.uuid4()))
# db.session.add(new_user)
# db.session.commit()
# return 'User Created'
#
# @app.route('/login',)
# def login():
# auth = request.authorization
# if not auth or not auth.username or not auth.password:
# return make_response("Not verified",401,{'WWW-Authenticate':'Basic realm="Login requtired!"'})
# user = Users.query.filter_by(Full_name=auth.username).first()
# if not user:
# return make_response("Not verified",401,{'WWW-Authenticate':'Basic realm="Login requtired!"'})
# if check_password_hash(user.User_password,auth.password):
# token = user.bearer_token
#
# return jsonify({'token': token,'auth':auth})
# return make_response("Not verified",401,{'WWW-Authenticate':'Basic realm="Login requtired!"'})
if __name__ == '__main__':
app.run(host='0.0.0.0',port=80,debug=True)
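# Client-side sketch (hedged): the token value is a placeholder for the
# bearer_token stored on a Users row.
#
# import requests
# r = requests.get('http://127.0.0.1/', headers={'Bearer-token': '<token>'})
# print(r.status_code, r.text)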
|
[
"noreply@github.com"
] |
aishuo07.noreply@github.com
|
fd532eee5e2c033e0a30bd156b541d4417da7fdb
|
1bdcf47e4d690f232090f581518cab62d582dbc1
|
/first_project/first_app/views.py
|
d77103e649ecbb675cf601f85c92e759e10dc884
|
[] |
no_license
|
sonerk46/proje2
|
0923ed77ecfcfcbbe34cad9116bb5b0f4148bd5b
|
264ae22d1c8db4ff6d242528b54d324483a966ba
|
refs/heads/master
| 2020-05-01T16:24:25.117448
| 2019-03-25T11:15:51
| 2019-03-25T11:15:51
| 177,571,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
my_dict={'insert_me':"Now I am coming from first_app/index.html ! "}
return render(request,'first_app/index.html',context=my_dict)
|
[
"noreply@github.com"
] |
sonerk46.noreply@github.com
|
bde27465e5215f809b247a635fd24f3186193786
|
0698be34413debeb570e2560072c5696433acd81
|
/ForkTube/celeryconfig.py
|
1a437d56f6e0390a359e88338fe971e211e45e34
|
[] |
no_license
|
Miserlou/ForkTube
|
90a057c459fda4b8d92d94f89c9d86bf786549ca
|
848fdf4ff81c1d70b03c30a6382c8464dd4f25fe
|
refs/heads/master
| 2020-05-19T07:47:44.130888
| 2012-04-09T19:53:24
| 2012-04-09T19:53:24
| 2,363,212
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_USER = "myuser"
BROKER_PASSWORD = "mypassword"
BROKER_VHOST = "myvhost"
CELERY_RESULT_BACKEND = "amqp"
CELERY_IMPORTS = ("tasks", )
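# Celery loads this module by name via config_from_object; a minimal sketch
# (the app name 'forktube' is an assumption, and these BROKER_*/CELERY_*
# keys are old-style Celery 3.x settings):
#
# from celery import Celery
# celery_app = Celery('forktube')
# celery_app.config_from_object('celeryconfig')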
|
[
"rich@anomos.info"
] |
rich@anomos.info
|
ea5e949ab6808a8425c1446752be046ffa13c5e1
|
6f8d8cf37cc2a3749f3151b5bc5412f1ff60dc77
|
/Final-Project_Proposal-Assignment/OpenSource/gera_geometria.py
|
10b859d1364f52c8ad87648dc26ed11eec52e29c
|
[] |
no_license
|
abraaonascimento/GIS-Programming_and_Automation
|
e57ec82632a001e30e2d8bf89f8771f0b0f53d32
|
bc22978a32040d3eed7f8c98fd2c43864ffa9ce9
|
refs/heads/master
| 2020-04-06T07:12:46.679597
| 2018-08-15T19:42:57
| 2018-08-15T19:42:57
| 52,040,916
| 3
| 3
| null | 2020-06-04T17:05:01
| 2016-02-18T21:42:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,280
|
py
|
import random
from shapely.geometry import Polygon, Point
def ponto_unico(poligono):
    """
    Generate a single random point inside a polygon.
    Input: the polygon, built from the lat/lon of each of its vertices.
    Output: one randomly generated coordinate pair (a point) inside the polygon.
    """
    # Unpack the coordinates of the polygon's bounding box
    (coord_S_O, coord_S_L, coord_N_O, coord_N_L) = poligono.bounds
    # Draw random points inside the bounding box
    while True:
        ponto_aleatorio = Point(random.uniform(coord_S_O, coord_N_O), random.uniform(coord_S_L, coord_N_L))
        # Check whether the generated point is CONTAINED in the polygon
        if poligono.contains(ponto_aleatorio):
            # If it is, return it
            return ponto_aleatorio
def multiplos_pontos(codigos_e_geometrias, codigos_e_populacao):
    """
    Generate random points inside polygons.
    Input: 1) a dict mapping each census-tract code to the coordinate pairs
    (vertices) of the tract geometry; 2) a dict mapping each census-tract
    code to the tract's population count.
    Output: a list of randomly generated coordinate pairs (points).
    """
    # List to collect the points
    pontos_aleatorios = []
    # For each census tract
    for codigo in codigos_e_populacao:
        # Build a range with the number of inhabitants in the tract
        try:
            populacao = range(int(codigos_e_populacao[codigo]))
        except:
            populacao = range(0)
        # For each inhabitant in the tract
        for pessoa in populacao:
            # Generate one random point inside the tract
            ponto = ponto_unico(Polygon(codigos_e_geometrias[codigo]))
            x, y = ponto.x, ponto.y
            # Store the generated point
            pontos_aleatorios.append([x, y])
    # Return the list of generated points
    return pontos_aleatorios
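# A tiny worked example with a unit-square "tract"; the tract code and the
# population count are illustrative only.
if __name__ == '__main__':
    geometrias = {'350001': [(0, 0), (1, 0), (1, 1), (0, 1)]}
    populacao = {'350001': 3}
    pontos = multiplos_pontos(geometrias, populacao)
    print(len(pontos))  # 3 random [x, y] pairs, all inside the square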
|
[
"noreply@github.com"
] |
abraaonascimento.noreply@github.com
|
8056525b17134e7712545ac1c5bfa311bfbb9524
|
299bbcec69ea2e046ecbcd4b625453e8f46d64ac
|
/Evaluation/python-evaluation-scripts/pdr_new-blueflood.py
|
fd895a31dc9399f36e2943f569bea8c7126200a0
|
[] |
no_license
|
Airelin/BlueFlood-v-2
|
44310e513ef5a9567421bfa47e6fef90ae46558d
|
2dfd92a39b3399ff92e155dd157be8e4397500e2
|
refs/heads/master
| 2023-08-17T05:09:05.770047
| 2021-09-29T13:24:26
| 2021-09-29T13:24:26
| 411,314,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,088
|
py
|
import os
import progressbar
import numpy as np
import matplotlib.pyplot as plt
from utility import slugify, cached, init_cache, load_env_config
METHOD_PREFIX = 'export_'
CONFIDENCE_FILL_COLOR = '0.8'
COLOR_MAP = 'tab10'
def load_plot_defaults():
# Configure as needed
plt.rc('lines', linewidth=2.0)
plt.rc('legend', framealpha=1.0, fancybox=True)
plt.rc('errorbar', capsize=3)
plt.rc('pdf', fonttype=42)
plt.rc('ps', fonttype=42)
plt.rc('font', size=11)
def export_sine_wave_example(config, export_dir):
# We got multiple experiment runs with individual measurements
num_runs = 10
# Create our measurement steps
xs = np.linspace(0, 2 * np.pi, 100, endpoint=True)
# We also collect overall data for mean and confidence interval
overall_data = []
for r in range(0, num_runs):
name = "Sine Wave Run {}".format(r)
def proc():
# you can load your data from a database or CSV file here
# we will randomly generate data
ys = np.sin(np.array(xs))
# we add some uniform errors
ys += np.random.uniform(-0.1, 0.1, len(xs))
return ys
# If caching is enabled, this line checks for available cache data
# If no data was found, the proc callback is executed and the result cached
# Use ys = proc() if caching not yet wanted
ys = cached(('sine_wave', r), proc)
# We also add the data to overall_data
overall_data.append(ys)
plt.clf()
# Plot the main data
plt.plot(xs, ys, linestyle='-', label="Sin Run {}".format(r), color='C' + str(r + 1))
plt.legend()
plt.xlabel("x")
plt.ylabel("sin(x)")
plt.axis([None, None, None, None])
plt.grid(True)
plt.tight_layout()
plt.savefig(export_dir + slugify(name) + ".pdf", format="pdf")
plt.close()
overall_data = np.array(overall_data)
# We swap the axes to get all values at the first position together
overall_data = np.swapaxes(overall_data, 0, 1)
# We can then merge each step to get the mean
mean = np.mean(overall_data, axis=1)
# Calculate the lower and upper bounds of the confidence interval
# This describes that 95% of the measurements (for each timestep) are within that range
# Use standard error to determine the "quality" of your calculated mean
(lq, uq) = np.percentile(overall_data, [2.5, 97.5], axis=1)
plt.clf()
plt.plot(xs, mean, linestyle='-', label="Mean", color='C1')
plt.fill_between(xs, lq, uq, color=CONFIDENCE_FILL_COLOR, label='95% Confidence Interval')
plt.legend()
plt.xlabel("x")
plt.ylabel("sin(x)")
plt.axis([None, None, None, None])
plt.grid(True)
plt.tight_layout()
plt.savefig(export_dir + slugify("Sine Wave Mean") + ".pdf", format="pdf")
plt.close()
def export_bar_example(config, export_dir):
    # we want to display four bars, one per bitrate mode
# see export_sine_wave_example
num_data_points = 100
    def read_pdr(path):
        # Read one packet-delivery-rate log into a list of floats.
        values = []
        with open(path, encoding="iso-8859-14") as f:
            for line in f.readlines():
                values.append(float(line))
        return values
    data_a = read_pdr('../Logs/5213_blueflood2-mode5/logs/packet-delivery-rate.txt')
    data_b = read_pdr('../Logs/5214_blueflood2-mode6/logs/packet-delivery-rate.txt')
    data_c = read_pdr('../Logs/5211_blueflood2-mode3/logs/packet-delivery-rate.txt')
    data_d = read_pdr('../Logs/5212_blueflood2-mode4/logs/packet-delivery-rate.txt')
mean_a = np.mean(data_a)
mean_b = np.mean(data_b)
mean_c = np.mean(data_c)
mean_d = np.mean(data_d)
std_a = np.std(data_a)
std_b = np.std(data_b)
std_c = np.std(data_c)
std_d = np.std(data_d)
plt.clf()
fig, ax = plt.subplots()
ax.bar(["125 Kbit", "500 Kbit", "1 Mbit", "2 Mbit"], [mean_a, mean_b, mean_c, mean_d], yerr=[std_a, std_b, std_c, std_d], align='center',
ecolor='black', capsize=5, color=['C1', 'C2', 'C3', 'C4'])
ax.yaxis.grid(True)
plt.ylabel("PDR in %")
plt.axis([None, None, 0, 1])
# Adapt the figure size as needed
fig.set_size_inches(5.0, 8.0)
plt.tight_layout()
plt.savefig(export_dir + slugify(("20210921","pdr","new-blueflood","Bar", 5.0, 8.0)) + ".pdf", format="pdf")
fig.set_size_inches(4.0, 4.0)
plt.tight_layout()
plt.savefig(export_dir + slugify(("20210921","pdr","new-blueflood","Bar", 4.0, 4.0)) + ".pdf", format="pdf")
plt.close()
def export_example_3(config, export_dir):
pass
if __name__ == '__main__':
config = load_env_config()
load_plot_defaults()
assert 'EXPORT_DIR' in config and config['EXPORT_DIR']
if 'CACHE_DIR' in config and config['CACHE_DIR']:
init_cache(config['CACHE_DIR'])
steps = [
#export_sine_wave_example, # I put the example I am currently working on at the beginning
export_bar_example,
# export_example_3, excluded for now
]
for step in progressbar.progressbar(steps, redirect_stdout=True):
name = step.__name__.removeprefix(METHOD_PREFIX)
print("Handling {}".format(name))
export_dir = os.path.join(config['EXPORT_DIR'], name) + '/'
os.makedirs(export_dir, exist_ok=True)
step(config, export_dir)
|
[
"stu214538@mail.uni-kiel.de"
] |
stu214538@mail.uni-kiel.de
|
9068d5fa8635986f907256b19f7cc5f4060948a9
|
a90b0fe3875d615585234563d9e2f5a13b5e61e6
|
/Streamlit/Examples/Stock-Price-Dashboard/app.py
|
5ba426e3ee1d2bff282e924cba3fadc40a32e63d
|
[
"MIT"
] |
permissive
|
Badranh/Tutorials
|
424bfed02b2dc6addad6288856905e5cd66d2297
|
1e5127756ee6d962cab79738c157f1e31ea1438f
|
refs/heads/master
| 2022-11-12T12:20:08.024770
| 2020-07-01T18:18:09
| 2020-07-01T18:18:09
| 277,617,438
| 0
| 1
|
MIT
| 2020-07-06T18:20:56
| 2020-07-06T18:20:55
| null |
UTF-8
|
Python
| false
| false
| 1,453
|
py
|
import streamlit as st
import numpy as np
import pandas as pd
# note: only plotly.graph_objs is needed here; the online plotly.plotly module is unused
import plotly.graph_objs as go
st.title('Stock Price of Apple')
@st.cache
def load_data():
data = pd.read_csv('AAPL_data.csv', parse_dates=['date'])
return data
df = load_data()
columns = st.multiselect(
"Choose Columns", list(df.drop(['date', 'Name'], axis=1).columns), ['open']
)
columns.extend(['date'])
start_date = st.date_input('Start date', value=df['date'].min())
end_date = st.date_input('End date', value=df['date'].max())
data = df[columns][(df['date']>=start_date) & (df['date']<=end_date)]
st.write(data)
st.subheader('Line chart of selected columns')
chart = st.line_chart(data.drop(['date'], axis=1))
if st.checkbox('Show summaries'):
st.subheader('Summaries:')
st.write(data.describe())
week_df = data.groupby(data['date'].dt.weekday_name).mean()  # newer pandas: use .dt.day_name()
traces = [go.Bar(
    x = week_df.index,
    y = week_df[col],  # the per-weekday averages matching the x index
name = col,
marker = dict(
line = dict(
color = 'rgb(0, 0, 0)',
width = 2
)
)
) for col in data.drop(['date'], axis=1).columns]
layout = go.Layout(
title = 'Stockprice over days',
xaxis = dict(
title = 'Weekday',
),
yaxis = dict(
title = 'Average Price'
)
)
fig = go.Figure(data=traces, layout=layout)
st.plotly_chart(fig)
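# (added, illustrative) run locally with: streamlit run app.py
# (AAPL_data.csv must sit next to the script)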
|
[
"gilberttanner.work@gmail.com"
] |
gilberttanner.work@gmail.com
|
39f51b8befba9f505afddabff3d6d21823fa7df5
|
adb759899204e61042225fabb64f6c1a55dac8ce
|
/1900~1999/1904.py
|
8a490e0cc71ac769e26193e2bc6f97c4d01e51cb
|
[] |
no_license
|
geneeol/baekjoon-online-judge
|
21cdffc7067481b29b18c09c9152135efc82c40d
|
2b359aa3f1c90f178d0c86ce71a0580b18adad6f
|
refs/heads/master
| 2023-03-28T23:25:12.219487
| 2021-04-01T09:19:06
| 2021-04-01T09:19:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
# Problem
# To teach Jiwon binary sequences, his father gave him tiles as a present.
# Each tile is a single piece with 0 or 1 written on it.
# One day, to disturb Jiwon's studies, mischievous Dongju glued single 0 tiles
# together in pairs, producing "00" tiles.
# As a result, only single "1" tiles and paired "00" tiles remain,
# so Jiwon can no longer build every binary sequence of length N.
# For example, for N=1 only "1" can be made; for N=2, "00" and "11"
# (but no longer "01" or "10").
# For N=4 there are 5 sequences in total: 0011, 0000, 1001, 1100, 1111.
# Our goal is to count, given N, how many sequences Jiwon can build,
# assuming an unlimited supply of tiles.
#
# Input
# The first line contains a natural number N (N ≤ 1,000,000).
#
# Output
# On the first line, print the number of binary sequences of length N that
# Jiwon can build, modulo 15746.
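# (added note) recurrence: a valid length-i sequence ends either with a "1"
# tile (dp[i-1] ways) or with a "00" tile (dp[i-2] ways), so
# dp[i] = dp[i-1] + dp[i-2] (mod 15746), i.e. the Fibonacci recurrence.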
N = int(input())
MOD = 15746
dp = [0 for _ in range(1000001)]
dp[1], dp[2], dp[3] = 1, 2, 3
for i in range(4, 1000001):
dp[i] = (dp[i - 1] + dp[i - 2]) % MOD
print(dp[N])
|
[
"alstn2468_@naver.com"
] |
alstn2468_@naver.com
|
bf2dc3f9c8e764a9d8aef8bdecd21ae74fe802d2
|
0e33e481ce9122b0d43ec033dc1d0c162b67d4ee
|
/blog/urls.py
|
e33b9421ce2a1bfbe9f13c2f7815e5e8d21cc4a6
|
[] |
no_license
|
lunnbag/my-first-blog
|
1f2a69f9c6407fc775b925c41a60da1dcfb40bb2
|
f3af52fa53f18793546847074066e559158c89ec
|
refs/heads/master
| 2020-03-20T18:18:33.571922
| 2018-06-16T15:41:50
| 2018-06-16T15:41:50
| 137,582,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
from django.conf.urls import url
from .import views
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
]
|
[
"lauralunn@hotmail.co.uk"
] |
lauralunn@hotmail.co.uk
|
572253b9a187470080137315d72e1e91c8ee594b
|
a30cb7c68459b10d21cb91d00745a3e05ebb6c41
|
/pygame/08_sprites.py
|
53d8da65846927cf1a84bf9898368071bd15e381
|
[] |
no_license
|
LyceeClosMaire/FormationBiblio
|
2f5172214da2be3868003df8e197a02098d69317
|
8cd14f6add80f00bd34e3c1de934ef2400a9d152
|
refs/heads/master
| 2021-01-19T09:49:35.217001
| 2017-05-24T13:37:28
| 2017-05-24T13:37:28
| 87,790,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,270
|
py
|
import pygame
from pygame.locals import *
# class definitions are introduced with the keyword class,
# then the class name, then in parentheses the class it inherits from
class Ball(pygame.sprite.Sprite):
# the __init__ method (with double underscores on both sides) is very
# important: it is the one called when an object of the class is constructed.
# note the first parameter, conventionally named "self":
# it refers to the object currently being constructed
def __init__(self, position, speed):
# when inheriting from a class, it is best to start the constructor
# with a call to the parent class constructor, reached through super()
super().__init__()
# the object's attributes (the data attached to it) must be
# initialized, usually in __init__()
self.speed = speed
# rect is a standard Sprite attribute; always initialize it
# so that your sprites work correctly with the collision functions
self.rect = pygame.rect.Rect( (0,0), (50,50) )
self.rect.center = position
# likewise, image is a standard attribute, used by draw() on a Group
self.image = pygame.Surface( (50,50), SRCALPHA )
self.image.fill( (0,0,0,0) )
pygame.draw.circle(self.image, (255,0,0), (25,25), 25)
# the update() method is called on each loop iteration to refresh
# your Sprite's internal state; self is always the first parameter
# of any method (a function attached to an object)
def update(self):
self.rect.move_ip(self.speed)
# the draw() method is called to draw the sprite onto a surface
def draw(self, surface):
# here is what Group does by default:
surface.blit( self.image, self.rect )
# class and function definitions usually live in a separate module,
# but here everything is gathered in one program for the example
pygame.init()
clock = pygame.time.Clock()
window = pygame.display.set_mode( (800,600) )
winrect = window.get_rect()
# you can create as many Ball objects as you like
ball1 = Ball( (10,10), (3,3) )
ball2 = Ball( (500,500), (-1,-4) )
running = True
while running:
clock.tick(60)
for event in pygame.event.get():
if event.type == QUIT or(event.type == KEYUP and event.key == K_ESCAPE):
running = False
# Sprites must be updated manually, or through a Group as we will see.
# Note that the "self" parameter is not passed between the parentheses:
# it is supplied automatically by the object.method() syntax
ball1.update()
ball2.update()
window.fill( (0,0,0) )
# the drawing details can be skipped, but we still have to draw the balls
# one by one; Group will remedy this. We draw directly onto window.
ball1.draw(window)
ball2.draw(window)
pygame.display.flip()
pygame.quit()
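# (added sketch) the same loop using a sprite Group, as the comments above
# anticipate; pygame.sprite.Group is part of the standard pygame API:
#
#   balls = pygame.sprite.Group(ball1, ball2)
#   # inside the main loop:
#   balls.update()      # calls update() on every sprite in the group
#   balls.draw(window)  # blits each sprite's image at its rect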
|
[
"chaddai.fouche@gmail.com"
] |
chaddai.fouche@gmail.com
|
710f90e901aebc0be4d31eed525c04c01665c3e0
|
3ad6d731c994813a10801829c45f56c58ff9021d
|
/src/teleop_bot/src/keys_to_twist_with_ramps.py
|
f8bf0f492753a4cd8e20f5fa2477366b8f82f090
|
[] |
no_license
|
bladesaber/ROS_tutorial
|
9b4ae5a9a1bd773ae48d836a87d08bde8a757a5d
|
63486048786ebc864bc731eb1b524a72e9267738
|
refs/heads/master
| 2022-11-16T07:36:15.938433
| 2020-07-07T02:47:50
| 2020-07-07T02:47:50
| 277,693,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,584
|
py
|
#!/usr/bin/env python
import rospy
import math
from std_msgs.msg import String
from geometry_msgs.msg import Twist
key_mapping = { 'w': [ 0, 1], 'x': [ 0, -1],
'a': [ 1, 0], 'd': [-1, 0],
's': [ 0, 0] }
g_twist_pub = None
g_target_twist = None
g_last_twist = None
g_last_twist_send_time = None
g_vel_scales = [0.1, 0.1] # default to very slow
g_vel_ramps = [1, 1] # units: meters per second^2
def ramped_vel(v_prev, v_target, t_prev, t_now, ramp_rate):
# compute maximum velocity step
step = ramp_rate * (t_now - t_prev).to_sec()
sign = 1.0 if (v_target > v_prev) else -1.0
error = math.fabs(v_target - v_prev)
if error < step: # we can get there within this timestep. we're done.
return v_target
else:
return v_prev + sign * step # take a step towards the target
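# (added, illustrative) with ramp_rate = 1.0 m/s^2 and the 20 Hz loop below
# (dt = 0.05 s), the velocity may change by at most 0.05 m/s per call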
def ramped_twist(prev, target, t_prev, t_now, ramps):
tw = Twist()
tw.angular.z = ramped_vel(prev.angular.z, target.angular.z, t_prev,
t_now, ramps[0])
tw.linear.x = ramped_vel(prev.linear.x, target.linear.x, t_prev,
t_now, ramps[1])
return tw
def send_twist():
global g_last_twist_send_time, g_target_twist, g_last_twist,\
g_vel_scales, g_vel_ramps, g_twist_pub
t_now = rospy.Time.now()
g_last_twist = ramped_twist(g_last_twist, g_target_twist,
g_last_twist_send_time, t_now, g_vel_ramps)
g_last_twist_send_time = t_now
g_twist_pub.publish(g_last_twist)
def keys_cb(msg):
global g_target_twist, g_last_twist, g_vel_scales
if len(msg.data) == 0 or not key_mapping.has_key(msg.data[0]):
return # unknown key.
vels = key_mapping[msg.data[0]]
g_target_twist.angular.z = vels[0] * g_vel_scales[0]
g_target_twist.linear.x = vels[1] * g_vel_scales[1]
def fetch_param(name, default):
if rospy.has_param(name):
return rospy.get_param(name)
else:
print "parameter [%s] not defined. Defaulting to %.3f" % (name, default)
return default
if __name__ == '__main__':
rospy.init_node('keys_to_twist')
g_last_twist_send_time = rospy.Time.now()
g_twist_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
rospy.Subscriber('keys', String, keys_cb)
g_target_twist = Twist() # initializes to zero
g_last_twist = Twist()
g_vel_scales[0] = fetch_param('~angular_scale', 0.1)
g_vel_scales[1] = fetch_param('~linear_scale', 0.1)
g_vel_ramps[0] = fetch_param('~angular_accel', 1.0)
g_vel_ramps[1] = fetch_param('~linear_accel', 1.0)
rate = rospy.Rate(20)
while not rospy.is_shutdown():
send_twist()
rate.sleep()
|
[
"2510294705@qq.com"
] |
2510294705@qq.com
|
d6ed4c471e16de0dbe3a1f1b89a719fd27fbf993
|
f5b0d6cca8caadb34da340592514154fd711bca5
|
/archive/Front-end/moments.py
|
fcaa4aec0fef16e6b3f1f36af92b05ec1bb49b31
|
[] |
no_license
|
DmitryBol/reel
|
3e66eed5b18c3e26b147c0e8baa8e9043b3b26ea
|
e08de8ed4be222c2369d8b7579509fa5ccd3df07
|
refs/heads/master
| 2021-06-07T10:32:50.473918
| 2020-02-04T17:08:26
| 2020-02-04T17:08:26
| 130,555,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,649
|
py
|
# xi - random variable, equals payment for combination
# eta - random variable, equals the number of freespins given for combination
# zeta - random variable, equals payment for freespin
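# note: the methods below appear to be excerpted from a slot-machine model
# class; they rely on attributes (symbol, simple_num_comb, scatter_num_comb,
# all_combinations, get_simple_payment) defined elsewhere in that class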
def Exi2(self, width, lines):
s = 0
for str_with_count in self.simple_num_comb:
string = str_with_count[0]
payment = self.get_simple_payment(string)
s += str_with_count[1] / self.all_combinations() * (payment ** 2)
for scatter_comb in self.scatter_num_comb:
scat = scatter_comb[0]
counts = scatter_comb[1]
for cnt in range(width + 1):
s += ((self.symbol[scat].payment[cnt] * len(lines)) ** 2) * counts[cnt] / self.all_combinations()
return s
def Exieta(self, width, lines):
s = 0
for scatter_comb in self.scatter_num_comb:
scat = scatter_comb[0]
counts = scatter_comb[1]
for cnt in range(width + 1):
s += (self.symbol[scat].payment[cnt] * len(lines)) * self.symbol[scat].scatter[cnt] * \
counts[cnt] / self.all_combinations()
return s
def Eeta(self, width):
s = 0
for scatter_comb in self.scatter_num_comb:
scat = scatter_comb[0]
counts = scatter_comb[1]
for cnt in range(width + 1):
s += self.symbol[scat].scatter[cnt] * counts[cnt] / self.all_combinations()
return s
def Eeta2(self, width):
s = 0
for scatter_comb in self.scatter_num_comb:
scat = scatter_comb[0]
counts = scatter_comb[1]
for cnt in range(width + 1):
s += (self.symbol[scat].scatter[cnt] ** 2) * counts[cnt] / self.all_combinations()
return s
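# (added, illustrative) with these moments the usual statistics follow,
# assuming a first-moment method Exi exists elsewhere in the class:
#   Var(xi)      = Exi2  - Exi**2
#   Var(eta)     = Eeta2 - Eeta**2
#   Cov(xi, eta) = Exieta - Exi * Eeta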
|
[
"sadontsfath@gmail.com"
] |
sadontsfath@gmail.com
|
f130de7bee24e00d0360f72e957813c45c2440e3
|
29e81d5aca62adf8a7c0f52025eb2904287caed4
|
/P5/tree_perfect_matching/tree_perfect_matching.py
|
8e1f0aa9e2dd71a43fe2020eb4ac41b8bc1e5d88
|
[] |
no_license
|
jasonluocesc/CS_E3190
|
759a5bfa7e51d1fd0458b9ceea28848c48ddc0ef
|
a962479e4ce4670705ddc24dd69ce30f9df218d3
|
refs/heads/master
| 2018-09-20T10:53:43.259416
| 2018-06-06T13:08:11
| 2018-06-06T13:08:11
| 109,515,163
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
# coding: utf-8
from networkx import Graph
from solution import tree_perfect_matching
import sys
def read_graph_in_dimacs_format(filename):
with open(filename) as f:
line = f.readline()
while line.startswith('c '):
line = f.readline()
tokens = line.split()
num_nodes = int(tokens[2])
num_edges = int(tokens[3])
G = Graph()
G.add_nodes_from(range(1, num_nodes+1))
for i in range(num_edges):
tokens = f.readline().split()
n1 = int(tokens[1])
n2 = int(tokens[2])
G.add_edge(n1, n2)
return G
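# (added) a minimal DIMACS file accepted by the reader above, for illustration:
#   c an optional comment line
#   p edge 4 3
#   e 1 2
#   e 2 3
#   e 3 4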
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python tree_perfect_matching.py <file>")
sys.exit(1)
# Read graph from given file
g = read_graph_in_dimacs_format(sys.argv[1])
# Get matching
matching = tree_perfect_matching(g)
# Output the result
if matching is None:
print("No perfect matching.")
else:
print(" ".join("({},{})".format(edge[0], edge[1]) for edge in matching))
|
[
"luowenbinlove???"
] |
luowenbinlove???
|
644e8b72aa37b4a680a2ddf1ca694dfd8cf0e58b
|
dd7ecc8ee4605f97947949cb486c8741c886f425
|
/app/app/TextSender.py
|
d943389863cd9ca0b303fe363aab07d40d0fd940
|
[] |
no_license
|
rmartinsen/where_ru_rt
|
5df7c0a08df0797f984594981d95fdf79a173172
|
3188a53737c4ae78579c16a820168fc2ad0b830f
|
refs/heads/master
| 2021-01-19T00:57:24.920347
| 2017-02-18T18:37:22
| 2017-02-18T18:37:22
| 64,557,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
from twilio.rest import TwilioRestClient
from twilio import TwilioRestException
import logging
from SETTINGS import settings
class Texter():
def send_text(self, phone_number, body):
self.ACCOUNT_SID = settings['ACCOUNT_SID']
self.AUTH_TOKEN = settings['AUTH_TOKEN']
self.client = TwilioRestClient(self.ACCOUNT_SID, self.AUTH_TOKEN)
try:
self.client.messages.create(
to = phone_number,
from_ = '9167108744',
body = body,
)
logging.info("Message sent to phone number: " + phone_number)
except TwilioRestException as e:
logging.error("Message could not be sent to phone number " + phone_number)
logging.error(e)
raise
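# (added, illustrative) usage, assuming valid Twilio credentials in SETTINGS:
#   Texter().send_text('+15551234567', 'your message here')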
|
[
"rpmartinsen@ucdavis.edu"
] |
rpmartinsen@ucdavis.edu
|
ec83305e998ad683a062bfd80f473ac8191c45eb
|
4c3803e4cad48db586dfdef2e26ffa2a3f2a06fd
|
/Chapter_1/Exercises/R-1.1.py
|
244c70863278bd3cd7edc7c50d4cb938a3b0b347
|
[] |
no_license
|
dabiri1377/DS-AL_in_python3
|
07cee6569c39c79d56102b5b31f8e9329e58df4e
|
3684a8f28df0c67bd504748fcdd4a12fc714bb92
|
refs/heads/master
| 2021-09-09T15:55:58.912069
| 2018-03-17T16:00:16
| 2018-03-17T16:00:16
| 115,943,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
def is_multiply(n, m):
    n = int(n)
    m = int(m)
    # n is a multiple of m exactly when the remainder is zero; this avoids
    # comparing the float n / m against the integer n // m
    return n % m == 0
# __main__
a, b = input("enter n and m:").split()
print(is_multiply(a, b))
|
[
"dabiri1377@gmail.com"
] |
dabiri1377@gmail.com
|
2affed72b0c39fbbb031f7f7b77cc32a1cb1b4f5
|
1a65481701a7ec2ba17e051cf50d131ded1516e1
|
/unit 21~30/unit27/파이썬 객체 파일에 저장.py
|
e05c4f5de67abe6216e322f503f1a8f27d9cfd03
|
[] |
no_license
|
kimdohui/Python_Study
|
bec9b24bab79048d3b7169609898c213912f1d99
|
92dcec38cfa1043e004a72fdb2be2a3f56311444
|
refs/heads/master
| 2021-05-17T13:58:59.729705
| 2020-04-22T10:58:12
| 2020-04-22T10:58:12
| 250,809,291
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
import pickle
name = 'hana'
age = 18
address = '서울시'
scores = {'korean': 90, 'english': 95}
with open('hanaInfo.p', 'wb') as file: # pickle.dump requires the file to be opened in binary write ('wb') mode
pickle.dump(name,file)
pickle.dump(age,file)
pickle.dump(address,file)
pickle.dump(scores,file)
print('----------------\n')
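# (added note) pickle.load returns objects in the exact order they were
# dumped, so the loads below must mirror the dumps above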
with open('hanaInfo.p', 'rb') as file: # open hanaInfo.p in binary read ('rb') mode
name = pickle.load(file)
age = pickle.load(file)
address = pickle.load(file)
scores = pickle.load(file)
print(name)
print(age)
print(address)
print(scores)
|
[
"kdh24162416@gmail.com"
] |
kdh24162416@gmail.com
|
d2fad2f4d6e5d35ef96cca26932b4f326533d021
|
6769f4c0ac48e52b75a597ce5d9612a243558ef1
|
/Controller/Click.py
|
5d8d081d5efdd22cac5ffae58ff185dea843f58a
|
[] |
no_license
|
sehwaa/Naver_Movie_Crawler
|
ecfe073c25c1f0dac6478af3bf07ad1f5520c137
|
4b92b27a0e6edafcd932316ddedbced19f8abd99
|
refs/heads/master
| 2020-03-15T01:52:20.650219
| 2018-05-17T07:44:15
| 2018-05-17T07:44:15
| 131,903,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
'''
Created on 2018. 5. 3.
@author: SEHWA
'''
#coding: utf-8
from Connection.Connection import driver
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.support.wait import WebDriverWait
# click the search button
def submitBtn():
driver.find_element_by_class_name("btn_srch").click()
# click an entry in the autocomplete list
def autoCompletementList(movieName):
driver.implicitly_wait(5)
element = driver.find_element_by_xpath("//li[@data-title='"+movieName+"']")
element.click()
# click the '주요정보' (main info) tab
def mainInformationTab():
driver.find_element_by_xpath("//a[@title='주요정보']").click()
# click the '배우/제작진' (cast/crew) tab
def actorTab():
driver.find_element_by_xpath("//a[@title='배우/제작진']").click()
# click the '평점' (ratings) tab
def scoreTab():
driver.find_element_by_xpath("//a[@title='평점']").click()
# on the ratings tab: click the pre-release ratings menu
def beforeOpening():
driver.find_element_by_id("beforePointTab").click()
# on the ratings tab: click the post-release ratings menu
def afterOpening():
driver.find_element_by_id("afterPointTab").click()
# after post-release ratings: click the by-gender/by-age menu
def netizenGenderAndAge():
driver.find_element_by_xpath("//a[@id='netizen_group']").click()
# after post-release ratings: click the audience-score tab
def audienceScore():
driver.find_element_by_xpath("//div[@class='title_area grade_tit']").click()
# after the audience-score tab: click the by-gender/by-age menu
def audienceGenderAndAge():
driver.find_element_by_xpath("//a[@id='actual_group']").click()
# log in when adult verification is required
def adultLogin():
_id = driver.find_element_by_id("id")
_id.send_keys("####") #개인정보 문제로 블락
_pwd = driver.find_element_by_id("pw")
_pwd.send_keys("####") #개인정보 문제로 블락
driver.find_element_by_xpath("//input[@type='submit']").click()
|
[
"nsh235482@gmail.com"
] |
nsh235482@gmail.com
|
38bae379c04d24789026484a687ef0293b07e1f4
|
d346c1e694e376c303f1b55808d90429a1ad3c3a
|
/medium/61.rotate_list.py
|
86f5af201842b8ba886e5132edcc3439263c61a5
|
[] |
no_license
|
littleliona/leetcode
|
3d06bc27c0ef59b863a2119cd5222dc94ed57b56
|
789d8d5c9cfd90b872be4a4c35a34a766d95f282
|
refs/heads/master
| 2021-01-19T11:52:11.938391
| 2018-02-19T03:01:47
| 2018-02-19T03:01:47
| 88,000,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
# Definition for singly-linked list:
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
#
current = head
storeList = []
while current != None:
storeList.append(current)
current = current.next
if len(storeList) <= 1:
return head
k = k % len(storeList)
if k == 0:
return head
res = storeList[-k]
storeList[-k - 1].next = None
storeList[-1].next = head
return res
# mine: an alternative O(1)-space solution; note it is unreachable after the
# return above and is kept for reference only
if not head or not head.next or k == 0:
return head
length_list = 1
current = head
while current.next:
current = current.next
length_list += 1
current.next = head
current = head
for i in range(1,length_list - (k % length_list)):
current = current.next
head = current.next
current.next = None
return head
# sanity check: rotating 1->2->3 by k=1 yields 3->1->2
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
s = Solution()
node = s.rotateRight(head, 1)
while node:
    print(node.val)  # prints 3, then 1, then 2
    node = node.next
|
[
"aria@Arias-MacBook-Pro.local"
] |
aria@Arias-MacBook-Pro.local
|
73fe66859a65e73496b91d800a11f82a54258308
|
a85419f08198548eb6ba4d3df0d181769f810358
|
/C_Carray/split_for_singlechannel_tests.py
|
4887feeda442f244c49cc385774a1b017c5a6ddf
|
[] |
no_license
|
keflavich/w51evlareductionscripts
|
cd0287d750d938bab96f1a7d335b3b84c27a987f
|
00cb8085e8fe5c047f53852c8057a1f7457863f6
|
refs/heads/master
| 2021-01-17T07:26:01.574220
| 2016-07-07T09:02:26
| 2016-07-07T09:02:26
| 8,590,805
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
# June 29, 2015
# Instead, use h2co_cvel_split in ../C_AC
# outputvis_A = 'h2co11_Cband_Aarray_nocal_20to100kms.ms'
# split(vis=outputvis_A, outputvis='h2co11_Cband_Aarray_nocal_20kms_onechan.ms',
# spw='0:0', width=1)
# split(vis=outputvis_A, outputvis='h2co11_Cband_Aarray_nocal_57kms_onechan.ms',
# spw='0:74', width=1)
# outputvis_C = 'h2co11_Cband_Carray_nocal_20to100kms.ms'
# split(vis=outputvis_C, outputvis='h2co11_Cband_Carray_nocal_20kms_onechan.ms',
# spw='0:0', width=1, datacolumn='data')
# split(vis=outputvis_C, outputvis='h2co11_Cband_Carray_nocal_57kms_onechan.ms',
# spw='0:74', width=1, datacolumn='data')
|
[
"keflavich@gmail.com"
] |
keflavich@gmail.com
|
40f4ee37875a2c832d94dba99ab3b56ade7ccd8e
|
03673029f34a235103f2fa428e15ce50cf677a15
|
/python/752打开转盘锁.py
|
c83ee9d136a60c9299a16fafc011f75e53a7b585
|
[] |
no_license
|
sysuwsh/My-Leetcode-Solution
|
6aa1d0c02c844c8fa50480b8ada206510668f4c4
|
ad1efa0faa6c5bbde7e185759ad5446599dddf52
|
refs/heads/master
| 2022-12-08T02:09:06.125150
| 2020-08-17T17:16:47
| 2020-08-17T17:16:47
| 270,934,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,744
|
py
|
from typing import List
from collections import deque
# Bidirectional BFS: searching from both ends keeps the frontiers small,
# so it is much faster than one-directional BFS
class Solution:
def openLock(self, deadends: List[str], target: str) -> int:
def neighbor(s: str) -> str:
for i in range(4):
for j in (-1, 1):
yield s[:i] + str((int(s[i]) + j) % 10) + s[i + 1:]
dead = set(deadends)
queue1, queue2, visited = set(), set(), set()
queue1.add("0000")
queue2.add(target)
step = 0
while queue1 and queue2:
tmp = set()
for cur in queue1:
if cur in dead:
continue
if cur in queue2:
return step
visited.add(cur)
for nei in neighbor(cur):
if nei not in visited:
tmp.add(nei)
step += 1
queue1 = queue2
queue2 = tmp
return -1
# Note the optimizations here: deadends is converted to a set for fast lookup,
# and visited is stored as a set as well.
# Python syntax note: for sets, {} with elements creates a set literal, which,
# unlike a dict, has no values; to create an empty set use set(), since {}
# produces an empty dict by default.
class Solution1:
def openLock(self, deadends: List[str], target: str) -> int:
dead = set(deadends)
visited = {"0000"}
queue = deque()
queue.append("0000")
step = 0
while queue:
size = len(queue)
for i in range(size):
cur = queue.popleft()
if cur in dead:
continue
if cur == target:
return step
for j in range(4):
up = self.plusOne(cur, j)
if up not in visited:
queue.append(up)
visited.add(up)
down = self.minusOne(cur, j)
if down not in visited:
queue.append(down)
visited.add(down)
step += 1
return -1
def plusOne(self, s: str, index: int) -> str:
l = list(s)
if l[index] == '9':
l[index] = '0'
else:
l[index] = str(int(l[index]) + 1)
return ''.join(l)
def minusOne(self, s: str, index: int) -> str:
l = list(s)
if l[index] == '0':
l[index] = '9'
else:
l[index] = str(int(l[index]) - 1)
return ''.join(l)
s = Solution()
deadends = ["0201", "0101", "0102", "1212", "2002"]
target = "0202"
print(s.openLock(deadends, target))
|
[
"sysuwsh@gmail.com"
] |
sysuwsh@gmail.com
|
6950c60efeafde32b627e0da341719ed7b00ff6d
|
ee5882611747127cb8c2160d01cf9be88c1dcd3b
|
/send_text.py
|
b42d9ce1988418d941f86194c4ff01a777e21fc6
|
[] |
no_license
|
DespoinaSakoglou/Mini-Apps
|
e958ceb2a532161917b3b2019d655b6ec1251f81
|
b50c243587fedaa83a210d0054f4bb0140bef6a1
|
refs/heads/master
| 2021-04-12T05:51:44.558031
| 2018-05-02T13:35:02
| 2018-05-02T13:35:02
| 125,926,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
"""
A program that sends a text message using twilio
Created on Mon Mar 19 16:38:13 2018
"""
from twilio.rest import Client
# Your Account SID from twilio.com/console
account_sid = "*********************************"
# Your Auth Token from twilio.com/console
auth_token = "**********************************"
client = Client(account_sid, auth_token)
message = client.messages.create(
to="+18*********",
from_="+18*********",
body="Hello from Python!")
print(message.sid)
|
[
"noreply@github.com"
] |
DespoinaSakoglou.noreply@github.com
|
6bfd23840261bb11b4ad1cbe4485024aaf39ee33
|
caa34a2291ecadb8daa5ff147565d284f41b26ca
|
/spring1718_assignment2_v2/learn_keras.py
|
05d03821429cd8e8da1b9f85292bd464ab87476c
|
[] |
no_license
|
shuyanzhu/leran_cs231n
|
c287446efa1e2c9724c3316d82ea03297dd26089
|
210c1f7ec9622d5bbcf9f12b5bab06fc8ffeea98
|
refs/heads/master
| 2020-04-16T18:26:24.052223
| 2019-02-26T06:16:12
| 2019-02-26T06:16:12
| 164,188,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,678
|
py
|
import numpy as np
import tensorflow as tf
# load the data
def load_cifar10():
cifar10 = tf.keras.datasets.cifar10.load_data()
(X_train, y_train), (X_test, y_test) = cifar10
X_train = X_train.astype(np.float32)
y_train = y_train.astype(np.int32).flatten()
X_test = X_test.astype(np.float32)
y_test = y_test.astype(np.int32).flatten()
val_size = 1000
X_val = X_train[:val_size]
y_val = y_train[:val_size]
X_train = X_train[val_size:]
y_train = y_train[val_size:]
mean_pixel = X_train.mean(axis=(0, 1, 2), keepdims=True)
std_pixel = X_train.std(axis=(0, 1, 2), keepdims=True)
X_train = (X_train - mean_pixel) / std_pixel
X_val = (X_val - mean_pixel) / std_pixel
X_test = (X_test - mean_pixel) / std_pixel
print('Train data size', X_train.shape)
print('Train labels size', y_train.shape)
print('Validation data size', X_val.shape)
print('Validation labels size', y_val.shape)
print('Test data size', X_test.shape)
print('Test labels size', y_test.shape)
return X_train, y_train, X_val, y_val, X_test, y_test
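# (added note) the per-channel mean and std are computed on the training
# split only and reused for val/test, which avoids statistics leakage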
# build the model
def inference(inputs):
x = tf.layers.BatchNormalization()(inputs)
x = tf.layers.Conv2D(16, [5, 5], activation='relu', padding='same')(x)
x = tf.layers.BatchNormalization()(x)
x = tf.layers.Conv2D(32, [3, 3], activation='relu', padding='same')(x)
x = tf.layers.Flatten()(x)
x = tf.layers.Dropout()(x)
x = tf.layers.BatchNormalization()(x)
x = tf.layers.Dense(120, activation='relu')(x)
x = tf.layers.Dropout()(x)
x = tf.layers.BatchNormalization()(x)
outputs = tf.layers.Dense(10, activation='softmax')(x)
return outputs
if __name__ == '__main__':
# load the dataset
X_train, y_train, X_val, y_val, X_test, y_test = load_cifar10()
train_dset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
train_dset = train_dset.batch(64).repeat()
validation_dset = tf.data.Dataset.from_tensor_slices((X_val, y_val))
validation_dset = validation_dset.batch(64).repeat()
# build the model
inputs = tf.keras.Input(shape=(32, 32, 3))
outputs=inference(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=tf.train.AdamOptimizer(5e-4),
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
checkpoint = tf.keras.callbacks.ModelCheckpoint('./model-{epoch:02d}.hdf5', monitor='val_acc', save_best_only=True, verbose=1)
history = model.fit(train_dset, epochs=5, steps_per_epoch=49000//64, validation_data = validation_dset, validation_steps=1000//64, callbacks=[checkpoint])
print(history.history)
|
[
"1294206499@qq.com"
] |
1294206499@qq.com
|
15701489ab41edd41261b2b31779b163a468529e
|
44a2741832c8ca67c8e42c17a82dbe23a283428d
|
/cmssw/HeavyIonsAnalysis/JetAnalysis/python/jets/akVs3CaloJetSequence_pPb_mix_cff.py
|
3d77c27baa5beb48450caf86750981f27c601170
|
[] |
no_license
|
yenjie/HIGenerator
|
9ff00b3f98b245f375fbd1b565560fba50749344
|
28622c10395af795b2b5b1fecf42e9f6d4e26f2a
|
refs/heads/master
| 2021-01-19T01:59:57.508354
| 2016-06-01T08:06:07
| 2016-06-01T08:06:07
| 22,097,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,519
|
py
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
akVs3Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("akVs3CaloJets"),
matched = cms.InputTag("ak3HiGenJetsCleaned")
)
akVs3Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akVs3CaloJets"),
matched = cms.InputTag("hiGenParticles")
)
akVs3Calocorr = patJetCorrFactors.clone(
useNPV = False,
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akVs3CaloJets"),
payload = "AKVs3Calo_HI"
)
akVs3CalopatJets = patJets.clone(jetSource = cms.InputTag("akVs3CaloJets"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akVs3Calocorr")),
genJetMatch = cms.InputTag("akVs3Calomatch"),
genPartonMatch = cms.InputTag("akVs3Caloparton"),
jetIDMap = cms.InputTag("akVs3CaloJetID"),
addBTagInfo = False,
addTagInfos = False,
addDiscriminators = False,
addAssociatedTracks = False,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
embedCaloTowers = False,
embedPFCandidates = False
)
akVs3CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akVs3CalopatJets"),
genjetTag = 'ak3HiGenJetsCleaned',
rParam = 0.3,
matchJets = cms.untracked.bool(True),
matchTag = 'akPu3CalopatJets',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
genParticles = cms.untracked.InputTag("hiGenParticles"),
eventInfoTag = cms.InputTag("hiSignal")
)
akVs3CaloJetSequence_mc = cms.Sequence(
akVs3Calomatch
*
akVs3Caloparton
*
akVs3Calocorr
*
akVs3CalopatJets
*
akVs3CaloJetAnalyzer
)
akVs3CaloJetSequence_data = cms.Sequence(akVs3Calocorr
*
akVs3CalopatJets
*
akVs3CaloJetAnalyzer
)
akVs3CaloJetSequence_jec = akVs3CaloJetSequence_mc
akVs3CaloJetSequence_mix = akVs3CaloJetSequence_mc
akVs3CaloJetSequence = cms.Sequence(akVs3CaloJetSequence_mix)
|
[
"dgulhan@cern.ch"
] |
dgulhan@cern.ch
|
3b7a3ca5057af51ac8447f4b0b547d67f142be7c
|
305c4d943e1e8c6748ebcd965ec227aee0da751e
|
/examples/fastapi_e2e/tests/conftest.py
|
1d23983d0ba7aeb609bf209f526e93da9adef1e6
|
[
"MIT"
] |
permissive
|
williaminfante/pact-python
|
9f2acbed4967084e6959c107492331bd37c0e0e4
|
d2dda3f6973ae2e099284a4894c8e2a225f9e05b
|
refs/heads/master
| 2023-02-22T22:25:05.595040
| 2020-12-29T20:08:30
| 2020-12-29T20:08:30
| 328,525,719
| 0
| 1
|
MIT
| 2021-01-27T00:07:01
| 2021-01-11T02:18:33
| null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
def pytest_addoption(parser):
parser.addoption(
"--publish-pact", type=str, action="store",
help="Upload generated pact file to pact broker with version"
)
parser.addoption(
"--provider-url", type=str, action="store",
help="The url to our provider."
)
|
[
"elliottmurray@gmail.com"
] |
elliottmurray@gmail.com
|
a69149ec8b9dd6535e90898b9807d97f229a412f
|
3b46e9f7e2fef169589f336f09f00a9294cb3b04
|
/test/core/test_drop.py
|
72e7efc40fd48127ce67ee9e678a8d3cf8b19500
|
[] |
no_license
|
Vayana/sumpter
|
79128ac2bcb4fe6cd05435e9e80e4ad6cb8ba28b
|
f18d7c5c3f8d32a2ae8abf58a70a31614bdc2e39
|
refs/heads/master
| 2020-12-25T18:16:33.833216
| 2016-09-15T10:44:18
| 2016-09-15T10:44:18
| 467,042
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,576
|
py
|
#############################################################################
# Copyright 2010 Dhananjay Nene
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
__author__ = '<a href="nkhalasi@vayana.in">Naresh Khalasi</a>'
from sumpter import *
import unittest
class TestDrop(unittest.TestCase):
def setUp(self):
self.ctx = {1:'hello','world':'foo'}
self.val = ['foo',('bar','baz'),{'boom':555}]
def tearDown(self):
pass
def get_new_drop(self):
return Drop(self.ctx,self.val)
def testConstruction(self):
drop = self.get_new_drop()
self.assert_(isinstance(drop.id,(int,long)))
self.assertEqual(drop.trace,[])
self.assertEqual(drop.parents,[])
self.assertEqual(drop.children,[])
self.assertEqual(drop.alive,True)
self.assertEqual(drop.ctx, self.ctx)
self.assertEqual(drop.val, self.val)
def testEmptyConstruction(self):
drop = Drop(None,None)
self.assertEqual(drop.ctx,{})
self.assertEqual(drop.val,None)
def testDifferentIds(self):
drop1 = Drop(None,None)
drop2 = Drop(None,None)
self.assertNotEqual(drop1.id, drop2.id)
def testChildCreation(self):
drop_parent = self.get_new_drop()
drop_child = drop_parent.create_child('hello')
self.assertEqual(drop_child.ctx,self.ctx)
self.assertEqual(drop_child.val,'hello')
self.assertEqual(drop_parent.children,[drop_child])
self.assertEqual(drop_child.parents,[drop_parent])
def testTrace(self):
drop = self.get_new_drop()
drop.record('One')
drop.record('Two')
self.assertEqual(drop.trace,['One','Two'])
self.assertEqual(drop.get_trace(),'One=>Two')
def testKillDrop(self):
drop = self.get_new_drop()
drop.kill()
self.assertEqual(drop.alive,False)
def testRecordAKilledDrop(self):
drop = self.get_new_drop()
drop.kill()
try :
drop.record('foo')
self.fail('Should have raised a runtime error on drop.record() for a killed drop')
except Exception as e :
self.assertEquals(e,PypeRuntimeError('inactive-drop-operation-record',drop,'foo'))
def testCreateChildOnKilledDrop(self):
drop = self.get_new_drop()
drop.kill()
try :
drop.create_child('hello')
self.fail('Should have raised a runtime error on drop.create_child() for a killed drop')
except Exception as e :
self.assertEquals(e,PypeRuntimeError('inactive-drop-operation-create-child',drop))
def testKillAKilledDrop(self):
drop = self.get_new_drop()
drop.kill()
try :
drop.kill()
self.fail('Should have raised a runtime error on drop.kill() for a killed drop')
except Exception as e :
self.assertEquals(e,PypeRuntimeError('inactive-drop-operation-kill',drop))
|
[
"nkhalasi@vayana.in"
] |
nkhalasi@vayana.in
|
2e5daa13e1b08a262d40a179079d7d11029e9af2
|
5a0d6fff86846117420a776e19ca79649d1748e1
|
/rllib_exercises/serving/do_rollouts.py
|
d2dff98d01aa7e23c66a2e98eb958ee472389934
|
[] |
no_license
|
ray-project/tutorial
|
d823bafa579fca7eeb3050b0a13c01a542b6994e
|
08f4f01fc3e918c997c971f7b2421551f054c851
|
refs/heads/master
| 2023-08-29T08:46:38.473513
| 2022-03-21T20:43:22
| 2022-03-21T20:43:22
| 89,322,668
| 838
| 247
| null | 2022-03-21T20:43:22
| 2017-04-25T05:55:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import argparse
import gym
from ray.rllib.utils.policy_client import PolicyClient
parser = argparse.ArgumentParser()
parser.add_argument(
"--no-train", action="store_true", help="Whether to disable training.")
parser.add_argument(
"--off-policy",
action="store_true",
help="Whether to take random instead of on-policy actions.")
if __name__ == "__main__":
args = parser.parse_args()
import pong_py
env = pong_py.PongJSEnv()
client = PolicyClient("http://localhost:8900")
eid = client.start_episode(training_enabled=not args.no_train)
obs = env.reset()
rewards = 0
episode = []
f = open("out.txt", "w")
while True:
if args.off_policy:
action = env.action_space.sample()
client.log_action(eid, obs, action)
else:
action = client.get_action(eid, obs)
next_obs, reward, done, info = env.step(action)
episode.append({
"obs": obs.tolist(),
"action": float(action),
"reward": reward,
})
obs = next_obs
rewards += reward
client.log_returns(eid, reward, info=info)
if done:
print("Total reward:", rewards)
f.write(json.dumps(episode))
f.write("\n")
f.flush()
rewards = 0
client.end_episode(eid, obs)
obs = env.reset()
eid = client.start_episode(training_enabled=not args.no_train)
|
[
"noreply@github.com"
] |
ray-project.noreply@github.com
|
7fff68255bdd4958ff339290d7b266500468f8b9
|
8b6b2eacbfeb97c5ea8b487ea784f296beae1b18
|
/td-auth-master/tutordudes/payment/apps.py
|
a8db0e02d53a92e4ab6b078f7d3f1104a5047861
|
[] |
no_license
|
FaizanM2000/Project-Demo-1
|
c1ff244d457a2d8243e607dafec74931da0702b7
|
e4aa4a0beed58ea7e3bf802beafeae6f6f9e8a6f
|
refs/heads/main
| 2023-08-05T07:12:14.477072
| 2021-09-18T08:06:43
| 2021-09-18T08:06:43
| 403,812,761
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
from django.apps import AppConfig
class PaymentConfig(AppConfig):
name = 'tutordudes.payment'
def ready(self):
try:
import tutordudes.payment.signals # noqa F401
except ImportError:
pass
|
[
"53056387+Sebastiangao0219@users.noreply.github.com"
] |
53056387+Sebastiangao0219@users.noreply.github.com
|
ab4791fb3dadc7e1bed926e757b5eb84a780446e
|
1ab01e3a556e4effd333e6bfb1970c8cd309da40
|
/minirank/sofia_ml.py
|
75e4e121b82750220ab6074c6d0bc696c52bb227
|
[] |
no_license
|
aurora1625/minirank
|
19009cf8011091dfb58db0d8595035826f59f3e1
|
c5b0ff33053867df7121dc94c2dc463100801421
|
refs/heads/master
| 2020-12-25T12:42:45.282193
| 2012-12-21T14:47:01
| 2012-12-21T14:47:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
import sys, tempfile
import numpy as np
from sklearn import datasets
import _sofia_ml
if sys.version_info[0] < 3:
bstring = basestring
else:
bstring = str
def sgd_train(X, y, b, alpha, n_features=None, model='rank', max_iter=100, step_probability=0.5):
"""
Minimizes an expression of the form
Loss(X, y, b) + 0.5 * alpha * (||w|| ** 2)
where Loss is an Hinge loss defined on pairs of images
Parameters
----------
X : input data
y : target labels
b : blocks (aka query_id)
alpha: float
model : {'rank', 'combined-ranking', 'roc'}
Returns
-------
coef
None
"""
if isinstance(X, bstring):
if n_features is None:
n_features = 2 ** 17 # the default in sofia-ml TODO: parse file to see
w = _sofia_ml.train(X, n_features, alpha, max_iter, False, model,
step_probability)
else:
with tempfile.NamedTemporaryFile() as f:
datasets.dump_svmlight_file(X, y, f.name, query_id=b)
w = _sofia_ml.train(f.name, X.shape[1], alpha, max_iter, False, model,
step_probability)
return w, None
def sgd_predict(data, coef, blocks=None):
# TODO: isn't query_id in data ???
s_coef = ''
for e in coef:
s_coef += '%.5f ' % e
s_coef = s_coef[:-1]
if isinstance(data, bstring):
return _sofia_ml.predict(data, s_coef, False)
else:
X = np.asarray(data)
if blocks is None:
blocks = np.ones(X.shape[0])
with tempfile.NamedTemporaryFile() as f:
y = np.ones(X.shape[0])
datasets.dump_svmlight_file(X, y, f.name, query_id=blocks)
prediction = _sofia_ml.predict(f.name, s_coef, False)
return prediction
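# (added, illustrative) typical usage, assuming the _sofia_ml extension is
# built; b holds the query ids that group comparable samples for ranking:
#   w, _ = sgd_train(X, y, b, alpha=0.1, model='rank')
#   scores = sgd_predict(X, w, blocks=b)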
|
[
"fabian@fseoane.net"
] |
fabian@fseoane.net
|
3610918d2b73d9d7fb9529196d9121b89800d8c4
|
03901933adfaa9130979b36f1e42fb67b1e9f850
|
/iotapy/storage/providers/rocksdb.py
|
a1b6d630c97ebda8f54229ab370820ab8f9b63f1
|
[
"MIT"
] |
permissive
|
aliciawyy/iota-python
|
03418a451b0153a1c55b3951d18d4cb533c7ff28
|
b8d421acf94ccd9e7374f799fbe496f6d23e3cf3
|
refs/heads/master
| 2020-03-19T04:15:54.594313
| 2018-06-04T18:26:52
| 2018-06-04T18:26:52
| 135,811,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,363
|
py
|
# -*- coding: utf-8 -*-
import struct
import iota
import rocksdb_iota
import iotapy.storage.providers.types
from rocksdb_iota.merge_operators import StringAppendOperator
from iotapy.storage import converter
KB = 1024
MB = KB * 1024
MERGED = ['tag', 'bundle', 'approvee', 'address', 'state_diff']
class RocksDBProvider:
BLOOM_FILTER_BITS_PER_KEY = 10
column_family_names = [
b'default',
b'transaction',
b'transaction-metadata',
b'milestone',
b'stateDiff',
b'address',
b'approvee',
b'bundle',
b'tag'
]
column_family_python_mapping = {
'transaction_metadata': 'transaction-metadata',
'state_diff': 'stateDiff'
}
def __init__(self, db_path, db_log_path, cache_size=4096, read_only=True):
self.db = None
self.db_path = db_path
self.db_log_path = db_log_path
self.cache_size = cache_size
self.read_only = read_only
self.available = False
def init(self):
self.init_db(self.db_path, self.db_log_path)
self.available = True
def init_db(self, db_path, db_log_path):
options = rocksdb_iota.Options(
create_if_missing=True,
db_log_dir=db_log_path,
max_log_file_size=MB,
max_manifest_file_size=MB,
max_open_files=10000,
max_background_compactions=1
)
options.allow_concurrent_memtable_write = True
# XXX: How to use this?
block_based_table_config = rocksdb_iota.BlockBasedTableFactory(
filter_policy=rocksdb_iota.BloomFilterPolicy(self.BLOOM_FILTER_BITS_PER_KEY),
block_size_deviation=10,
block_restart_interval=16,
block_cache=rocksdb_iota.LRUCache(self.cache_size * KB),
block_cache_compressed=rocksdb_iota.LRUCache(32 * KB, shard_bits=10))
options.table_factory = block_based_table_config
# XXX: How to use this?
column_family_options = rocksdb_iota.ColumnFamilyOptions(
merge_operator=StringAppendOperator(),
table_factory=block_based_table_config,
max_write_buffer_number=2,
write_buffer_size=2 * MB)
try:
self.db = rocksdb_iota.DB(
self.db_path, options, self.column_family_names,
read_only=self.read_only)
except rocksdb_iota.errors.InvalidArgument as e:
if 'Column family not found' in str(e):
# Currently, rocksdb_iota didn't support
# "create_if_column_family_missing" option, if we detect this
# is a new database, we will need to create its whole
# column family manually.
self.db = rocksdb_iota.DB(
self.db_path, options, [b'default'], read_only=self.read_only)
# Skip to create b'default'
for column_family in self.column_family_names[1:]:
self.db.create_column_family(column_family)
else:
raise e
def _convert_column_to_handler(self, column):
if not isinstance(column, str):
raise TypeError('Column type should be str')
db_column = self.column_family_python_mapping.get(column, column)
ch = self.db.column_family_handles.get(bytes(db_column, 'ascii'))
if ch is None:
raise KeyError('Invalid column family name: %s' % (column))
return ch
def _convert_key_column(self, key, column):
# Convert column to column family handler
ch = self._convert_column_to_handler(column)
# Expand iota.Tag to iota.Hash
if column == 'tag':
if not isinstance(key, iota.Tag):
raise TypeError('Tag key type should be iota.Tag')
key = iota.Hash(str(key))
# Convert key into trits-binary
if column == 'milestone':
if not isinstance(key, int):
raise TypeError('Milestone key type should be int')
key = struct.pack('>l', key)
else:
if not isinstance(key, iota.TryteString):
raise TypeError('Key type should be iota.TryteString')
if len(key) != iota.Hash.LEN:
raise ValueError('Key length must be 81 trytes')
key = converter.from_trits_to_binary(key.as_trits())
return key, ch
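# (added note) milestone keys are packed as big-endian 4-byte signed ints,
# e.g. struct.pack('>l', 3) == b'\x00\x00\x00\x03'; every other key is an
# 81-tryte hash converted to its binary trit representation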
def _get(self, key, bytes_, column):
# Convert value (bytes_) into data object
obj = getattr(iotapy.storage.providers.types, column).get(bytes_, key)
# Handle metadata
if obj and key and column == 'transaction':
obj.set_metadata(self.get(key, 'transaction_metadata'))
return obj
def _get_key(self, bytes_, column):
return getattr(iotapy.storage.providers.types, column).get_key(bytes_)
def _save(self, value, column):
# Convert value to bytes
return getattr(iotapy.storage.providers.types, column).save(value)
def get(self, key, column):
k, ch = self._convert_key_column(key, column)
# Get binary data from database
bytes_ = self.db.get(k, ch)
return self._get(key, bytes_, column)
def next(self, key, column):
key, ch = self._convert_key_column(key, column)
it = self.db.iteritems(ch)
it.seek(key)
next(it)
# XXX: We will get segfault if this is NULL in database
key, bytes_ = it.get()
key = self._get_key(key, column)
# Convert into data object
return key, self._get(key, bytes_, column)
def first(self, column):
ch = self._convert_column_to_handler(column)
it = self.db.iteritems(ch)
it.seek_to_first()
# XXX: We will get segfault if this is NULL in database
key, bytes_ = it.get()
key = self._get_key(key, column)
# Convert into data object
return key, self._get(key, bytes_, column)
def latest(self, column):
ch = self._convert_column_to_handler(column)
it = self.db.iteritems(ch)
it.seek_to_last()
# XXX: We will get segfault if this is NULL in database
key, bytes_ = it.get()
key = self._get_key(key, column)
# Convert into data object
return key, self._get(key, bytes_, column)
def may_exist(self, key, column, fetch=False):
key, ch = self._convert_key_column(key, column)
# XXX: Not working......
return self.db.key_may_exist(key, ch)[0]
def save(self, key, value, column):
key, ch = self._convert_key_column(key, column)
value = self._save(value, column)
self.db.put(key, value, ch)
def store(self, key, value, column):
# Store differs from save: storing a transaction also writes the derived
# entries (address, bundle, tag, approvee, ...) to their own column families
batches = getattr(iotapy.storage.providers.types, column).store(key, value)
write_batch = rocksdb_iota.WriteBatch()
for k, v, column in batches:
k, ch = self._convert_key_column(k, column)
v = self._save(v, column)
if column in MERGED:
write_batch.merge(k, v, ch)
else:
write_batch.put(k, v, ch)
self.db.write(write_batch)
|
[
"git@louie.lu"
] |
git@louie.lu
|
0d6eebcca8341a60b672a11fb77f631f6af68501
|
b3203d01b01d8dbb3298fa25a2bc2da3e20b0019
|
/enumnamecrawler/valueassigner/increment.py
|
39877eb27b520b42aeb7b6f28aa1c7009b0353f9
|
[
"MIT"
] |
permissive
|
yinyin/enumnamecrawler
|
17acfe45727b697249c8a004972a9076740f5152
|
48e98ff16db91e6e21cbf0641642672ca728f6d0
|
refs/heads/master
| 2021-03-30T20:23:12.774056
| 2018-03-21T17:04:47
| 2018-03-21T17:04:47
| 125,081,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
# -*- coding: utf-8 -*-
class Incrementer(object):
def __init__(self, base=-1, step=-1, *args, **kwds):
# type: (int, int) -> None
super(Incrementer, self).__init__(*args, **kwds)
if step == 0:
raise ValueError("step value must != 0: %r" % (step, ))
self.base = base
self.step = step
def _compute_base(self, v_max, v_min):
if self.step > 0:
c = v_max
dstep = self.step - 1
else:
c = v_min
dstep = self.step + 1
return int((c - self.base + dstep) / self.step) * self.step + self.base
def __call__(self, enumelements):
v_max = self.base
v_min = self.base
for enumelem in enumelements:
if enumelem.value is None:
continue
aux = enumelem.value
v_max = max(v_max, aux)
v_min = min(v_min, aux)
c = self._compute_base(v_max, v_min)
for enumelem in enumelements:
if enumelem.value is not None:
continue
enumelem.value = c
c = c + self.step
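# (added, illustrative) behaviour with the defaults (base=-1, step=-1) and a
# minimal stand-in element type; the real enum element class lives elsewhere
# in this package, only its .value attribute is used here:
if __name__ == '__main__':
    class _Elem(object):
        def __init__(self, value=None):
            self.value = value

    elems = [_Elem(), _Elem(), _Elem()]
    Incrementer()(elems)
    assert [e.value for e in elems] == [-1, -2, -3]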
|
[
"yinyinl@gmail.com"
] |
yinyinl@gmail.com
|