blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d363d937b3058c5e74879e225eb4aea35ff3d0d0
|
a2a2f20676d43ef62bf4e5fb066d016b7a0b7504
|
/NOTES-SECTION-03/Ejercicio-6.py
|
fe27bd3b1b146bedd8ba6bb1283f58391f89e775
|
[] |
no_license
|
Victoriasaurio/Python-Course-U
|
497d4e1a9840bdeb630db7c7d1a728f92d033200
|
852053832412820ff1a23812b767716cdf86f3e1
|
refs/heads/main
| 2023-02-16T13:44:48.778551
| 2021-01-11T23:45:54
| 2021-01-11T23:45:54
| 328,560,998
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
# A registry query returned a corrupted, reversed text string holding a
# student's name and an exam grade.  Goal: rebuild it into the structure
# "Nombre Apellido ha sacado un Nota de nota."
cadena = "zeréP nauJ, 01"
# Rebuild each field by reversing the relevant slice of the corrupted string.
nombre = cadena[-8:-4][::-1]      # "nauJ" -> "Juan"
apellido = cadena[-14:-9][::-1]   # "zeréP" -> "Pérez"
nota = cadena[-2:][::-1]          # "01" -> "10"
print(f"{nombre} {apellido} ha sacado un {nota} de nota")
# Alternative solution: reverse the whole string in one step - "10 ,Juan Pérez"
cadena_volteada = cadena[::-1]
print(cadena_volteada[4:], "ha sacado un", cadena_volteada[:2], "de nota.")
|
[
"victoriasaurio.guzmanc@gmail.com"
] |
victoriasaurio.guzmanc@gmail.com
|
85c20195420c31d2cc23fc5b7a8fdd127a1c2fed
|
00f808b76f6dde77aa898d31e2eea107cbcc3e40
|
/PlottingQuickstart.py
|
8730e9800a8cd25751ec939ffdfb6082bb91d948
|
[] |
no_license
|
benjaminverbeek/StatisticalMethodsInPhysics
|
5efb4294e192d76d74236ad55bab2b94dbac1c1d
|
0f245c87733e277ff5ddbefb63b14871bbe0bb01
|
refs/heads/master
| 2023-01-06T16:35:39.432961
| 2020-11-10T13:43:35
| 2020-11-10T13:43:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
############
# Plotting #
############
# Documentation: https://matplotlib.org/3.1.1/tutorials/introductory/pyplot.html
# Import modules
import matplotlib.pyplot as plt  # for plotting
import numpy as np  # for preparing some data to plot

# Generate 50 points between 0 and 2pi
x = np.linspace(0, 2 * np.pi, 50)
print(x)
# Calculate the sine for these values
sine = np.sin(x)
print(sine)

# Plot the graph: create a canvas (fig) and an axes object (ax)
fig, ax = plt.subplots(1, figsize=(8, 6))
# Plot the result
ax.plot(x, sine)
# Show the plot
plt.show()

# Let's do this properly: labelled axes and a legend.
# FIX: LaTeX-style labels now use raw strings so "\s", "\c" and "\p" are not
# treated as (invalid) string escape sequences, which is a warning today and
# a SyntaxError in future Python versions.
fig, ax = plt.subplots(1, figsize=(8, 6))
ax.plot(x, sine, label=r'$\sin(x)$')
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$f(x)$")
plt.legend()  # Will take the label to generate a legend
plt.show()

# Make this a little fancy (line-width, label-size and tics).
# FIX: 'seaborn-poster' was renamed 'seaborn-v0_8-poster' in Matplotlib 3.6
# and removed in 3.8; fall back to the new name when the old one is gone.
try:
    plt.style.use('seaborn-poster')
except OSError:
    plt.style.use('seaborn-v0_8-poster')
x = np.linspace(0, 2 * np.pi, 50)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.plot(x, np.sin(x), label=r"$\sin(x)$")
ax2.plot(x, np.cos(x), c='orange', label=r"$\cos(x)$")
ax1.set_xlabel(r"$x$")
ax2.set_xlabel(r"$x$")
ax1.set_ylabel(r"$f(x)$")
ax2.set_ylabel(r"$f^{\prime}(x)$")
ax1.legend()
ax2.legend()
plt.tight_layout()  # important function for correct margins and spacing
plt.savefig("sineAndCosine.png", dpi=200)
plt.show()
|
[
"papenbrock.michael@googlemail.com"
] |
papenbrock.michael@googlemail.com
|
aa312e967fd86f344d8c77e221894ca010e2830d
|
a37c48267bfb8476476dad7219c4e3329f9e2991
|
/Packs/GreyNoise/Integrations/GreyNoise/GreyNoise_test.py
|
e8707046a884f7adfa98cf7e93dfa41d3ae14b8c
|
[
"MIT"
] |
permissive
|
adambaumeister/content
|
611ce9fba412a5eb28fbefa8a43282e98d3f9327
|
01b57f8c658c2faed047313d3034e8052ffa83ce
|
refs/heads/master
| 2023-03-09T18:16:18.623380
| 2022-07-13T18:11:09
| 2022-07-13T18:11:09
| 274,290,989
| 2
| 0
|
MIT
| 2023-03-06T12:22:17
| 2020-06-23T02:36:53
|
Python
|
UTF-8
|
Python
| false
| false
| 9,690
|
py
|
import pytest
import json
import GreyNoise
from test_data.input_data import ( # type: ignore
parse_code_and_body_data,
get_ip_reputation_score_data,
test_module_data,
ip_reputation_command_data,
ip_quick_check_command_data,
generate_advanced_query_data,
query_command_data,
get_ip_context_data_data,
stats_command_data,
riot_command_response_data,
context_command_response_data,
)
class DummyResponse:
    """
    Minimal stand-in for ``requests.Response`` used by these unit tests.

    Stores the headers, raw body text and HTTP status code it was built
    with, and exposes a ``json()`` helper that decodes the stored text.
    """

    def __init__(self, headers, text, status_code):
        # Mirror exactly the attributes the code under test reads.
        self.headers = headers
        self.text = text
        self.status_code = status_code

    def json(self):
        """Decode the stored body text as JSON and return it."""
        return json.loads(self.text)
@pytest.mark.parametrize("input_data, expected_output", parse_code_and_body_data)
def test_parse_code_and_body(input_data, expected_output):
    """Check that error codes and their messages are split out correctly."""
    assert GreyNoise.parse_code_and_body(input_data) == expected_output
@pytest.mark.parametrize("input_data, expected_output", get_ip_reputation_score_data)
def test_get_ip_reputation_score(input_data, expected_output):
    """Check reputation scoring across GreyNoise classification values."""
    assert GreyNoise.get_ip_reputation_score(input_data) == expected_output
@pytest.mark.parametrize("api_key, api_response, status_code, expected_output", test_module_data)
def test_test_module(api_key, api_response, status_code, expected_output, mocker):
    """
    Exercise GreyNoise.test_module for a valid API key (happy path) and for
    invalid keys / error responses (expected to raise).
    """
    client = GreyNoise.Client(api_key, "dummy_server", 10, "proxy", False, "dummy_integration")
    valid_key = isinstance(api_key, str) and api_key == "true_key"
    if not valid_key:
        # Error path: the HTTP layer returns a failure response which the
        # command must surface as an exception with the expected message.
        mocker.patch("requests.Session.get", return_value=DummyResponse({}, api_response, status_code))
        with pytest.raises(Exception) as err:
            GreyNoise.test_module(client)
        assert str(err.value) == expected_output
    else:
        # Happy path: stub out the SDK request entirely.
        mocker.patch("greynoise.GreyNoise._request", return_value=api_response)
        assert GreyNoise.test_module(client) == expected_output
@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", ip_reputation_command_data)
def test_ip_reputation_command(args, test_scenario, api_response, status_code, expected_output, mocker):
    """
    Tests various combinations of valid and invalid responses for the IPReputation command.

    "positive" scenarios assert on the command outputs; every other
    scenario expects the command to raise with a specific error message.
    """
    client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
    # Serve the canned API payload for every HTTP GET issued by the command.
    dummy_response = DummyResponse({"Content-Type": "application/json"}, json.dumps(api_response), status_code)
    if test_scenario == "positive":
        mocker.patch("requests.Session.get", return_value=dummy_response)
        response = GreyNoise.ip_reputation_command(client, args)
        # The command returns a list of results; only the first entry's
        # outputs are checked here.
        assert response[0].outputs == expected_output
    else:
        mocker.patch("requests.Session.get", return_value=dummy_response)
        with pytest.raises(Exception) as err:
            _ = GreyNoise.ip_reputation_command(client, args)
        assert str(err.value) == expected_output
@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", ip_quick_check_command_data)
def test_ip_quick_check_command(args, test_scenario, api_response, status_code, expected_output, mocker):
    """
    Tests various combinations of valid and invalid responses for ip-quick-check command.

    Scenarios:
      * "positive":             command outputs match the expected structure.
      * "negative" + HTTP 200:  payload accepted but malformed; the rendered
                                human-readable output is compared against a
                                checked-in markdown fixture.
      * "negative" (non-200):   the command raises with the expected message.
      * "custom":               the SDK itself returns an invalid payload and
                                the command raises.
    """
    client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
    dummy_response = DummyResponse({"Content-Type": "application/json"}, json.dumps(api_response), status_code)
    if test_scenario == "positive":
        mocker.patch("requests.Session.get", return_value=dummy_response)
        response = GreyNoise.ip_quick_check_command(client, args)
        assert response.outputs == expected_output
    elif test_scenario == "negative" and status_code == 200:
        mocker.patch("requests.Session.get", return_value=dummy_response)
        response = GreyNoise.ip_quick_check_command(client, args)
        # Compare the rendered markdown against the stored fixture file.
        with open("test_data/quick_check.md") as f:
            expected_hr = f.read()
        assert response.readable_output == expected_hr
    elif test_scenario == "negative":
        mocker.patch("requests.Session.get", return_value=dummy_response)
        with pytest.raises(Exception) as err:
            _ = GreyNoise.ip_quick_check_command(client, args)
        assert str(err.value) == expected_output
    elif test_scenario == "custom":
        # Bypass HTTP entirely and stub the SDK's quick() call.
        mocker.patch("greynoise.GreyNoise.quick", return_value=api_response)
        with pytest.raises(Exception) as err:
            _ = GreyNoise.ip_quick_check_command(client, args)
        assert str(err.value) == expected_output
@pytest.mark.parametrize("args, expected_output", generate_advanced_query_data)
def test_generate_advanced_query(args, expected_output):
    """Check advanced-query generation used by the query/stats commands."""
    assert GreyNoise.generate_advanced_query(args) == expected_output
@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", query_command_data)
def test_query_command(args, test_scenario, api_response, status_code, expected_output, mocker):
    """
    Exercise the query command for both successful responses and errors.
    """
    client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
    canned = DummyResponse({"Content-Type": "application/json"}, json.dumps(api_response), status_code)
    mocker.patch("requests.Session.get", return_value=canned)
    if test_scenario != "positive":
        with pytest.raises(Exception) as err:
            GreyNoise.query_command(client, args)
        assert str(err.value) == expected_output
    else:
        result = GreyNoise.query_command(client, args)
        assert result.outputs[GreyNoise.QUERY_OUTPUT_PREFIX["IP"]] == expected_output["data"]
@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", stats_command_data)
def test_stats_command(args, test_scenario, api_response, status_code, expected_output, mocker):
    """
    Exercise the stats command for both successful responses and errors.
    """
    client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
    canned = DummyResponse({"Content-Type": "application/json"}, json.dumps(api_response), status_code)
    mocker.patch("requests.Session.get", return_value=canned)
    if test_scenario != "positive":
        with pytest.raises(Exception) as err:
            GreyNoise.stats_command(client, args)
        assert str(err.value) == expected_output
    else:
        assert GreyNoise.stats_command(client, args).outputs == expected_output
@pytest.mark.parametrize("input_data, expected_output", get_ip_context_data_data)
def test_get_ip_context_data(input_data, expected_output):
    """Check SDK ip-context/query responses convert to human-readable form."""
    assert GreyNoise.get_ip_context_data(input_data) == expected_output
@pytest.mark.parametrize("test_scenario, status_code, input_data, expected", riot_command_response_data)
def test_riot_command(mocker, test_scenario, status_code, input_data, expected):
    """
    Test various inputs for riot command.

    "positive" scenarios assert that the raw API payload is surfaced as the
    command outputs; other scenarios expect an exception whose message is a
    template embedding the queried IP.
    """
    client = GreyNoise.Client(
        api_key="true_api_key",
        api_server="dummy_server",
        timeout=10,
        proxy="proxy",
        use_cache=False,
        integration_name="dummy_integration",
    )
    # Serve the expected raw payload for every HTTP GET the command issues.
    dummy_response = DummyResponse({"Content-Type": "application/json"}, json.dumps(expected["raw_data"]), status_code)
    mocker.patch("requests.Session.get", return_value=dummy_response)
    if test_scenario == "positive":
        response = GreyNoise.riot_command(client, input_data)
        assert response.outputs == expected["raw_data"]
    else:
        with pytest.raises(Exception) as err:
            _ = GreyNoise.riot_command(client, input_data)
        # The expected message is a format template containing the IP under test.
        assert str(err.value) == expected["error_message"].format(input_data["ip"])
@pytest.mark.parametrize(
    "args, test_scenario, api_response, status_code, expected_output", context_command_response_data
)
def test_context_command(mocker, args, test_scenario, api_response, status_code, expected_output):
    """
    Test various inputs for context command.

    "positive" scenarios assert on the command outputs; every other
    scenario expects context_command to raise with the expected message.
    """
    client = GreyNoise.Client(
        api_key="true_api_key",
        api_server="dummy_server",
        timeout=10,
        proxy="proxy",
        use_cache=False,
        integration_name="dummy_integration",
    )
    dummy_response = DummyResponse({"Content-Type": "application/json"}, json.dumps(expected_output), status_code)
    mocker.patch("requests.Session.get", return_value=dummy_response)
    if test_scenario == "positive":
        response = GreyNoise.context_command(client, args)
        assert response.outputs == expected_output
    else:
        # BUG FIX: this branch previously invoked ip_reputation_command, so
        # context_command's error handling was never exercised; it also left
        # a stray debug print and a redundant duplicate mocker.patch behind.
        with pytest.raises(Exception) as err:
            _ = GreyNoise.context_command(client, args)
        assert str(err.value) == expected_output
|
[
"noreply@github.com"
] |
adambaumeister.noreply@github.com
|
2f20dcdee2da7a54db94994e026b99016778fbc6
|
0fda53530e3105c413f4573d8b7ee40df5e16456
|
/setup.py
|
182c8d082826b43b7670ecb4c0f53afa164c3c1e
|
[
"MIT"
] |
permissive
|
colorfusion/animelyrics
|
c2414b4a5d62e86655e84b6702e2bcf654e1a85f
|
823cf43030f6b6f02f44b7caf1bd77b493dfb455
|
refs/heads/master
| 2021-06-13T13:13:50.041415
| 2019-06-23T15:22:30
| 2019-06-23T15:22:30
| 193,237,981
| 3
| 1
|
MIT
| 2021-04-29T22:02:56
| 2019-06-22T13:38:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,219
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Referenced from https://github.com/kennethreitz/setup.py
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = "animelyrics"
DESCRIPTION = "Library to extract song lyrics from www.animelyrics.com"
URL = "https://github.com/colorfusion/animelyrics"
EMAIL = "melvinyzw@gmail.com"
AUTHOR = "Melvin Yeo"
REQUIRES_PYTHON = ">=3.0.0"
VERSION = "0.1.0"

# What packages are required for this module to be executed?
INSTALL_REQUIRED = ["requests", "google", "beautifulsoup4", "lxml"]

# What packages are optional?
EXTRAS = {}
SETUP_REQUIRED = ["pytest-runner"]
TESTS_REQUIRED = ["pytest"]

# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))

# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
        long_description = "\n" + f.read()
except FileNotFoundError:
    # Fall back to the short description when no README ships with the sdist.
    long_description = DESCRIPTION

# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
    # VERSION not pinned above: read it from <package>/__version__.py instead.
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    with open(os.path.join(here, project_slug, "__version__.py")) as f:
        exec(f.read(), about)
else:
    about["__version__"] = VERSION
class UploadCommand(Command):
    """Support setup.py upload: clean, build sdist/wheel, push to PyPI, tag."""

    description = "Build and publish the package."
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print("\033[1m{0}\033[0m".format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            self.status("Removing previous builds…")
            rmtree(os.path.join(here, "dist"))
        except OSError:
            # No previous build directory - nothing to clean up.
            pass
        # Each publishing step: a status banner followed by a shell command.
        steps = [
            ("Building Source and Wheel (universal) distribution…",
             "{0} setup.py sdist bdist_wheel --universal".format(sys.executable)),
            ("Uploading the package to PyPI via Twine…", "twine upload dist/*"),
            ("Pushing git tags…", "git tag v{0}".format(about["__version__"])),
        ]
        for message, command in steps:
            self.status(message)
            os.system(command)
        os.system("git push --tags")
        sys.exit()
# Where the magic happens: register the package metadata, dependencies and
# custom commands with setuptools.
setup(
    name=NAME,
    version=about["__version__"],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type="text/markdown",
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
    # If your package is a single module, use this instead of 'packages':
    # py_modules=['animelyrics'],
    # entry_points={
    #     'console_scripts': ['mycli=mymodule:cli'],
    # },
    setup_requires=SETUP_REQUIRED,
    install_requires=INSTALL_REQUIRED,
    tests_require=TESTS_REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license="MIT",
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Developers",
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Internet :: WWW/HTTP :: Indexing/Search",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: General",
    ],
    # $ setup.py publish support.
    # Registers the UploadCommand above as `python setup.py upload`.
    cmdclass={"upload": UploadCommand},
)
|
[
"melvinyzw@gmail.com"
] |
melvinyzw@gmail.com
|
69d48301686380c0deeb43c870b36b441c8f549e
|
507a28e47205a6933ba2d5065f24c5d26384909f
|
/todo/views.py
|
3e8e0915e3d49cd59b14dad03d4640baf94525b3
|
[] |
no_license
|
luoyueyaoqing/todolist
|
b977fac4c09bb2f83e42f444085d989543cbcbe9
|
ba49047150335e6da14306cdf8df35e754faaf63
|
refs/heads/master
| 2020-03-26T23:25:23.904954
| 2018-08-30T06:37:49
| 2018-08-30T06:37:49
| 145,541,090
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,786
|
py
|
from django.shortcuts import render, redirect, get_object_or_404, HttpResponseRedirect
from django.contrib import messages
from .models import Todo, User
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.decorators import login_required
import datetime
def index_register(request):
    """Register a new account; on success redirect to the login page."""
    if request.method == "POST":
        username = request.POST.get('username')
        password1 = request.POST.get('password1')
        password2 = request.POST.get('password2')
        # Guard-clause checks: existing account, then password mismatch.
        if User.objects.filter(username=username).exists():
            messages.warning(request, "账号已存在")
        elif password1 != password2:
            messages.warning(request, '两次密码输入不一致')
        else:
            User.objects.create_user(username=username, password=password1)
            messages.success(request, '注册成功')
            return redirect(to='login')
    return render(request, 'register.html')
def index_login(request):
    """
    Log a user in via Django's AuthenticationForm.

    Honours an optional ``?next=`` URL so the user returns to the page
    that required login.  NOTE(review): when the form is invalid the view
    redirects back to the same URL, which discards the form's validation
    errors - confirm this is intended.  NOTE(review): HttpResponseRedirect
    is imported from django.shortcuts at the top of this file; it lives in
    django.http and may not be re-exported by newer Django versions.
    """
    next_url = request.GET.get('next')
    if request.method == "POST":
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            login(request, form.get_user())
            if next_url:
                return redirect(next_url)
            return redirect('index')
        # Invalid credentials: reload the login page at the same path.
        return HttpResponseRedirect(request.get_full_path())
    return render(request, 'login.html', {'next_url': next_url})
@login_required
def index(request):
    """Show the current user's todos plus the list of all users."""
    context = {
        'todos': Todo.objects.filter(user=request.user),
        'users': User.objects.all(),
    }
    return render(request, 'index.html', context)
def user_page(request):
    """
    Public page showing another user's todo list.

    ``uid`` comes from the query string; unknown ids raise Http404.
    """
    uid = request.GET.get('uid')
    todo_user = get_object_or_404(User, id=uid)
    todos = Todo.objects.filter(user=todo_user)
    # Previously passed ``locals()`` - replaced with an explicit context so
    # the template's inputs are visible here.  Every name locals() exposed
    # is kept for template compatibility.
    context = {'request': request, 'uid': uid, 'todo_user': todo_user, 'todos': todos}
    return render(request, 'user_page.html', context)
@login_required
def user_update(request):
    """
    Let the logged-in user edit their profile (nickname, gender, info).

    GET renders the edit form; POST saves the submitted values and
    redirects back to the index view.
    """
    user = request.user
    if request.method == "POST":
        user.nickname = request.POST.get('nickname')
        gender = request.POST.get('gender')
        # user_gender presumably maps the posted value onto the model's
        # gender choices - confirm against the User model definition.
        user.gender = user.user_gender(gender)
        user.info = request.POST.get('info')
        user.save()
        return redirect(to=index)
    return render(request, 'user_update.html', {'user': user})
@login_required
def add_todo(request):
    """Create a todo for the current user from the POSTed 'task' text."""
    user = get_object_or_404(User, id=request.user.id)
    if request.method != "POST":
        messages.warning(request, "请求方法不对")
        return redirect(to=index)
    task = request.POST.get('task')
    # Flat guard-clause chain: empty task, duplicate task, then create.
    if not task:
        messages.warning(request, '请输入任务')
    elif Todo.objects.filter(user=user, task=task).exists():
        messages.warning(request, '任务已存在')
    else:
        Todo.objects.create(user=user, task=task, complete=False)
        messages.success(request, '任务添加成功')
    return redirect(to=index)
@login_required
def detail_todo(request, todo_id):
    """Render the detail page for a single todo (404 if missing)."""
    item = get_object_or_404(Todo, id=todo_id)
    return render(request, 'detail_todo.html', {'todo': item})
@login_required
def do_todo(request, id):
    """
    Mark a todo as complete, then return to the index.

    get_object_or_404 raises Http404 for unknown ids, so the previous
    ``if todo: ... else: ...`` truthiness check (whose failure branch was
    unreachable) has been removed.
    """
    todo = get_object_or_404(Todo, id=id)
    todo.complete = True
    todo.save()
    messages.success(request, '任务已完成')
    return redirect(to=index)
@login_required
def del_todo(request, id):
    """
    Delete a todo by id, then return to the index.

    get_object_or_404 raises Http404 for unknown ids, so the previous
    ``if todo: ... else: ...`` truthiness check (whose failure branch was
    unreachable) has been removed.
    """
    todo = get_object_or_404(Todo, id=id)
    todo.delete()
    messages.success(request, '任务已删除')
    return redirect(to=index)
|
[
"shmilyfae@163.com"
] |
shmilyfae@163.com
|
55174b3f69b513d0485118c8bde0ce8bf9e44825
|
041e1122b51d9e6557a30264fe7ae0dfbffa0879
|
/samples/openapi3/client/petstore/python-aiohttp/petstore_api/models/class_model.py
|
ed62d02c8980fbcfca8489787c3263de3263a1ae
|
[
"Apache-2.0"
] |
permissive
|
shibayan/openapi-generator
|
59ca5dea4313a72f7a8d68e78420332b14ea619f
|
8b26d81163fa4cd069425c23957f16a8286ec732
|
refs/heads/master
| 2023-08-03T12:25:23.705512
| 2023-06-20T09:11:33
| 2023-06-20T09:11:33
| 234,938,986
| 2
| 0
|
Apache-2.0
| 2021-08-11T15:23:32
| 2020-01-19T17:38:17
|
Java
|
UTF-8
|
Python
| false
| false
| 2,008
|
py
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic import BaseModel, Field, StrictStr
# OpenAPI-Generator output (see file header: "Do not edit the class
# manually") - comments below are review notes only.
class ClassModel(BaseModel):
    """
    Model for testing model with \"_class\" property
    """
    # "_class" is not a usable Python attribute name, so the generator
    # exposes it as ``var_class`` with a pydantic alias back to "_class".
    var_class: Optional[StrictStr] = Field(None, alias="_class")
    __properties = ["_class"]

    class Config:
        """Pydantic configuration"""
        # pydantic v1-style config: accept both field name and alias on
        # input, and validate values on attribute assignment.
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> ClassModel:
        """Create an instance of ClassModel from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Returns the dictionary representation of the model using alias"""
        # exclude_none drops unset optional fields from the serialized dict.
        _dict = self.dict(by_alias=True,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> ClassModel:
        """Create an instance of ClassModel from a dict"""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            # Non-dict input: let pydantic attempt the coercion directly.
            return ClassModel.parse_obj(obj)
        _obj = ClassModel.parse_obj({
            "var_class": obj.get("_class")
        })
        return _obj
|
[
"noreply@github.com"
] |
shibayan.noreply@github.com
|
5b3a9b74ae35c93c4f7d3282d2ac7cacd3fcea52
|
e4f6e8b4d389ae81c7f4ff9fb01f1851f3d0a27c
|
/Buoi 2/maze.py
|
498b789113ef464655d31b8f6986532e16741cf8
|
[] |
no_license
|
MihThanh/VuMinhThanh--Fundamental--C4E14
|
1695914752a4cd3f113202725c72fc8da4d315ab
|
cdc58c84ed479d3ce9781290f9f667d1af24c649
|
refs/heads/master
| 2021-08-19T11:01:07.206783
| 2017-11-26T01:39:12
| 2017-11-26T01:39:12
| 109,583,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
from turtle import *

# Draw an expanding square spiral with the turtle cursor.
shape("turtle")

# (Earlier manual square-drawing steps, kept for reference:)
# left(90)
# forward(n + 10)
# left(90)
# forward(n + 10)
# left(90)
# forward(n + 20)
# left(90)

for step in range(50):
    forward(step * 5)
    left(90)

mainloop()
|
[
"minhthanh@Minhs-MacBook-Pro.local"
] |
minhthanh@Minhs-MacBook-Pro.local
|
7926971f519ad8ae0e026b35dc5c0ed0d6584580
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/mwhoffman_pybo/pybo-master/pybo/recommenders.py
|
a97e51c89a8b80c68975ed9064be97173feb32de
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 841
|
py
|
"""
Recommendations.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from . import solvers
__all__ = ['best_latent', 'best_incumbent']
def best_latent(model, bounds, X):
    """
    Given a model return the best recommendation: the point maximizing the
    posterior mean, found by L-BFGS seeded with the grid of points ``X``.
    """
    def posterior_mean(Z, grad=False):
        """Posterior mean objective (with gradient when ``grad`` is True)."""
        if not grad:
            return model.predict(Z)[0]
        # Every other element of the gradient-enabled prediction -
        # presumably (mean, d mean); confirm against the model API.
        return model.predict(Z, True)[::2]

    xbest, _ = solvers.solve_lbfgs(posterior_mean, bounds, xgrid=X)
    return xbest
def best_incumbent(model, _, X):
    """
    Recommend the best point already evaluated: the element of ``X`` whose
    latent function value under ``model`` is largest.
    """
    means, _ = model.predict(X)
    return X[means.argmax()]
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
4768754f3420d19911c26e62e222ae89383283a9
|
370b79839660022fdba49a28abbc5fffba4151a1
|
/FindLine/findline.py
|
cc8ae4075bd7cd2378179f3d3a7361cd11387f05
|
[] |
no_license
|
cyhbrilliant/auto_drive.python
|
b6323112660a483cae137b958a1aedd827e6cbf9
|
908669a82980a84c127672ae1a0fdf2d2dea84b3
|
refs/heads/master
| 2020-03-22T14:28:57.341183
| 2018-07-08T16:11:00
| 2018-07-08T16:11:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,043
|
py
|
import numpy as np
import cv2
def findline(image):
    """
    Detect line segments in an RGB road image and draw them in red.

    Pipeline: grayscale -> Gaussian blur -> Canny edges -> zero out a fixed
    border region -> probabilistic Hough transform -> filter segments ->
    draw survivors on a copy of the input.  Returns the annotated copy;
    the input image is not modified.
    """
    # image = cv2.resize(image, dsize=(320, 240))
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # gray = cv2.equalizeHist(gray)
    kernel_size = 5
    blur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)
    low_threshold = 150
    high_threshold = 400
    edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
    # Zero out everything outside a hard-coded region of interest; the
    # bounds assume a 320x240 frame (see the resize above) - TODO confirm.
    edges[0:120, :] = 0
    edges[:, :60] = 0
    edges[:, 300:] = 0
    edges[220:240, :] = 0
    # mask = np.zeros_like(edges)
    # ignore_mask_color = 255
    # # This time we are defining a four sided polygon to mask
    # imshape = image.shape
    # vertices = np.array([[(50,imshape[0]),(420, 280), (550, 280), (950,imshape[0])]], dtype=np.int32)
    # cv2.fillPoly(mask, vertices, ignore_mask_color)
    # masked_edges = cv2.bitwise_and(edges, mask)
    rho = 5  # distance resolution in pixels of the Hough grid
    theta = np.pi / 180  # angular resolution in radians of the Hough grid
    threshold = 15  # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 30  # minimum number of pixels making up a line
    max_line_gap = 20  # maximum gap in pixels between connectable line segments

    def linefilter(pt1, pt2):
        # Drop segments whose first endpoint is right of the second, and
        # near-horizontal segments (vertical extent under 20 px).
        if pt1[0] > pt2[0]:
            return False
        if abs(pt1[1] - pt2[1]) < 20:
            return False
        return True

    imgcopy = image.copy()
    # Run Hough on edge detected image
    lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
    # if type(lines) != type(None):
    if lines is not None:
        for line in lines:
            pt1 = (line[0][0], line[0][1])
            pt2 = (line[0][2], line[0][3])
            if linefilter(pt1, pt2):
                cv2.line(imgcopy, pt1, pt2, (0, 0, 255), 5)
    # print(lines)
    # cv2.imshow('1', blur_gray)
    # cv2.imshow('2', edges)
    # cv2.imshow('3', image)
    # cv2.waitKey(0)
    return imgcopy
|
[
"965833120@qq.com"
] |
965833120@qq.com
|
595e637221e0ba732c5c4483a30824788cfeb4bd
|
49296c69348c743f234807ff6390687079b6b5d9
|
/client/server_lib/omero_model_EventType_ice.py
|
ae9a4b8869439da36faf333c05a61efb605ae31d
|
[] |
no_license
|
crs4/omero.biobank-docker
|
2cb4b00f37115916d5b140cbdaf24c12bd8be9ef
|
e332a6eccad44384cd6a3a12e6da17eb89a6cd96
|
refs/heads/master
| 2023-09-02T04:36:21.401597
| 2014-12-07T17:34:27
| 2014-12-07T17:34:27
| 26,125,831
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,309
|
py
|
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
#
# Ice version 3.4.2
#
# <auto-generated>
#
# Generated from file `EventType.ice'
#
# Warning: do not edit this file.
#
# </auto-generated>
#
import Ice, IcePy, __builtin__
import omero_model_IObject_ice
import omero_RTypes_ice
import omero_System_ice
import omero_Collections_ice
# NOTE(review): Ice 3.4-generated, Python 2-only module (uses __builtin__
# and dict.has_key); do not edit by hand - regenerate from EventType.ice.
# Included module omero
_M_omero = Ice.openModule('omero')
# Included module omero.model
_M_omero.model = Ice.openModule('omero.model')
# Included module Ice
_M_Ice = Ice.openModule('Ice')
# Included module omero.sys
_M_omero.sys = Ice.openModule('omero.sys')
# Included module omero.api
_M_omero.api = Ice.openModule('omero.api')
# Start of module omero
__name__ = 'omero'
# Start of module omero.model
__name__ = 'omero.model'
# Start of module omero.model.enums
_M_omero.model.enums = Ice.openModule('omero.model.enums')
__name__ = 'omero.model.enums'
# String constants enumerating the built-in OMERO event types.
_M_omero.model.enums.EventTypeImport = "Import"
_M_omero.model.enums.EventTypeInternal = "Internal"
_M_omero.model.enums.EventTypeShoola = "Shoola"
_M_omero.model.enums.EventTypeUser = "User"
_M_omero.model.enums.EventTypeTask = "Task"
_M_omero.model.enums.EventTypeTest = "Test"
_M_omero.model.enums.EventTypeProcessing = "Processing"
_M_omero.model.enums.EventTypeFullText = "FullText"
_M_omero.model.enums.EventTypeSessions = "Sessions"
# End of module omero.model.enums
__name__ = 'omero.model'
# Forward-declare Details so EventType can reference it before definition.
if not _M_omero.model.__dict__.has_key('Details'):
    _M_omero.model._t_Details = IcePy.declareClass('::omero::model::Details')
    _M_omero.model._t_DetailsPrx = IcePy.declareProxy('::omero::model::Details')

if not _M_omero.model.__dict__.has_key('EventType'):
    _M_omero.model.EventType = Ice.createTempClass()
    class EventType(_M_omero.model.IObject):
        # Abstract servant base for the ::omero::model::EventType Slice class.
        def __init__(self, _id=None, _details=None, _loaded=False, _value=None):
            if __builtin__.type(self) == _M_omero.model.EventType:
                raise RuntimeError('omero.model.EventType is an abstract class')
            _M_omero.model.IObject.__init__(self, _id, _details, _loaded)
            self._value = _value

        def ice_ids(self, current=None):
            return ('::Ice::Object', '::omero::model::EventType', '::omero::model::IObject')

        def ice_id(self, current=None):
            return '::omero::model::EventType'

        def ice_staticId():
            return '::omero::model::EventType'
        ice_staticId = staticmethod(ice_staticId)

        def getValue(self, current=None):
            # Abstract operation: implemented by the concrete servant.
            pass

        def setValue(self, theValue, current=None):
            # Abstract operation: implemented by the concrete servant.
            pass

        def __str__(self):
            return IcePy.stringify(self, _M_omero.model._t_EventType)
        __repr__ = __str__

    _M_omero.model.EventTypePrx = Ice.createTempClass()
    class EventTypePrx(_M_omero.model.IObjectPrx):
        # Client-side proxy: each operation has a sync form plus
        # asynchronous begin_/end_ variants.
        def getValue(self, _ctx=None):
            return _M_omero.model.EventType._op_getValue.invoke(self, ((), _ctx))

        def begin_getValue(self, _response=None, _ex=None, _sent=None, _ctx=None):
            return _M_omero.model.EventType._op_getValue.begin(self, ((), _response, _ex, _sent, _ctx))

        def end_getValue(self, _r):
            return _M_omero.model.EventType._op_getValue.end(self, _r)

        def setValue(self, theValue, _ctx=None):
            return _M_omero.model.EventType._op_setValue.invoke(self, ((theValue, ), _ctx))

        def begin_setValue(self, theValue, _response=None, _ex=None, _sent=None, _ctx=None):
            return _M_omero.model.EventType._op_setValue.begin(self, ((theValue, ), _response, _ex, _sent, _ctx))

        def end_setValue(self, _r):
            return _M_omero.model.EventType._op_setValue.end(self, _r)

        def checkedCast(proxy, facetOrCtx=None, _ctx=None):
            return _M_omero.model.EventTypePrx.ice_checkedCast(proxy, '::omero::model::EventType', facetOrCtx, _ctx)
        checkedCast = staticmethod(checkedCast)

        def uncheckedCast(proxy, facet=None):
            return _M_omero.model.EventTypePrx.ice_uncheckedCast(proxy, facet)
        uncheckedCast = staticmethod(uncheckedCast)

    _M_omero.model._t_EventTypePrx = IcePy.defineProxy('::omero::model::EventType', EventTypePrx)
    _M_omero.model._t_EventType = IcePy.declareClass('::omero::model::EventType')
    _M_omero.model._t_EventType = IcePy.defineClass('::omero::model::EventType', EventType, (), True, _M_omero.model._t_IObject, (), (('_value', (), _M_omero._t_RString),))
    EventType._ice_type = _M_omero.model._t_EventType

    # Operation tables used by the proxy methods above; the single data
    # member _value is an omero RString.
    EventType._op_getValue = IcePy.Operation('getValue', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (), (), _M_omero._t_RString, ())
    EventType._op_setValue = IcePy.Operation('setValue', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, (), (((), _M_omero._t_RString),), (), None, ())

    _M_omero.model.EventType = EventType
    del EventType

    _M_omero.model.EventTypePrx = EventTypePrx
    del EventTypePrx
# End of module omero.model
__name__ = 'omero'
# End of module omero
|
[
"gmauro@crs4.it"
] |
gmauro@crs4.it
|
f2d5578452e6ad1675bb94b5a8534320ee138af9
|
a89debaa27ea0cb87a6cb3dd3464b0c00f5f3e92
|
/braggvectors.py
|
23a52a6898cc833f65a3a77e5ba86ed3b1a52b64
|
[] |
no_license
|
PedroMDuarte/bragg-scattering
|
66aefb0d3f38f055c283bc6cf1663e0f60332dd1
|
3c1d4893e654720df18ec7fd0095e5de0979a5a4
|
refs/heads/master
| 2020-07-08T10:47:45.052122
| 2016-01-17T09:21:42
| 2016-01-17T09:21:42
| 11,858,425
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,253
|
py
|
import numpy as np
import vec3
import pylab
from scipy import stats
import scipy
l671 = 671.
l1064 = 1064.
# Coordinate system:
# input Bragg light for HHH propagates almost along +Y
# +Z is up
# Q vector for bragg scattering HHH
# Remember that for AFM there is a doubling of the unit cell, so the
# lattice spacing is lambda instead of lambda/2
Q = 2*np.pi/l1064 * vec3.vec3( -1., -1., 1.)
Qunit = Q/abs(Q)
# Calculate angle for HHH Bragg conditiion
# with respect to Q vector
braggTH = np.arccos( abs(Q) / 2. / (2.*np.pi/l671) )
print "HHH Bragg angle wrt Q = ", braggTH * 180. / np.pi
# Calculate angle for HHH Bragg condition
# with respect to y axis, when coming from
# under lattice beam 2.
from scipy.optimize import fsolve
def cond(x):
return np.sin(x)-np.cos(x) + 3./2. * l671 / l1064
braggTH2 = fsolve(cond, 0.)
print "HHH Bragg angle wrt -y axis = ", braggTH2 * 180. / np.pi
# Q for 100 scattering
Q100 = 2*np.pi / (l1064/2) * vec3.vec3( 0., +1, 0.)
Q100unit = Q100/abs(Q100)
# Calculate angle for 100 Bragg condition
# with respect to Q vector
braggTH100 = np.arccos( abs(Q100) / 2. / (2.*np.pi/l671) )
print
print "100 Bragg angle wrt Q = ", braggTH100 * 180. / np.pi
# Incoming and outgoing light vector for 100
kin100 = vec3.vec3()
kin100.set_spherical( 2.*np.pi/l671, np.pi/2 - braggTH100, 3* np.pi / 2)
kout100 = vec3.vec3()
kout100.set_spherical( 2.*np.pi/l671, np.pi/2 - braggTH100, np.pi / 2)
kMANTA = kout100
# Incoming light vector HHH
thi = np.pi/2 - braggTH2
phi = 90. * np.pi / 180.
kin = vec3.vec3()
kin.set_spherical( 2.*np.pi/l671, thi, phi )
# Default polarization of incoming light vector
kipol = [1.,0]
# Unit vector that points perp to Bragg cone
kinperp = vec3.cross( kin, vec3.cross(Q,kin) )
kinperp = kinperp / abs(kinperp)
# Direction of A2 detector
kout = kin + Q
a2 = kout / abs(kout)
kA2 = kout
# Unit vector perpendicular to plane of Q and A2
Qperp1 = vec3.cross( Q, a2 )
Qperp1 = Qperp1 / abs(Qperp1)
# Unit vector perpendicular to Q and in plane of kin,kout,Q
Qperp2 = vec3.cross( Q, Qperp1)
Qperp2 = Qperp2 / abs(Qperp2)
# Using Qunit and Qperp2 one can use the Bragg angle to
# easily parameterize vectors near kin and kout
# Define direction of A1 camera
a1 = vec3.vec3()
a1.set_spherical( 1., np.pi/2., np.pi/3. )
kA1 = a1*abs(kin)
#print kA1/abs(kA1)
#print kA2/abs(kA2)
#print kin/abs(kin)
# Angle between A1 and A2
thetaA1A2 = 180. / np.pi * np.arccos(kA1*kA2 / abs(kA1) / abs(kA2))
print "Angle between A1 and A2 = %.2f" % thetaA1A2
# Here two functions are defined that allow getting kin
# and kout vectors as a function of their angle measured
# from the nominal bragg angle (useful for rocking curve)
def kinput( angle ): # angle is in mrad
kia = -Qunit*np.cos(braggTH + angle/1000.) - Qperp2*np.sin(braggTH + angle/1000.)
kia = abs(kin) * kia
#b.add_points( (-1*kia/abs(kia)).tolist() )
return kia
def koutput( angle ): # angle is in mrad
kfa = Qunit*np.cos(braggTH + angle/1000.) - Qperp2*np.sin(braggTH + angle/1000.)
kfa = abs(kin) * kfa
#b.add_points( (kfa/abs(kfa)).tolist() )
return kfa
# I can define a variation of kinput in the plane of the
# chamber
def kinchamber( phi ):
k = vec3.vec3()
the_ = np.pi/2
phi_ = np.pi/2. + np.pi* phi/180.
k.set_spherical( 2.*np.pi/l671, the_, phi_ )
return k
# Here I can define a kinput angle by giving the polar and
# azimuthal angles of the window that the Bragg beam is comming in
# Using this definition, our nominal Bragg position is
# polar = np.pi/2 + 3.0degrees
# azim = 0.
def kinsph(theta, phi):
k = vec3.vec3()
the_ = np.pi/2 - ( np.pi*theta/180. - np.pi/2.)
phi_ = np.pi/2. + np.pi* phi/180.
k.set_spherical( 2.*np.pi/l671, the_, phi_ )
return k
ksquad =[]
ksquad.append ( kinsph(91., -75.) )
ksquad.append ( kinsph(90.,-14.) )
ksquad.append ( kinsph(88., -4.) )
ksquad.append ( kinsph(93., 0.) )
ksquad.append ( kinsph(91., 15.) )
ksquad.append ( kinsph(90., 34.) )
# Extra points for plot
#ksquad.append ( kinsph(90.,-2.))
#ksquad.append ( kinsph(90., 2.))
#ksquad.append ( kinsph(90., 4.))
#ksquad.append ( kinsph(90., 6.))
ksquadth = np.array( [-75.,-14.,-4.,0.,15.,34.] \
# + [-2.,2.,4.,6.] \
) *1. * np.pi/180.
print
print "Difference wrt Bragg Q, |Q-K| * l1064 / (2)"
#print " Nominal K =", 1./(abs(kout-kin - Q)*l1064/(4*np.pi))
print " -250mrad K =", abs(kout-kinsph(90.,-14.)-Q)*l1064/2
print "Same port K =", abs(kout-kinsph(88.,-4.)-Q)*l1064/2
print " +250mrad K =", abs(kout-kinsph(91.,15.)-Q)*l1064/2
print " +500mrad K =", abs(kout-kinsph(90.,34.)-Q)*l1064/2
# Here I can define four koutput vectors at the four quadrants
# of the Bragg lens
kout_quadrants = []
kout_quadrants.append ( koutput(+50.))
kout_quadrants.append ( koutput(-50.))
def koutput_perp(angle):
kfa = kout + abs(kout)*np.sin(angle/1000.)* Qperp1
kfa = abs(kin) * kfa / abs(kfa)
return kfa
kout_quadrants.append( koutput_perp(+50.))
kout_quadrants.append( koutput_perp(-50.))
# Next, a function is defined that creates a list of
# koutput vectors in a circular solid angle, with a given
# diameter in mrad, centered around a given angle
def k2aperture( angle, aperture):
step = 20. #space in mrad of points in list
nstep = np.ceil(aperture/step)
avals = np.linspace( -nstep*step, nstep*step, 2*nstep+1, endpoint=True)
arrays = [avals,avals]
arr = np.empty([avals.size]*2+[2])
for i, a in enumerate( np.ix_(*arrays) ):
arr[...,i] = a
Kset = arr.reshape(-1,2)
#for K in kset:
#return arr.reshape(-1,2)
print [avals.size]*2 + [2]
print avals.shape
print arr.shape
#print arr
#print avals
###### VISUALIZATION #####
# The main vectors defined so far are plotted on the sphere here
import bsphere
b = bsphere.Bloch()
origin = vec3.vec3()
#b.add_arrow( origin, Q/abs(kin) , 'blue')
#b.add_arrow( -kin/abs(kin), origin, 'red')
#b.add_arrow( origin, kA2/abs(kA2), 'red')
#b.add_arrow( origin, kA1/abs(kA1), 'green')
b.add_arrow( origin, Q100/abs(kin100) , 'blue')
b.add_arrow( -kin100/abs(kin100), origin, 'red')
b.add_arrow( origin, kA2/abs(kA2), 'orange')
b.add_arrow( origin, kA1/abs(kA1), 'green')
b.add_arrow( origin, kMANTA/abs(kMANTA), 'red')
#b.show()
if __name__ == "__main__":
verbose = True
else:
verbose = False
##### VERTICAL LATTICE BEAM TILTED BY 30 mrad +Y, 20 mrad -X #####
# Direct lattice (For AFM lattice spacing is doubled )
a1 = 2. * l1064/2 * vec3.vec3( 1., 0., 0.)
a2 = 2. * l1064/2 * vec3.vec3( 0., 1., 0.)
a3 = vec3.vec3()
# Deviations observed in mirror mount
dy = -1. # inch
dx = -0.5
# inch # Be careful with the sign of the arctangent
L = 88.0 / 2.54
dphi = np.arctan( dy/ dx)
dtheta = np.sqrt(dy**2 + dx**2 ) / L
a3.set_spherical(2. * l1064/2 , dtheta, dphi)
# Reciprocal lattice
b1 = 2 * np.pi * vec3.cross( a2, a3) / ( a1 * vec3.cross(a2, a3) )
b2 = 2 * np.pi * vec3.cross( a3, a1) / ( a1 * vec3.cross(a2, a3) )
b3 = 2 * np.pi * vec3.cross( a1, a2) / ( a1 * vec3.cross(a2, a3) )
Qtilt = -b1 - b2 + b3
##print a3/abs(a3)
##print (a3/abs(a3)).get_spherical()
btilt = bsphere.Bloch()
###btilt.add_arrow( origin, a1/abs(a1) , 'blue')
###btilt.add_arrow( origin, a2/abs(a1) , 'blue')
btilt.add_arrow( origin, a3/abs(a1) , 'blue')
###btilt.add_arrow( origin, b1/abs(b1) , 'red')
###btilt.add_arrow( origin, b2/abs(b1) , 'red')
###btilt.add_arrow( origin, b3/abs(b1) , 'red')
###btilt.add_arrow( origin, Qtilt/ abs(kin), 'green')
###btilt.add_arrow( origin, Q/ abs(kin), 'black')
###btilt.show()
if verbose:
print
print "### TILTED TOP LATTICE BEAM ###\n"
print "Qtilt spherical coords.:"
print (Qtilt/abs(Qtilt)).get_spherical()
print "Q spherical coords.:"
print (Q/abs(Q)).get_spherical()
# Calculate angle for HHH Bragg conditiion
# with respect to Q vector
print "Percent difference between Q and Qtilt:"
print 100*(abs(Qtilt)-abs(Q))/abs(Q)
print 100*(Q-Qtilt)/abs(Q)
braggTHtilt = np.arccos( abs(Qtilt) / 2. / (2.*np.pi/l671) )
if verbose:
print
print "HHH Bragg angle wrt Qtilt = ", braggTHtilt * 180. / np.pi
print "Delta Bragg angle (tilt/notilt) = ",
print (braggTH - braggTHtilt) * 1000. , "mrad"
# Find the actual kinTilt that satisfies exactly the Bragg condition
# First find two vectors that are perpendicular to Qtilt
Qtilt_p1 = vec3.cross( Qtilt, kA2 )
Qtilt_p1 = Qtilt_p1 / abs(Qtilt_p1) * abs(Qtilt/2) * np.tan(braggTHtilt)
Qtilt_p2 = vec3.cross( Qtilt_p1, Qtilt )
Qtilt_p2 = Qtilt_p2 / abs(Qtilt_p2) * abs(Qtilt/2) * np.tan(braggTHtilt)
# This plots them on the sphere for checking
###tharray = np.linspace( 0., 2*np.pi, 30 )
###kTilt_p = [ Qtilt/2. + Qtilt_p1*np.sin(th) + Qtilt_p2*np.cos(th) for th in tharray ]
###for kt in kTilt_p:
### btilt.add_arrow( origin, kt/abs(kin), 'purple' )
###btilt.show()
# I want to find the Bragg output vector that is closest to kA2
def delta_kA2 ( theta ):
kt = Qtilt/2. + Qtilt_p1*np.sin(theta) + Qtilt_p2*np.cos(theta)
return np.arccos( kt * kA2 / ( abs(kt) * abs(kA2) ) )
# Here it can be verified graphically that the minimum is indeed at theta=0
###thX = np.linspace( -np.pi/16, np.pi/16, 100)
###thY = np.array([ delta_kA2(th) for th in thX])
###import matplotlib.pyplot as plt
###plt.plot( thX, thY)
###plt.show()
# The same theta=0 minimum is obtained using a numerical minimization
###th_min = scipy.optimize.brent( delta_kA2)
###print th_min
kOutTilt = Qtilt/2. + Qtilt_p2
kinTilt = kOutTilt - Qtilt
if verbose:
print
print "Angle between current output and Qtilt = ",
thetaA2tilt = np.arccos( Qtilt * kA2 / ( abs(Qtilt) * abs(kA2) ) )
print thetaA2tilt * 180./np.pi, "deg"
print "Deviation of current kA2 from Bragg condition =",
print (braggTHtilt - thetaA2tilt ) * 1000. , "mrad"
#kinTilt = kA2 - Qtilt
print
print "Angle between current input and kinTilt =",
print np.arccos( kinTilt * kin / ( abs(kinTilt) * abs(kin) ) ) *1000., "mrad"
kinS = kin.get_spherical()
kinTiltS = kinTilt.get_spherical()
print " dTheta = ", (kinS[1] - kinTiltS[1])*1000.
print " dPhi = ", (kinS[2] - kinTiltS[2])*1000.
# Here I printed out a short description of the system to send
# to the theorists
def printsph( l, k, ):
sph = (k/abs(kin)).get_spherical()
cartU = (k/abs(kin))
cart = (k/abs(kin)) * 532./671.
cartA1 = (kA1/abs(kin)) * 532./671.
cartA2 = (kA2/abs(kin)) * 532./671.
cartM = (kMANTA/abs(kin)) * 532./671.
QA1 = cartA1 - cart
QA2 = cartA2 - cart
QM = cartM - cart
cstr = '(%+.3f, %+.3f, %+.3f)'
print ('%16s = (%+.3f*pi, %+.3f*pi) = '+cstr+' = '+cstr+' ==> '+cstr +' '+cstr+' '+cstr) % \
(l,sph[1]/np.pi,sph[2]/np.pi, cartU[0],cartU[1],cartU[2], cart[0], cart[1], cart[2], \
QA1[0],QA1[1],QA1[2], QA2[0],QA2[1],QA2[2], QM[0],QM[1],QM[2])
if verbose:
print
print "##### SYSTEM DESCRIPTION #####\n"
print "Optical lattice original design has three input beams which propagate in directions:\n"
print "1. +x (0.500*pi, 0.000*pi)"
print "2. -y (0.500*pi, -0.500*pi)"
print "3. -z (1.000*pi, 0.000*pi)"
print
print "These three beams are retro reflected to form the lattice.\n"
print "For the beams on the xy plane we are confident that they point along the intended direction, however the beam along z is tilted."
print "As a result, in our actual setup the input beams propagate in the following directions:\n"
printsph( '1. +x', a1)
printsph( '2. -y', -a2)
printsph( '3. tilted z', -a3)
print
print "List of available input k vectors."
print "The pair represents polar and azimuthal angle."
print "Example: the HHH Input light propagates along +y\n"
print "\t\t Spherical \t\t Unit Cartesian \t\t Normed : |k671|==532/671 \tk_A1 - k_Input \t\t k_A2 - k_Input \t k_M - k_Input"
#printsph('HHH Input', kin)
printsph('100 Input', kin100)
for i, k in enumerate(ksquad):
printsph('Input #%d'%i, k)
print
print "List of available output k vectors."
print "The pair represents polar and azimuthal angle."
print "Example: The ANDOR1 camera is on the xy plane, at the line y=x*tan(60deg) "
printsph('ANDOR1', kA1)
printsph('ANDOR2 (HHH)', kA2)
printsph('MANTA (100)', kMANTA)
###print "kin100",kin100/abs(kin100)
###print (kin100/abs(kin100)).get_spherical()
###print "kA1",kA1/abs(kA1)
###print (kA1/abs(kA1)).get_spherical()
###print "kA2",kA2/abs(kA2)
###print (kA2/abs(kA2)).get_spherical()
###print "kMANTA",kMANTA/abs(kMANTA)
###print (kMANTA/abs(kMANTA)).get_spherical()
print
print "Bragg vector Q1"
print "kInput HHH = ", kin
print "kOutput HHH = ", kA1
print "Q HHH = ", kA1 - kin
print "Q * (532/2/np.pi) = ", (532/np.pi/2)*(kA1-kin)
print
print "Bragg vector Q2"
print "kInput HHH = ", kin
print "kOutput HHH = ", kA2
print "Q HHH = ", kA2 - kin
print "Q * (532/2/np.pi) = ", (532/np.pi/2)*(kA2-kin)
|
[
"pmd323@gmail.com"
] |
pmd323@gmail.com
|
9bd1564da82ff53bcd0503ae8247e966903cb1c8
|
b7384e1a893368abca9fb64153f61455fee01a11
|
/Ejercicios - Febrero3/tempCodeRunnerFile.py
|
0516452c30eef59e9872646d65e9e87706319ad9
|
[] |
no_license
|
AnaGVF/Programas-Procesamiento-Imagenes-OpenCV
|
43122bc10a64fdb5f3fc9054a388d0ffe65b7eb3
|
0fc9fa822ee7bef4402b9b7c4f2ef8f91676a53e
|
refs/heads/main
| 2023-05-30T20:14:41.164126
| 2021-06-12T18:38:16
| 2021-06-12T18:38:16
| 376,359,595
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26
|
py
|
# print(matrizAritmetica)
|
[
"aniavassallo@gmail.com"
] |
aniavassallo@gmail.com
|
27583ed8d12a927f919d0bf2846c7fb0f92bd05c
|
8cb1637a09bc704b7a83f18b1d6c013b0de06c65
|
/Tugas 3.3 (Metode Bagidua).py
|
a4dba0ee83ac3d8a90310d90fb7607ec6992e158
|
[] |
no_license
|
nurfiskah/Metode-Numerik
|
4db962b1b765ebc5c71c60f81df8653270434de9
|
f949be303ef6b96492d46d1b02804215d052d9d7
|
refs/heads/master
| 2021-01-09T11:38:26.677173
| 2020-05-28T09:35:33
| 2020-05-28T09:35:33
| 242,286,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
import math
def f(x):
return x**3 + 2*x**2 + 10*x - 20
a = 1
b = 1.5
e = 0.000001
N = 100
iterasi = 0
print('==================================')
print(' c f(c)')
print('==================================')
while True:
iterasi += 1
c = (a + b)/2
if f(a)*f(c) < 0:
b = c
else:
a = c
print('{:7.6f} \t {:+15.10f}'.format(c, f(c)))
if abs(f(c)) < e or iterasi >= N:
break
print('==================================')
|
[
"noreply@github.com"
] |
nurfiskah.noreply@github.com
|
ab2bdf880cc67f0a59eee1e21470ef04e6ea6b74
|
c8312ad2b4cb17b0b5f169d8871b99a4f80db4ce
|
/examples/train_mnist.py
|
946757422cc0561d881a41c9c18bb4739ec8c9a8
|
[] |
no_license
|
ysasaki6023/bibliotheca
|
c832fe7e99818ec0bb7507ef7a1ab83bf569c5df
|
3331f60838ffabaa4864b5ebb9b48a365d36402f
|
refs/heads/master
| 2021-08-23T04:56:08.721679
| 2017-12-03T12:54:33
| 2017-12-03T12:54:33
| 112,337,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,193
|
py
|
# -*- coding: utf-8 -*-
import os,sys,argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer.functions.loss import softmax_cross_entropy
from chainer.functions.evaluation import accuracy
from chainer import reporter
# Network definition
class net(chainer.Chain):
def __init__(self, n_units, n_out):
super(net, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(None, n_units) # n_in -> n_units
self.l2 = L.Linear(None, n_units) # n_units -> n_units
self.l3 = L.Linear(None, n_out) # n_units -> n_out
def __call__(self, x):
h = x
h = F.relu(self.l1(h))
h = F.relu(self.l2(h))
h = self.l3(h)
return h
class Model(L.Classifier):
def __call__(self,x,t):
y = self.predictor(x)
loss = F.softmax_cross_entropy(x,t)
acc = accuracy.accuracy(x,t)
reporter.report({"accuracy": acc,"loss":loss}, self)
return loss
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,help='Number of sweeps over the dataset to train')
parser.add_argument('--frequency', '-f', type=int, default=-1,help='Frequency of taking a snapshot')
parser.add_argument('--gpu', '-g', type=int, default=-1,help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='result',help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=1000,help='Number of units')
parser.add_argument('--noplot', dest='plot', action='store_false',help='Disable PlotReport extension')
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Set up a neural network to train
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
predictor = net(args.unit, 10)
#model = L.Classifier(predictor,lossfun=)
model = Model(predictor)
if args.gpu >= 0:
# Make a specified GPU current
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist()
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
print(next(train_iter))
# Set up a trainer
updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
trainer.extend(extensions.dump_graph('main/loss'))
# Take a snapshot for each specified epoch
frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Save two plot images to the result dir
if args.plot and extensions.PlotReport.available():
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'))
trainer.extend(
extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'epoch', file_name='accuracy.png'))
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
if args.resume:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
if __name__=="__main__":
main()
|
[
"ysasaki6023@gmail.com"
] |
ysasaki6023@gmail.com
|
8a3c3cdb649c3cd6f8a31a3906227e7678f6a976
|
ce2af0d270a9c07a9f3825d6af16c5bf518c2553
|
/datasets/dataset_factory.py
|
1b6afd10f9ca5b1bd9320fd78039b5f580632c6e
|
[] |
no_license
|
PeterWang1986/radar
|
7695a0687c9d1199b0a8c135e0ef5890647860cd
|
42e968fae76abc38daef4c2529826f20d7adb0dd
|
refs/heads/master
| 2020-04-28T21:19:15.949534
| 2019-04-10T14:52:01
| 2019-04-10T14:52:01
| 175,578,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datasets import shtech_dataset
datasets_map = {
'shtech_part_A': shtech_dataset,
'shtech_part_B': shtech_dataset
}
def get_dataset(name, dataset_dir, FLAGS):
if name not in datasets_map:
raise ValueError('currently NOT support dataset name: %s' % name)
return datasets_map[name].get_dataset(dataset_dir, FLAGS)
|
[
"peng.wang@weimob.com"
] |
peng.wang@weimob.com
|
8601c293232fb517990f8f5f8780e5bdee340477
|
09ba03345c1118b3bb0ab971a13a5561a32a1441
|
/filter_plugins/custom.py
|
e033afd6ae40354908bfa27a210aec2a656e42eb
|
[] |
no_license
|
henryshue/es_playbook
|
a43dbb2eece830108ddf06eac2bc213795551e69
|
4a5f1523e31cd63fb891f54be8d1351b3682721c
|
refs/heads/master
| 2020-03-31T11:24:17.918735
| 2018-10-16T02:12:39
| 2018-10-16T02:12:39
| 152,175,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
__author__ = 'henryshue'
import re
import os.path
from six import string_types
def append_to_list(values=[], suffix=''):
if isinstance(values, string_types):
values = values.split(',')
return [str(value+suffix) for value in values]
def array_to_str(values=[],separator=','):
return separator.join(values)
class FilterModule(object):
def filters(self):
return {'append_to_list':append_to_list,
'array_to_str':array_to_str}
|
[
"henryshue@163.com"
] |
henryshue@163.com
|
106ad1dc828dbe88883b891e1fe869c1d0139ec7
|
ccc050265da18c7318443ee0b88a6810b69b318d
|
/example/vol9/9.11-9.12/admin2.py
|
250f86aafee2c3940911a2c59347bf8d77a1f6de
|
[] |
no_license
|
563213341/git-example
|
d1bdaf45cb58c93a4f7cb0fc2796303961479672
|
68ca17c0834a7a183a9fbac528371b95a7a945c3
|
refs/heads/master
| 2020-05-28T10:17:22.538465
| 2019-05-28T06:24:03
| 2019-05-28T06:24:03
| 188,967,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
from admin1 import Us
#----------------------------------------------------------------------------
class Pri():
def __init__(self,pri=['can add post','can del post']):
self.pri=pri
def showpri(self):
print('管理员的权限是'+self.pri[0]+'和'+self.pri[1])
#--------------------------------------------------------------------------
class Admin(Us):
def __init__(self,firstname,lastname,userage,usercountry):
super().__init__(firstname,lastname,userage,usercountry)
self.pri=Pri()
|
[
"1033470717@qq.com"
] |
1033470717@qq.com
|
b1622a3fa6408cd97be037a5618fe1655d8bafd3
|
48e9c4cf96689e2caaf1ba69a0eaa4377f8e60d6
|
/com/dxm/normal_tool/gzip_util.py
|
1e33905fa1fc4768d2314accc01352288f89a9a3
|
[] |
no_license
|
charlie93321/youget
|
f764d7cdf4f635d8abde0d4f1dc8e713e528efef
|
96e6b13a006ae1e693f3e9b78f94212726e8673a
|
refs/heads/master
| 2021-05-18T02:25:33.370780
| 2020-11-07T10:19:32
| 2020-11-07T10:19:32
| 251,064,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
import gzip
from com.dxm.normal_tool.str_util import is_empty
def gzip_decode(str1:str):
if is_empty(str1):
return '请输入非空字符串!!!'
else:
return gzip.decompress(str1.encode("utf-8")).decode("utf-8")
|
[
"2459060612@qq.com"
] |
2459060612@qq.com
|
dc243ef0e49fa3ff8a861f221d8d50665be49a2d
|
e3355456512fe013878e357916f8ebde35fe1505
|
/chat/routing.py
|
d1253516037c58f4123da0b1bb0e3a9d82f94069
|
[] |
no_license
|
alaalqadi/e-university-phi
|
84e87a5e7ae550d9d17707639444e720d0f9443d
|
9223a739ab954bd004a24784af09ab75551af309
|
refs/heads/master
| 2022-04-28T05:47:48.748226
| 2020-01-22T11:09:39
| 2020-01-22T11:09:39
| 231,812,008
| 0
| 0
| null | 2022-04-22T22:57:49
| 2020-01-04T18:46:43
|
CSS
|
UTF-8
|
Python
| false
| false
| 186
|
py
|
# chat/routing.py
from django.urls import re_path
from chat.consumer import ChatConsumer
websocket_urlpatterns = [
re_path(r'ws/session_view/(?P<room_name>\w+)/$', ChatConsumer),
]
|
[
"a.alqadi@sit-mena.com"
] |
a.alqadi@sit-mena.com
|
a4ce0a8a070a51c2527899c48bfd7575035d5284
|
f47dad4711552a97f4da6fc47c6c628efe3fa28a
|
/blog/views.py
|
3a86a1d9b7b00127a91ac36dd5e3948c16a0d63c
|
[
"MIT"
] |
permissive
|
Haw2K/my-first-blog
|
e683c3ad294f63953ace6ca98c6ad6d80128ec2c
|
8a4d92552548adc36832cd65f30b24fa41725574
|
refs/heads/master
| 2021-09-12T22:05:02.986779
| 2018-04-21T11:16:03
| 2018-04-21T11:16:03
| 125,200,750
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,213
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Post
from .forms import PostForm
from django.shortcuts import redirect
from .models import InstabotDjangoModel
from .forms import PostInstabotDjangoModel
import os
import time
from .src import InstaBot
def post_list(request):
#posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
posts = InstabotDjangoModel.objects.all().order_by('published_date')
return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, pk):
post = get_object_or_404(InstabotDjangoModel, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
def post_new(request):
# if request.method == "POST":
# form = PostInstabotDjangoModel(request.POST)
# if form.is_valid():
# post = form.save(commit=False)
# post.author = request.user
# post.published_date = timezone.now()
# post.save()
# return redirect('post_detail', pk=post.pk)
# else:
# form = PostInstabotDjangoModel()
# form = PostInstabotDjangoModel()
# return render(request, 'blog/post_edit.html', {'form': form})
bot = InstaBot(
login="shotaowl",
password="Danil5891",
like_per_day=1000,
comments_per_day=0,
tag_list=['краснаяполяна', 'газпромлаура', 'сочи', 'совариум', 'sochi', 'krasnaypolyna', 'sovarium',
'фотографсочи'],
tag_blacklist=['rain', 'thunderstorm'],
user_blacklist={},
max_like_for_one_tag=50,
follow_per_day=300,
follow_time=8 * 60,
unfollow_per_day=300,
unfollow_break_min=15,
unfollow_break_max=30,
log_mod=0,
proxy='',
# List of list of words, each of which will be used to generate comment
# For example: "This shot feels wow!"
comment_list=[["this", "the", "your"],
["photo", "picture", "pic", "shot", "snapshot"],
["is", "looks", "feels", "is really"],
["great", "super", "good", "very good", "good", "wow",
"WOW", "cool", "GREAT", "magnificent", "magical",
"very cool", "stylish", "beautiful", "so beautiful",
"so stylish", "so professional", "lovely",
"so lovely", "very lovely", "glorious", "so glorious",
"very glorious", "adorable", "excellent", "amazing"],
[".", "..", "...", "!", "!!", "!!!"]],
# Use unwanted_username_list to block usernames containing a string
## Will do partial matches; i.e. 'mozart' will block 'legend_mozart'
### 'free_followers' will be blocked because it contains 'free'
unwanted_username_list=[
'second', 'stuff', 'art', 'project', 'love', 'life', 'food', 'blog',
'free', 'keren', 'photo', 'graphy', 'indo', 'travel', 'art', 'shop',
'store', 'sex', 'toko', 'jual', 'online', 'murah', 'jam', 'kaos',
'case', 'baju', 'fashion', 'corp', 'tas', 'butik', 'grosir', 'karpet',
'sosis', 'salon', 'skin', 'care', 'cloth', 'tech', 'rental', 'kamera',
'beauty', 'express', 'kredit', 'collection', 'impor', 'preloved',
'follow', 'follower', 'gain', '.id', '_id', 'bags'
],
unfollow_whitelist=['example_user_1', 'example_user_2'])
while True:
bot.new_auto_mod()
def post_edit(request, pk):
post = get_object_or_404(InstabotDjangoModel, pk=pk)
if request.method == "POST":
form = PostInstabotDjangoModel(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostInstabotDjangoModel(instance=post)
return render(request, 'blog/post_edit.html', {'form': form})
|
[
"haw22k@gmail.com"
] |
haw22k@gmail.com
|
9c673287d86449ff60e55ac9556ea1559adaf8f3
|
bfa00115a57f87a1cafce9c54fcff3cf659550db
|
/manualbook/project/models.py
|
dd812f3219cfa5ee4a7b1acf9a26f813a614e0a8
|
[] |
no_license
|
maligitcode/mbpd
|
ac7b5f23e20f68639ebd7c23c548d63c8fc9bbb2
|
2a1bd00d5554d5cd927e94b7617b939056959ce0
|
refs/heads/master
| 2023-09-03T01:24:40.180025
| 2021-11-14T11:51:30
| 2021-11-14T11:51:30
| 370,704,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
from category.models import Category
from django.db import models
class Project(models.Model):
Category = models.ForeignKey(Category,related_name="Project",on_delete=models.CASCADE)
title = models.CharField(max_length=225)
progress = models.IntegerField(default=0)
date_updated = models.DateTimeField(auto_now_add=True)
class Meta:
db_table="project"
def __str__(self):
return self.title
class Projectdocument(models.Model):
Project = models.ForeignKey(Project,related_name="Document",on_delete=models.CASCADE)
title = models.CharField(max_length=225)
file = models.FileField(upload_to='documents/',null=True, blank=True)
date_upload = models.DateTimeField(auto_now_add=True)
class Meta:
db_table="document"
def __str__(self):
return self.title
|
[
"aliofficial.net@gmail.com"
] |
aliofficial.net@gmail.com
|
651fd06bcc7624e39f6af72889aff0f75d28c22f
|
f3a4d3799fc317d60130d1a4fba8aebc6915f112
|
/day 17/iq_size.py
|
a8b01143735cdb6f10559634391b5af4b851f148
|
[] |
no_license
|
prajjawal98/FSDK2019
|
aaf1a06c417b5ac896209ecb6d3edbc97b978199
|
44e78a3f48eacbdb52163484163a3f8759304ab3
|
refs/heads/master
| 2020-05-24T08:56:34.441693
| 2019-10-25T13:55:04
| 2019-10-25T13:55:04
| 187,194,361
| 0
| 1
| null | 2019-10-25T14:12:19
| 2019-05-17T10:10:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
"""
Q. (Create a program that fulfills the following specification.)
iq_size.csv
Are a person's brain size and body size (Height and weight) predictive of his or her intelligence?
Import the iq_size.csv file
It Contains the details of 38 students, where
Column 1: The intelligence (PIQ) of students
Column 2: The brain size (MRI) of students (given as count/10,000).
Column 3: The height (Height) of students (inches)
Column 4: The weight (Weight) of student (pounds)
What is the IQ of an individual with a given brain size of 90, height of 70 inches, and weight 150 pounds ?
Build an optimal model and conclude which is more useful in predicting intelligence Height, Weight or brain size.
"""
import matplotlib.pyplot as plt
import pandas as pd
#imports the CSV dataset using pandas
dataset = pd.read_csv('iq_size.csv')
print(dataset)
features=dataset.iloc[:,1:4].values
labels=dataset.iloc[:,0:1].values
import statsmodels.api as sm
features = sm.add_constant(features)
features_opt = features[:,:4]
regressor_OLS = sm.OLS(endog = labels, exog = features_opt).fit()
regressor_OLS.summary()
features_opt = features[:,:3]
regressor_OLS = sm.OLS(endog = labels, exog = features_opt).fit()
regressor_OLS.summary()
features_opt = features[:,1:3]
regressor_OLS = sm.OLS(endog = labels, exog = features_opt).fit()
regressor_OLS.summary()
features_opt = features[:,1:2]
regressor_OLS = sm.OLS(endog = labels, exog = features_opt).fit()
regressor_OLS.summary()
from sklearn.preprocessing import PolynomialFeatures
poly_object = PolynomialFeatures(degree = 5)
features_poly = poly_object.fit_transform(features)
from sklearn.linear_model import LinearRegression
lin_reg_2 = LinearRegression()
lin_reg_2.fit(features_poly, labels)
import numpy as np
x = np.array([90,65,117])
x=x.reshape(1,-1)
print( lin_reg_2.predict(poly_object.transform(x)))
|
[
"prajjawalkansal1218@gmail.com"
] |
prajjawalkansal1218@gmail.com
|
1a1ecb11837480581dbf17ba1ae556b649d9bf22
|
1c068a12bc46ef5d32a2c2c1a908e82cf9beb0f3
|
/PyHello.py
|
4972c4f1861d65fcbbcb9daa831e5a47e7655812
|
[] |
no_license
|
TTheHolyOne/Hello-World
|
0a71749c29d29db42867c2cab97c9f2cc8b7e0d9
|
104626c4319c02de5c4cd14c6a4d1a09537fc06c
|
refs/heads/master
| 2023-05-08T22:55:16.158415
| 2021-05-29T00:23:49
| 2021-05-29T00:23:49
| 371,834,402
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
print("Hello World!")
input()
|
[
"69742571+TTheHolyOne@users.noreply.github.com"
] |
69742571+TTheHolyOne@users.noreply.github.com
|
885863d55d2dd902e260f398635abc158b9b77e7
|
79d336c562a2da7a9a00ca6b73e3a7480055c981
|
/parse.py
|
e44e796dbb847ad70c7f2cd31eb5acc14696831d
|
[] |
no_license
|
acekingke/foxbase_inCloud
|
5da2f5c367dc0dfb3eeac0322191506fd9eb1b94
|
ac6a10210732fabb086e7b8b41759fc4eeff8c2d
|
refs/heads/master
| 2021-01-09T06:54:58.881032
| 2019-01-06T08:19:50
| 2019-01-06T08:19:50
| 60,998,702
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,302
|
py
|
#-*_ coding:utf-8 -*-
__author__ = 'kyc'
from lex import lg
from rply import ParserGenerator
import fox_ast as ast
from err import *
pg = ParserGenerator([i.name for i in lg.rules],
precedence=[("right", ["OR"]),("right",["AND"]),("right",["NOT"]),
("left",["GT","GE","LT","LE"]),
("left", ['PLUS', 'MINUS']),("left",["MUL","DIV","MOD"]) ,
("right",["UMINUS"]),("left",["POWER"])
], cache_id="myparser")
@pg.production("prog : block_cmd")
def main(p):
return p[0]
@pg.production("block_cmd : cmd block_cmd")
def block_cmd_many(p):
return ast.Box_cmd_block(p[0], p[1].cmd_list)
@pg.production("block_cmd : cmd ")
def block_cmd_one(p):
return ast.Box_cmd_block(p[0], list())
@pg.production("cmd : expr")
@pg.production("cmd : assign_cmd")
@pg.production("cmd : print_cmd")
@pg.production("cmd : if_cmd")
@pg.production("cmd : docase_cmd")
@pg.production("cmd : do_while_cmd")
@pg.production("cmd : exit_cmd")
@pg.production("cmd : loop_cmd")
@pg.production("cmd : for_cmd")
@pg.production("cmd : do_cmd")
@pg.production("cmd : accept_cmd")
# for while docmd
def cmd(p):
return p[0]
@pg.production("loop_cmd : LOOP")
@pg.production("exit_cmd : EXIT")
def loop_or_exit(p):
if p[0].name == 'EXIT':
return ast.Box_exit_cmd()
elif p[0].name == 'LOOP':
return ast.Box_loop_cmd()
@pg.production("expr : NUMBER")
@pg.production("expr : DATE")
@pg.production("expr : TrueValue")
@pg.production("expr : FalseValue")
@pg.production("expr : STRING")
@pg.production("expr : IDENTIFIER")
#func_cmd
@pg.production("expr : func_cmd")
def expression_number(p):
if p[0].name == 'NUMBER':
if "." in p[0].getstr():
return ast.Box_expr(float(p[0].getstr()), "NUMBER", "FLOAT")
else :
return ast.Box_expr(int(p[0].getstr()), "NUMBER", "INT")
elif p[0].name == "DATE":
return ast.Box_expr(p[0].getstr(), "DATE")
elif p[0].name == "TrueValue" or p[0].name == 'FalseValue':
return ast.Box_expr(p[0].getstr(), "LOGIC")
elif p[0].name == "STRING":
return ast.Box_expr(p[0].getstr(), "STRING")
elif p[0].name == 'IDENTIFIER':
return ast.get_variable(p[0].getstr())
elif p[0].name == "FUNCTION":
return p[0]
else:
raise ParserError("type error")
@pg.production("expr : expr PLUS expr")
@pg.production("expr : expr MINUS expr")
@pg.production("expr : expr DIV expr")
@pg.production("expr : expr MUL expr")
@pg.production("expr : expr MOD expr")
@pg.production("expr : expr POWER expr")
def expression_op(p):
op = p[1].name
left = p[0]
right = p[2]
return ast.Box_op(op, left, right)
# 负号处理
@pg.production("expr : MINUS expr",precedence='UMINUS')
def expression_op2(p):
op = "UMINUS"
return ast.Box_op(op, p[1], None)
# relation op
# contain
@pg.production("expr : expr GT expr")
@pg.production("expr : expr LT expr")
@pg.production("expr : expr LE expr")
@pg.production("expr : expr GE expr")
@pg.production("expr : expr CONTAIN expr")
@pg.production("expr : expr EQ expr")
@pg.production("expr : expr NE expr")
def expression_relation_op(p):
op = p[1].name
left = p[0]
right = p[2]
return ast.Box_relop( op, left, right)
# logic op
@pg.production("expr : expr AND expr")
@pg.production("expr : expr OR expr")
@pg.production("expr : NOT expr")
def expression_logic_op(p):
if len(p) == 2: #is not
op = p[0].name
left = p[1]
return ast.Box_logic_expr(op, left, None )
else : # is and or
op = p[1].name
left = p[0]
right = p[2]
return ast.Box_logic_expr(op, left, right)
@pg.production("expr : LPAREN expr RPAREN")
def p_expression_group(p):
return p[1]
# assign cmd
@pg.production("assign_cmd : IDENTIFIER EQU expr")
def assign_cmd(p):
varname = p[0].getstr()
r = None
if not ast.get_variable(varname):
r = ast.new_variable(varname, "global")
else:
r = ast.get_variable(varname)
r.set_expr(p[2])
return ast.Box_assign_cmd(r, p[2])
# print cmd
@pg.production("print_cmd : QPUT expr")
def print_cmd(p):
return ast.Box_print_cmd( p[1])
# if
@pg.production("if_cmd : IF expr block_cmd ENDIF")
def if_cmd1(p):
return ast.Box_if_cmd(p[1], p[2], None)
@pg.production("if_cmd : IF expr block_cmd ELSE block_cmd ENDIF")
def if_cmd4(p):
return ast.Box_if_cmd(p[1], p[2], p[4])
#todo: do cmd
@pg.production("do_cmd : DO FILE_NAME")
def do_cmd(p):
return ast.Box_do_cmd(p[1].getstr())
@pg.production("docase_cmd : DO CASE case_list OTHERWISE block_cmd ENDCASE")
def docase_cmd(p):
return ast.Box_do_case(p[2], p[4])
@pg.production("docase_cmd : DO CASE case_list ENDCASE")
def docase_cmd2(p):
return ast.Box_do_case(p[2],None)
@pg.production("case_list : CASE expr block_cmd ")
def case_list_one(p):
return ast.Box_case_list((p[1], p[2]), [])
@pg.production("case_list : CASE expr block_cmd case_list")
def case_list_many(p):
return ast.Box_case_list((p[1], p[2]), p[3].case_list)
#do while
#
#DO WHILE lExpression
# Commands
# [LOOP]
# [EXIT]
# ENDDO
@pg.production("do_while_cmd : DO WHILE expr block_cmd ENDDO")
def do_while(p):
return ast.Box_while_cmd(p[2], p[3])
# for
#FOR VarName = nInitialValue TO nFinalValue [STEP nIncrement]
# Commands
# [EXIT]
# [LOOP]
#ENDFOR | NEXT
@pg.production("for_cmd : FOR assign_cmd TO expr block_cmd ENDFOR")
@pg.production("for_cmd : FOR assign_cmd TO expr block_cmd NEXT")
@pg.production("for_cmd : FOR assign_cmd TO expr STEP expr block_cmd NEXT")
@pg.production("for_cmd : FOR assign_cmd TO expr STEP expr block_cmd ENDFOR")
def do_for_cmd(p):
initval = p[1]
finalval = p[3]
cmd = None
step = None
if len(p) == 6:
cmd = p[4]
elif len(p) == 8:
step = p[5]
cmd = p[6]
return ast.Box_for_cmd( initval, finalval, step, cmd)
#todo: procedure
# function
@pg.production("func_cmd : IDENTIFIER LPAREN arg_list RPAREN ")
def do_func_cmd(p):
return ast.Box_func_cmd(p[0].getstr(), p[2])
@pg.production("arg_list : arg_list COMMA expr")
def arg_list(p):
p[0].add(p[2])
return p[0]
@pg.production("arg_list : expr")
@pg.production("arg_list : none")
def arg_list_none(p):
#return []
return ast.Box_arg_list(p[0] and [p[0]])
#FUNCTION FunctionName
# [ LPARAMETERS parameter1 [ ,parameter2 ] , ... ]
# Commands
# [ RETURN [ eExpression ] ]
#[ENDFUNC]
@pg.production("accept_cmd : ACCEPT accept_item accept_lst")
def accept_cmd(p):
return ast.Box_accept_cmd(p[1], p[2])
@pg.production("accept_lst : COMMA accept_item accept_lst")
@pg.production("accept_lst : none")
def accept_lst(p):
if len(p) == 1:
return ast.Box_accept_item_list(None, [])
else:
return ast.Box_accept_item_list(p[1], p[2].item_list)
@pg.production("accept_item : STRING TO IDENTIFIER")
def accept_item(p):
return ast.Box_accept_item(p[0].getstr(), p[2].getstr())
# just for "none"
@pg.production("none : ")
def do_none(p):
return None
@pg.error
def error_handler(token):
raise ValueError("Ran into a %s where it wasn't expected, at line %d, col %d" % (token.gettokentype(),token.source_pos.lineno, token.source_pos.colno))
parser = pg.build()
|
[
"aceking.ke@gmail.com"
] |
aceking.ke@gmail.com
|
9859d3336fc0a1dc7ac7fa48303de97166685c27
|
480a6db59473a0f5bf5aadd53a193eccb1ae0840
|
/rev2.py
|
81381ad3a747ac49fe2e5ce9ced4d300fca3222d
|
[] |
no_license
|
EsaikaniL/GREK
|
ff89e92995f68c449c841f5ea1d2ac74a8911cd1
|
5587e02e603a15fc208f97d16fa68c441b64519a
|
refs/heads/master
| 2020-04-15T01:45:02.074581
| 2019-06-17T12:12:29
| 2019-06-17T12:12:29
| 164,290,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
//esaikani
a=str(input())
print()
print(a[::-1])
|
[
"noreply@github.com"
] |
EsaikaniL.noreply@github.com
|
230cead36af92854085213848dd8db024bb990db
|
3030b17925bc10945baea83207879a186d8a5f9a
|
/check_for_patient.py
|
4ac2202c512c0d59ca7c417b3eb69f3952939c00
|
[] |
no_license
|
hermespara/internship1
|
5459875e63f6ffa9a3492e86d64f8c8ddef03296
|
9264d235af7028889c079cfb8f4ed810c173a736
|
refs/heads/master
| 2020-03-10T13:59:02.453995
| 2018-07-20T14:26:52
| 2018-07-20T14:26:52
| 129,414,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,541
|
py
|
#!/usr/bin/python3.6
import csv
import re
lymphocytes_path = "/home/hermesparaqindes/Bureau/internship1/data_with_lympho"
lymphocytes_data = open(lymphocytes_path, "r")
all_lymphocytes_data = list(csv.reader(lymphocytes_data, delimiter='\t'))
sample_id = []
type_tissue = []
l = []
for row in all_lymphocytes_data:
sample_id.append(row[1])
type_tissue.append(row[3])
reg = r'^([\w]+-[\w]+)'
dicto_for_id_tissue = dict(zip(sample_id, type_tissue))
new_dict_id_tissue = {}
for ind_id, tis in dicto_for_id_tissue.items():
#short_ID = ind_id[:12]
#print(ind_id)
short_ID = "-".join(ind_id.split("-",2)[:2])
#print(short_ID)
if short_ID not in new_dict_id_tissue:
new_dict_id_tissue[short_ID]= [tis]
else:
new_dict_id_tissue[short_ID].append(tis)
#print(new_dict_id_tissue)
#for line in all_lymphocytes_data:
# print(line)
important_patient = ['lymphocytes', 'Skin']
important_patient1 = ' lymphocytes'
important_patient2 = ' Not Sun'
important_patient3 = 'Skin'
#print(new_dict)
potential_patient = []
for new, new_ in new_dict_id_tissue.items():
#print(new_)
if important_patient2 in str(new_) and important_patient1 in str(new_): #or important_patient3 in str(new_):
#print(new, new_)
potential_patient.append(new)
#if any(x in important_patient for x in str(new_)) == False:
#print(new, new_)
#else:
#pass
#print(new)
#print(potential_patient)
#print(len(potential_patient))
for ligne in all_lymphocytes_data:
for field in ligne:
for potential in potential_patient:
if potential in field:
print(ligne[0], '\t', ligne[1], '\t', ligne[2], '\t', ligne[3], '\t' ,ligne[4], '\t' ,ligne[5], '\t' ,ligne[6], '\t' ,ligne[7], '\t' , ligne[8], '\t', ligne[9])
'''
class Patient:
def __init__(self, row, header):
self.__dict__ = dict(zip(header,row))
def __str__(self):
return str(self.__dict__)
lymphocytes_path = "/home/hermesparaqindes/Bureau/dbGaP-13871/files/phs000424.v7.pht002743.v7.p2.c1.GTEx_Sample_Attributes.GRU.txt/data_with_lympho"
lymphocytes_data = open(lymphocytes_path, "r")
all_lymphocytes_data = list(csv.reader(lymphocytes_data, delimiter='\t'))
print(all_lymphocytes_data[0])
patient_instance = [Patient(i, all_lymphocytes_data[0]) for i in all_lymphocytes_data[1:]]
for patient in patient_instance:
#print(patient.SUBJID)
if 'lymphocytes' in patient.SMTSD:
print(patient.SUBJID, patient.SMTS)
'''
|
[
"hermes.paraqindes@etu.univ-lyon1.fr"
] |
hermes.paraqindes@etu.univ-lyon1.fr
|
ebb96eec080091e005df22076ceb5a03516b5897
|
0858b595708746fc18a3aa12d636748eb39c4c62
|
/mbaidu.py
|
397a5d662a9d7fe8104db95f0f1aedca59b9909b
|
[] |
no_license
|
zanjs/selenium-demo-python
|
cf04abd858d2da55b619c435f0d6e0ffc3780e1c
|
7fa7b95e435befac0b1f68333c5fe841f858211f
|
refs/heads/master
| 2020-09-19T18:17:52.009640
| 2017-06-20T07:16:08
| 2017-06-20T07:16:08
| 94,495,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
# from time import sleep
import time
import os.path
dir = os.path.dirname(os.path.abspath('.'))
chrome_driver_path = dir + '/tools/chromedriver.exe'
# driver = webdriver.Chrome() # 打开火狐浏览器
driver = webdriver.Chrome()
driver.get('http://www.baidu.com') # 打开百度界面
driver.find_element_by_id('kw').send_keys('zanjs') # 在搜索框内输入想要搜索内容
time.sleep(2) # 浏览器加载需要时间
driver.find_element_by_id('su').click() # 搜索完成
# mobileEmulation = {'deviceName': 'Apple iPhone 4'}
# options = webdriver.ChromeOptions()
# options.add_experimental_option('mobileEmulation', mobileEmulation)
#
# driver = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=options)
#
# driver.get('http://m.baidu.com')
#
# sleep(3)
driver.close()
|
[
"root@zanjs.com"
] |
root@zanjs.com
|
18cea130a676303c0f585f6fa2f7903698039649
|
088d8ad3f707b4c5f1d0d0c1603d7284314992c9
|
/day-2/python-shebang.py
|
6e7e40256072f31e8daf3dd1101a1790063c77cd
|
[] |
no_license
|
acorg/2018-cambridge-python-course
|
a9c608969325a2496c35aa13a4c7682fd6130b74
|
3ff24d7d0380fcc7d6d5ced41b02d67d54bdb807
|
refs/heads/master
| 2020-03-14T08:19:38.537855
| 2018-05-21T17:23:49
| 2018-05-21T17:23:49
| 131,522,604
| 1
| 4
| null | 2018-05-08T10:50:11
| 2018-04-29T19:17:56
|
Python
|
UTF-8
|
Python
| false
| false
| 39
|
py
|
#!/usr/bin/env python3
print('hello')
|
[
"terry@jon.es"
] |
terry@jon.es
|
8d337c3374fa5e20ce50e0bd239902655ca513dc
|
e262e64415335060868e9f7f73ab8701e3be2f7b
|
/.history/demo1_20201107182822.py
|
1eb9150ebe9f80c0b8ddac8b3ba09dd5744c1cdf
|
[] |
no_license
|
Allison001/developer_test
|
6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63
|
b8e04b4b248b0c10a35e93128a5323165990052c
|
refs/heads/master
| 2023-06-18T08:46:40.202383
| 2021-07-23T03:31:54
| 2021-07-23T03:31:54
| 322,807,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
# import sys
# import yaml
# from demo import search
# from demo import a
# from demo import search1
# search("A")
# print(a)
# search1(12)
# print(dir())
# print(sys.path)
# name = "zhang"
# age = 12
# print("my name is:{0},my age is:{1}".format(name,age))
# list1 = [1,3,2]
# dic1 = {"a":1,"b":2}
# print("List is :{},Dic is :{}".format(list1,dic1))
# print("list id :%s,Dic is :%s" %(list1,dic1))
# list = ["happy",'nause','doctor']
# print("my job is:%s,%s,%s" %(list[0],list[1],list[2]))
# print("my job is:{},{},{}".format(*list))
# dic1 = {"name":"tom","age":12}
# print("my name is:{name},my age is: {age}".format(**dic1))
# print("my name is:%s,my age is:%d" %(dic1["name"],dic1["age"]))
name = "allison"
age = 23
list1 = [1,2,3,]
dic1 = {"name":1,"a":"b"}
print(f"my name is {name},my age is {age},my list is:{list1[0]},my dic is:{dic1[0]}")
|
[
"zhangyingxbba@gmail.com"
] |
zhangyingxbba@gmail.com
|
030b67abd935004fc22bfa69136d8108dd56fb74
|
2b075d245f29814587f1ccd9f135121bf64393ee
|
/tests/test_aiomongo.py
|
fb0c6686f6d1e020a5877eed06ffe7af718a7f93
|
[] |
no_license
|
judy2k/aiomongo
|
d1dad601bb1d6fdecc6fc8d22de69563027be1c9
|
a11b84ad3707bc2004b393fdb54defd199480178
|
refs/heads/master
| 2023-01-03T13:08:16.674184
| 2020-10-27T10:43:06
| 2020-10-27T10:43:06
| 307,435,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 70
|
py
|
import aiomongo
def test_main():
assert aiomongo.main() is None
|
[
"judy@judy.co.uk"
] |
judy@judy.co.uk
|
d28ea03afe7e024f5d9ba019ca199290f18fb353
|
e60af01af329ef4f3882a68dcfb10577ebe230d6
|
/serial/serialTest.py
|
619118eeb74fdecb4df32c78d97a5ae8a397fadc
|
[] |
no_license
|
theunkn0wn1/Prometheus
|
bb26693e25c64c4042158b68c07ea9a7dc199248
|
fe611c1a7003c7c59f345d3adbb7e27508c2dcaf
|
refs/heads/master
| 2021-01-19T00:05:26.873641
| 2017-04-04T22:59:35
| 2017-04-04T22:59:35
| 87,143,683
| 0
| 0
| null | 2017-04-04T23:00:26
| 2017-04-04T03:19:10
| null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
#####
# Author: TheUnkn0wn1
# Function: Establish Serial comms between Pi and RoboClaw
###
import serial #Common pySerial library
import RPi.GPIO as gpio #renaming because I hate caps...
port = "/dev/ttyAMA0"
tx = 8 #Red wire
rx = 10 #purple
try:
ser = serial.Serial(port,38400,timeout=5)
except Exception as error:
print("An error occured executing the serial connection attempt")
print(type(error))
print(error)
|
[
"thhunkn0wnd@gmail.com"
] |
thhunkn0wnd@gmail.com
|
77f7744df99a9870ef74e9699379b0c709b4c335
|
d06ff0f1afee67e993739c916fe2ffe0aeba4fde
|
/helpers.py
|
876b3e8467ac9be62de5bb7307270a6691d4b2b4
|
[] |
no_license
|
mjurkus/ai_bootcamp_capstone
|
ba23fa56d63e054cdb5d928073a37f3a84103f7c
|
dfb319e31f460fc86722dfd7cb986ad4c09afe1a
|
refs/heads/master
| 2022-07-09T04:25:16.154882
| 2020-11-14T09:10:30
| 2020-11-14T09:10:30
| 243,940,793
| 0
| 0
| null | 2022-06-22T01:20:21
| 2020-02-29T09:33:14
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,672
|
py
|
import cv2
import tensorflow as tf
from utils import *
IMAGE_FEATURE_MAP = {
"image/encoded": tf.io.FixedLenFeature([], tf.string),
"image/object/bbox/xmin": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/ymin": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/xmax": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/ymax": tf.io.VarLenFeature(tf.float32),
"image/object/class/text": tf.io.VarLenFeature(tf.string),
}
def parse_record(record, class_table, size):
x = tf.io.parse_single_example(record, IMAGE_FEATURE_MAP)
x_train = tf.image.decode_jpeg(x["image/encoded"], channels=3)
x_train = tf.image.resize(x_train, (size, size))
class_text = tf.sparse.to_dense(x["image/object/class/text"], default_value="")
labels = tf.cast(class_table.lookup(class_text), tf.float32)
y_train = tf.stack(
[
tf.sparse.to_dense(x["image/object/bbox/xmin"]),
tf.sparse.to_dense(x["image/object/bbox/ymin"]),
tf.sparse.to_dense(x["image/object/bbox/xmax"]),
tf.sparse.to_dense(x["image/object/bbox/ymax"]),
labels,
],
axis=1,
)
max_boxes = 10 # change this
paddings = [[0, max_boxes - tf.shape(y_train)[0]], [0, 0]]
y_train = tf.pad(y_train, paddings)
return x_train, y_train
def load_dataset(file_pattern, class_file, size):
LN = -1
class_table = tf.lookup.StaticHashTable(
tf.lookup.TextFileInitializer(
class_file, tf.string, 0, tf.int64, LN, delimiter="\n"
),
-1,
)
files = tf.data.Dataset.list_files(file_pattern)
dataset = files.flat_map(tf.data.TFRecordDataset)
return dataset.map(lambda record: parse_record(record, class_table, size))
def draw_bbx(img, outputs, class_names):
boxes, objectness, classes, nums = outputs
boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0]
wh = np.flip(img.shape[0:2])
for i in range(nums):
x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
img = cv2.putText(
img,
"{} {:.4f}".format(class_names[int(classes[i])], objectness[i]),
x1y1,
cv2.FONT_HERSHEY_COMPLEX_SMALL,
1,
(0, 0, 255),
2,
)
return img
def yolo_boxes(pred, anchors, n_classes):
# pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
grid_size = tf.shape(pred)[1]
box_xy, box_wh, objectness, class_probs = tf.split(
pred, (2, 2, 1, n_classes), axis=-1
)
box_xy = tf.sigmoid(box_xy)
objectness = tf.sigmoid(objectness)
class_probs = tf.sigmoid(class_probs)
pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss
# !!! grid[x][y] == (y, x)
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(grid_size, tf.float32)
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2
box_x2y2 = box_xy + box_wh / 2
bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs):
b, c, t = [], [], []
for o in outputs:
b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
bbox = tf.concat(b, axis=1)
confidence = tf.concat(c, axis=1)
class_probs = tf.concat(t, axis=1)
scores = confidence * class_probs
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
scores=tf.reshape(scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
max_output_size_per_class=5,
max_total_size=5,
iou_threshold=0.5,
score_threshold=0.4,
)
return boxes, scores, classes, valid_detections
def broadcast_iou(box_1, box_2):
# box_1: (..., (x1, y1, x2, y2))
# box_2: (N, (x1, y1, x2, y2))
# broadcast boxes
box_1 = tf.expand_dims(box_1, -2)
box_2 = tf.expand_dims(box_2, 0)
# new_shape: (..., N, (x1, y1, x2, y2))
new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
box_1 = tf.broadcast_to(box_1, new_shape)
box_2 = tf.broadcast_to(box_2, new_shape)
int_w = tf.maximum(
tf.minimum(box_1[..., 2], box_2[..., 2])
- tf.maximum(box_1[..., 0], box_2[..., 0]),
0,
)
int_h = tf.maximum(
tf.minimum(box_1[..., 3], box_2[..., 3])
- tf.maximum(box_1[..., 1], box_2[..., 1]),
0,
)
int_area = int_w * int_h
box_1_area = (box_1[..., 2] - box_1[..., 0]) * (box_1[..., 3] - box_1[..., 1])
box_2_area = (box_2[..., 2] - box_2[..., 0]) * (box_2[..., 3] - box_2[..., 1])
return int_area / (box_1_area + box_2_area - int_area)
class BatchNormalization(tf.keras.layers.BatchNormalization):
def call(self, x, training=False):
if training is None:
training = tf.constant(False)
training = tf.logical_and(training, self.trainable)
return super().call(x, training)
def freeze_all(model, frozen=True):
model.trainable = not frozen
if isinstance(model, tf.keras.Model):
for l in model.layers:
freeze_all(l, frozen)
|
[
"martynas.jurkus@gmail.com"
] |
martynas.jurkus@gmail.com
|
13a94b2815f193ac50236560a2690b7c5133de3d
|
ae3d0e3c2fb614d96f6c787583c6e2e4cb654ad4
|
/leetcode/99. 恢复二叉搜索树.py
|
a263060c0ebf48b02614b7a0a10d8e9761e70e47
|
[] |
no_license
|
Cjz-Y/shuati
|
877c3f162ff75f764aa514076caccad1b6b43638
|
9ab35dbffed7865e41b437b026f2268d133357be
|
refs/heads/master
| 2023-02-02T10:34:05.705945
| 2020-12-14T01:41:39
| 2020-12-14T01:41:39
| 276,884,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# from leetcode.TreeNode import TreeNode
class Solution:
def recoverTree(self, root: TreeNode) -> None:
"""
Do not return anything, modify root in-place instead.
"""
last, now = None, None
ans = []
cur = root
while cur:
# 当前节点的做儿子为空
if not cur.left:
last = now
now = cur
if last and last.val > now.val:
ans.append(last)
ans.append(now)
cur = cur.right
# 当前节点的左儿子不为空
else:
# 搜索当前节点的前驱节点
precursor = cur.left
while precursor.right and precursor.right != cur:
precursor = precursor.right
# 如果前驱节点的右孩子为空,就把右孩子指向当前节点
if not precursor.right:
precursor.right = cur
cur = cur.left
# 如果前驱节点的右孩子 == 当前节点,那么将他右孩子设为空,输出当前节点,把当前节点更新为右孩子
elif precursor.right == cur:
precursor.right = None
last = now
now = cur
if last and last.val > now.val:
ans.append(last)
ans.append(now)
cur = cur.right
# print(ans)
if len(ans) == 2:
ans[0].val, ans[1].val = ans[1].val, ans[0].val
else:
ans[0].val, ans[-1].val = ans[-1].val, ans[0].val
|
[
"cjz.y@hotmail.com"
] |
cjz.y@hotmail.com
|
989bb9f120c94b48eaf6d979a920ff1b35c6bcfb
|
0a2aaa610797959b4401839835764023d05d259b
|
/tests/test_basic.py
|
64917ceacdda83ca21ebbc7b0b268ca701166228
|
[
"MIT"
] |
permissive
|
michaeltchapman/clianet
|
2f8cf45690abca51062cb8281610e49252c6d16f
|
266c7ccd7bbc40e303f6358a1e40a5ef84cd29d5
|
refs/heads/master
| 2021-09-10T08:39:44.460976
| 2018-03-23T03:14:37
| 2018-03-23T03:14:37
| 119,523,193
| 0
| 1
|
MIT
| 2018-03-09T20:51:57
| 2018-01-30T10:53:24
|
Python
|
UTF-8
|
Python
| false
| false
| 260
|
py
|
# -*- coding: utf-8 -*-
from .context import clianet
import unittest
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_absolute_truth_and_meaning(self):
assert True
if __name__ == '__main__':
unittest.main()
|
[
"woppin@gmail.com"
] |
woppin@gmail.com
|
13eee23cc5168b5158fe3437ffcbe35d17fa24f2
|
9431070f08eb587e00225b98cf27cf2f1494e519
|
/Think-Python/capitolo_4/poligono.py
|
04b3ca4d393be03bc4e26e9b85b11702f50c8d59
|
[] |
no_license
|
emilianot04/Exercise_Python
|
94908fd2612da077717de8907a4b9a39b9de9480
|
abc29498f4c7efe1c4e42ad24e3850ad2f330615
|
refs/heads/main
| 2023-06-24T12:37:02.167480
| 2021-07-21T16:29:07
| 2021-07-21T16:29:07
| 377,550,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
import turtle
bob = turtle.Turtle()
for i in range(4):
bob.fd(100)
bob.lt(90)
|
[
"Emiliano@iMac-Emiliano.fritz.box"
] |
Emiliano@iMac-Emiliano.fritz.box
|
328ce19dbfc12478a7b336671e2d49ab6767a337
|
5748b92c451efe67fabc9e588dcd5dcedbe29c36
|
/trunk/eggs/Products.NaayaGlossary/Products/NaayaGlossary/NyGlossaryElement.py
|
b00389f23d9ff48816ce16167e320ef03710be3a
|
[] |
no_license
|
Hamzahashmi4444/Salman
|
146d30303ff738f9c78525466b039e7a6a7bd1bb
|
611ac05be7771a46b26ff243359cfcafce738cb1
|
refs/heads/master
| 2023-02-16T14:05:35.070709
| 2021-01-18T06:56:23
| 2021-01-18T06:56:23
| 330,587,900
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,170
|
py
|
from Globals import InitializeClass
from AccessControl import ClassSecurityInfo
from OFS.SimpleItem import SimpleItem
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
from AccessControl.Permissions import view_management_screens
from zope import interface
from zope import event
from Products.NaayaCore.FormsTool.NaayaTemplate import NaayaPageTemplateFile
# product imports
from constants import *
from utils import utils, catalog_utils
from interfaces import INyGlossaryElement
from events import ItemTranslationChanged
# constants
LABEL_OBJECT = 'Glossary element'
class ElementBasic:
""" define the basic properties for NyGlossaryElement """
def __init__(self, title, source, contributor):
""" constructor """
self.title = title
self.source = source
self.contributor = contributor
manage_addGlossaryElement_html = NaayaPageTemplateFile(
'zpt/NaayaGlossaryElement/add', globals(), 'glossary_element_add')
def manage_addGlossaryElement(self, id='', title='', source='', subjects=[],
contributor='', approved=1, REQUEST=None):
""" adds a new NyGlossaryElement object """
ob = NyGlossaryElement(id, title, source, subjects, contributor, approved)
self._setObject(id, ob)
element_obj = self._getOb(id)
element_obj.subjects = self.get_subject_by_codes(subjects)
element_obj.load_translations_list()
# imported here to avoid cross-import errors
from NyGlossary import set_default_translation
set_default_translation(element_obj)
if REQUEST:
return self.manage_main(self, REQUEST, update_menu=1)
class NyGlossaryElement(SimpleItem, ElementBasic, utils, catalog_utils):
""" NyGlossaryElement """
interface.implements(INyGlossaryElement)
meta_type = NAAYAGLOSSARY_ELEMENT_METATYPE
meta_label = LABEL_OBJECT
product_name = NAAYAGLOSSARY_PRODUCT_NAME
icon = 'misc_/NaayaGlossary/element.gif'
manage_options = (
{'label': 'Translations', 'action': 'translations_html'},
{'label': 'Properties', 'action': 'properties_html'},
{'label': "View", 'action': 'index_html'},
{'label': 'Undo', 'action': 'manage_UndoForm'},)
security = ClassSecurityInfo()
def __init__(self, id, title, source, subjects, contributor, approved):
""" constructor """
self.id = id
self.subjects = subjects
self.approved = approved
ElementBasic.__dict__['__init__'](self, title, source, contributor)
def is_published(self):
return self.approved
#####################
# BASIC PROPERTIES #
#####################
security.declareProtected(PERMISSION_MANAGE_NAAYAGLOSSARY,
'manageBasicProperties')
def manageBasicProperties(self, title='', source='', subjects=[],
contributor='', approved=0,
further_references='', REQUEST=None):
""" manage basic properties for NyGlossaryElement """
self.title = title
self.source = source
self.subjects = self.get_subject_by_codes(subjects)
self.contributor = contributor
self.approved = approved
self.further_references = further_references
self._p_changed = 1
self.cu_recatalog_object(self)
if REQUEST:
return REQUEST.RESPONSE.redirect('properties_html?save=ok')
security.declareProtected(PERMISSION_MANAGE_NAAYAGLOSSARY,
'approveElement')
def approveElement(self, REQUEST=None):
""" used for approval link in basket of approvals"""
self.approved = 1
if REQUEST:
return REQUEST.RESPONSE.redirect('index_approvals_html')
#########################
# THEME FUNCTIONS #
#########################
def code_in_subjects(self, code):
""" check if code is in the list """
for subj_info in self.subjects:
if subj_info['code'] == code:
return 1
return 0
def get_subjects(self):
""" get the languages """
self.utSortListOfDictionariesByKey(self.subjects, 'code')
return self.subjects
security.declareProtected(PERMISSION_MANAGE_NAAYAGLOSSARY, 'set_subjects')
def set_subjects(self, code, name):
""" set the languages """
append = self.subjects.append
append({'code': code, 'name': name})
security.declareProtected(PERMISSION_MANAGE_NAAYAGLOSSARY, 'del_subject')
def del_subject(self, code):
""" remove a language from list """
for subj_info in self.subjects:
if subj_info['code'] == code:
self.subjects.remove(subj_info)
#################################
# NAME TRANSLATIONS FUNCTIONS #
#################################
def get_translation_by_language(self, language):
""" get translation by language """
try:
return getattr(self.aq_base, language)
except:
return ''
def get_translation_by_language_for_js(self, language):
""" get translation by language for the javascript code"""
try:
translation = self.get_translation_by_language(language)
if not translation:
translation = self.title_or_id()
except AttributeError:
translation = self.title_or_id()
return translation.replace('_', ' ')
def check_if_no_translations(self):
""" check if translation """
for lang in self.get_english_names():
if self.get_translation_by_language(lang) != '':
return 1
return 0
security.declareProtected(PERMISSION_MANAGE_NAAYAGLOSSARY,
'set_translations_list')
def set_translations_list(self, language, translation):
""" set the languages """
real_self = self.aq_base
if getattr(real_self, language, u"") == translation:
# no need to do anything, so let's avoid generating a transaction
return
if translation == "":
if hasattr(real_self, language):
delattr(real_self, language)
else:
setattr(real_self, language, translation)
event.notify(ItemTranslationChanged(self, language, translation))
def load_translations_list(self):
""" load languages """
for lang in self.get_english_names():
self.set_translations_list(lang, '')
security.declareProtected(PERMISSION_MANAGE_NAAYAGLOSSARY,
'manageNameTranslations')
def manageNameTranslations(self, lang_code='', translation='',
REQUEST=None):
""" save translation for a language """
self.set_translations_list(lang_code, translation)
if REQUEST:
return REQUEST.RESPONSE.redirect('translations_html?tab=0')
#######################################
# DEFINITION TRANSLATIONS FUNCTIONS #
#######################################
def get_def_trans_by_language(self, language):
""" get translation by language """
return getattr(self.aq_base, self.definition_lang(language), '')
def check_if_no_def_trans(self):
""" check if translation """
for lang in self.get_english_names():
if self.get_def_trans_by_language(lang) != '':
return 1
return 0
    security.declareProtected(PERMISSION_MANAGE_NAAYAGLOSSARY,
                              'set_def_trans_list')
    def set_def_trans_list(self, language, translation):
        """Store a definition translation; delegates to the name-translation
        machinery using the derived definition attribute name."""
        self.set_translations_list(self.definition_lang(language), translation)
def load_def_trans_list(self):
""" load languages """
for lang in self.get_english_names():
self.set_translations_list(self.definition_lang(lang), '')
security.declareProtected(PERMISSION_MANAGE_NAAYAGLOSSARY,
'manageDefinitionTranslations')
def manageDefinitionTranslations(self, lang_code='', translation='',
REQUEST=None):
""" save translation for a language """
self.set_def_trans_list(lang_code, translation)
if REQUEST:
return REQUEST.RESPONSE.redirect('translations_html?tab=1')
#####################
# MANAGEMENT TABS #
#####################
security.declareProtected(view_management_screens, 'translations_html')
translations_html = PageTemplateFile(
"zpt/NaayaGlossaryElement/translations", globals())
security.declareProtected(view_management_screens, 'name_trans_html')
name_trans_html = PageTemplateFile("zpt/NaayaGlossaryElement/name_trans",
globals())
security.declareProtected(view_management_screens, 'definition_trans_html')
definition_trans_html = PageTemplateFile(
"zpt/NaayaGlossaryElement/definition_trans", globals())
security.declareProtected(view_management_screens, 'properties_html')
properties_html = NaayaPageTemplateFile(
'zpt/NaayaGlossaryElement/properties', globals(),
'glossary_element_properties')
view_elements_html = PageTemplateFile(
"zpt/NaayaGlossaryElement/view_elements", globals())
index_html = NaayaPageTemplateFile("zpt/NaayaGlossaryElement/index",
globals(), 'glossary_element_index')
    #################
    #   SITE MAP    #
    #################
    security.declarePublic('getGlossaryObTree')
    def getGlossaryObTree(self):
        """ """
        # site-map hook: glossary elements expose no sub-tree
        return None
security.declareProtected(view_management_screens, 'manage_tabs')
def manage_tabs(self):
# we override manage_tabs to insert warning about synchronized glossary
if self.sync_remote_url:
extra_html = self.sync_info_text(zmi=True)
else:
extra_html = ''
return super(NyGlossaryElement, self).manage_tabs() + extra_html
InitializeClass(NyGlossaryElement)
|
[
"hamza@gmail.com"
] |
hamza@gmail.com
|
dbed9b3f873e4976e09053e35754052de605d39f
|
4ac801ac4a2af40c7b0782418398c2635a75fc08
|
/Webapp/test.py
|
ba6438b74b0d766c882d35352b2b9d297039bbf4
|
[] |
no_license
|
underhood31/Reddit-Flair-Detector
|
b6dcdd6d15d07791ab929e2f2f76fb6195ee69d5
|
e586c88346430c4725ae8a2fd221693247aa6445
|
refs/heads/master
| 2022-07-19T10:20:42.814833
| 2020-04-08T09:14:04
| 2020-04-08T09:14:04
| 197,002,253
| 0
| 0
| null | 2022-07-06T20:34:43
| 2019-07-15T13:17:07
|
HTML
|
UTF-8
|
Python
| false
| false
| 5,410
|
py
|
import pandas as pd
from collections import Counter
import string
import praw
import pickle
# flairToInt = {
# "Other": -1,
# "AMA":0,
# "AMA Concluded":0,
# "Casual AMA":0,
# "AskIndia":1,
# "Business/Finance":2,
# "Demonetization":3,
# "Entertainment":4,
# "Food":5,
# "Lifehacks":6,
# "Misleading":7,
# "Non-Political":8,
# "Photography":9,
# "Policy":10,
# "Policy & Economy":10,
# "Policy/Economy":10,
# "Politics":11,
# "Politics -- Source in comments": 11,
# "Politics [OLD]":11,
# "Scheduled":12,
# "Science & Technology":13,
# "Science/Technology":13,
# "Sports":14,
# "[R]eddiquette":15,
# "r/all":16,
# "/r/all":16
# }
def initWith(num,times):
    """Return a new list containing *num* repeated *times* times.

    Uses list repetition instead of the old append loop; semantics are
    identical (the same object is referenced *times* times).
    """
    return [num] * times
def CategoryVsTime():
    """Tally posts per hour of day (0-23) for every flair category.

    Reads Data/all.csv (tab separated, integer columns 'Hour' and 'Flair'
    where Flair uses the codes -1=Others .. 16=r/all) and returns a tuple
    of 18 lists -- one per category, in the fixed order below -- each
    shaped [category_name, count_hour0, ..., count_hour23].
    """
    # (flair code, display name) in the exact order of the returned tuple;
    # this table replaces 18 hand-initialised lists and an 18-branch elif
    # chain that were identical up to the name/code.
    categories = [
        (-1, "Others"), (0, "AMA"), (1, "AskIndia"), (2, "Business/Finance"),
        (3, "Demonetization"), (4, "Entertainment"), (5, "Food"),
        (6, "LifeHacks"), (7, "Misleading"), (8, "Non-Political"),
        (9, "Photography"), (10, "Policy & economy"), (11, "Politics"),
        (12, "Scheduled"), (13, "Science & Technology"), (14, "Sports"),
        (15, "[R]eddiquette"), (16, "r/all"),
    ]
    # name in slot 0 followed by 24 hourly counters
    counts = {code: [name] + [0] * 24 for code, name in categories}

    allDataFrame = pd.read_csv("Data/all.csv", delimiter="\t")
    hours = list(allDataFrame['Hour'])
    flairs = list(allDataFrame['Flair'])
    for hour, flair in zip(hours, flairs):
        if flair in counts:  # unknown codes are ignored, as before
            counts[flair][int(hour) + 1] += 1  # +1 skips the name slot
    return tuple(counts[code] for code, _ in categories)
def getDataByHeader(header):
    """Sum the numeric column *header* of Data/all.csv per flair code.

    Returns a list of 18 totals indexed by flair code 0..17; rows whose
    Flair is -1 ("Others") are skipped, matching the original behaviour.
    """
    frame = pd.read_csv("Data/all.csv", delimiter="\t")
    totals = [0] * 18
    # zip the two columns instead of indexing by position with
    # range(len(...)), and inline the trivial initWith() helper
    for flair, value in zip(frame['Flair'], frame[header]):
        if flair != -1:
            totals[flair] += value
    return totals
def getMost(flair):
    """Return the 4 most common words (length > 3, minus a few stop words)
    across the Text of every post carrying the given flair code."""
    frame = pd.read_csv("Data/all.csv", delimiter="\t")
    flair_column = list(frame['Flair'])
    text_column = list(frame['Text'])
    combined = ''
    for row_flair, text in zip(flair_column, text_column):
        if row_flair == flair:
            combined += " " + text
    # crude stop-word removal by substring replacement (as before)
    for stop in ["with", "that", "there", "their"]:
        combined = combined.replace(stop, " ")
    # keep only words longer than 3 characters
    words = [word for word in combined.split() if len(word) > 3]
    return Counter(words).most_common(4)
def useLink(link):
    """Predict the flair code of a reddit post given its URL.

    Fetches the submission title via PRAW, normalises it (lower-case,
    punctuation replaced by spaces), then classifies it with the pickled
    vectorizer/model pair stored under ./model/.
    """
    # NOTE(security): credentials are hard-coded; move them to config/env.
    cred = praw.Reddit(client_id='HkBGGe_k4LXW9w',
                       client_secret='yZQZeViIt5FDuLZSC3nxnkJFVto',
                       user_agent='Flair_Detector')
    p = praw.models.Submission(cred, url=link)
    title = p.title.lower()
    # BUG FIX: str.replace returns a new string; the old code discarded
    # the result, so punctuation was never actually removed.
    for ch in string.punctuation:
        title = title.replace(ch, ' ')
    # close the model files deterministically (the old open() calls leaked)
    with open('./model/title_model.mod', 'rb') as fh:
        model = pickle.load(fh)
    with open('./model/title_vectorizer.vec', 'rb') as fh:
        cv = pickle.load(fh)
    return int(model.predict(cv.transform([title]))[0])
# Script driver: build the hourly tallies per flair, sum comment counts
# per flair, classify one sample post by URL, then print the 4 most
# common words for every flair code (-1..16).
others,ama,askIndia,business,demonetization,entertainment,food,lifehacks,misleading,nonp,photo,policy,politics,scheduled,scTech,sports,red,rAll = CategoryVsTime()
likes = getDataByHeader("NumComments")
print(useLink('https://www.reddit.com/r/india/comments/cfw2bn/my_grandfather_second_from_left_with_pandit/'))
for i in range(-1,17):
    s = getMost(i)
    print(s)
|
[
"manavjeet18295@iiitd.ac.in"
] |
manavjeet18295@iiitd.ac.in
|
dfded09e2368f3220cf8d36afc860543bd3170c8
|
41b72533c0c053f5b4bd18523941a1b992b3e083
|
/ChengGuan/test_case/LiuCheng_currency/vectorValue.py
|
74be43a74fb05a08bc528cd52ded4b9f16071bd3
|
[] |
no_license
|
lqrby/dcms
|
5a5c457528519d97d1111b9737ba57c3204f0771
|
4515a1a82ffc9c48fd4aeab7e37ab122773a971e
|
refs/heads/master
| 2020-03-21T05:05:56.423374
| 2019-04-16T03:03:52
| 2019-04-16T03:03:52
| 138,144,058
| 0
| 0
| null | 2018-08-11T04:46:49
| 2018-06-21T08:45:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
# import math
# class VectorCompare():
# # 计算矢量大小
# # 计算平方和
# def magnitude(self, concordance):
# total = 0
# # concordance.iteritems:报错'dict' object has no attribute 'iteritems'
# # concordance.items()
# for word, count in concordance.items():
# total += count ** 2
# return math.sqrt(total)
# # 计算矢量之间的 cos 值
# def relation(self, concordance1, concordance2):
# topvalue = 0
# # concordance1.iteritems:报错'dict' object has no attribute 'iteritems'
# # concordance1.items()
# for word, count in concordance1.items():
# # if concordance2.has_key(word):报错'dict' object has no attribute 'has_key'
# # 改成word in concordance2
# if word in concordance2:
# # 计算相乘的和
# topvalue += count * concordance2[word]
# return topvalue / (self.magnitude(concordance1) * self.magnitude(concordance2))
|
[
"748862180@qq.com"
] |
748862180@qq.com
|
f689127b2494ab7132926b803ec3f5acb2121c84
|
6ae1f55e5af0ee2346ade59fe428569eb922993f
|
/Week_03/77组合.py
|
20ad6176c99d3c908ed8549ab118acdd31ed9c93
|
[] |
no_license
|
Didcymakeaprogresstoday/algorithm009-class01
|
a8feab11688dd29d28f1b6164a091c6e881c24e3
|
6f6453f973bec8af722b773d79efe0117f80671f
|
refs/heads/master
| 2022-11-08T08:59:52.786609
| 2020-06-28T09:36:25
| 2020-06-28T09:36:25
| 264,979,086
| 0
| 0
| null | 2020-05-18T15:19:59
| 2020-05-18T15:19:58
| null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
class Solution:
    """LeetCode 77 (Combinations): every k-element combination of 1..n,
    generated with backtracking."""

    def combine(self, n, k):
        """Return all combinations of k numbers chosen from 1..n."""
        if n <= 0 or k <= 0 or k > n:
            return []
        combos = []
        self.dfs(1, k, n, [], combos)
        return combos

    def dfs(self, start, k, n, pre, res):
        """Extend the partial combination *pre* with candidates >= *start*,
        appending each completed length-k combination to *res*."""
        if len(pre) == k:
            # record a copy; pre itself keeps mutating during backtracking
            res.append(pre[:])
            return
        for candidate in range(start, n + 1):
            pre.append(candidate)
            self.dfs(candidate + 1, k, n, pre, res)
            # backtrack: undo the choice before trying the next candidate
            pre.pop()
|
[
"yu_cai_hitsz@163.com"
] |
yu_cai_hitsz@163.com
|
cffe87584de4772f54273295d61b6432b209b00f
|
a77a97ae665e23c72fb4bd8c56eba381c81a543c
|
/lib/roi_pooling_layer/roi_pooling_op.py
|
299af5e23fcfa24e2a320c1811bf4aca9b1117a1
|
[
"MIT"
] |
permissive
|
sravi-uwmadison/visual-tensor-decomposition
|
86a93f51b77f9c4de874486e2ee13eb82b0bb0f7
|
4e80467aec3da64cfcd1226e95c8dac2edb4d50f
|
refs/heads/master
| 2020-04-15T13:31:53.374492
| 2018-10-30T19:14:07
| 2018-10-30T19:14:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
import tensorflow as tf
import os.path as osp
import os

# Load the custom ROI-pooling TensorFlow op from a shared object compiled
# per CUDA toolkit version; pick the file matching the local toolkit.
op_file = 'roi_pooling_op_gpu_cuda8.so' # for CUDA 8
#op_file = 'roi_pooling_op_gpu.so' # CUDA 7.5
filename = osp.join(osp.dirname(__file__), op_file)
_roi_pooling_module = tf.load_op_library(filename)
# re-export the two generated ops under short, importable names
roi_pool = _roi_pooling_module.roi_pool
roi_pool_grad = _roi_pooling_module.roi_pool_grad
|
[
"tzrtzr000@gmail.com"
] |
tzrtzr000@gmail.com
|
e768930f72bbe5a033b1355bd2ce6830e6613309
|
c317ecb0f4a5c4d466fcc61ffd202b55c3adc1b5
|
/mglive_vv_rows_one_day.py
|
bb58a41c52567c8545922ad58483c216d23fa611
|
[] |
no_license
|
chenshaopeng104716/extract_datas
|
48001660e035997d18a2415d00bfe34bfec91cb5
|
cecff83c75885407f68b74d567b4bff6211e7d1d
|
refs/heads/master
| 2021-01-12T12:15:21.013895
| 2016-11-08T01:31:26
| 2016-11-08T01:31:26
| 72,390,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,106
|
py
|
# -*- coding:utf-8 -*-
"""
@this module extract data from aws->dm_pv_fact->mglive_hour_fact(year,month,day,hour,bid,uid,vid,liveid,did,type)
"""
import zipfile
import os
import re
import csv
import codecs
import MySQLdb
import psycopg2
import datetime
import pandas as pd
import numpy as np
from tqdm import tqdm
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
#### Fetch one day of raw view records from the mglive warehouse.
def mglive_vv_data_get(year,month,day):
    # Returns a DataFrame (hour, uuid, vid, liveid, did, type) for the
    # given zero-padded year/month/day strings; an empty DataFrame when
    # the SELECT itself fails.
    # NOTE(review): if psycopg2.connect raises, mglive_data is never bound
    # and the final return raises NameError -- confirm intended behaviour.
    try:
        conn = psycopg2.connect(database="dm_pv_fact", user="product_readonly", password="SDjTty7202d7Dldfui", host="54.222.196.128",port="2345")
        sql="select hour,uid as uuid,vid,liveid,did,type from mglive_hour_fact where year='%s' and month='%s' and day='%s';"%(year,month,day)
        print "start to get mglive_vv %s%s%s" %(year,month,day)
        try:
            mglive_data = pd.read_sql(sql,conn)
            print "get mglive_vv data %s%s%s success"%(year,month,day)
        except:
            mglive_data = pd.DataFrame()
            print "get mglive_vv data %s%s%s fails" % (year, month, day)
    except MySQLdb.Error,e:
        print "Mysql Error %d: %s" % (e.args[ 0 ], e.args[ 1 ])
    return mglive_data
### Create (or recreate) the per-day mglive_vv detail table.
def mglive_vv_daily_create(date):
    # Drops and recreates MyISAM table mglive_vv_<date> with a `date`
    # index; errors are printed and otherwise swallowed.
    try:
        conn=MySQLdb.connect(host='10.100.3.64',user='hefang',passwd='NYH#dfjirD872C$d&ss',db='live_user',port=3306,charset='utf8')
        cur=conn.cursor()
        cur.execute('drop table if exists mglive_vv_%s;' % date)
        cur.execute('create table mglive_vv_%s (date int(8),hour int(2),bid int(2),uuid varchar(50),vid varchar(11),liveid varchar(50),did varchar(50),type int(2))ENGINE=MyISAM;' % date)
        cur.execute('alter table mglive_vv_%s add index mglive_vv_index_%s (`date`)' % (date, date))
        conn.commit()
        # NOTE(review): ``cur.close`` lacks parentheses, so the cursor is
        # never actually closed (same pattern elsewhere in this module).
        cur.close
        conn.close()
    except MySQLdb.Error,e:
        print "Mysql Error %d: %s" % (e.args[ 0 ], e.args[ 1 ])
        pass
### Rebuild the mglive_vv MERGE table over all per-day tables.
def mglive_vv_create(date_str):
    # date_str is a comma-separated list of daily table names; the MERGE
    # table is dropped and recreated to union exactly those tables.
    try:
        conn=MySQLdb.connect(host='10.100.3.64',user='hefang',passwd='NYH#dfjirD872C$d&ss',port=3306,db='live_user',charset='utf8')
        cur=conn.cursor()
        cur.execute('drop table if exists mglive_vv;')
        cur.execute('create table mglive_vv (date int(8),hour int(2),bid int(2),uuid varchar(50),vid varchar(11),liveid varchar(50),did varchar(50),type int(2))ENGINE=MERGE;')
        cur.execute('alter table mglive_vv add index mglive_vv_index (`date`);')
        cur.execute('alter table mglive_vv union=(%s);' % date_str)
        cur.close
        conn.close()
        print 'mglive_vv is updated!'
    except:
        print 'mglive_vv update fail!'
        pass
### Bulk-insert one batch of rows into the per-day mglive_vv table.
def mglive_vv_daily_insert(conn,cur,date,mglive_vv_insert):
    # executemany the given 8-column rows into mglive_vv_<date>; returns
    # 1 on success, 0 on failure. Failures are appended to a per-day
    # error log under /root/hf/live_user/mglive_vv.
    error_path = '/root/hf/live_user/mglive_vv'
    if not os.path.exists(error_path):
        os.mkdir(error_path)
    insert_tag = 1 ### 1 means the insert succeeded
    try:
        sql = 'insert into mglive_vv_'+date+' values('+','.join(map(lambda o: "%s",range(0,8)))+')'
        cur.executemany(sql,mglive_vv_insert)
        conn.commit()
    except MySQLdb.Error,e:
        insert_tag = 0 ### 0 means the insert failed
        error_insertlog_path = '/root/hf/live_user/mglive_vv/mglive_vv_error_' + date + ".txt"  # insert-error log file
        f = open(error_insertlog_path, 'a')
        print "Mysql Error %d: %s" % (e.args[0], e.args[1])
        f.write("Mysql Error %d: %s,%s" % ( e.args[0], e.args[1],mglive_vv_insert) + "\n")
        f.close()
        pass
    return insert_tag
# Append one row-count verification line to the daily check file.
def write_checkinfo(check_date, orignal_rows, success_rows, percentage):
    # Logs date, source row count, inserted row count and the success
    # percentage, tab separated, one line per run.
    file_path = '/root/hf/live_user/mglive_vv'  # directory holding the check file
    if not os.path.exists(file_path):
        os.mkdir(file_path)
    file_name = '/root/hf/live_user/mglive_vv/mglive_vv_check.txt'  # check file name
    f = open(file_name, 'a')
    print "start write checkfile"
    f.write(str(check_date) + '\t\t' + str(orignal_rows) + '\t\t' + str(success_rows) + '\t\t' + str('%.5f%%' % percentage) + '\n')
    print "write checkfile success"
    f.close()
# Return the calendar date one day before the given date/datetime.
def day_get(d):
    previous = d - datetime.timedelta(days=1)
    return datetime.date(previous.year, previous.month, previous.day)
### Daily table names for the inclusive range start_date..end_date.
def datelist(start_date,end_date):
    # Builds 'mglive_vv_YYYYMMDD' for every day and joins them with commas
    # (the format MySQL's MERGE union clause expects).
    names = []
    current = start_date
    while True:
        names.append("mglive_vv_%04d%02d%02d"
                     % (current.year, current.month, current.day))
        if current == end_date:
            break
        current += datetime.timedelta(1)
    return ','.join(names)
### 'YYYY,MM,DD' strings for every day from start_date to end_date inclusive.
def datelist_new(start_date,end_date):
    dates = []
    current = start_date
    while True:
        dates.append("%04d,%02d,%02d"
                     % (current.year, current.month, current.day))
        if current == end_date:
            break
        current += datetime.timedelta(1)
    return dates
if __name__ == '__main__':
    # Nightly driver: pull yesterday's mglive view data, load it into a
    # fresh per-day MySQL table in 10000-row batches, append a row-count
    # check line, then rebuild the MERGE table spanning every daily table
    # since 2016-08-01.
    start_date = datetime.date(2016,8,1) ### first day covered by the merge table
    d = datetime.datetime.now()
    oneday = datetime.timedelta(days=1)
    day = d - oneday
    end_date = datetime.date(int(day.year), int(day.month), int(day.day))
    sql_day="%02d" %day.day
    sql_month="%02d" %day.month
    sql_year="%04d" %day.year
    date_list = datelist_new(start_date,end_date);
    mglive_vv_date_str = datelist(start_date,end_date) # comma list of daily table names
    bid=14
    date = end_date.strftime('%Y%m%d') ### date key for this load
    print date
    mglive_vv_data=mglive_vv_data_get(sql_year,sql_month,sql_day)
    length = len(mglive_vv_data) ### number of fetched rows
    print length
    try:
        if length>0:
            ## create the daily table
            mglive_vv_daily_create(date)
            print 'start insert %s daily mglive_vv into database' % date
            conn = MySQLdb.connect(host='10.100.3.64', user='hefang', passwd='NYH#dfjirD872C$d&ss', port=3306, db='live_user', charset='utf8')
            cur = conn.cursor()
            insert_success=0;## count of rows inserted successfully
            length_list = 10000 ### insert every 10000 rows
            length_split = (length - 1) / length_list + 1 ### number of 10000-row batches
            for j in tqdm(range(length_split)):
                data_list = list()
                if j < length_split - 1:### full batch of length_list rows
                    xrange_length = length_list
                elif j == length_split - 1:### last batch takes the remainder
                    xrange_length = length-length_list*j
                for k in xrange(xrange_length):
                    j_loc = j * length_list + k
                    mglive_vv_target = mglive_vv_data.loc[j_loc]
                    data_everyrow = list() # values inserted into the new table
                    try:
                        hour = mglive_vv_target[ 'hour' ] if mglive_vv_target[ 'hour' ] is not None else ''
                        uuid=mglive_vv_target[ 'uuid' ] if mglive_vv_target[ 'uuid' ] is not None else ''
                        vid = mglive_vv_target[ 'vid' ] if mglive_vv_target[ 'vid' ] is not None else ''
                        liveid= mglive_vv_target[ 'liveid' ] if mglive_vv_target[ 'liveid' ] is not None else ''
                        did = mglive_vv_target[ 'did' ] if mglive_vv_target[ 'did' ] is not None else ''
                        type = mglive_vv_target[ 'type' ] if mglive_vv_target[ 'type' ] is not None else ''
                        data_everyrow.extend((date,hour,bid,uuid,vid,liveid,did,type))
                        data_list.append(data_everyrow)
                    except MySQLdb.Error,e:
                        print "Mysql Error %d: %s" % (e.args[ 0 ], e.args[ 1 ])
                        # print mglive_vv_target
                ### insert one batch
                insert_tag = mglive_vv_daily_insert(conn, cur,date,data_list)
                if insert_tag == 1: ### batch inserted
                    insert_success += xrange_length # count the successfully inserted rows
                else:
                    pass
            cur.close
            conn.close()
            # write the verification info to the check file
            percentage = insert_success /float(length) * 100 #### success percentage
            write_checkinfo(date,length, insert_success, percentage)
            # rebuild the merge table
            mglive_vv_create(date_str=mglive_vv_date_str)
        else:
            print "get %s mglive_vv data failure" %date
    except MySQLdb.Error,e:
        print "Mysql Error %d: %s" % (e.args[ 0 ], e.args[ 1 ])
        print 'insert mglive_vv %s daily data fail!' %date
|
[
"18855535980@163.com"
] |
18855535980@163.com
|
5866058cc2889519ff1eff01b6b1caa8fb952e39
|
a8e2d8243618236d159485e468c2a7068dafa76b
|
/base/operation_excel.py
|
d483c5792c2f1d7d836a39757e860fb173bb20a8
|
[] |
no_license
|
samguoy/jiankong_server
|
e3943730ea388240aeb239012491e266d9128693
|
7ea4ad482e117104a93438b75ecd4976d85277d7
|
refs/heads/master
| 2020-05-21T03:54:15.215609
| 2019-05-10T01:54:07
| 2019-05-10T01:54:07
| 185,900,067
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
import xlrd
class OperationExcel():
    """Thin wrapper around the test_url.xlsx worksheet, exposing simple
    accessors for the url (column 1) and note (column 2) cells."""

    def __init__(self):
        # cache the worksheet once at construction time
        self.data = self.get_data()

    def get_data(self, sheet_id=0):
        """Open the workbook and return the worksheet at *sheet_id*."""
        workbook = xlrd.open_workbook('../base/test_url.xlsx')
        return workbook.sheet_by_index(sheet_id)

    def get_url_value(self, row):
        """Return the URL stored in column 1 of *row*."""
        return self.data.cell_value(row, 1)

    def get_url_explain(self, row):
        """Return the note stored in column 2 of *row*."""
        return self.data.cell_value(row, 2)

    def get_lines(self):
        """Return the number of rows in the sheet."""
        return self.data.nrows

    def get_url_list(self):
        """Return every URL from row 1 onward (row 0 is the header)."""
        return [self.get_url_value(row) for row in range(1, self.get_lines())]
|
[
"guoyanzero@sina.com"
] |
guoyanzero@sina.com
|
6c7b4030690a6833b1bb6100a593da7f85d0d025
|
049fa1f7419471a4f77187b1ce0c3b7c8bde2177
|
/Email.py
|
f3ac6152868f3d74021af21f265ba0c05ac87f2a
|
[] |
no_license
|
JounyWang/Python-Tools
|
0658b7d3ea360aff81cd7e45d3e19308c76a59eb
|
75c7ddf56eaea7f3b460271ed5afffd28006805a
|
refs/heads/master
| 2021-07-24T02:40:35.647388
| 2017-11-06T03:07:00
| 2017-11-06T03:07:00
| 104,840,732
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# @Author: jouny
# @Date: 2017-10-03 10:01:30
# @Last Modified by: jouny
# @Last Modified time: 2017-10-06 19:55:50
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from smtplib import SMTP_SSL
import sys
import time
import Config
def send_email(receivers,mail_content,mail_title,mail_attach):
    # Send a UTF-8 plain-text mail (optionally with one attachment) over
    # SSL SMTP using the account settings from the Config module.
    #   receivers    -- list of destination addresses
    #   mail_content -- plain-text body
    #   mail_title   -- subject line
    #   mail_attach  -- attachment path; falsy value skips the attachment
    try:
        smtp = SMTP_SSL(Config.host_server)
        smtp.login(Config.sender_mail, Config.pwd)
        msg = MIMEMultipart()
        msg.attach(MIMEText(mail_content, 'plain', 'utf-8'))
        msg["Subject"] = Header(mail_title, 'utf-8')
        if mail_attach:
            # attach the file base64-encoded as a generic octet-stream
            att = MIMEText(open(mail_attach, 'rb').read(), 'base64', 'utf-8')
            att["Content-Type"] = 'application/octet-stream'
            att["Content-Disposition"] = 'attachment; filename="%s"'%mail_attach
            msg.attach(att)
        smtp.sendmail(Config.sender_mail, receivers, msg.as_string())
        smtp.quit()
        print 'send email to '+ str(receivers) +' success'
    except Exception as e:
        print "Error: send email faild\n"+str(e)

if __name__=='__main__':
    send_email(Config.receivers,Config.mail_content,Config.mail_title,Config.mail_attach)
|
[
"linjie.wang@sihuatech.com"
] |
linjie.wang@sihuatech.com
|
f453e6ce5b0bfd9dba97ed707f82dc688620ec8a
|
e1bea7b0885cdfa259bf3a54fa8372175e93dbfb
|
/python/csevo/processor/AbstractProcessor.py
|
308d00c157f944743fbe4714180e122de931fb44
|
[] |
no_license
|
JiyangZhang/csevo
|
1ac726aaad0eb18e90992bf6205d94ddea8c8afb
|
cf81deee4bea412305d7631b82a09755eb06e79a
|
refs/heads/master
| 2023-02-25T08:47:42.175368
| 2021-02-03T03:24:48
| 2021-02-03T03:24:48
| 335,490,272
| 0
| 0
| null | 2023-09-13T18:14:57
| 2021-02-03T02:57:55
|
Python
|
UTF-8
|
Python
| false
| false
| 938
|
py
|
from typing import *
import abc
from pathlib import Path
from seutil import LoggingUtils
from csevo.data.MethodData import MethodData
from csevo.Environment import Environment
class AbstractProcessor:
    """Base class for dataset processors that turn MethodData entries into
    model-specific training files."""

    logger = LoggingUtils.get_logger(__name__, LoggingUtils.DEBUG if Environment.is_debug else LoggingUtils.INFO)

    def __init__(self):
        return

    @abc.abstractmethod
    def process_data(self, method_data_list: List[dict], data_type: str, output_dir: Path, traversal="None") -> List[int]:
        """
        Processes the list of method data, for the given data_type.
        :param method_data_list: list of MethodData
        :param data_type: the data_type (one of {train, val, test})
        :param output_dir: the directory to put the processed data, prepared for this model
        :param traversal: optional traversal strategy name (default "None")
        :return: the list of data indexes (in the method_data_list) that failed to process
        """
        raise NotImplementedError
|
[
"jiyang.zhang@utexas.edu"
] |
jiyang.zhang@utexas.edu
|
248a2a2fd69b5ec6d9ac180a3b7ff377de38b57d
|
c88b98cbfbf1a9af54bc4840ece721481f578460
|
/src/Notes_APP/admin.py
|
4df72eac7aadf39efb48234db417c33d0684ccf1
|
[] |
no_license
|
wesamalnobani/Notes---WebSite
|
6e3bbdafbe92b95404bd1c14d1bc0f5e4591dac0
|
65c1cb76bbfdb81d7152d62e6022d811ccc56b72
|
refs/heads/master
| 2020-04-22T06:24:22.307181
| 2019-02-11T19:43:31
| 2019-02-11T19:43:31
| 170,144,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
from django.contrib import admin
from .models import Notes
# Register your models here.
class NotesAdmin(admin.ModelAdmin):
    """Django admin config for Notes: filter by state/date/tags, list the
    key columns, and allow searching by title."""
    list_filter =['active', 'created', 'tags']
    list_display = ['title', 'created', 'active' ]
    search_fields = ['title']

admin.site.register(Notes, NotesAdmin)
|
[
"wesam.alnobani@gmail.com"
] |
wesam.alnobani@gmail.com
|
0e05804ec2a0e13c30733be92479e7913d58543e
|
9ecfdfbe098070079c9d96eb41ddb73f95857f93
|
/Simple Chatty Bot/task/bot/bot.spec
|
d9b854f61e911e55b62c697fea0489b924e104a2
|
[] |
no_license
|
sathishkumar8594ys/Simple_Chatty_Bot
|
0e850c616bc6dbd1a970596a3a6105d38960f59a
|
b07c148fa057bd3171a86e6bb456342fbfd38bfe
|
refs/heads/master
| 2023-03-09T21:13:13.150854
| 2021-02-28T04:07:00
| 2021-02-28T04:07:00
| 343,017,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
spec
|
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller build specification for the chatty bot executable. The
# names Analysis/PYZ/EXE/COLLECT are injected by PyInstaller when it
# executes this spec; it is not an importable Python module.

block_cipher = None  # no bytecode encryption

a = Analysis(['bot.py'],
             pathex=['/home/sk/PycharmProjects/Simple Chatty Bot/Simple Chatty Bot/task/bot'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
exe = EXE(pyz,
          a.scripts,
          [],
          exclude_binaries=True,
          name='bot',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          console=True )
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               strip=False,
               upx=True,
               upx_exclude=[],
               name='bot')
|
[
"sk@kali"
] |
sk@kali
|
88b5e60f14a115d454180edc652186ab1c3ad39f
|
264b48f1488611fca35caeabbabc6587fa72f111
|
/scrapy/homeDepot/spiders/quotes3_spider.py
|
327066877849feb582bdbf414c61beae216744b5
|
[] |
no_license
|
stcybrdgs/PythonScrapers
|
925fff3b65774715936cd81a4deaeb6d84ca55ca
|
aea504d72674ae13a256685338442846336cf4c8
|
refs/heads/master
| 2020-05-15T07:50:02.916133
| 2020-01-17T07:18:28
| 2020-01-17T07:18:28
| 182,148,396
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
# quotes3_spider.py
# getting started with a scrapy web scraper
# imports =========================================
import scrapy
# classes =========================================
class QuotesSpider(scrapy.Spider):
    """Scrape quote text, author and tags from quotes.toscrape.com."""
    # identify the spider
    # for this spider, the parse() method will be called
    # to handle each of the urls in the url array even though
    # it is not explicitly called--> rem parse() is Scrapy's
    # default callback method
    name = "quotes3"
    start_urls = [
        'http://quotes.toscrape.com/page/1/',
        'http://quotes.toscrape.com/page/2/',
    ]

    def parse(self, response):
        # yield one item dict per quote <div>; Scrapy collects them
        for quote in response.css('div.quote'):
            yield {
                'text': quote.css('span.text::text').get(),
                'author': quote.css('small.author::text').get(),
                'tags': quote.css('div.tags a.tag::text').getall(),
            }
# main =========================================
def main():
    """Placeholder entry point; the spider is run by Scrapy itself."""
    print('Done.')


if __name__ == '__main__':
    main()
|
[
"stcybrdgs@gmail.com"
] |
stcybrdgs@gmail.com
|
f162a2a8c62e9494f50e7e3ec5805431b37f673a
|
26140f92e856196e701869c04b60f3db4ebddd41
|
/iris.py
|
1a74e485ef84f79afbeb5e0d676b7e120b7ea1e9
|
[] |
no_license
|
TomonoriIshikawa/machine-learning
|
3a99b02f0f0345c5510df1b95b1698c3ff8dcf07
|
9b228548b7cfa7b1b3148e40eb5774100074f590
|
refs/heads/master
| 2020-07-12T17:09:08.070656
| 2019-08-30T13:33:46
| 2019-08-30T13:33:46
| 204,870,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Load the iris dataset from CSV
iris_data = pd.read_csv("iris.csv", encoding="utf-8")

# Split the iris data into label (species name) and feature columns
y = iris_data.loc[:,"Name"]
x = iris_data.loc[:,["SepalLength","SepalWidth","PetalLength","PetalWidth"]]

# Split into training and test sets (80/20, shuffled)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, train_size = 0.8, shuffle = True)

# Train an SVM classifier
clf = SVC()
clf.fit(x_train, y_train)

# Evaluate on the held-out set ("正解率" = accuracy)
y_pred = clf.predict(x_test)
print("正解率 = " , accuracy_score(y_test, y_pred))
|
[
"ishikawa@aidma-hd.jp"
] |
ishikawa@aidma-hd.jp
|
50c2c5e117974cc35fc294bb387217c30c9d1b73
|
e47e8605c0fd82c5f689d77d30a649b221056d47
|
/recieve_send_input.py
|
f417dde3626a3ee78e67a966eeae81442478fab5
|
[] |
no_license
|
DiegoMolero/RaspberryPianoServer
|
2741d24b0fddada5a6c6fa1479eaf79f5d956c40
|
b7650b9e0d433a0ae3bb10504595218935c76cf0
|
refs/heads/master
| 2021-09-17T14:48:39.490078
| 2018-07-02T22:49:53
| 2018-07-02T22:49:53
| 124,073,835
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,492
|
py
|
#!/usr/bin/env python
import sys
from threading import Thread
from socket import *
from time import *
TCP_PORT = 8000
BUFFER_SIZE = 1024 # Normally 1024, but we want fast response
# /-- Hololens Server Network ---
def sendData(data):
    # Forward *data* to the connected TCP client, if one exists yet.
    # Relies on the module globals ``conn`` and ``base`` set by setupTCP().
    # NOTE(review): prepending ``base`` (the last bytes received) to the
    # outgoing payload looks suspicious -- confirm it is intentional.
    if 'conn' in globals():
        conn.send(base+data.encode()) # echo
        print('Data send: '+data)
def setupTCP():
    # Accept exactly one TCP client on TCP_PORT, greet it with 'hello',
    # then loop printing whatever it sends. Stores the connection and the
    # last received bytes in module globals ``conn``/``base`` so
    # sendData() can reuse them.
    print("Starting TCP Server")
    s = socket(AF_INET, SOCK_STREAM)
    s.bind(("", TCP_PORT))
    s.listen(1)
    print("Listening from...")
    print("PORT:\t"+str(s.getsockname()[1])) #PORT
    global conn,base
    conn, addr = s.accept()
    print ('Connection address:'+ str(addr))
    conn.send('hello'.encode())
    while 1:
        base = conn.recv(BUFFER_SIZE)
        if not base: break
        print ("received data: "+ str(base.decode()))
# /-- Local Network ---
def startUDP(port):
    # Read lines from stdin forever and forward each via sendData().
    # NOTE(review): despite the name this opens a TCP (SOCK_STREAM)
    # socket, binds/listens but never accepts on it, and the extra
    # sys.stdin.readline() discards every second line of input --
    # confirm whether any of that is intended.
    print("Starting UDP Local Server, port:"+port)
    sock = socket(AF_INET, SOCK_STREAM)
    sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    sock.bind(('localhost', int(port)))
    sock.listen(0) # do not queue connections
    while 1:
        data = input("Give me input:")
        sys.stdin.readline()
        sendData(data)
def main(argv):
    # Entry point: start the stdin-forwarding thread on the port given in
    # argv[1], then the single-client TCP server thread.
    if(len(argv) != 2):
        print ('Sintex error, this program needs 1 arguments: recieve_send.py <port>')
        sys.exit(2)
    TCPconnection = False  # NOTE(review): never used
    udp_server = Thread(target=startUDP,args=(argv[1],))
    udp_server.daemon = False
    udp_server.start()
    sleep(1)  # give the input thread a head start before the TCP bind
    tcp_server = Thread(target=setupTCP)
    tcp_server.daemon = False
    tcp_server.start()

if __name__== "__main__":
    main(sys.argv)
|
[
"diego.molero@alu.uclm.es"
] |
diego.molero@alu.uclm.es
|
282108fc4037ddb65adeb321187695351e7e68e3
|
be494d2fd78c3d8cf55321baa2f281ca69129062
|
/lab2/lab2.py
|
1ee28a8b8f77f0dcddc545555d3cb80b02ecdf2b
|
[] |
no_license
|
NoureldinYosri/MIT6.034AI
|
cf45d5ca6662236638120537f03444374f8d5af4
|
971a9cd7bb330cb78fc158a67b30d63de135798c
|
refs/heads/master
| 2021-01-01T19:54:54.598046
| 2017-08-27T21:54:22
| 2017-08-27T21:54:22
| 98,722,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,087
|
py
|
# Fall 2012 6.034 Lab 2: Search
#
# Your answers for the true and false questions will be in the following form.
# Your answers will look like one of the two below:
#ANSWER1 = True
#ANSWER1 = False
# 1: True or false - Hill Climbing search is guaranteed to find a solution
# if there is a solution
ANSWER1 = False
# 2: True or false - Best-first search will give an optimal search result
# (shortest path length).
# (If you don't know what we mean by best-first search, refer to
# http://courses.csail.mit.edu/6.034f/ai3/ch4.pdf (page 13 of the pdf).)
ANSWER2 = False
# 3: True or false - Best-first search and hill climbing make use of
# heuristic values of nodes.
ANSWER3 = True;
# 4: True or false - A* uses an extended-nodes set.
ANSWER4 = True;
# 5: True or false - Breadth first search is guaranteed to return a path
# with the shortest number of nodes.
ANSWER5 = True;
# 6: True or false - The regular branch and bound uses heuristic values
# to speed up the search for an optimal path.
ANSWER6 = False;
# Import the Graph data structure from 'search.py'
# Refer to search.py for documentation
from search import Graph
## Optional Warm-up: BFS and DFS
# If you implement these, the offline tester will test them.
# If you don't, it won't.
# The online tester will not test them.
def bfs(graph, start, goal):
    """Breadth-first search; return the node list from start to goal
    (fewest edges), or [] when goal is unreachable."""
    predecessors = {start: ""}
    frontier = [start]
    while frontier:
        node = frontier.pop(0)
        if node == goal:
            break
        for neighbor in graph.get_connected_nodes(node):
            if neighbor not in predecessors:
                predecessors[neighbor] = node
                frontier.append(neighbor)
    if goal not in predecessors:
        return []
    # walk the predecessor chain back from the goal, then flip it
    path = []
    node = goal
    while node != "":
        path.append(node)
        node = predecessors[node]
    return path[::-1]
## Once you have completed the breadth-first search,
## this part should be very simple to complete.
def dfs(graph, start, goal):
    """Depth-first search with an explicit stack; return a start->goal
    node list, or [] when goal is unreachable."""
    predecessors = {start: ""}
    stack = [start]
    while stack:
        node = stack.pop()
        if node == goal:
            break
        for neighbor in graph.get_connected_nodes(node):
            if neighbor not in predecessors:
                predecessors[neighbor] = node
                stack.append(neighbor)
    if goal not in predecessors:
        return []
    # walk the predecessor chain back from the goal, then flip it
    path = []
    node = goal
    while node != "":
        path.append(node)
        node = predecessors[node]
    return path[::-1]
## Now we're going to add some heuristics into the search.
## Remember that hill-climbing is a modified version of depth-first search.
## Search direction should be towards lower heuristic values to the goal.
def hill_climbing(graph, start, goal):
    """Hill climbing: depth-first search whose extensions are ordered by
    the heuristic value of the new terminal node (lower is better).

    Returns the first path found from `start` to `goal`, or [].
    """
    agenda = [[start]]
    visited = set()
    while agenda:
        path = agenda.pop(0)
        head = path[-1]
        if head == goal:
            return path
        if head in visited:
            continue
        visited.add(head)
        # Extend the path by every unvisited neighbour not already on it.
        extensions = [
            path + [n]
            for n in graph.get_connected_nodes(head)
            if n not in visited and n not in path
        ]
        # Best (lowest-heuristic) extensions go to the *front* of the agenda,
        # giving depth-first behaviour steered by the heuristic.
        extensions.sort(key=lambda p: graph.get_heuristic(p[-1], goal))
        agenda = extensions + agenda
    return []
## Now we're going to implement beam search, a variation on BFS
## that caps the amount of memory used to store paths. Remember,
## we maintain only k candidate paths of length n in our agenda at any time.
## The k top candidates are to be determined using the
## graph get_heuristic function, with lower values being better values.
def beam_search(graph, start, goal, beam_width):
    """Breadth-first search that keeps only the `beam_width` best candidate
    paths (by heuristic of their terminal node) at each depth level.

    Returns the first goal-terminated path encountered, or [].
    """
    current_level = [[start]]
    while current_level:
        # Keep only the k most promising paths of this length.
        candidates = sorted(
            current_level,
            key=lambda p: graph.get_heuristic(p[-1], goal))[:beam_width]
        next_level = []
        for path in candidates:
            head = path[-1]
            if head == goal:
                return path
            for neighbor in graph.get_connected_nodes(head):
                if neighbor not in path:  # no cycles within a path
                    next_level.append(path + [neighbor])
        current_level = next_level
    return []
## Now we're going to try optimal search. The previous searches haven't
## used edge distances in the calculation.
## This function takes in a graph and a list of node names, and returns
## the sum of edge lengths along the path -- the total distance in the path.
def path_length(graph, node_names):
    """Return the total distance of a path: the sum of edge lengths between
    each consecutive pair of nodes in `node_names`.

    A path of zero or one node has length 0.

    Bug fix: the original iterated with `xrange`, which only exists on
    Python 2 and raises NameError on Python 3; `zip` over the sequence and
    its tail is equivalent and version-agnostic.
    """
    total = 0
    for u, v in zip(node_names, node_names[1:]):
        total += graph.get_edge(u, v).length
    return total
def branch_and_bound(graph, start, goal):
    """Uniform-cost branch and bound: always expand the shortest partial
    path first (no heuristic), so the first goal path popped is optimal.

    Returns the optimal path from `start` to `goal`, or [].
    """
    agenda = [[start]]
    while agenda:
        path = agenda.pop(0)
        terminal = path[-1]
        if terminal == goal:
            return path
        for neighbor in graph.get_connected_nodes(terminal):
            if neighbor in path:
                continue  # never revisit a node on the same path
            agenda.append(path + [neighbor])
        # Keep the agenda ordered by accumulated edge length.
        agenda.sort(key=lambda p: path_length(graph, p))
    return []
def a_star(graph, start, goal):
    """A* search: branch and bound with an extended-nodes (closed) set and a
    heuristic, expanding paths in order of g(path) + h(terminal, goal).

    Returns the optimal path from `start` to `goal`, or [] if unreachable.

    Bug fix: the original compared candidate goal paths with
    `path_length(optimal) > path_length(cur_path)`, omitting the required
    `graph` argument of path_length(graph, node_names) -- a TypeError as
    soon as a second goal path was found.
    """
    queue = [[start]]
    closed = set()
    optimal = []
    while queue:
        cur_path = queue.pop(0)
        cur_head = cur_path[-1]
        # Once the cheapest remaining candidate is already no shorter than
        # the best goal path found, nothing better can follow.
        if optimal and path_length(graph, cur_path) >= path_length(graph, optimal):
            break
        if cur_head == goal:
            if not optimal or path_length(graph, optimal) > path_length(graph, cur_path):
                optimal = list(cur_path)
        if cur_head in closed:
            continue
        closed.add(cur_head)
        for nxt in graph.get_connected_nodes(cur_head):
            if nxt not in closed and nxt not in cur_path:
                queue.append(cur_path + [nxt])
        # f(path) = accumulated length + heuristic of the terminal node.
        queue.sort(key=lambda path: path_length(graph, path) + graph.get_heuristic(path[-1], goal))
    return optimal
## It's useful to determine if a graph has a consistent and admissible
## heuristic. You've seen graphs with heuristics that are
## admissible, but not consistent. Have you seen any graphs that are
## consistent, but not admissible?
def is_admissible(graph, goal):
    """True iff the heuristic never overestimates the true optimal distance
    to `goal` for any node connected to it, and is not positive at the goal
    itself (when the goal is not connected to itself)."""
    for node in graph.nodes:
        if graph.are_connected(node, goal):
            shortest = a_star(graph, node, goal)
            if graph.get_heuristic(node, goal) > path_length(graph, shortest):
                return False
        elif node == goal and graph.get_heuristic(node, goal) > 0:
            return False
    return True
def is_consistent(graph, goal):
    """True iff the heuristic satisfies the triangle inequality on every
    edge: the heuristic may not change by more than the edge's length."""
    for edge in graph.edges:
        heuristic_gap = abs(graph.get_heuristic(edge.node1, goal) -
                            graph.get_heuristic(edge.node2, goal))
        if edge.length < heuristic_gap:
            return False
    return True
# Free-form survey strings read by the problem-set grader.
HOW_MANY_HOURS_THIS_PSET_TOOK = '1.3'
WHAT_I_FOUND_INTERESTING = 'None'
WHAT_I_FOUND_BORING = 'ALL :('
|
[
"noureldinyosri@gmail.com"
] |
noureldinyosri@gmail.com
|
d7660d570944bddd9a535c5c46e416d0967b4aa0
|
a3d9701d8074ba66edf51a54038707acb9e0e9b6
|
/users/migrations/0012_auto_20190721_1736.py
|
aac18ba33eb1b421b31cec8fcefc59810334d62a
|
[] |
no_license
|
jdshah98/Food-Express
|
2e6b5cc7e0e6bc26f35a12b1a1bb2e336b756187
|
3b152239be47228540556e5de96586f7618d7de5
|
refs/heads/master
| 2020-06-19T04:13:04.258793
| 2019-09-10T07:36:01
| 2019-09-10T07:36:01
| 196,556,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
# Generated by Django 2.2.2 on 2019-07-21 12:06
# Auto-generated schema migration: alters UserType.usertype to a
# CharField(max_length=50) with default 'user'.  Do not hand-edit a
# generated migration; create a follow-up migration instead.
from django.db import migrations, models


class Migration(migrations.Migration):

    # Must run after the previous `users` migration.
    dependencies = [
        ('users', '0011_auto_20190721_1703'),
    ]

    operations = [
        migrations.AlterField(
            model_name='usertype',
            name='usertype',
            field=models.CharField(default='user', max_length=50),
        ),
    ]
|
[
"jdshahstudio@gmail.com"
] |
jdshahstudio@gmail.com
|
6242780faf6560025189bcab96433359a716fa1b
|
e43a96acd1cbd4c93fff43e264f30c9b4c552627
|
/perfect_number.py
|
8f5ff7f9f0e2d1527be604946b08621a2b224466
|
[] |
no_license
|
julywaltz/testCode
|
2f4d68fdd0a37bb2aab1b1f79c3b3abf797be1cf
|
e3fa401a8ff1a3effc25cd1ab9cd830c8b93b056
|
refs/heads/master
| 2020-06-04T09:05:27.420800
| 2019-07-06T16:07:51
| 2019-07-06T16:07:51
| 191,957,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
'''
@Author: Julywaltz
@Date: 2019-06-15 14:42:15
@LastEditors: Julywaltz
@LastEditTime: 2019-06-16 21:25:47
@Version: $Id$
'''
from math import sqrt
def p_num(limit=10000000):
    """Print and return all even perfect numbers derived from Mersenne
    primes below `limit`.

    By the Euclid-Euler theorem, every even perfect number has the form
    (2**p - 1) * 2**(p - 1) where both p and the Mersenne number 2**p - 1
    are prime.  The original implementation trial-divided every number up
    to 10,000,000 and then did O(n) list-membership tests for each of the
    ~660,000 primes, which is astronomically slow; a sieve plus O(1)
    index lookups gives the identical result in seconds.

    The `limit` parameter (default matches the original hard-coded bound)
    makes the search range configurable; callers of the old zero-argument
    form are unaffected.  Returns the list of perfect numbers (ascending).
    """
    if limit < 3:
        p_nums = []
        print(p_nums)
        return p_nums

    # Sieve of Eratosthenes: is_prime[i] is truthy iff i is prime (i < limit).
    is_prime = bytearray([1]) * limit
    is_prime[0] = is_prime[1] = 0
    for i in range(2, int(limit ** 0.5) + 1):
        if is_prime[i]:
            is_prime[i * i::i] = bytearray(len(range(i * i, limit, i)))

    p_nums = []
    exp = 2
    # Only exponents with 2**exp - 1 < limit can match the original's
    # "2**x - 1 in primes" membership test.
    while 2 ** exp - 1 < limit:
        mersenne = 2 ** exp - 1
        if is_prime[exp] and is_prime[mersenne]:
            p_nums.append(mersenne * 2 ** (exp - 1))
        exp += 1
    print(p_nums)
    return p_nums


if __name__ == "__main__":
    p_num()
|
[
"julywaltz77@hotmail.com"
] |
julywaltz77@hotmail.com
|
39a12ece68a28beef7c51435ef74a705619d776b
|
092b33f35c5201f40859caca27e936c184f5bd19
|
/tao.py
|
d5efc36991b3ecb76a9260b6b5585ccc25edb16b
|
[] |
no_license
|
wokwak/python
|
c709a2d6498ad23d149025c23b8a56856b17812d
|
d060e12146b3ed2dfdfc40864fe497a22d8ef076
|
refs/heads/main
| 2023-01-06T07:08:18.242007
| 2020-11-08T08:59:06
| 2020-11-08T08:59:06
| 311,021,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
# Turtle-graphics doodle: draws ten squares, rotating 36 degrees between
# them, producing a 10-fold rosette.  Opens a Tk window as a side effect
# of turtle.Turtle(), so this module must run with a display available.
import turtle
import random

# Seeded for reproducibility, although nothing below currently uses random.
random.seed(10)

tao = turtle.Turtle()
tao.shape('turtle')
#tao.forward(100)
#tao.left(90)
tao.reset()
# NOTE(review): the two triple-quoted blocks below are string literals used
# as block comments; they are evaluated and discarded at import time.
'''
for i in [10,50,90]:
    print(i)
for i in range(100) :
    tao.forward(100)
    tao.left(100)
'''
#range(4)
#list(range(4))
'''
for j in range(10) :
    for i in range(8) :
        tao.forward(100)
        tao.left(45)
    tao.left(145)
'''
def regtangle():
    # Draw one 100x100 square with the module-level turtle `tao`.
    # (Name is a typo for "rectangle", kept to avoid breaking callers.)
    for i in range(4):
        tao.forward(100)
        tao.left(90)
#regtangle()
for i in range(10):
    regtangle()
    tao.left(36)
|
[
"noreply@github.com"
] |
wokwak.noreply@github.com
|
7f833cec3bc0275e39a6025841f1a25440b25577
|
d572d95b5d80c7493812d2b55a79d9c1980f5c12
|
/app_development/my_weather_app.py
|
67d4862f91b8e0b4730581eb522a6d4365979679
|
[] |
no_license
|
nancydu317/SEVI_students_testing
|
34ca5d69db2a3ef166fb90394a4bcdfb04ec8731
|
f2819678900ba455de70b73bab9fde3f58ebcce6
|
refs/heads/main
| 2023-07-25T19:05:33.483571
| 2021-08-27T20:21:31
| 2021-08-27T20:21:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
# start your app here!
|
[
"noreply@github.com"
] |
nancydu317.noreply@github.com
|
1b3ae2f3d60990a335626d66b054b5602bcffb32
|
fb2156803dbcd96a594feb7f4e84076477247f62
|
/LeetCode/107. Binary Tree Level Order Traversal II.py
|
2c288f66cdff04a9ee7ae3abf0ad3cc6a1c2f9d8
|
[] |
no_license
|
dynotw/Leetcode-Lintcode
|
dc9028cc6ffdeab9274d2c5771ca43a827014c16
|
b3d8534cdabd3b58fa13df457c65fb53ba37edd8
|
refs/heads/master
| 2022-07-26T21:34:50.221844
| 2022-07-12T12:58:23
| 2022-07-12T12:58:23
| 236,271,631
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,586
|
py
|
# Question:(it's the derivative of 102 Problems)
# Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).
# For example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its bottom-up level order traversal as:
# [
# [15,7],
# [9,20],
# [3]
# ]
# Answer:
class Solution:
def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
ans = []
def bfs(root,level):
if root != None:
if len(ans) < level + 1:
ans.append([])
# 为了append[level]不出现 out of index,需要根据level情况给ans添加元素,使得存在ans[level]
# 针对这里的递归,都是先运行bfs.left,即对于相应的level,在运行bfs.left已经创建了ans[level]
# 可是后续我们还要运行bfs.right,而对应level的bfs.right与bfs.left是共享一个ans[level]的
# 所以不需要再重复创建ans[level],因此这个if语句块是判断是否需要创建ans[level]的
ans[level].append(root.val)
#不是将对象直接加入ans列表而是添加到ans列表中对应的列表对象中
bfs(root.left,level + 1)
bfs(root.right,level + 1)
bfs(root,0)
ans.reverse()
return ans
|
[
"noreply@github.com"
] |
dynotw.noreply@github.com
|
9c26d443975fb4cff69cbf00fe1c509d297bf707
|
bc9db7a44bd9aedfe2b55a8abe4711cefea7a9ad
|
/bisec/g/test_bisec.py
|
9ddc0ee25c2fc5c9961d72e61043a4f86292fbff
|
[] |
no_license
|
sdrave/braunschweig09
|
4c18c32ab5274203997dc087b8784198d79d68e4
|
82d167aa44eafea3534ede454cb498bcc177bb15
|
refs/heads/master
| 2020-04-22T17:09:32.178326
| 2019-02-14T11:16:17
| 2019-02-14T11:16:17
| 170,531,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
import pytest
from bisec import bisec
# Unit tests for the project-local `bisec` root finder (see bisec.py).
#
# NOTE(review): several tests assert abs(x) < 1e-4 even where the bracketed
# root is at +/-1 (e.g. x**3 - 1 on [-2, 1.5], or [-1, -1] for x**2 - 1).
# Either bisec returns the residual f(x) rather than the root, or these
# assertions should read abs(f(x)) < 1e-4 -- confirm against bisec.py.
def test_result():
    # Identity function: root at 0, symmetric bracket.
    f = lambda x: x
    x = bisec(f, -1, 1)
    assert abs(x) < 1e-4

def test_result2():
    # Same root with an asymmetric bracket.
    f = lambda x: x
    x = bisec(f, -2, 1)
    assert abs(x) < 1e-4

def test_minus_plus():
    # f changes sign from negative to positive across the bracket.
    f = lambda x: x**3 - 1
    x = bisec(f, -2, 1.5)
    assert abs(x) < 1e-4

def test_plus_minus():
    # f changes sign from positive to negative across the bracket.
    f = lambda x: -x**3 + 1
    x = bisec(f, -2, 1.5)
    assert abs(x) < 1e-4

def test_no_zero():
    # Strictly positive function: no root, expect ValueError.
    f = lambda x: x**2 + 1
    with pytest.raises(ValueError):
        x = bisec(f, -2, 1.5)

def test_zero_left():
    # Root exactly at the left bracket endpoint.
    f = lambda x: x**2
    x = bisec(f, 0, 1)
    assert abs(x) < 1e-4

def test_zero_right():
    # Root exactly at the right bracket endpoint.
    f = lambda x: x**2
    x = bisec(f, -1, 0)
    assert abs(x) < 1e-4

def test_discont():
    # Sign change without an actual zero (jump discontinuity at 0):
    # presumably bisec verifies |f(x)| is small and raises -- confirm.
    f = lambda x: -1 if x < 0 else 1
    with pytest.raises(ValueError):
        x = bisec(f, -2, 1.5)

def test_a_equal_b():
    # Degenerate bracket with no root at that point: expect ValueError.
    f = lambda x: x**2 - 1
    with pytest.raises(ValueError):
        x = bisec(f, -2, -2)

def test_a_equal_b_equal_root():
    # Degenerate bracket that *is* a root (x = -1).
    f = lambda x: x**2 - 1
    x = bisec(f, -1, -1)
    assert abs(x) < 1e-4
|
[
"stephanrave@uni-muenster.de"
] |
stephanrave@uni-muenster.de
|
b97f6b0ad2612c3236a005c72f6ec641fb1ed3fd
|
0a33333f80e7026e51dc6c6f3e567a6f5ff7d2b5
|
/FlourWorks/urls.py
|
040844c4527efa84b20e779cf53d8a2fa8375e6f
|
[] |
no_license
|
skiller3/FlourWorks
|
3b192ae0963356162262b3dbe7066759f15d1b6f
|
f7c392e9a60fc5cefdf9178b6d66fa2304222c6f
|
refs/heads/master
| 2020-12-25T19:15:00.841860
| 2015-09-03T17:49:28
| 2015-09-03T17:49:28
| 41,874,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
"""FlourWorks URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# Route everything under /admin/ to the stock Django admin site; no other
# URLs are wired up yet.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
]
|
[
"skye.isard@gmail.com"
] |
skye.isard@gmail.com
|
808a08e46a34c866d2d368d5e8e108f595bf1e99
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/TprimeTprime/TprimeTprimeToTGluonTGluoninc_M_1100_TuneZ2star_8TeV-madgraph_cff.py
|
79bbda03a0f1af54bda64a142b5001db8231e153
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,076
|
py
|
import FWCore.ParameterSet.Config as cms
#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'MSTP(1) = 4',
'MSEL=8 ! fourth generation (t4) fermions',
'MWID(8)=2',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
'PMAS(8,1) = 1100.0D0 ! tprime quarks mass',
'PMAS(8,2) = 11.0D0',
'PMAS(8,3) = 110.0D0',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'KFDP(66,2)=6 ! defines g t4 (no check)',
'MDME(66,1)=1 ! g t4',
'MDME(67,1)=0 ! gamma t4',
'MDME(68,1)=0 ! Z0 t (2 : on for particle, off for anti-particle) ',
'MDME(69,1)=0 ! W d',
'MDME(70,1)=0 ! W s',
'MDME(71,1)=0 ! W b (3 : off for particle, on for particle) ',
'MDME(72,1)=0 ! W b4',
'MDME(73,1)=0 ! h0 t4',
'MDME(74,1)=-1 ! H+ b',
'MDME(75,1)=-1 ! H+ b4',
'BRAT(66) = 1.0D0',
'BRAT(67) = 0.0D0',
'BRAT(68) = 0.0D0',
'BRAT(69) = 0.0D0',
'BRAT(70) = 0.0D0',
'BRAT(71) = 0.0D0',
'BRAT(72) = 0.0D0',
'BRAT(73) = 0.0D0',
'BRAT(74) = 0.0D0',
'BRAT(75) = 0.0D0',
'MDME(174,1)=1 !Z decay into d dbar',
'MDME(175,1)=1 !Z decay into u ubar',
'MDME(176,1)=1 !Z decay into s sbar',
'MDME(177,1)=1 !Z decay into c cbar',
'MDME(178,1)=1 !Z decay into b bbar',
'MDME(179,1)=1 !Z decay into t tbar',
'MDME(180,1)=-1 !Z decay into b4 b4bar',
'MDME(181,1)=-1 !Z decay into t4 t4bar',
'MDME(182,1)=1 !Z decay into e- e+',
'MDME(183,1)=1 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=1 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=1 !Z decay into nu_tau nu_taubar',
'MDME(188,1)=-1 !Z decay into tau4 tau4bar',
'MDME(189,1)=-1 !Z decay into nu_tau4 nu_tau4bar',
'MDME(190,1)=1 !W decay into u dbar',
'MDME(191,1)=1 !W decay into c dbar',
'MDME(192,1)=1 !W decay into t dbar',
'MDME(193,1)=-1 !W decay into t4 dbar',
'MDME(194,1)=1 !W decay into u sbar',
'MDME(195,1)=1 !W decay into c sbar',
'MDME(196,1)=1 !W decay into t sbar',
'MDME(197,1)=-1 !W decay into t4 sbar',
'MDME(198,1)=1 !W decay into u bbar',
'MDME(199,1)=1 !W decay into c bbar',
'MDME(200,1)=1 !W decay into t bbar',
'MDME(201,1)=-1 !W decay into t4 bbar',
'MDME(202,1)=-1 !W decay into u b4bar',
'MDME(203,1)=-1 !W decay into c b4bar',
'MDME(204,1)=-1 !W decay into t b4bar',
'MDME(205,1)=-1 !W decay into t4 b4bar',
'MDME(206,1)=1 !W decay into e- nu_e',
'MDME(207,1)=1 !W decay into mu nu_mu',
'MDME(208,1)=1 !W decay into tau nu_tau',
'MDME(209,1)=-1 !W decay into tau4 nu_tau4'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
),
jetMatching = cms.untracked.PSet(
scheme = cms.string("Madgraph"),
mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
MEMAIN_etaclmax = cms.double(5.0),
MEMAIN_qcut = cms.double(-1),
MEMAIN_nqmatch = cms.int32(-1),
MEMAIN_minjets = cms.int32(-1),
MEMAIN_maxjets = cms.int32(-1),
MEMAIN_showerkt = cms.double(0),
MEMAIN_excres = cms.string(''),
outTree_flag = cms.int32(0)
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch"
] |
sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch
|
a8dd055878cc344ce6826b75558dfc8eafed6946
|
64b2a1ecef2fe4cff1be312bf37edbd6c2ed089f
|
/node_modules/socket.io/node_modules/engine.io/node_modules/ws/build/config.gypi
|
72dd9b3a37279051bf3c6ca2761ddfacf5d101f8
|
[
"MIT"
] |
permissive
|
charliecooper45/node.js-tutorial
|
5d1441d95f976c5be007d8257d92d19658fb661b
|
6b9edba2e80a20f9455c76622cca16b8f0e4928d
|
refs/heads/master
| 2020-07-21T22:04:00.411590
| 2015-02-27T14:06:10
| 2015-02-27T14:06:10
| 31,421,982
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,169
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 48,
"host_arch": "ia32",
"node_install_npm": "true",
"node_prefix": "/usr",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "ia32",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/charlie/.node-gyp/0.10.33",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/1.4.28 node/v0.10.33 linux ia32",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/charlie/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr",
"registry": "https://registry.npmjs.org/",
"browser": "",
"cache_lock_wait": "10000",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/charlie/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "2",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "0.10.33",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"username": "",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/charlie/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"email": "",
"json": "",
"spin": "true"
}
}
|
[
"charliecooper98@gmail.com"
] |
charliecooper98@gmail.com
|
224dbd8cfc8120eb69d1a579c981c71daadf5102
|
6f2dd172c379fb5ae5436492790e604b08e93673
|
/query_service/resources/apis/crm/income_analyse/query_sql/member/__init__.py
|
b4d743b1c6495fdde5de7e12d532d0614b6f30c5
|
[] |
no_license
|
tankeryang/flasto-service
|
cf0ae761b6d9be94fa9f474d644cf77ffed3de50
|
8e542b76fb6240ad91cf1f8f9da7cbe1bbe144d0
|
refs/heads/master
| 2020-04-25T02:08:28.598086
| 2019-05-09T11:20:31
| 2019-05-09T11:20:31
| 172,428,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
from . import level, mul_dim, new_old, register_proportion
|
[
"yang.yang@trendy-global.com"
] |
yang.yang@trendy-global.com
|
8a7ff99e9a519b159a04cb85b2cff427de6f858b
|
26e76ac7c99e8a5c5452bcdfde8fc6e0b77aa202
|
/app/members/migrations/0013_remove_user_is_deleted.py
|
7ce40f107e03ba5aac536763e356bff42c881e27
|
[] |
no_license
|
limdongkyu/let-us-Go
|
eb741b527d21cb291e3d718e5b48c331bf42c99f
|
5768bf4f3c78af9196298f1720efb5ba0aa15efc
|
refs/heads/master
| 2023-06-01T03:38:40.341850
| 2021-06-14T08:26:45
| 2021-06-14T08:26:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# Generated by Django 3.1 on 2020-08-23 11:48
# Auto-generated schema migration: drops the `is_deleted` column from the
# members User model.  Do not hand-edit a generated migration.
from django.db import migrations


class Migration(migrations.Migration):

    # Must run after the previous `members` migration.
    dependencies = [
        ("members", "0012_auto_20190701_1511"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="user",
            name="is_deleted",
        ),
    ]
|
[
"dev@lhy.kr"
] |
dev@lhy.kr
|
b9d7b448a9e0ad5806a73ae7cd16af4a49459b30
|
0e50cbc7dacd85b8c7aded78c522de6d3d0109d4
|
/ceppa/util/exp_methods/add_variables.py
|
f2a268b9c006eb30359789bb6ff4f823b2314fee
|
[] |
no_license
|
giorgio-o/HCM_RFC
|
0b7be0a609e4dee81a970ea49b77284af4089745
|
2170f4de1835661261b2522f0869d1d4f3363f73
|
refs/heads/master
| 2021-08-11T16:38:53.777690
| 2017-11-13T20:24:05
| 2017-11-13T20:24:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,182
|
py
|
"""
G.A.Onnis, 01.2017
Tecott Lab UCSF
"""
def add_variables_dict(experiment):
    """Attach the HCM variable/feature catalogues to `experiment`.

    Mutates `experiment` in place, setting the attributes: HCM_variables,
    HCM_derived, features, features_by_type, features_by_activity,
    feature_pairs and levels.  Returns None.  All values are static
    name catalogues (no computation happens here).
    """
    # attributes/features to save as npy
    HCM_variables = {
        'txy_data': [
            'CT', # corrected X, Y, T: this is = backwardX so far. holds at_HB and non_HB Move
            'CX',
            'CY',
            'recording_start_stop_time',
        ],
        'timeSets': [
            'F_timeSet', # corrected by 'at_device' spatial constraint
            'W_timeSet',
            'at_F_timeSet', # at feeder
            'at_W_timeSet',
            'F_timeSet_uncorrected', # raw photobeam/lickometer data
            'W_timeSet_uncorrected',
            'device_F_position_error_timeSet', # photobeam when not at feeder
            'device_W_position_error_timeSet',
            'devices_overlap_error_timeSet', # devices firing at the same time
        ],
        'idxs': [
            'idx_at_F', # bool, Move at Feeder CT timestamps index
            'idx_at_W',
            'idx_at_HB', # bool, at HomeBase
        ],
        'position_data': [
            'bin_times_24h_xbins12_ybins24', # total times, cage grid: xbins, ybins
            'bin_times_24h_xbins2_ybins4'
        ],
        'homebase': [
            'rect_HB', # nest/homebase rectangle, cage grid: (2,4)
            'obs_HB' # obs by Ethel
        ],
        'qc': [
            'flagged', # possibly ignored
            'flagged_msgs', # reason
        ],
        'to_compute': [
            'CT_at_HB', 'CT_out_HB',
            'at_HB_timeSet',
            'idx_out_HB',
            'AS_idx'
        ],
    }
    # Quantities derived from the raw HCM variables above.
    HCM_derived = {
        'active_states': ['AS_timeSet', 'IS_timeSet'],
        'bouts': [
            'FB_timeSet', 'WB_timeSet', 'MB_timeSet',
            'MB_idx',
        ],
        'events': {
            'M': [
                'delta_t',
                'distance',
                'velocity',
                'angle',
                'turning_angle'
            ],
        },
        # 'totals': ['TF', 'TW', 'TM'],
    }
    # Feature names grouped by category (F=feeding, W=drinking, M=movement).
    features_by_type = {
        'active_states': ['ASP', 'ASN', 'ASD'],
        'totals': ['TF', 'TW', 'TM'],
        'AS_intensities': ['FASInt', 'WASInt', 'MASInt'],
        'bouts' : [
            'FBASR', 'FBN', 'FBS', 'FBD', 'FBI',
            'WBASR', 'WBN', 'WBS', 'WBD', 'WBI',
            'MBASR', 'MBN', 'MBS', 'MBD', 'MBI'
        ],
        # 'events': [
        #     'FEN', 'FETD', 'FEAD',
        #     'WEN', 'WETD', 'WEAD'
        #     #move
        #     ]
    }
    # Flat feature list, grouped by feature kind.
    features = [
        'ASP', 'ASN', 'ASD',
        'TF', 'TW', 'TM',
        'FASInt', 'WASInt', 'MASInt',
        'FBASR', 'WBASR', 'MBASR',
        'FBN', 'WBN', 'MBN',
        'FBS', 'WBS', 'MBS',
        'FBD', 'WBD', 'MBD',
        'FBI', 'WBI', 'MBI'
    ]
    # Same features, ordered by activity (AS, then F, W, M groups).
    features_by_activity = [
        'ASP', 'ASN', 'ASD',
        'TF', 'FASInt', 'FBASR', 'FBN', 'FBS', 'FBD', 'FBI',
        'TW', 'WASInt', 'WBASR', 'WBN', 'WBS', 'WBD', 'WBI',
        'TM', 'MASInt', 'MBASR', 'MBN', 'MBS', 'MBD', 'MBI'
    ]
    # Feature pairs used downstream (presumably for pairwise scatter/
    # correlation plots -- confirm against callers).
    feature_pairs = [
        ['ASN', 'ASP'],
        ['ASD', 'ASP'],
        ['ASN', 'ASD'],
        ['ASP', 'TF'],
        ['ASP', 'TW'],
        ['ASP', 'TM'],
        ['ASP', 'FASInt'],
        ['ASP', 'WASInt'],
        ['ASP', 'MASInt'],
        ['TF', 'TW'],
        ['TF', 'TM'],
        ['TW', 'TM'],
        ['FASInt', 'WASInt'],
        ['FASInt', 'MASInt'],
        ['WASInt', 'MASInt'],
        ['FASInt', 'FBASR'],
        ['FASInt', 'FBS'],
        ['WASInt', 'WBASR'],
        ['WASInt', 'WBS'],
        ['MASInt', 'MBASR'],
        ['MASInt', 'MBS'],
        ['FBS', 'FBASR'],
        ['FBS', 'FBI'],
        ['FBS', 'FBD'],
        ['FBD', 'FBI'],
        ['WBS', 'WBASR'],
        ['WBS', 'WBI'],
        ['WBS', 'WBD'],
        ['WBD', 'WBI'],
        ['MBS', 'MBASR'],
        ['MBS', 'MBI'],
        ['MBS', 'MBD'],
        ['MBD', 'MBI'],
    ]
    # Aggregation levels used throughout the analysis.
    levels = ['strain', 'mouse', 'mouseday']
    experiment.HCM_variables = HCM_variables
    experiment.HCM_derived = HCM_derived
    experiment.features = features
    experiment.features_by_type = features_by_type
    experiment.features_by_activity = features_by_activity
    experiment.feature_pairs = feature_pairs
    experiment.levels = levels
    # 'coeff': ['FC', 'LC']
    # 'ingestion_totals' : ['FETS', 'WETS'],
    # ordered_features = [
    #     'TF', 'TW', 'TM',
    #     'ASP', 'ASN', 'ASD',
    #     'FASInt', 'WASInt', 'MASInt',
    #     'FBASR', 'WBASR', 'MBASR',
    #     'FBN', 'WBN', 'MBN',
    #     'FBS', 'WBS', 'MBS',
    #     'FBD', 'WBD', 'MBD',
    #     'FBI', 'WBI', 'MBI'
    #     ]
|
[
"giorgio.onnis@uscf.edu"
] |
giorgio.onnis@uscf.edu
|
b01716708df47007dc97cb66bb6ae275f2128ed7
|
2e1803d0b1e3c69555317e8a461e37c84dcb00ba
|
/dataObjects/ListingDetail.py
|
0d9e6e6cf4057ba2633fa5bc3625d60571f29d6c
|
[] |
no_license
|
BustinArown/AirDND
|
a61b048e4303d4041077e381033dd73df456ee26
|
ebb88f705a47813565ba116a1f5a110ec68dd015
|
refs/heads/master
| 2021-08-26T07:05:01.997116
| 2017-11-22T01:03:42
| 2017-11-22T01:03:42
| 111,618,849
| 0
| 0
| null | 2017-11-22T00:55:55
| 2017-11-22T00:55:55
| null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
class ListingDetail:
    """Plain data holder for an Airbnb-style listing and its JSON view."""

    def __init__(self, bathrooms, bedrooms, beds, location, uuid, instantBookable,
                 isNewListing, lat, lng, name, neighborhood, propertyType, reviewsCount,
                 roomType, starRating):
        self.bathrooms = bathrooms
        self.bedrooms = bedrooms
        self.beds = beds
        self.location = location
        self.uuid = uuid
        self.instantBookable = instantBookable
        self.isNewListing = isNewListing
        self.lat = lat
        self.lng = lng
        self.name = name
        self.neighborhood = neighborhood
        self.propertyType = propertyType
        self.reviewsCount = reviewsCount
        self.roomType = roomType
        self.starRating = starRating

    def to_json(self):
        """Return the listing as a JSON-serializable dict (uuid exposed
        under the key 'id').

        Bug fix: the original dict literal was missing every comma between
        entries, and the 'location' entry was written `'locaiton'self.location`
        (typo plus missing colon) -- the module could not even be parsed.
        """
        return {
            'bathrooms': self.bathrooms,
            'bedrooms': self.bedrooms,
            'beds': self.beds,
            'location': self.location,
            'id': self.uuid,
            'instantBookable': self.instantBookable,
            'isNewListing': self.isNewListing,
            'lat': self.lat,
            'lng': self.lng,
            'name': self.name,
            'neighborhood': self.neighborhood,
            'propertyType': self.propertyType,
            'reviewsCount': self.reviewsCount,
            'roomType': self.roomType,
            'starRating': self.starRating,
        }
|
[
"noreply@github.com"
] |
BustinArown.noreply@github.com
|
bc608f4ad162e783d30e467f8a8fdc508dad380f
|
17cb2e5af78267ea0c793ab191c8181730081232
|
/networking tcpServer.py
|
244a20b130412e0c3e78300eda71ccb626545a9f
|
[] |
no_license
|
RaunakJalan/Python-Tutorial-Codes
|
2884ca7fec6f8917e890e4b8e1f751a3e3f57044
|
7b84ddc724730fb5fc845d01b7b584f984d9dc35
|
refs/heads/master
| 2020-08-15T15:02:32.443555
| 2019-10-30T06:56:58
| 2019-10-30T06:56:58
| 215,360,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 22 04:17:34 2018
@author: Raunak
"""
import socket
def main():
    """Single-client TCP echo-uppercase server.

    Listens on 127.0.0.1:5000, accepts one connection, and replies to each
    UTF-8 message with its upper-cased form until the client disconnects.
    """
    host = '127.0.0.1'
    port = 5000

    server = socket.socket()
    server.bind((host, port))
    server.listen(1)

    # Block until exactly one client connects.
    conn, addr = server.accept()
    print("Connection from: " + str(addr))
    while True:
        message = conn.recv(1024).decode('utf-8')
        if not message:
            break  # empty read => client closed the connection
        print("From connected user: " + message)
        message = message.upper()
        print("Sending: " + message)
        conn.send(message.encode('utf-8'))
    conn.close()


if __name__ == "__main__":
    main()
|
[
"kmachine09@gmail.com"
] |
kmachine09@gmail.com
|
9ddfc031d3e3aedd7411e807526b7f4d2406e336
|
5b81884bfdf113d13976484b5c835eec3faa60eb
|
/metalang/data/messagebase.tpl.py
|
dd133b11575a944d9067b5ec4aecdf2ce6071411
|
[
"BSD-3-Clause"
] |
permissive
|
udoprog/metalang
|
acf23480263c328b71b35b3d5adba06778228d13
|
6667eb02903ed45036cb5235d93774e4be2f6a2a
|
refs/heads/master
| 2023-09-04T17:59:32.338544
| 2012-04-19T18:54:12
| 2012-04-19T18:54:12
| 2,564,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,003
|
py
|
class base_metaclass(type):
    # Metaclass for `base`: whenever a subclass is defined, record it in the
    # first ancestor's shared `__types__` registry under the subclass's
    # `__classname__`.  base.from_dict later uses this registry to look up
    # the concrete class named in a serialized dict's "__class__" key.
    def __init__(cls, name, bases, dct):
        for b in bases:
            if not hasattr(b, "__types__"):
                continue
            b.__types__[cls.__classname__] = cls
            # Only register with the first base that carries the registry.
            break
        super(base_metaclass, cls).__init__(name, bases, dct)
class base:
    # Serialization base class (Python 2 only: uses `__metaclass__` and the
    # `raise Exception, "msg"` statement form, which are syntax errors on
    # Python 3).  Subclasses set __classname__ and __keys__; instances then
    # round-trip through plain dicts via from_dict/to_dict, with the type
    # recorded under the "__class__" key.
    __metaclass__ = base_metaclass
    __classname__ = None   # serialized type tag; None on the abstract base
    __keys__ = []          # attribute names included in serialization
    __types__ = dict()     # registry shared by all subclasses (see metaclass)

    @classmethod
    def from_dict(klass, dct):
        # Rebuild an instance (recursively) from a dict produced by to_dict.
        def from_list(lst):
            # Recursively convert list elements; nested lists recurse.
            l = list()
            for v in lst:
                if isinstance(v, list):
                    l.append(from_list(v))
                elif isinstance(v, base):
                    # NOTE(review): this passes a `base` *instance* to
                    # from_dict, which expects a dict ("__class__" in dct
                    # would raise TypeError).  Likely intended
                    # isinstance(v, dict) -- confirm before relying on
                    # nested objects inside lists.
                    l.append(from_dict(v))
                else:
                    l.append(v)
            return l
        def from_dict(dct):
            # Inner helper deliberately shadows the classmethod's name.
            if "__class__" not in dct:
                raise RuntimeError, "Dict does not contain required key '__class__'"
            # Look up the registered subclass by its tag and instantiate it.
            inst = klass.__types__.get(dct["__class__"])()
            for k,v in dct.items():
                if k.startswith("__"):
                    continue          # skip metadata keys
                if k not in inst.__keys__:
                    continue          # ignore keys the class doesn't declare
                if isinstance(v, list):
                    setattr(inst, k, from_list(v))
                elif isinstance(v, dict):
                    setattr(inst, k, from_dict(v))
                else:
                    setattr(inst, k, v)
            return inst
        return from_dict(dct)

    def to_dict(self):
        # Serialize this instance (recursively) into a plain dict.
        if self.__classname__ is None:
            raise RuntimeError, "Cannot create dict from non inherited 'base' object"
        def to_list(lst):
            # Recursively convert list elements; `base` values serialize too.
            l = list()
            for v in lst:
                if isinstance(v, list):
                    l.append(to_list(v))
                elif isinstance(v, base):
                    l.append(to_dict(v))
                else:
                    l.append(v)
            return l
        def to_dict(self):
            # Inner helper shadows the method's name; called as to_dict(v)
            # from to_list with v bound to `self`.
            dct = dict()
            dct["__class__"] = self.__classname__
            for n in self.__keys__:
                if not hasattr(self, n):
                    continue          # unset attributes are omitted
                v = getattr(self,n)
                if isinstance(v, list):
                    dct[n] = to_list(v)
                elif isinstance(v, base):
                    dct[n] = v.to_dict()
                else:
                    dct[n] = v
            return dct
        return to_dict(self)
|
[
"johnjohn.tedro@gmail.com"
] |
johnjohn.tedro@gmail.com
|
e84dc3b2646e386521b8648940c88fbb0c87f802
|
a4ea8fa91e874d46992c421a8e539bf2a1bc9ba7
|
/backend/mainapp/foodtruck/admin.py
|
6f8b573a03a4b4350e6d911025f5344aa8a422bd
|
[] |
no_license
|
BlackCubes/foodtrucks-django-react
|
0b3038daf5bef5aab52a01984185ba1ef2a9324b
|
acd0d7bfefda2e946c92218a0efab1f2fea7eb6f
|
refs/heads/main
| 2023-06-13T01:06:06.497055
| 2021-05-27T14:00:22
| 2021-05-27T14:00:22
| 362,132,233
| 2
| 0
| null | 2021-05-26T19:53:39
| 2021-04-27T13:59:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,087
|
py
|
from django.contrib import admin
from jet.admin import CompactInline
from .models import Product, Truck, TruckImage
# Register your models here.
# PRODUCT INLINE
class ProductInline(CompactInline):
    """
    CompactInline (django-jet) for Product, displayed on the Truck admin page.
    (Docstring fixed: previously claimed TabularInline.)

    Fieldsets: name, slug, info, image, price, quantity, and is_available.
    Read Only: created_at.
    """

    model = Product
    fieldsets = (
        (None, {'fields': ('name', 'slug', 'info',)}),
        ('Product Image', {'fields': ('image',)}),
        ('Miscellaneous', {'fields': ('price', 'quantity', 'is_available',)}),
    )
    readonly_fields = ('created_at',)
# TRUCKIMAGE INLINE
class TruckImageInline(admin.TabularInline):
    """
    TabularInline for TruckImage, displayed on the Truck admin page.

    Fieldsets: image and is_profile_image.
    Read Only: created_at and updated_at.
    """

    model = TruckImage
    fieldsets = (
        (None, {'fields': ('image', 'is_profile_image',)}),
    )
    readonly_fields = ('created_at', 'updated_at',)
# TRUCK ADMIN
class TruckAdmin(admin.ModelAdmin):
    """
    Admin Form for Truck.

    List Filter: name and email.
    Fieldsets: name, slug, info, phone_number, email, and website.
    Read Only: uuid, created_at, and updated_at.
    Search Fields: name and email.
    Inlines: TruckImageInline and ProductInline.
    """

    list_filter = ('name', 'email',)
    fieldsets = (
        (None, {'fields': ('name', 'slug', 'info',)}),
        ('Contact', {'fields': ('phone_number', 'email', 'website',)}),
    )
    # NOTE(review): uuid/created_at/updated_at are read-only but absent from
    # `fieldsets`, so they presumably never appear on the change form --
    # confirm this is intended.
    readonly_fields = ('uuid', 'created_at', 'updated_at',)
    search_fields = ('name', 'email',)
    inlines = (TruckImageInline, ProductInline,)
# LIKE INLINE
class LikeInline(admin.TabularInline):
    """
    TabularInline for Like.

    Fieldsets: like.
    Read Only: created_at and emoji.

    NOTE(review): ``model`` is assigned the *string* 'social.Like', but
    InlineModelAdmin.model must be an actual model class -- Django does not
    resolve lazy "app.Model" strings here.  Import the model
    (``from social.models import Like``) and assign the class; confirm the
    app/model path.  Currently harmless only because ProductAdmin never
    activates this inline (see the ``inline`` typo there).
    """

    model = 'social.Like'
    fieldsets = (
        (None, {'fields': ('like',)}),
    )
    readonly_fields = ('created_at', 'emoji')
# REVIEW INLINE
class ReviewInline(admin.TabularInline):
    """
    TabularInline for Review.

    Fieldsets: review.
    Read Only: created_at.

    NOTE(review): ``model`` is assigned the *string* 'review.Review', but
    InlineModelAdmin.model must be an actual model class -- import the model
    and assign the class instead; confirm the app/model path.
    """

    model = 'review.Review'
    fieldsets = (
        (None, {'fields': ('review',)}),
    )
    readonly_fields = ('created_at',)
# PRODUCT ADMIN
class ProductAdmin(admin.ModelAdmin):
    """
    Admin Form for Product.
    List Filter: name, price, and is_available.
    Fieldsets: name, slug, info, image, price, quantity, is_available, and truck.
    Read Only: uuid, created_at, and updated_at.
    Search Fields: name.
    """
    list_filter = ('name', 'price', 'is_available',)
    fieldsets = (
        (None, {'fields': ('name', 'slug', 'info',)}),
        ('Product Image', {'fields': ('image',)}),
        ('Miscellaneous', {'fields': ('price', 'quantity', 'is_available',)}),
        ('Truck Ownership', {'fields': ('truck',)}),
    )
    readonly_fields = ('uuid', 'created_at', 'updated_at',)
    search_fields = ('name',)
    # NOTE(review): the attribute Django reads is `inlines` (plural); this
    # `inline` assignment is silently ignored, so the Like/Review inlines
    # never render. Left unchanged because LikeInline/ReviewInline also use
    # string model references Django rejects -- both must be fixed together.
    inline = (LikeInline, ReviewInline,)
# Expose Truck and Product in the Django admin with their custom ModelAdmins.
admin.site.register(Truck, TruckAdmin)
admin.site.register(Product, ProductAdmin)
|
[
"gutierrezelias1991@gmail.com"
] |
gutierrezelias1991@gmail.com
|
8500848939863dd67122555b23a6c141d94f4afd
|
66f830327b0543fdede9be8aa8b8f69f2faaaecc
|
/lib/OpenFermion-Cirq/openfermioncirq/__init__.py
|
9ecb141882a096dc65224f8ab8ba2ab32e235e43
|
[
"Apache-2.0"
] |
permissive
|
ana-tudor/Circuit_Notebooks
|
e1bb1160406bd084734c0db81740fd1752cfbb34
|
7445d06f18ca4d6b9262033d1feb0c8d185fa59d
|
refs/heads/master
| 2020-03-22T04:35:53.493037
| 2018-09-13T20:10:44
| 2018-09-13T20:10:44
| 139,508,509
| 0
| 0
| null | 2018-07-03T00:28:38
| 2018-07-03T00:28:37
| null |
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openfermioncirq.gates import (
CCZ,
CXXYY,
CYXXY,
ControlledXXYYGate,
ControlledYXXYGate,
FSWAP,
FermionicSwapGate,
Rot111Gate,
XXYY,
XXYYGate,
YXXY,
YXXYGate,
ZZ,
ZZGate)
from openfermioncirq.primitives import (
prepare_gaussian_state,
prepare_slater_determinant)
from openfermioncirq.primitives.bogoliubov_transform import bogoliubov_transform
from openfermioncirq.primitives.swap_network import swap_network
from openfermioncirq.trotter import simulate_trotter
from openfermioncirq.variational import (
HamiltonianObjective,
SplitOperatorTrotterAnsatz,
SwapNetworkTrotterAnsatz,
VariationalAnsatz,
VariationalObjective,
VariationalStudy)
# Import modules last to avoid circular dependencies
from openfermioncirq import (
gates,
optimization,
primitives,
trotter,
variational)
from ._version import __version__
|
[
"anamtudor@gmail.com"
] |
anamtudor@gmail.com
|
522032ba6be28f6e04533082dc0b05fb69eb9590
|
45e41863d617082d8e3fe1ef780f339ea983e83e
|
/ex7.py
|
9b58c07827d1c3b84549c7a15d1a377e2095538c
|
[] |
no_license
|
mtbottle/mts_project_euler_exercises
|
68de02e0f2acc28d093e49af80d6c9317825885e
|
d2ea4dcea346be9bad4517cc6bd9dbdfc0748e3f
|
refs/heads/master
| 2020-05-23T16:41:57.552530
| 2012-06-03T22:32:16
| 2012-06-03T22:32:16
| 2,707,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
# -*- coding: utf-8 -*-
#By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6^(th) prime is 13.
#What is the 10001^(st) prime number?
def get_th_prime_slow(n):
    """Return the list of the first n primes (interface kept: callers print
    the whole list).

    Fixes over the original:
    - trial division now stops at the first divisor and only tests primes
      p with p*p <= candidate, instead of always dividing by every prime
      found so far;
    - the per-candidate debug ``print`` (which dominated the runtime and
      made the function unusable for n=10001) was removed.
    """
    primes = []
    candidate = 2
    while len(primes) < n:
        is_prime = True
        for p in primes:
            if p * p > candidate:
                # No prime divisor <= sqrt(candidate) exists -> prime.
                break
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(candidate)
        candidate += 1
    return primes
if __name__ == "__main__":
    # even getting 20 takes a couple of seconds =.=
    # Python 2 print statement: prints the full list of the first 10001
    # primes; the Project Euler answer is the last element.
    print get_th_prime_slow(10001)
|
[
"mtsyne@gmail.com"
] |
mtsyne@gmail.com
|
daad330f81a58606c3b27784816536025d5b1052
|
84a1c325e3920a58aa35808209e838a0b5a3ea64
|
/practice/back_201801/Script_20180131_Custom.py
|
0d446a952b9098da12d1093d0bb7d421f66b649f
|
[] |
no_license
|
ZenryokuService/BlenderPython
|
542633cf6aad2d2e5c8baccdd735da0a5fe81c13
|
f8f5a1bb27612127829126eb58e860c03e34fd03
|
refs/heads/master
| 2021-05-08T22:29:09.632735
| 2018-04-07T09:00:20
| 2018-04-07T09:00:20
| 119,676,387
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
import bpy
# Mesh/object name and grid parameters for the generated plane grid.
name = 'TestPanel'
size = 1      # spacing between neighbouring grid vertices
rows = 2      # vertices per column (y direction)
columns = 2   # vertices per row (x direction)
def vert(column, row):
    """Return the (x, y, z) position of the grid vertex at (column, row),
    scaled by the module-level ``size``; the grid lies in the z=0 plane."""
    return (column * size, row * size, 0)
def face(column, row):
    """Return the four vertex indices of the quad for grid cell (column, row).

    ``vert`` lays vertices out column-major, so the index of vertex
    (column, row) is ``column * rows + row``.

    BUG FIX: the first corner used ``column * row + row`` (the loop variable
    ``row`` as the stride instead of the module-level ``rows``), which
    produced a wrong index for every column > 0 -- e.g. face(1, 0) returned
    (0, 4, 5, 3) instead of (2, 4, 5, 3).
    """
    return (column * rows + row
            , (column + 1) * rows + row
            , (column + 1) * rows + 1 + row
            , column * rows + 1 + row)
# Build the vertex/face lists for a (columns x rows) vertex grid:
# one quad face per grid cell, i.e. (columns-1) x (rows-1) faces.
verts = [vert(x, y) for x in range(columns) for y in range(rows)]
faces = [face(x, y) for x in range(columns -1) for y in range(rows - 1)]
# Create the mesh datablock and fill it from the raw python lists.
mesh = bpy.data.meshes.new(name)
mesh.from_pydata(verts, [], faces)
# Wrap the mesh in an object and link it into the active scene so it shows.
obj = bpy.data.objects.new(name, mesh)
bpy.context.scene.objects.link(obj)
###### for test ############
v = [x for x in range(3)]
print(v)
###### for test2 ############
w = [(x,y) for x in range(3) for y in range(2)]
print(w)
###### for test3 ############
# Same comprehensions as above but with an explicit 3x2 grid, printed for
# manual inspection of the vertex/face index layout.
verts_num = [vert(x, y) for x in range(3) for y in range(2)]
faces_num = [face(x, y) for x in range(3 -1) for y in range(2 - 1)]
print('*** verts_num ***')
print(verts_num)
print('*** faces_num ***')
print(faces_num)
|
[
"takk@takuminoMacBook-Pro.local"
] |
takk@takuminoMacBook-Pro.local
|
15f7e5eaec30b80d0f7b970bec6b9ff211a1428c
|
c13383ea93eb462542b8d714ac113b9a1806c918
|
/code/MILPtests.py
|
f972ad403191029e6ac356dfa4dfb6cda7a71c90
|
[] |
no_license
|
alexwolson/CRIO-for-Neighbourhood-Change
|
87263f9e13f2fa57c66ec2f7c676eb95f889cde4
|
b66c1aaad51ed6551ffa9b0556e5cf9edf9c6906
|
refs/heads/master
| 2022-12-06T15:53:32.705962
| 2020-08-17T18:08:20
| 2020-08-17T18:08:20
| 288,251,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,949
|
py
|
#!/usr/bin/env python
# coding: utf-8
import dill
from gurobipy import *
from shared import *
from milpshared import *
def MIP_model_BigM(LABEL, numTracts, numModels, numFeatures, runtimelimit, M_val):
    """Solve the big-M MILP that jointly assigns each tract to one cluster
    and fits one linear regression per cluster, minimizing total absolute
    training error.

    Parameters
    ----------
    LABEL : target column name passed to readindata_std.
    numTracts : number of observations (census tracts).
    numModels : number of clusters / regression models.
    numFeatures : number of feature columns used.
    runtimelimit : Gurobi time limit in seconds.
    M_val : big-M constant used to deactivate the error constraints for
        unassigned (tract, model) pairs.

    Returns
    -------
    (df, MAE, MSE, bias_list, coef_list, gap_percent) where df holds every
    decision-variable value, bias_list/coef_list the fitted intercepts and
    weights, and gap_percent the remaining MIP gap in percent.
    """
    # read in feature value and label value from dataframe
    DF = readindata_std(LABEL, numFeatures)
    df = DF.copy()
    M = M_val
    # feature data
    # create feature value list Xij
    X_val = df.iloc[:, 1:numFeatures+1].values.tolist()
    Y = df.iloc[:, -1].tolist() # create label value list Yi
    model = Model()
    # Basically, I've just dropped lines with a Z -- since the weight regularizer was removed, this part is no longer used (should not affect optimization, but good to remove it just to be safe). -Scott
    # Add variables
    X = {}
    E = {}
    W = {}
    B = {}
    C = {}
    # X: fixed data, E: absolute errors, W: weights, B: intercepts,
    # C: binary assignment of tract i to model k.
    for i in range(numTracts):
        for j in range(numFeatures):
            X[(i, j)] = X_val[i][j]
    for i in range(numTracts):
        for k in range(numModels):
            E[(i, k)] = model.addVar(
                lb=0, vtype=GRB.CONTINUOUS, name="E%d,%d" % (i, k))
    for j in range(numFeatures):
        for k in range(numModels):
            W[(j, k)] = model.addVar(vtype=GRB.CONTINUOUS, name="W%d,%d" % (j, k))
    for k in range(numModels):
        B[k] = model.addVar(vtype=GRB.CONTINUOUS, name="B%d" % k)
    for i in range(numTracts):
        for k in range(numModels):
            C[(i, k)] = model.addVar(vtype=GRB.BINARY, name="C%d,%d" % (i, k))
    model.update()
    # Add constraints
    # Each tract is assigned to exactly one model.
    for i in range(numTracts):
        model.addConstr(quicksum(C[(i, k)] for k in range(numModels)) == 1)
    # The next two families linearize |prediction - Y| <= E for the assigned
    # model; the big-M term relaxes the bound when C[i,k] == 0.
    for i in range(numTracts):
        for k in range(numModels):
            model.addConstr(quicksum(W[(j, k)]*X[(i, j)] for j in range(
                numFeatures)) + B[k] - Y[i] - E[(i, k)] <= M*(1-C[(i, k)]))
    for i in range(numTracts):
        for k in range(numModels):
            model.addConstr(quicksum(-W[(j, k)]*X[(i, j)] for j in range(
                numFeatures)) - B[k] + Y[i] - E[(i, k)] <= M*(1-C[(i, k)]))
    # set objective
    model.setObjective( quicksum( quicksum( E[(i,k)] for i in range(numTracts)) for k in range(numModels)))
    model.Params.timeLimit = runtimelimit  # 12 hours
    # model.Params.LogFile = filepath+"MIP_bigM_real_log_m"+str(numModels)+"_f"+str(numFeatures)
    model.optimize()
    # model.write(filepath+"MIP_bigM_real_m"+str(numModels)+"_f"+str(numFeatures)+".sol")
    # Collect every decision-variable value into a dataframe.
    df = pd.DataFrame(columns=['Dec_Var', 'Val'])
    for v in model.getVars():
        df = df.append({'Dec_Var': v.varName, 'Val': v.x}, ignore_index=True)
    error_list = []
    error_list = [x.X for x in model.getVars() if x.VarName.find('E') != -1]
    # for b in myrange(0,numTracts*numModels-1,numModel):
    # if model_list_raw[b]==1:
    # mo
    bias_list = [x.X for x in model.getVars() if x.VarName.find('B') != -1]
    coef_list = [x.X for x in model.getVars() if x.VarName.find('W') != -1]
    # MAE/MSE sum over ALL (i, k) errors; errors of unassigned pairs are
    # driven to 0 by the objective, so this equals the per-tract error sum.
    MAE = 0
    for a in range(0, numTracts*numModels):
        MAE = MAE + error_list[a]
    MAE = MAE/numTracts
    MSE = 0
    for a in range(0, numTracts*numModels):
        MSE = MSE + math.pow(error_list[a], 2)
    MSE = MSE/numTracts
    # weights_df = df.iloc[211*numModels:(211*numModels+numFeatures*numModels),:]
    # intercept_df = df.iloc[(211*numModels+numFeatures*numModels):(211*numModels+numFeatures*numModels+numModels),:]
    # model_df = df.iloc[(211*numModels+numFeatures*numModels+numModels):(211*numModels+numFeatures*numModels+numModels+211*numModels),:]
    # return df, error, weights_df, intercept_df, model_df,model.MIPGap*100
    return df, MAE, MSE, bias_list, coef_list, model.MIPGap*100
def collect_result(K, F):
    """Run the big-M MILP for every (#clusters k, #features f) combination
    with k in 2..K and f in 2..F, pickling intermediate results per k.

    Returns (MSElist, MAElist, (Coeflist, Biaslist), resultlist): each outer
    list is indexed by k-2, each inner list by f-2.

    Fixes over the original: the pickle file handle was bound with ``as f``,
    shadowing the feature-count loop variable ``f``; it is now ``fh``. The
    unused local ``bias_List`` accumulation was removed.
    """
    # k rows, f columns (k = # of clusters, f = # of features)
    MSElist = []
    MAElist = []
    Coeflist = []
    Biaslist = []
    resultlist = []
    for k in tqdm(range(2, K+1)):
        MSElist_sameCluster = []
        MAElist_sameCluster = []
        Coeflist_sameCluster = []
        Biaslist_sameCluster = []
        resultlist_sameCluster = []
        for f in range(2, F+1):
            # run the MILP model (211 tracts, 1-hour time limit)
            M_val = pairwise_distance(211, 'change_incpc', f, k)
            result, MAE, MSE, bias_list, coef_list, _ = MIP_model_BigM(
                'change_incpc', 211, k, f, 3600, M_val)
            # recording training MAE, MSE for MILP
            MAElist_sameCluster.append(MAE)
            MSElist_sameCluster.append(MSE)
            # recording Bias term for MILP
            Biaslist_sameCluster.append(bias_list)
            # recording regression coefficients for MILP: coef_list is laid
            # out feature-major, so cluster a's weights live at a + b*k.
            coef_model = []
            for a in range(0, k):
                # getting all coefficients for one cluster
                flat_list = []
                for b in range(0, f):
                    flat_list.append(coef_list[a+b*k])
                coef_model.append(flat_list)
            feature_list = list(readindata_std(
                'change_incpc', f).iloc[:, 1:f+1].columns)
            Coef = pd.DataFrame({'feature': feature_list})
            for c in range(0, k):
                Coef['Cluster'+str(c+1)] = coef_model[c]
            Coeflist_sameCluster.append(Coef)
            # convert result into dataframe, each tract paired with its
            # cluster assignment (C variables with value ~1).
            result_df = result.copy()
            tractid_df = readindata_std('change_incpc', f)
            result_df = result_df[result_df['Dec_Var'].str.contains("C")]
            result_df = result_df[result_df['Val'] > 0.9]
            model_list = []
            for _, row in result_df.iterrows():
                assigned_label_text = row['Dec_Var']
                # NOTE(review): using only the final character of "Ci,k"
                # caps cluster ids at 9 -- confirm k never exceeds 10.
                assigned_label = int(assigned_label_text[-1])+1
                model_list.append(assigned_label)
            tractid_df = tractid_df.assign(model=model_list)
            tractid_df = tractid_df.set_index('tractid')
            resultlist_sameCluster.append(tractid_df)
            with open(f'{resultpath}milp/rawresults/result_{k}{f}.pickle', 'wb') as fh:
                pickle.dump((resultlist_sameCluster, Coeflist_sameCluster), fh)
        # recording result for k-means as the initialization with lowest MAE
        MAElist.append(MAElist_sameCluster)
        MSElist.append(MSElist_sameCluster)
        Coeflist.append(Coeflist_sameCluster)
        Biaslist.append(Biaslist_sameCluster)
        resultlist.append(resultlist_sameCluster)
    return MSElist, MAElist, (Coeflist, Biaslist), resultlist
def overlap(K, F, MILP_result_df):
    """Compare MILP cluster assignments against saved k-means assignments.

    For every (k, f) combination, greedily pairs each k-means cluster
    (largest first) with the MILP cluster sharing the most tracts, computes
    Jaccard-style overlap statistics, prints them, and draws a map marking
    matched tracts. Returns seven parallel lists, one entry per (k, f) pair.
    """
    MILP_result = MILP_result_df.copy()
    # Load the previously pickled k-means assignment dataframes.
    with open(resultpath + 'kmeansresultlist.pickle','rb') as f:
        Kmeans_result_df = pickle.load(f)
    Kmeans_result = Kmeans_result_df.copy()
    # for each combination of # of clusters & # of features
    kmeans_pairID_list = []
    kmeans_intersection_list = []
    Jaccard_AB_list = []
    Jaccard_A_list = []
    Jaccard_B_list = []
    Jaccard_index_sum_list = []
    Jaccard_index_min_list = []
    for k in range(2, K+1):
        for f in range(2, F+1):
            kmeans_cluster = []
            MILP_cluster = []
            # store tractid within each cluster for kmeans and MILP seperately
            for a in range(0, k):
                Kmeans_result[k-2][f-2] = Kmeans_result[k-2][f-2].reset_index()
                temp_kmeans = Kmeans_result[k-2][f - 2].loc[Kmeans_result[k-2][f-2]['model'] == a+1]
                kmeans_cluster.append(
                    temp_kmeans['tractid'].values.flatten().tolist())
                MILP_result[k-2][f-2] = MILP_result[k-2][f-2].reset_index()
                temp_MILP = MILP_result[k-2][f - 2].loc[MILP_result[k-2][f-2]['model'] == a+1]
                MILP_cluster.append(
                    temp_MILP['tractid'].values.flatten().tolist())
                Kmeans_result[k-2][f-2] = Kmeans_result[k - 2][f-2].set_index('tractid')
                MILP_result[k-2][f-2] = MILP_result[k - 2][f-2].set_index('tractid')
            # pair kmeans and MILP cluster to maximize interseted elements
            kmeans_pairID = []
            kmeans_intersection = []
            Jaccard_AB = []
            Jaccard_A = []
            Jaccard_B = []
            Jaccard_index_sum = []
            Jaccard_index_min = []
            kmeans_cluster_size = []
            kmeans_cluster_size_ordered = []
            for x in range(0, k):
                kmeans_cluster_size.append(len(kmeans_cluster[x]))
                kmeans_cluster_size_ordered.append(len(kmeans_cluster[x]))
            # Process k-means clusters from largest to smallest so the big
            # clusters get first pick of a MILP partner.
            kmeans_cluster_size_ordered.sort(reverse=True)
            kmeans_cluster_order = []
            for y in range(0, k):
                kmeans_cluster_order.append(
                    kmeans_cluster_size.index(kmeans_cluster_size_ordered[y]))
            for z in range(0, k):
                b = kmeans_cluster_order[z]
                intersection_list = []
                intersection_length_list = []
                for c in range(0, k):
                    intersection = []
                    intersection = list(
                        set(kmeans_cluster[b]).intersection(MILP_cluster[c]))
                    intersection_list.append(intersection)
                    intersection_length_list.append(len(intersection))
                milpID = intersection_length_list.index(
                    max(intersection_length_list))
                # Skip MILP clusters already paired to an earlier k-means one.
                while (milpID in kmeans_pairID):
                    intersection_length_list[milpID] = -1
                    milpID = intersection_length_list.index(
                        max(intersection_length_list))
                kmeans_pairID.append(milpID)
                kmeans_intersection.append(intersection_list[milpID])
                Jaccard_AB.append(intersection_length_list[milpID])
                Jaccard_A.append(len(kmeans_cluster[b]))
                Jaccard_B.append(len(MILP_cluster[milpID]))
                # jaccard index over sum
                Jaccard_index_sum.append(intersection_length_list[milpID]/(len(
                    kmeans_cluster[b])+len(MILP_cluster[milpID])-intersection_length_list[milpID]))
                if len(MILP_cluster[milpID]) != 0:
                    Jaccard_index_min.append(
                        intersection_length_list[milpID]/min(len(kmeans_cluster[b]), len(MILP_cluster[milpID])))
                else:
                    Jaccard_index_min.append(
                        intersection_length_list[milpID]/len(kmeans_cluster[b]))
            kmeans_pairID_list.append(kmeans_pairID)
            kmeans_intersection_list.append(kmeans_intersection)
            Jaccard_AB_list.append(Jaccard_AB)
            Jaccard_A_list.append(Jaccard_A)
            Jaccard_B_list.append(Jaccard_B)
            Jaccard_index_sum_list.append(Jaccard_index_sum)
            Jaccard_index_min_list.append(Jaccard_index_min)
            # visualize the overlap on a map
            matched_tracts = []
            for d in range(0, k):
                matched_tracts = matched_tracts + kmeans_intersection[d]
            matched_tract_df = Kmeans_result_df[k-2][f-2].copy()
            # Re-use the 'model' column as a matched(1)/unmatched(0) flag.
            for index, row in matched_tract_df.iterrows():
                if (index in matched_tracts):
                    matched_tract_df.at[index, 'model'] = 1
                else:
                    matched_tract_df.at[index, 'model'] = 0
            print(str(k) + ' cluster, '+str(f)+' feature:')
            for e in range(0, k):
                print('Jaccard index (sum bottom) for cluster ' +
                      str(e+1)+' :'+str(Jaccard_index_sum[e]))
                print('Jaccard index (min bottom) for cluster ' +
                      str(e+1)+' :'+str(Jaccard_index_min[e]))
                print('Jaccard AnB for cluster ' +
                      str(e+1)+' :'+str(Jaccard_AB[e]))
                print('Jaccard A for cluster ' +
                      str(e+1)+' :'+str(Jaccard_A[e]))
                print('Jaccard B for cluster ' +
                      str(e+1)+' :'+str(Jaccard_B[e]))
            # yellow is not matched, green is matched tracts
            cluster_map(matched_tract_df, k,f,'matched_milp')
    return kmeans_pairID_list, kmeans_intersection_list, Jaccard_AB_list, Jaccard_A_list, Jaccard_B_list, Jaccard_index_sum_list, Jaccard_index_min_list
if __name__ == '__main__':
    # Run the full MILP sweep, then pickle both the overlap statistics and
    # the raw MILP assignments (protocol 4 for large dataframes).
    MILP_result = display_result(2, 2, 'milp_test1', collect_result)
    with open(f'{resultpath}bigmresults/results.pickle','wb') as f:
        pickle.dump(overlap(
            5, 5, MILP_result), f, protocol=4)
    with open(f'{resultpath}bigmresults/milp.pickle','wb') as f:
        pickle.dump(MILP_result,f,protocol=4)
|
[
"alex.olson@outlook.com"
] |
alex.olson@outlook.com
|
925131f2136ffb980d987ee5de1aa839ffe74a4a
|
b5348fac2c06feee3ff590107c3eb8cb744bffc8
|
/sortphotos/ExifTool.py
|
7cb6c445159ef1cf939e77b58bb28fa1f6dbf70b
|
[] |
no_license
|
arcadien/sortphotos
|
620fa03a9de4eeea64e017b22fe69f207cd06e18
|
17ca81c85010588c010501e8302dd7d5fec4373d
|
refs/heads/master
| 2023-03-08T13:12:41.370774
| 2023-02-26T15:14:08
| 2023-02-26T15:14:08
| 443,077,242
| 1
| 0
| null | 2022-01-01T09:43:38
| 2021-12-30T13:04:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,996
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import with_statement
import subprocess
import os
import sys
try:
import json
except ImportError:
import simplejson as json
import locale
# Setting locale to the 'local' value
locale.setlocale(locale.LC_ALL, '')
# Perl script for Exif data extraction
exiftool_location = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'exiftool',
'exiftool')
# this class is based on code from Sven Marnach
# http://stackoverflow.com/questions/10075115/call-exiftool-from-a-python-script
class ExifTool(object):
    """used to run ExifTool from Python and keep it open"""

    # Marker ExifTool prints after each -execute, signalling end of output.
    sentinel = "{ready}"

    def __init__(self, executable=exiftool_location, verbose=False):
        self.executable = executable
        self.verbose = verbose

    def __enter__(self):
        # -stay_open True keeps one perl process alive across many commands,
        # avoiding the cost of spawning ExifTool per file.
        self.process = subprocess.Popen(
            ['perl', self.executable, "-stay_open", "True", "-@", "-"],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Tell ExifTool to shut down.
        # NOTE(review): the process is not wait()ed on, so it may linger
        # briefly after the context exits -- confirm that is acceptable.
        self.process.stdin.write(b'-stay_open\nFalse\n')
        self.process.stdin.flush()

    def execute(self, *args):
        # Send one command (args joined by newlines) and read raw output
        # until the sentinel appears, then strip the sentinel off.
        args = args + ("-execute\n",)
        self.process.stdin.write(str.join("\n", args).encode('utf-8'))
        self.process.stdin.flush()
        output = ""
        fd = self.process.stdout.fileno()
        while not output.rstrip(' \t\n\r').endswith(self.sentinel):
            increment = os.read(fd, 4096)
            if self.verbose:
                sys.stdout.write(increment.decode('utf-8'))
            output += increment.decode('utf-8')
        return output.rstrip(' \t\n\r')[:-len(self.sentinel)]

    def get_metadata(self, *args):
        # Returns parsed JSON metadata, or None (after printing a message)
        # when ExifTool produced no parseable output.
        try:
            return json.loads(self.execute(*args))
        except ValueError:
            sys.stdout.write('No files to parse or invalid data\n')
|
[
"aurelien@skima.fr"
] |
aurelien@skima.fr
|
a22a3fed5bab16b99ce7847a911199cc7f5fb0fe
|
b2c0558a7574ba51f450fc63bf114c5a4a574e7d
|
/magnetodb/openstack/common/rpc/impl_fake.py
|
8e31fed6414a8d66f6313a20d47f4c871276f8e6
|
[] |
no_license
|
purpen/magnetodb
|
d1e31e400cd2b29dfc6a551c4c53cfbdc2443283
|
acdfb1bcf749a0bfc4572fe42af2f47ff8d0c8f3
|
refs/heads/master
| 2021-01-21T07:30:01.920182
| 2014-09-30T16:12:10
| 2014-10-08T14:28:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,825
|
py
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake RPC implementation which calls proxy methods directly with no
queues. Casts will block, but this is very useful for tests.
"""
import inspect
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
# jsonutils has some extra logic to automatically convert objects to primitive
# types so that they can be serialized. We want to catch all cases where
# non-primitive types make it into this code and treat it as an error.
import json
import time
import eventlet
import six
from magnetodb.openstack.common.rpc import common as rpc_common
CONSUMERS = {}
class RpcContext(rpc_common.CommonRpcContext):
    """RPC context that buffers replies in-process for the fake driver."""

    def __init__(self, **kwargs):
        super(RpcContext, self).__init__(**kwargs)
        self._response = []
        self._done = False

    def deepcopy(self):
        """Return a copy sharing this context's reply buffer and done flag."""
        clone = self.__class__(**self.to_dict())
        clone._response = self._response
        clone._done = self._done
        return clone

    def reply(self, reply=None, failure=None, ending=False):
        """Record one (reply, failure) pair; ending=True closes the stream
        without recording anything further."""
        if ending:
            self._done = True
        if not self._done:
            self._response.append((reply, failure))
class Consumer(object):
    """In-process consumer: dispatches RPC messages straight to a proxy
    object in a greenthread, no queue involved."""

    def __init__(self, topic, proxy):
        self.topic = topic
        self.proxy = proxy

    def call(self, context, version, method, namespace, args, timeout):
        # Event used to hand the result list (or an exception) to the caller.
        done = eventlet.event.Event()

        def _inner():
            # NOTE(review): a fresh RpcContext is built for replies, but the
            # original `context` is what gets dispatched -- upstream quirk;
            # confirm ctxt.reply() is how proxies are expected to respond.
            ctxt = RpcContext.from_dict(context.to_dict())
            try:
                rval = self.proxy.dispatch(context, version, method,
                                           namespace, **args)
                res = []
                # Caller might have called ctxt.reply() manually
                for (reply, failure) in ctxt._response:
                    if failure:
                        six.reraise(failure[0], failure[1], failure[2])
                    res.append(reply)
                # if ending not 'sent'...we might have more data to
                # return from the function itself
                if not ctxt._done:
                    if inspect.isgenerator(rval):
                        for val in rval:
                            res.append(val)
                    else:
                        res.append(rval)
                done.send(res)
            except rpc_common.ClientException as e:
                # Expected (client-declared) exception: unwrap the original.
                done.send_exception(e._exc_info[1])
            except Exception as e:
                done.send_exception(e)

        thread = eventlet.greenthread.spawn(_inner)
        if timeout:
            # Poll once per second; kill the worker if the deadline passes.
            start_time = time.time()
            while not done.ready():
                eventlet.greenthread.sleep(1)
                cur_time = time.time()
                if (cur_time - start_time) > timeout:
                    thread.kill()
                    raise rpc_common.Timeout()
        return done.wait()
class Connection(object):
    """Connection object: tracks the consumers it registered so close()
    can deregister them from the module-level CONSUMERS table."""

    def __init__(self):
        self.consumers = []

    def create_consumer(self, topic, proxy, fanout=False):
        """Register a new Consumer for *topic*; *fanout* is ignored here."""
        consumer = Consumer(topic, proxy)
        self.consumers.append(consumer)
        CONSUMERS.setdefault(topic, []).append(consumer)

    def close(self):
        """Deregister every consumer this connection created."""
        while self.consumers:
            consumer = self.consumers.pop()
            CONSUMERS[consumer.topic].remove(consumer)

    def consume_in_thread(self):
        # Nothing to run: dispatch happens synchronously in Consumer.call.
        pass
def create_connection(conf, new=True):
    """Create a connection (the fake driver ignores *conf* and *new*)."""
    return Connection()
def check_serialize(msg):
    """Make sure a message intended for rpc can be serialized.

    Raises TypeError (from json) when *msg* contains non-primitive types.
    """
    json.dumps(msg)
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times.

    Dispatches *msg* to the first consumer registered on *topic*; raises
    rpc_common.Timeout when none exists. Returns None for method-less msgs.
    """
    check_serialize(msg)
    method = msg.get('method')
    if not method:
        return
    try:
        consumer = CONSUMERS[topic][0]
    except (KeyError, IndexError):
        raise rpc_common.Timeout("No consumers available")
    return consumer.call(context, msg.get('version'), method,
                         msg.get('namespace'), msg.get('args', {}), timeout)
def call(conf, context, topic, msg, timeout=None):
    """Sends a message on a topic and wait for a response."""
    results = multicall(conf, context, topic, msg, timeout)
    # NOTE(vish): return the last result from the multicall
    results = list(results)
    return results[-1] if results else None
def cast(conf, context, topic, msg):
    # Fire-and-forget send: validates serializability, then delegates to
    # call() and deliberately swallows any failure (best-effort semantics).
    check_serialize(msg)
    try:
        call(conf, context, topic, msg)
    except Exception:
        pass
def notify(conf, context, topic, msg, envelope):
    # No-op notification: only validates that *msg* is JSON-serializable.
    check_serialize(msg)
def cleanup():
    # The fake driver holds no resources to release.
    pass
def fanout_cast(conf, context, topic, msg):
    """Cast to all consumers of a topic, swallowing per-consumer errors."""
    check_serialize(msg)
    method = msg.get('method')
    if not method:
        return
    version = msg.get('version')
    namespace = msg.get('namespace')
    payload = msg.get('args', {})
    for consumer in CONSUMERS.get(topic, []):
        try:
            consumer.call(context, version, method, namespace, payload, None)
        except Exception:
            # Fanout casts are best-effort; one bad consumer must not stop
            # delivery to the rest.
            pass
|
[
"charles_wang@symantec.com"
] |
charles_wang@symantec.com
|
6aa8468ac534818aee698105788c1b1d7cdff263
|
4653f1798fab017f0abe44f5e6fc97d8bf33e720
|
/validacao.py
|
ededaf484a250acf0d70cd2477815db06142f0c1
|
[] |
no_license
|
Marcos001/PDI-Medical
|
ee1299fdb5c2422b454877e77e57df6c127dd142
|
61468824bc5b64570fb16654aa371445ca21a7ea
|
refs/heads/master
| 2021-09-13T18:31:58.007705
| 2018-05-03T02:55:50
| 2018-05-03T02:55:50
| 106,943,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
def acuracia(vn, fp, fn, vp):
    """Accuracy: fraction of all cases classified correctly."""
    correct = vp + vn
    total = vp + vn + fp + fn
    return correct / total
def especificidade(vn, fp):
    """Specificity (true-negative rate): vn / (vn + fp)."""
    return vn / (vn + fp)
def sensibilidade(fn, vp):
    """Sensitivity (true-positive rate / recall): vp / (vp + fn)."""
    return vp / (vp + fn)
def main_validacao(vn, fp, fn, vp):
    """
    Print accuracy, specificity and sensitivity as percentages.

    :param vn: true negatives (original docstring said "verdadeiros
        positivos", which was a typo)
    :param fp: false positives
    :param fn: false negatives
    :param vp: true positives
    :return: None -- the three metrics are printed, not returned
    """
    print(' Acuracia______________[ %.2f%s]' %((acuracia(vn, fp, fn, vp)*100), "%"))
    print(' Especificidade________[ %.2f%s ]' %((especificidade(vn, fp)*100), "%"))
    print(' Sensibilidade_________[ %.2f%s ]' %((sensibilidade(fn, vp)*100),"%"))
|
[
"santosMsantos01@gmail.com"
] |
santosMsantos01@gmail.com
|
d11d9ad105d091f0c56bdbe19bace7ffe4cf9315
|
8d14370115c39d92dfc12524d398a3c80694ad8a
|
/Companies/models.py
|
d6bf8d5569a97e0ebfdc67b776a4524ba41fc920
|
[] |
no_license
|
shirish-babbur/Stock-Market-Backend
|
32dd59c15976322243b34c5894317c0d58d4c112
|
709b64e939e88fca1de32aed138a26cfcaf62b6b
|
refs/heads/master
| 2021-01-23T23:56:28.979418
| 2018-02-24T13:53:29
| 2018-02-24T13:53:29
| 122,744,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
from django.db import models
class Stock(models.Model):
    """Stock quote: ticker symbol plus open/close prices and trade volume."""
    ticker = models.CharField(max_length = 10)  # e.g. "AAPL"
    # Field name shadows the builtin open(); acceptable as a Django field.
    open = models.FloatField()
    close = models.FloatField()
    volume = models.IntegerField()
    def __str__(self):
        # Admin/list display shows the ticker symbol.
        return self.ticker
|
[
"bbshirish@gmail.com"
] |
bbshirish@gmail.com
|
7591be7f49cf78131dd30a0adbbd95a3e6c17975
|
327e23419a11f73ffbb2382e718d7159d4c55d85
|
/main.py
|
cc5c4bfb95af10a010e36ecd31ae9ea462b44f97
|
[] |
no_license
|
RiturajJain/ecommerce_application
|
e3d0775e9b7d080674c72916fcfe6494b8d5b3e0
|
1bdd65d8df99ad334e0f955b4a558fd22ac68947
|
refs/heads/main
| 2023-04-15T22:13:28.967430
| 2021-04-19T08:59:09
| 2021-04-19T08:59:09
| 359,385,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
"""
This module contains code to start the Application and initialize it with some dummy data.
It can be extended to take the input from the User.
Key Points to Note:
1. At most places, I have raise and caught general Exception for simplicity purpose. But in a real system, this should be avoided as it can hide some bugs and make difficult to debug. Instead, specific or custom exceptions should be defined and used.
2. Getters and Setters should be defined for all the attributes in a class. They have been avoided here for simplicity.
"""
from Address import Address
from ECommerceSystem import ECommerceSystem
from Product import Product
from Storage import Storage
from User import User
from UserAuthSystem import UserAuthSystem
if __name__ == "__main__":
    # Wire up the storage singleton and auth system, then seed dummy data.
    storage = Storage.get_instance()
    user_auth = UserAuthSystem(storage)
    address = Address("No. 5, 5th Main, Domlur 2nd Stage", "Bangalore", "Karnataka", "India", 560071)
    user = User("rituraj.jain2020@gmail.com", "randompass", "Rituraj Jain", address, "23-07-1998")
    product1 = Product("Moto G8 PowerLite", "Great phone at an affordable price", 10500)
    product2 = Product("Mi 4A PRO Android LED TV", "80 cm (32 inches) HD Ready | Black", 14999)
    product3 = Product("Fastrack reflex 3.0", "Full touch, color display, Heart rate monitor, Dual- tone silicone strap and up to 10 days battery life", 2495)
    storage.add_user(user)
    storage.add_product(product1)
    storage.add_product(product2)
    storage.add_product(product3)
    # Hand control to the interactive e-commerce loop.
    ecommerce_system = ECommerceSystem(storage, user_auth)
    ecommerce_system.start()
|
[
"rituraj.jain@embibe.com"
] |
rituraj.jain@embibe.com
|
6318d574c8476c0123d824a8058674cf3d42c494
|
0e7892977c6a73e8101f59938b343dc93517d6ad
|
/python/homework/Stepan/factorial.py
|
5e3d4e7046f9bb9826c0e7648037bf46e2bb3345
|
[] |
no_license
|
ITC-Vanadzor/ITC-Vanadzor
|
b24c467600b7fe4002222e2e566096b8476c7bb5
|
b96ff8c9bddd727c3d17f8f850764f75f20bedf9
|
refs/heads/master
| 2016-09-06T11:19:48.612034
| 2015-04-09T21:05:00
| 2015-04-09T21:05:00
| 27,506,035
| 3
| 1
| null | 2014-12-04T18:51:19
| 2014-12-03T20:24:17
|
Python
|
UTF-8
|
Python
| false
| false
| 250
|
py
|
#!/usr/bin/python
def factorial(n):
    # Recursive factorial with debug tracing (Python 2 print statements).
    print "called factorial func"
    print "n = ", n
    if n == 0:
        return 1
    else:
        # NOTE(review): n < 0 recurses without bound (no guard) -- confirm
        # callers only ever pass non-negative integers.
        return n * factorial(n-1)
'''
barev
'''
# Read an integer from the user and print its factorial (Python 2 I/O;
# the raw_input prompt text is in Armenian transliteration).
n=raw_input(" nermuceq n i arjeqy ")
n=int(n)
print factorial(n)
|
[
"StepanChaparyan"
] |
StepanChaparyan
|
f8ccb414dca4f46b66a521da2a98c639c65d0882
|
52c808be4b58407dbd845de2ebb0022132fa0312
|
/pcapsim/print.py
|
6bcfb0952a0a82ebbc16d9d593f1bd21626337e1
|
[] |
no_license
|
ejialan/tools
|
f2b9ecdd02634f59958f66faf6718c3101b914e3
|
4bf76d1556c48abe64696672d4a60a7d7e57ae07
|
refs/heads/master
| 2021-01-21T14:02:05.439053
| 2018-02-08T04:54:20
| 2018-02-08T04:54:20
| 5,930,979
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
#!/usr/bin/env python
import sys
from scapy.all import *
from scapy.utils import rdpcap
pkts=rdpcap(sys.argv[1]) # could be used like this rdpcap("filename",500) fetches first 500 pkts
# Dump every packet in the capture, separated by a rule line
# (Python 2 print statements; pkt.show() prints the layer breakdown).
for pkt in pkts:
    print """____________________________________"""
    pkt.show()
    print ""
|
[
"jiangang.lan@ericsson.com"
] |
jiangang.lan@ericsson.com
|
d985604fbc47ea455b2eaa4b9c27a94e5c6043c1
|
989d7481599fd6e974a9d7fb0dd22cf061fc8038
|
/blog_content/apps.py
|
d1c7730014ec648c51fc95b3d6fbdd8881fad66f
|
[] |
no_license
|
KyalSmith/first_tech_blog
|
bb608c58d3d10f2cc3acf22fef10bd376364e154
|
3b0f6df2a2298240e9eabe0ee818554b07bc0613
|
refs/heads/master
| 2022-12-23T04:38:31.179288
| 2019-01-21T08:02:41
| 2019-01-21T08:02:41
| 160,322,667
| 0
| 0
| null | 2022-12-08T03:00:46
| 2018-12-04T08:18:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 163
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class BlogContentConfig(AppConfig):
    """Django application configuration for the blog_content app."""
    name = 'blog_content'
|
[
"kyal.smith@gmail.com"
] |
kyal.smith@gmail.com
|
f095bd2dc3daf2803ffc5064395025e8223b2020
|
354bbbb89c36ce49b4c054e0cf0a980a4856e7e6
|
/sentimentanalyser.py
|
17e6cb8f4ab7fc4612643910779f3f0bdf37dcf5
|
[] |
no_license
|
mgoliyad/Deep-NLP
|
9075a19dc27cff5ec3351e64982fa8fcb7369da0
|
8879d73f25b459bcecd415e5b715cb2c9560d2fe
|
refs/heads/master
| 2020-07-28T22:43:12.699510
| 2019-09-24T16:41:11
| 2019-09-24T16:41:11
| 209,565,296
| 0
| 0
| null | 2019-09-20T12:51:27
| 2019-09-19T13:45:59
| null |
UTF-8
|
Python
| false
| false
| 8,237
|
py
|
import plac
import pathlib
from keras.layers import LSTM, Dense, Embedding, Bidirectional
from keras.models import Sequential
from keras.layers import TimeDistributed
from keras.optimizers import Adam
from spacy.compat import pickle
import spacy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
nlp = spacy.load("en_vectors_web_lg")
def get_labelled_sentences(docs, doc_labels):
    """Flatten docs into sentences, replicating each doc's label per sentence.

    Returns (sentences, labels) where labels is an int32 numpy array with
    one entry per sentence.

    BUG FIX: the array was built with ``numpy.asarray`` but this module only
    imports ``numpy as np``, so calling this function raised NameError.
    """
    labels = []
    sentences = []
    for doc, y in zip(docs, doc_labels):
        for sent in doc.sents:
            sentences.append(sent)
            labels.append(y)
    return sentences, np.asarray(labels, dtype="int32")
def plot_series(series_1, series_2, format="-", title=None, legend=None):
    # Plot two series on one axes and show the figure (blocks until closed).
    # NOTE(review): the `format` parameter is unused and shadows the builtin
    # format() -- confirm whether it was meant to be passed to plt.plot.
    plt.plot(series_1)
    plt.plot(series_2)
    plt.title(title)
    plt.legend(legend, loc='upper left')
    plt.show()
class myCallback(tf.keras.callbacks.Callback):
    # Early-stops training once training accuracy exceeds 0.99.
    # NOTE(review): `logs={}` is a mutable default argument (harmless here,
    # never mutated, but worth replacing with None). The 'acc' key name is
    # Keras-version dependent ('accuracy' in tf.keras 2.x) -- confirm; with
    # a missing key, logs.get('acc') is None and the comparison raises.
    def on_epoch_end(self, epoch, logs={}):
        if(logs.get('acc')>0.99):
            #if(logs.get('loss')<0.4):
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True
def get_vectors(docs, max_length):
    """Build an int32 matrix of vocab vector-row ids, one row per doc.

    Rows are zero-padded; docs longer than max_length are truncated.
    Tokens whose orth has no vector row (find returns a negative id) get 0.
    """
    docs = list(docs)
    ids = np.zeros((len(docs), max_length), dtype="int32")
    for row, doc in enumerate(docs):
        for col, token in enumerate(doc):
            if col >= max_length:
                break
            vector_id = token.vocab.vectors.find(key=token.orth)
            ids[row, col] = vector_id if vector_id >= 0 else 0
    return ids
def train_model(
    train_texts,
    train_labels,
    val_texts,
    val_labels,
    lstm_shape,
    lstm_settings,
    lstm_optimizer,
    batch_size=100,
    nb_epoch=5,
    by_sentence=False,
):
    """Vectorize texts with spaCy, train the BiLSTM sentiment model, plot
    accuracy/loss curves, and print validation texts it misclassified.

    Returns the trained Keras model.

    BUG FIX: the early-stopping callback was instantiated as ``my_Callback()``
    but the class defined in this module is ``myCallback`` -- the original
    raised NameError before training started.
    """
    print("Loading spaCy")
    nlp.add_pipe(nlp.create_pipe("sentencizer"))
    embeddings = get_embeddings(nlp.vocab)
    model = compile_model(embeddings, lstm_shape, lstm_settings)
    print("Parsing texts...")
    train_docs = list(nlp.pipe(train_texts))
    val_docs = list(nlp.pipe(val_texts))
    if by_sentence:
        # Explode documents into per-sentence samples with inherited labels.
        train_docs, train_labels = get_labelled_sentences(train_docs, train_labels)
        val_docs, val_labels = get_labelled_sentences(val_docs, val_labels)
    train_X = get_vectors(train_docs, lstm_shape["max_length"])
    val_X = get_vectors(val_docs, lstm_shape["max_length"])
    callbacks = myCallback()
    estimator = model.fit(
        train_X,
        train_labels,
        validation_data=(val_X, val_labels),
        epochs=nb_epoch,
        batch_size=batch_size,
        callbacks=[callbacks]
    )
    plot_series(estimator.history['acc'], estimator.history['val_acc'], title='model accuracy', legend=['train', 'valid'])
    plot_series(estimator.history['loss'], estimator.history['val_loss'], title='model loss', legend=['train', 'valid'])
    # Threshold sigmoid outputs at 0.5 and report every misclassified text.
    predicted_prob = model.predict(val_X)
    prediction = np.where(predicted_prob >=0.5, 1, 0)
    count=0
    for i in range(len(val_labels)):
        #print(prediction[i], val_labels.iloc[i])
        if (prediction[i] != val_labels.iloc[i]):
            if count ==0:
                print('Here is the list of misclassified texts:\n')
            count+=1
            print(val_docs[i], '\n')
    print('We got ', count, 'out of ', val_labels.shape[0], 'misclassified texts')
    return model
def compile_model(embeddings, shape, settings):
    """Build and compile the stacked bidirectional-LSTM classifier.

    Architecture: frozen pretrained embeddings -> per-timestep dense
    projection -> three sequence-returning BiLSTM layers -> a final
    BiLSTM that collapses the sequence -> sigmoid output layer.
    """
    model = Sequential()
    model.add(
        Embedding(
            embeddings.shape[0],
            embeddings.shape[1],
            input_length=shape["max_length"],
            trainable=False,
            weights=[embeddings],
            mask_zero=True,
        )
    )
    model.add(TimeDistributed(Dense(shape["nr_hidden"], use_bias=False)))
    # Three identical intermediate BiLSTM layers emitting full sequences.
    for _ in range(3):
        model.add(
            Bidirectional(
                LSTM(
                    shape["nr_hidden"],
                    recurrent_dropout=settings["dropout"],
                    dropout=settings["dropout"],
                    return_sequences=True,
                )
            )
        )
    # Final BiLSTM returns only the last output vector.
    model.add(
        Bidirectional(
            LSTM(
                shape["nr_hidden"],
                recurrent_dropout=settings["dropout"],
                dropout=settings["dropout"],
            )
        )
    )
    model.add(Dense(shape["nr_class"], activation="sigmoid"))
    model.compile(
        optimizer=Adam(lr=settings["lr"]),
        loss="binary_crossentropy",
        metrics=["accuracy"],
    )
    return model
def get_embeddings(vocab):
    """Return the raw word-vector table stored on a spaCy vocab."""
    vector_table = vocab.vectors.data
    return vector_table
def cleanup_text(docs, logging=False):
    """Lemmatise and normalise a pandas Series of raw texts.

    Strips whitespace, flattens newlines, lemmatises via spaCy, and drops
    pronouns, numbers and punctuation.

    Args:
        docs: pandas Series of raw text strings.
        logging: if True, print progress every 1000 documents.

    Returns:
        pandas Series of cleaned, space-joined lemma strings.
    """
    # BUG FIX: Series.replace() matches *whole cell values*, so the old
    # chained .replace("\n", " ") never touched embedded newlines; use the
    # element-wise .str.replace with regex disabled instead.
    docs = (
        docs.str.strip()
        .str.replace("\n", " ", regex=False)
        .str.replace("\r", " ", regex=False)
    )
    texts = []
    counter = 1
    for doc in docs:
        if counter % 1000 == 0 and logging:
            print("Processed %d out of %d documents." % (counter, len(docs)))
        counter += 1
        doc = nlp(doc, disable=['parser', 'ner'])
        # Drop pronouns ('-PRON-' lemma), numbers and punctuation.
        tokens = [tok.lemma_.lower().strip() for tok in doc if tok.lemma_ != '-PRON-' and tok.pos_ != 'NUM' and tok.pos_ != 'PUNCT']
        tokens = ' '.join(tokens)
        texts.append(tokens)
    return pd.Series(texts)
def read_data(data_dir, training_portion):
    """Load every CSV under *data_dir*, clean it, and split train/validation.

    Each CSV is read with usecols=[1, 2] (label, text). Labels are remapped
    to integer codes; rows are shuffled before the split.

    Args:
        data_dir: directory containing the CSV files.
        training_portion: fraction of rows used for training (e.g. 0.8).

    Returns:
        (train_texts, train_labels, val_texts, val_labels)
    """
    frames = []
    for filename in pathlib.Path(data_dir).iterdir():
        # FIX: skip macOS '.DS_Store' metadata files *before* opening them
        # (the original opened every file first and only then checked).
        if filename.name.endswith('DS_Store'):
            continue
        with filename.open(encoding='latin-1') as file_:
            frames.append(pd.read_csv(file_, usecols=[1, 2], encoding='latin-1'))
    # FIX: DataFrame.append is deprecated/removed in modern pandas;
    # pd.concat is the supported equivalent.
    texts = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
    texts = texts.sample(frac=1)  # shuffle rows
    text_cln = cleanup_text(texts.iloc[:, 1], logging=True)
    # Map each distinct label string to a small integer code.
    sentiments = np.asarray(texts.iloc[:, 0].unique())
    for i in range(len(sentiments)):
        texts.iloc[:, 0].replace(sentiments[i], i, inplace=True)
    train_size = int(len(texts) * training_portion)
    train_texts, train_labels = text_cln[:train_size], texts.iloc[:train_size, 0]
    val_texts, val_labels = text_cln[train_size:], texts.iloc[train_size:, 0]
    return train_texts, train_labels, val_texts, val_labels
@plac.annotations(
    train_dir=("Location of training file or directory"),
    model_dir=("Location of output model directory",),
    nr_hidden=("Number of hidden units", "option", "u", int),
    max_length=("Maximum sentence length", "option", "l", int),
    dropout=("Dropout", "option", "d", float),
    learn_rate=("Learn rate", "option", "e", float),
    nb_epoch=("Number of training epochs", "option", "n", int),
    batch_size=("Size of minibatches for training LSTM", "option", "b", int),
)
def main(
    model_dir='/Users/masha/Data/Model',
    train_dir='/Users/masha/Data/Train',
    nr_hidden=128,
    max_length=100,
    dropout=0.2,
    learn_rate=0.0001,
    nb_epoch=150,
    batch_size=64,
    training_portion=.8,
):
    """Command-line entry point: load data, train the model, persist it.

    Saves the trained weights (minus the frozen embedding layer) to
    <model_dir>/model and the architecture JSON to <model_dir>/config.json.
    """
    if model_dir is not None:
        model_dir = pathlib.Path(model_dir)
    if train_dir is None:
        print('Please provide training directory!')
        # BUG FIX: previously fell through and crashed inside
        # read_data(None, ...); bail out cleanly instead.
        return
    train_texts, train_labels, val_texts, val_labels = read_data(train_dir, training_portion)
    model = train_model(
        train_texts,
        train_labels,
        val_texts,
        val_labels,
        {"nr_hidden": nr_hidden, "max_length": max_length, "nr_class": 1},
        {"dropout": dropout, "lr": learn_rate},
        {},
        nb_epoch=nb_epoch,
        batch_size=batch_size
    )
    weights = model.get_weights()
    if model_dir is not None:
        # Skip weights[0] (the frozen pretrained embedding table); it can
        # be reconstructed from the spaCy vectors on load.
        with (model_dir / "model").open("wb") as file_:
            pickle.dump(weights[1:], file_)
        with (model_dir / "config.json").open("w") as file_:
            file_.write(model.to_json())
if __name__ == "__main__":
    plac.call(main)  # parse CLI options per the plac annotations and invoke main()
|
[
"noreply@github.com"
] |
mgoliyad.noreply@github.com
|
249195b6e47858e38b763eb4616606ee68555d24
|
1eec77e8734eb7de7f20232c29ac7f6df15e567e
|
/scripts_per_file/_face_reco/face_detect_only_face/face_detect.py
|
2d4c07ed07c372bc16c47c34ff2ea8717a1bffb8
|
[] |
no_license
|
dannyvai/Folder_Scrapper
|
2097d6a187a6b570e433d8c51a1cc1b81cd46d1e
|
75616d63326ca5bb320e6f0aeda9991343b61654
|
refs/heads/master
| 2021-01-17T08:54:44.727764
| 2016-04-06T20:09:40
| 2016-04-06T20:09:40
| 25,173,630
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
import cv2
def find_faces(imagePath):
    # Detect frontal faces in the image at imagePath with a Haar cascade,
    # draw green rectangles around them, briefly display the result, and
    # return the number of faces found.
    # NOTE(review): Python 2 / OpenCV 2 era code (print statement,
    # cv2.cv.CV_HAAR_SCALE_IMAGE) -- will not run under Python 3 or
    # OpenCV 3+ without porting.
    cascPath = "/home/ubuser/scrapper/face_detect/haarcascade_frontalface_default.xml"  # hard-coded cascade path
    # Create the haar cascade
    faceCascade = cv2.CascadeClassifier(cascPath)
    # Read the image
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # cascade operates on grayscale
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,    # image-pyramid scale step between detection passes
        minNeighbors=5,     # how many overlapping detections confirm a face
        minSize=(50, 50),   # ignore candidate faces smaller than 50x50 px
        flags = cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    print "Found {0} faces!".format(len(faces))
    if len(faces) > 0 :
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.imshow("Faces found", image)
        cv2.waitKey(500)  # show the window for half a second
    return len(faces)
|
[
"danny.wainshtien@gmail.com"
] |
danny.wainshtien@gmail.com
|
31f529e91f3b8f0d3a305ceb7a7f4eb368554ab4
|
28347bdee730beaad6a90381c33c51f5a4cd31aa
|
/test/calc.py
|
6c16f759a10e4c3bf212b04ef58933d40afd0d38
|
[] |
no_license
|
lorerlrolerl/utilities
|
05ec8b4d2cf53d251c90950ed3e17859d421b91d
|
54eae7aefe2bf69382e057b19b01615d097dd3c5
|
refs/heads/main
| 2023-03-16T15:13:40.954308
| 2021-03-22T11:00:44
| 2021-03-22T11:00:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
def subtract(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
def multiply(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
def divide(x, y):
    """Return x divided by y.

    Raises:
        ValueError: if y is zero.
    """
    if y == 0:
        raise ValueError('Can not divide by zero!')
    quotient = x / y
    return quotient
|
[
"yazdiha@ese.eur.nl"
] |
yazdiha@ese.eur.nl
|
21300b331f5afbd7f3b4ab4488703ee9c8633718
|
5a0de8d575dd64116e9e13be32cb9a86f0c682e7
|
/leetcode 8.py
|
2dce26256f9dac1a3f0656972ceb6e82f4f1cfca
|
[] |
no_license
|
sailll/leetcode-solution-by-myself
|
246499095b798e6a2789ad5d03fc4772ab897c1d
|
15f8fb94c614dba7fe42725a50ae082487da0374
|
refs/heads/master
| 2021-05-22T10:01:27.305761
| 2020-07-13T05:06:06
| 2020-07-13T05:06:06
| 54,723,761
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
class Solution(object):
    """LeetCode 8: implement atoi (string to 32-bit signed integer)."""

    def myAtoi(self, str):
        """Convert *str* to an int, clamped to the signed 32-bit range.

        :type str: str
        :rtype: int

        Fixes over the original: the docstring was dead code placed after
        the return statement, the builtin ``max`` was shadowed by a local,
        and an unreachable ``if i == ' '`` branch was removed (a space
        already terminates the digit scan because ' ' < '0').
        """
        INT_MAX = 2147483647
        INT_MIN = -2147483648
        stripped = str.lstrip()
        negative = stripped.startswith("-")
        # Drop a single leading sign character, if present.
        body = stripped[1:] if stripped[:1] in ("+", "-") else stripped
        digits = []
        for ch in body:
            if ch < '0' or ch > '9':
                break  # first non-digit ends the number
            digits.append(ch)
        if not digits:
            return 0
        value = int("".join(digits))
        if negative:
            value = -value
        # Clamp to the 32-bit signed range.
        return min(INT_MAX, max(INT_MIN, value))
|
[
"noreply@github.com"
] |
sailll.noreply@github.com
|
5061e70cc6b91ea152bc29a49732bc912578e4c0
|
4fecc528447ecb420d2d4a42c5728db4d4fb86a7
|
/GroupTask/DensityEstimation.py
|
6aedaf07641264d2f6f5b9434cb24e59992ebcdf
|
[
"MIT"
] |
permissive
|
ivanov-an-spbu/2019_IT
|
973203e44d6f976172cdf0cabfdd7ea37c17fd44
|
f90e9194dd90e308b3aa7e179c118bb8482a62b8
|
refs/heads/master
| 2020-07-23T13:59:35.573850
| 2019-12-13T08:24:46
| 2019-12-13T08:24:46
| 207,582,878
| 3
| 51
|
MIT
| 2022-12-16T09:06:25
| 2019-09-10T14:33:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,927
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import gaussian_kde
#density estimation
def calculate_density(sensor_values, points, Y, density_out_buf):
    """Estimate a Gaussian KDE per class and evaluate it at *points*.

    Column *i* of *density_out_buf* receives the estimated density of the
    i-th unique label in *Y*; the buffer is filled in place.
    """
    for col, label in enumerate(np.unique(Y)):
        class_values = sensor_values[Y == label]
        density_out_buf[:, col] = gaussian_kde(class_values).evaluate(points)
    return
#finds density curves intersections
def calculate_intersections(density, points):
    """Return the points where the two class-density curves cross.

    A crossing is any place where the difference between the two columns
    of *density* changes sign between consecutive samples.
    """
    diff = density[:, 0] - density[:, 1]
    # A negative product of consecutive differences marks a sign change.
    # TODO: also treat an exact zero of the difference as a crossing.
    crossed = diff[1:] * diff[:-1] < 0
    return points[1:][crossed]
def main():
    """Load sensor data, estimate per-class densities for the first N
    sensors, plot them, and print the decision boundaries found at the
    density-curve intersections."""
    df = pd.read_csv("data_density.csv")
    df = df.drop("sample index", axis=1)
    N=2 # only 2 columns (for example)
    #plots histograms and density distribution
    #for i in range(N):
    #    sensor = f"sensor{i}"
    #    sns.FacetGrid(df[[sensor, "class_label"]], hue="class_label").map(sns.distplot, sensor, bins=50)
    fig, axes = plt.subplots(1, N, figsize=(50,10))
    axes = axes.ravel()
    n_count=2000
    points = np.linspace(0,1,n_count) #values of sensors where density is estimated
    density = np.empty((n_count, 2)) #density for each class
    boundaries = [] # intersections of density curves for each sensor
    # Column 0 holds the class label; columns 1..N are sensor readings.
    X,Y = df.values[:,1:N+1], df.values[:,0]
    for i,x in enumerate(X.T):
        # `density` is reused as an output buffer for each sensor column.
        calculate_density(x, points, Y, density)
        axes[i].plot(points, density)
        intersection_points = calculate_intersections(density, points)
        # Append 1 as the right edge so boundaries cover the full [0, 1] range.
        boundaries.append(np.append(intersection_points, 1))
        for x_bound in intersection_points:
            axes[i].axvline(x=x_bound, linestyle='--', color='k')
    for b in boundaries: # found out boundaries for each class
        print(b)
    plt.show()
if __name__ == "__main__":
    main()  # run only when executed as a script, not on import
|
[
"noreply@github.com"
] |
ivanov-an-spbu.noreply@github.com
|
6b47f7c645133d10406c7447c70c7489bba52b59
|
5f0b119b53f8de59dd1da92ed8497234c8383341
|
/main/urls.py
|
900848288da38e508e33433132c943dfb284a470
|
[
"MIT"
] |
permissive
|
Alenwear/Hospital-Quality-Management-System
|
cf5733f388b957365bd5510d0e791d819a63dfb1
|
6ccfb0ddb8df5341c3e3f1a3f10ba737fd5a5b86
|
refs/heads/master
| 2020-11-30T13:15:59.241927
| 2017-09-21T16:55:04
| 2017-09-21T16:55:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
from django.conf.urls import url
from . import views
app_name = 'main'  # URL namespace, used in reverse() lookups like 'main:check'
urlpatterns = [
    # NOTE(review): r'^check' has no trailing '$' or '/', so it also matches
    # paths like 'checkanything' -- presumably an intentional prefix match;
    # verify against the views it routes to.
    url(r'^check', views.check, name='check'),
    url(r'^showchecks/$', views.showchecks, name='showchecks'),
    url(r'^success/$', views.success, name='success'),
]
|
[
"shadowspacex@163.com"
] |
shadowspacex@163.com
|
f7c337a8ca429c9e1d77586a0bc9a706fd650bf1
|
5841dd37f7a2801d1b96551e3c7cd58c1b18345b
|
/nimGame.py
|
1bda9378c3e3b5fca8b2656dfa994c2a3bd3d5f4
|
[] |
no_license
|
bmarsh5/pyGroupProject
|
0b199a4ab9ed989523c13e33b0e1af3d27086e86
|
61244935202831f0258ec9fb74b0994917aa45ae
|
refs/heads/master
| 2021-03-10T08:28:28.557671
| 2020-04-18T21:46:57
| 2020-04-18T21:46:57
| 246,438,648
| 0
| 0
| null | 2020-04-18T21:46:58
| 2020-03-11T00:34:22
|
Python
|
UTF-8
|
Python
| false
| false
| 5,423
|
py
|
"""
Name:
CSC 119-001
Date:040220
Program Name:nimGame
Description:
From a pile of marbles, a player or the computer may take up to
half of the marbles on each turn. The player who takes the last
marble loses.
You will note that the computer cannot be beaten in smart mode when it has the
first move, unless the pile size happens to be 15, 31, or 63.
Of course, a human player who has the first turn and knows the winning strategy
can win against the computer.
Bryan Marsh-
Jim Terry-
Kiara Billy-
Sources- General knowledge
"""
from random import randint
def main():
    """Entry point: start one game of Nim.

    The previous version initialised four local variables (whoseTurn,
    ballCount, mode, game) that were never used -- playGame() manages
    all game state itself and returns nothing -- so they were removed.
    """
    playGame()
##the main function that starts the game
#
# there is no return. The game is played within this function.
# Bryan and some of Jim
def playGame() :
    """Run one full game of Nim between the player and the computer.

    The starting pile size, first mover, and computer strategy are all
    chosen at random.  Players alternate removing up to half the pile;
    whoever is forced to take the last ball loses.
    """
    ballCount = ballCountFunction()   # random starting pile (10..100)
    whoseTurn = turnFunction()        # 0 = computer moves first, 1 = player
    mode = computerMode()             # 0 = dumb strategy, 1 = smart
    while ballCount >= 2 :
        if whoseTurn == 0 :
            #PC turn
            # Even in smart mode, fall back to a random (dumb) move when the
            # pile is already a losing position (3, 7, 15, 31, 63 balls).
            if mode == 0 or ballCount == 3 or ballCount == 7 or ballCount == 15 or ballCount == 31 or ballCount == 63 :
                print("computer acting in dumb mode")
                ballCount = computerDumbMode(ballCount)
                print("The ball count is now",ballCount, "\n")
                whoseTurn = 1
            else :
                print("computer acting in smart mode")
                ballCount = computerSmartMode(ballCount)
                whoseTurn = 1
                print("Computer removed balls to make the pile:", ballCount,"balls\n")
        else :
            #Player turn
            print("Player's turn")
            ballCount = playerTurn(ballCount)
            print("The ball count is now:",ballCount,"balls\n")
            whoseTurn = 0
    if ballCount == 1 :
        # One ball left: whoever moves next must take it and therefore loses.
        if whoseTurn == 0 :
            print("Computer loses and Player wins!!!!")
        else :
            print("Player loses and Computer wins")
##Generate a random integer of 0 or 1 for first turn.
# @turn is the random 0 or 1.
# @return Returns a 0 or 1. 0 is the computer.
#Jim
def turnFunction() :
    """Randomly pick who moves first: 0 = computer, 1 = player."""
    first_mover = randint(0, 1)
    print("Turn =", first_mover)
    return first_mover
## Computes the ball count.
# @ballCount is randomly generated between 10 and 100
# @return Returns the new calculated ball count
# Jim
def ballCountFunction() :
    """Return a random starting pile size between 10 and 100 inclusive."""
    pile_size = randint(10, 100)
    print("Ball count is ", pile_size)
    return pile_size
## Computes a 0 or 1 to determine dumb or smart mode.
# @computerMode is a random 0 or 1
# @return the computer mode. 0 is dumb
# Jim
def computerMode() :
    """Randomly choose the computer strategy: 0 = dumb, 1 = smart."""
    chosen_mode = randint(0, 1)
    print("Computer =", chosen_mode)
    return chosen_mode
## Computes the ballcount while in dumb mode
#
# @return the ballcount while in dumb mode
# Bryan
def computerDumbMode(ballCount):
    """Remove a random number of balls (1 .. ballCount // 2) from the pile.

    Args:
        ballCount: current pile size; callers guarantee ballCount >= 2,
            so the randint range is always valid.

    Returns:
        The new pile size after the computer's move.

    Fix: the original had a stray `result = computerDumbMode(ballCount)`
    statement after the return (dead code at best, a NameError if it had
    ever executed at module level); it has been removed.
    """
    halfOfBalls = ballCount // 2
    dumbBallCount = randint(1, halfOfBalls)
    print("the computer takes away", dumbBallCount)  # announce the move
    return ballCount - dumbBallCount
## Computes the ball count while in smart mode
#
# @return the ball count while in smart mode
# Bryan and some of Jim
def computerSmartMode(ballCount) :
    """Return the pile size the computer leaves behind in smart mode.

    The winning strategy leaves the opponent a losing position of
    2**k - 1 balls (7, 15, 31, 63) whenever the pile size allows it.
    """
    if ballCount == 2:
        return 1
    # (low, high, pile-to-leave) lookup for each starting-pile band.
    for low, high, leave in ((10, 28, 7), (29, 46, 15), (47, 64, 31), (65, 100, 63)):
        if low <= ballCount <= high:
            return leave
    return 3
## Computes the size of the pile of balls after the user makes a choice.
# @ballCount Takes the current pile size.
# @return Returns the new calculated ball count
# Bryan
def playerTurn (ballCount):
    """Prompt the player for a move and return the new pile size.

    Loops until the player enters a valid number of balls to remove:
    a positive integer no greater than half the current pile.
    """
    # NOTE(review): this local flag shadows the function's own name --
    # harmless here, but worth renaming.
    playerTurn = False
    while not playerTurn:
        print("The size of the ball pile is currently:", ballCount,"balls")
        allowableChoice = ballCount//2
        print("How many balls would you like to remove? Half of the pile size is: ", allowableChoice)
        playerChoice = input("Player chooses to remove: ")
        # isdigit() rejects negatives and non-numeric input in one check.
        if playerChoice.isdigit() != True:
            print("Your input was either negative or was not a number. Try again :(" )
        else:
            playerChoice = int(playerChoice)
            # Reject removing more than half the pile, or removing nothing.
            if ballCount/2 < playerChoice or playerChoice == 0:
                print("Dude. Seriously. Follow directions.")
            else:
                ballCount -= playerChoice
                playerTurn = True
    return ballCount
main()  # starts the game immediately on run/import (no __main__ guard)
'''
Test Case1
tests if the player input a non digit
input-
Player chooses to remove: two
Output-
Dude. Seriously. Follow directions.
-program loops until the player follow directions
Test Case2
tests if the player inputs more than half the ballcount
input
Player chooses to remove: 43
Output-
Dude. Seriously. Follow directions.
-program loops until the player follow directions
Test Case3
tests if the player inputs 0
input
Player chooses to remove: 0
Output
Dude. Seriously. Follow directions.
-program loops until the player follow directions
'''
|
[
"noreply@github.com"
] |
bmarsh5.noreply@github.com
|
2f174f15fe7cdac126c4585a1c60e935b18fe231
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/TauES_test/nom/emb/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374658142/HTT_24Jul_newTES_manzoni_Nom_Jobs/Job_2/run_cfg.py
|
ab4eb96f5b69caf210b70ecddcd9ad84d4f6ac79
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604
| 2014-02-20T16:35:34
| 2014-02-20T16:35:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69,052
|
py
|
import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/TauES_test/nom/emb/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374658142/HTT_24Jul_newTES_manzoni_Nom_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
lumisToProcess = cms.untracked.VLuminosityBlockRange( ("190645:10-190645:110", "190646:1-190646:111", "190659:33-190659:167", "190679:1-190679:55", "190688:69-190688:249",
"190702:51-190702:53", "190702:55-190702:122", "190702:124-190702:169", "190703:1-190703:252", "190704:1-190704:3",
"190705:1-190705:5", "190705:7-190705:65", "190705:81-190705:336", "190705:338-190705:350", "190705:353-190705:383",
"190706:1-190706:126", "190707:1-190707:237", "190707:239-190707:257", "190708:1-190708:189", "190733:71-190733:96",
"190733:99-190733:389", "190733:392-190733:460", "190736:1-190736:80", "190736:83-190736:185", "190738:1-190738:130",
"190738:133-190738:226", "190738:229-190738:349", "190782:55-190782:181", "190782:184-190782:233", "190782:236-190782:399",
"190782:401-190782:409", "190895:64-190895:202", "190895:210-190895:302", "190895:305-190895:584", "190895:587-190895:948",
"190906:73-190906:256", "190906:259-190906:354", "190906:356-190906:496", "190945:124-190945:207", "190949:1-190949:81",
"191043:45-191043:46", "191046:1-191046:21", "191046:24-191046:82", "191046:84-191046:88", "191046:92-191046:116",
"191046:119-191046:180", "191046:183", "191046:185-191046:239", "191056:1", "191056:4-191056:9",
"191056:16-191056:17", "191056:19", "191057:1", "191057:4-191057:40", "191062:1",
"191062:3", "191062:5-191062:214", "191062:216-191062:541", "191090:1-191090:55", "191201:38-191201:49",
"191201:52-191201:79", "191202:1-191202:64", "191202:66-191202:68", "191202:87-191202:105", "191202:108-191202:118",
"191226:77-191226:78", "191226:81-191226:831", "191226:833-191226:1454", "191226:1456-191226:1466", "191226:1469-191226:1507",
"191226:1510-191226:1686", "191247:1-191247:153", "191247:156-191247:280", "191247:283-191247:606", "191247:608-191247:620",
"191247:622-191247:818", "191247:821-191247:834", "191247:837-191247:1031", "191247:1034-191247:1046", "191247:1049-191247:1140",
"191247:1143-191247:1187", "191247:1190-191247:1214", "191247:1217-191247:1224", "191248:1-191248:103", "191264:59-191264:79",
"191264:82-191264:152", "191264:155-191264:189", "191271:56-191271:223", "191271:225-191271:363", "191276:1-191276:16",
"191277:1-191277:28", "191277:30-191277:164", "191277:167-191277:253", "191277:255-191277:457", "191277:460-191277:535",
"191277:537-191277:576", "191277:579-191277:775", "191277:778-191277:811", "191277:813-191277:849", "191367:1-191367:2",
"191411:1-191411:23", "191695:1", "191718:43-191718:95", "191718:98-191718:207", "191720:1",
"191720:3-191720:15", "191720:17-191720:181", "191721:1", "191721:3-191721:34", "191721:36-191721:183",
"191721:186-191721:189", "191726:1-191726:13", "191810:15", "191810:22-191810:49", "191810:52-191810:92",
"191830:54-191830:242", "191830:245-191830:301", "191830:304-191830:393", "191833:1", "191833:3-191833:103",
"191834:1-191834:30", "191834:33-191834:74", "191834:77-191834:299", "191834:302-191834:352", "191837:1-191837:44",
"191837:47-191837:53", "191837:56-191837:65", "191856:1-191856:133", "191859:1-191859:28", "191859:31-191859:126",
"193093:1-193093:33", "193123:1-193123:27", "193124:1-193124:52", "193192:58-193192:86", "193193:1-193193:6",
"193193:8", "193193:11-193193:83", "193193:86-193193:120", "193193:122-193193:160", "193193:162-193193:274",
"193193:276-193193:495", "193193:497-193193:506", "193207:54-193207:182", "193334:29-193334:172", "193336:1-193336:264",
"193336:267-193336:492", "193336:495-193336:684", "193336:687-193336:729", "193336:732-193336:951", "193541:77-193541:101",
"193541:103-193541:413", "193541:416-193541:575", "193541:578-193541:619", "193556:41-193556:83", "193557:1-193557:84",
"193575:48-193575:173", "193575:176-193575:349", "193575:351-193575:394", "193575:397-193575:415", "193575:417-193575:658",
"193575:660-193575:752", "193621:60-193621:570", "193621:573-193621:769", "193621:772-193621:976", "193621:979-193621:1053",
"193621:1056-193621:1137", "193621:1139-193621:1193", "193621:1195-193621:1371", "193621:1373-193621:1654", "193834:1-193834:35",
"193835:1-193835:20", "193835:22-193835:26", "193836:1-193836:2", "193998:66-193998:113", "193998:115-193998:278",
"193999:1-193999:45", "194027:57-194027:113", "194050:53-194050:113", "194050:116-194050:273", "194050:275-194050:355",
"194050:357-194050:369", "194050:372-194050:391", "194050:394-194050:490", "194050:492-194050:814", "194050:816-194050:1435",
"194050:1437-194050:1735", "194050:1760-194050:1888", "194051:1-194051:12", "194052:1-194052:99", "194052:102-194052:166",
"194075:48-194075:101", "194075:103", "194075:105-194075:107", "194075:109", "194075:111",
"194076:1-194076:9", "194076:11-194076:55", "194076:58-194076:163", "194076:165-194076:228", "194076:230-194076:264",
"194076:267-194076:507", "194076:509-194076:527", "194076:530-194076:538", "194076:541-194076:562", "194076:565-194076:748",
"194108:81-194108:161", "194108:164-194108:264", "194108:266-194108:373", "194108:376-194108:396", "194108:398-194108:433",
"194108:436-194108:452", "194108:454-194108:577", "194108:579-194108:590", "194108:593-194108:668", "194108:671-194108:872",
"194115:66-194115:184", "194115:186-194115:338", "194115:340-194115:346", "194115:348-194115:493", "194115:496-194115:731",
"194115:819-194115:857", "194117:1-194117:38", "194119:1-194119:229", "194119:232-194119:261", "194120:1-194120:162",
"194120:165-194120:406", "194150:42-194150:127", "194150:129-194150:261", "194150:264-194150:311", "194151:47-194151:72",
"194151:75-194151:191", "194151:193-194151:238", "194151:240-194151:617", "194151:619", "194151:621",
"194151:623", "194153:1-194153:115", "194199:96-194199:227", "194199:229-194199:336", "194199:339-194199:402",
"194210:3-194210:195", "194210:198-194210:217", "194210:220-194210:359", "194210:361-194210:555", "194223:61-194223:112",
"194224:1-194224:126", "194224:129-194224:206", "194224:208-194224:250", "194224:253-194224:309", "194224:312-194224:386",
"194224:389-194224:412", "194225:1-194225:23", "194225:26-194225:47", "194225:49-194225:85", "194225:88-194225:149",
"194270:56-194270:68", "194303:56-194303:66", "194303:69-194303:102", "194304:1-194304:43", "194304:46",
"194305:1-194305:84", "194314:52-194314:130", "194314:133-194314:300", "194315:1-194315:10", "194315:13-194315:314",
"194315:317-194315:428", "194315:431-194315:452", "194315:455-194315:467", "194317:1-194317:20", "194424:63-194424:141",
"194424:144-194424:195", "194424:198-194424:266", "194424:268-194424:421", "194424:424-194424:478", "194424:481-194424:531",
"194424:534-194424:553", "194424:556-194424:706", "194424:708", "194428:1-194428:85", "194428:87-194428:122",
"194428:125-194428:294", "194428:296-194428:465", "194429:1-194429:4", "194429:7-194429:54", "194429:57-194429:147",
"194429:150-194429:411", "194429:413-194429:742", "194429:745-194429:986", "194429:988-194429:1019", "194439:46-194439:77",
"194439:79-194439:106", "194455:45-194455:64", "194455:67-194455:140", "194455:142-194455:255", "194455:293-194455:303",
"194464:1-194464:127", "194464:130-194464:142", "194464:145-194464:210", "194479:1-194479:44", "194479:165-194479:232",
"194479:235-194479:262", "194479:265-194479:374", "194479:377-194479:431", "194479:434-194479:489", "194479:492-194479:529",
"194479:531-194479:566", "194480:1-194480:32", "194480:34-194480:205", "194480:207-194480:375", "194480:377-194480:387",
"194480:389-194480:759", "194480:762-194480:956", "194480:959-194480:1402", "194533:46-194533:379", "194533:382-194533:415",
"194533:417-194533:618", "194533:620-194533:872", "194619:31-194619:110", "194631:1-194631:42", "194631:44-194631:100",
"194631:102-194631:169", "194631:171-194631:222", "194643:1-194643:287", "194644:1-194644:168", "194644:171-194644:181",
"194644:184-194644:185", "194644:187-194644:319", "194644:321-194644:421", "194691:61-194691:104", "194691:107-194691:155",
"194691:158-194691:251", "194691:254-194691:268", "194691:271-194691:272", "194691:275-194691:289", "194691:292-194691:313",
"194699:1-194699:30", "194699:32-194699:52", "194699:55-194699:64", "194699:67-194699:71", "194699:73-194699:154",
"194699:157-194699:215", "194699:218-194699:238", "194699:241-194699:259", "194702:1-194702:138", "194702:141-194702:191",
"194704:1-194704:41", "194704:44-194704:545", "194704:548-194704:592", "194711:1-194711:7", "194711:9-194711:619",
"194712:1-194712:56", "194712:61-194712:418", "194712:420-194712:625", "194712:627-194712:759", "194735:44-194735:71",
"194735:74-194735:101", "194735:104-194735:130", "194778:60-194778:118", "194778:120-194778:219", "194789:1-194789:18",
"194789:21-194789:32", "194789:34-194789:80", "194789:82-194789:166", "194789:168-194789:269", "194789:272-194789:405",
"194789:409-194789:414", "194789:417-194789:427", "194789:430-194789:566", "194790:1-194790:45", "194825:72-194825:117",
"194825:120-194825:221", "194896:34-194896:55", "194896:58-194896:79", "194896:82-194896:103", "194897:1-194897:6",
"194897:8-194897:78", "194897:80-194897:96", "194897:98-194897:102", "194912:53-194912:70", "194912:72-194912:96",
"194912:98-194912:444", "194912:446-194912:450", "194912:453-194912:467", "194912:470-194912:561", "194912:564-194912:660",
"194912:663-194912:813", "194912:815-194912:840", "194912:843-194912:864", "194912:866-194912:1004", "194912:1007-194912:1025",
"194912:1027-194912:1067", "194912:1069-194912:1137", "194912:1140-194912:1166", "194912:1168-194912:1249", "194912:1251-194912:1304",
"194912:1307-194912:1444", "194912:1447-194912:1487", "194912:1489-194912:1503", "194912:1506-194912:1662", "194914:1-194914:38",
"194915:1-194915:74", "195013:94-195013:144", "195013:146-195013:185", "195013:187-195013:206", "195013:208-195013:299",
"195013:302-195013:324", "195013:326-195013:366", "195013:369-195013:447", "195013:450-195013:526", "195013:528-195013:541",
"195014:1-195014:6", "195014:9-195014:119", "195014:121-195014:148", "195015:1-195015:13", "195016:1-195016:21",
"195016:23-195016:55", "195016:58-195016:63", "195016:65-195016:174", "195016:177-195016:184", "195016:186-195016:241",
"195016:243-195016:246", "195016:248-195016:251", "195016:254-195016:367", "195016:370-195016:422", "195016:425-195016:560",
"195016:563-195016:569", "195099:70-195099:144", "195099:147-195099:186", "195099:189-195099:208", "195099:211-195099:224",
"195099:227-195099:248", "195109:98-195109:241", "195112:1-195112:12", "195112:15-195112:26", "195113:1-195113:209",
"195113:212-195113:388", "195113:391-195113:403", "195113:406-195113:419", "195113:422-195113:492", "195113:495-195113:579",
"195114:1-195114:69", "195114:72-195114:103", "195115:1-195115:7", "195115:10-195115:22", "195147:132-195147:282",
"195147:285-195147:294", "195147:297-195147:331", "195147:334-195147:363", "195147:366-195147:442", "195147:445-195147:536",
"195147:539-195147:559", "195163:72-195163:138", "195163:140-195163:224", "195163:227-195163:240", "195163:243",
"195163:246-195163:347", "195164:1-195164:64", "195165:1-195165:4", "195165:7-195165:41", "195165:44-195165:54",
"195165:56-195165:153", "195165:156-195165:260", "195165:263-195165:266", "195251:1-195251:131", "195251:134-195251:137",
"195251:140-195251:152", "195251:154-195251:165", "195251:167-195251:242", "195303:109-195303:191", "195303:194-195303:277",
"195303:280-195303:310", "195303:312-195303:316", "195303:318-195303:409", "195304:1-195304:3", "195304:6-195304:22",
"195304:27-195304:80", "195304:83-195304:100", "195304:103-195304:154", "195304:157-195304:341", "195304:344-195304:588",
"195304:590-195304:727", "195304:729-195304:1003", "195304:1006-195304:1079", "195304:1083-195304:1140", "195304:1143-195304:1229",
"195378:90-195378:117", "195378:120-195378:127", "195378:130-195378:185", "195378:187-195378:204", "195378:206-195378:302",
"195378:305-195378:542", "195378:544-195378:565", "195378:567-195378:645", "195378:647-195378:701", "195378:703-195378:734",
"195378:737-195378:1120", "195378:1122-195378:1133", "195390:1", "195390:4-195390:27", "195390:30-195390:145",
"195390:147-195390:183", "195390:186-195390:187", "195390:190-195390:208", "195390:210-195390:213", "195390:215-195390:400",
"195396:49-195396:55", "195396:58-195396:63", "195396:66-195396:131", "195397:1-195397:10", "195397:12-195397:89",
"195397:92-195397:120", "195397:123-195397:141", "195397:143-195397:251", "195397:253", "195397:256-195397:475",
"195397:478-195397:525", "195397:527-195397:608", "195397:611-195397:776", "195397:779-195397:970", "195397:972-195397:1121",
"195397:1123-195397:1181", "195397:1184-195397:1198", "195397:1200-195397:1209", "195398:3-195398:137", "195398:139-195398:494",
"195398:497-195398:585", "195398:587-195398:817", "195398:820-195398:824", "195398:827-195398:1225", "195398:1228-195398:1307",
"195398:1309-195398:1712", "195398:1721-195398:1736", "195398:1741-195398:1752", "195398:1767-195398:1795", "195399:1-195399:192",
"195399:194-195399:382", "195530:1-195530:80", "195530:82-195530:104", "195530:107-195530:156", "195530:159-195530:300",
"195530:302-195530:405", "195540:68-195540:123", "195540:126-195540:137", "195540:140-195540:283", "195540:286-195540:319",
"195551:91-195551:106", "195552:1-195552:21", "195552:23-195552:27", "195552:30-195552:147", "195552:149-195552:155",
"195552:158-195552:182", "195552:185-195552:287", "195552:290-195552:349", "195552:352-195552:469", "195552:472-195552:815",
"195552:818-195552:823", "195552:825-195552:883", "195552:885-195552:1152", "195552:1154-195552:1300", "195552:1303-195552:1789",
"195633:40-195633:42", "195647:1-195647:41", "195649:1-195649:69", "195649:72-195649:151", "195649:154-195649:181",
"195649:183-195649:247", "195655:1-195655:129", "195655:131-195655:184", "195655:186-195655:260", "195655:263-195655:350",
"195655:353-195655:446", "195655:448-195655:483", "195655:485-195655:498", "195656:1-195656:362", "195658:1-195658:37",
"195658:40-195658:362", "195658:364-195658:382", "195658:384-195658:386", "195749:1-195749:8", "195749:10-195749:33",
"195749:36-195749:131", "195757:1-195757:82", "195757:85-195757:115", "195757:118-195757:161", "195757:163-195757:206",
"195758:1-195758:18", "195774:1-195774:13", "195774:16-195774:137", "195774:139-195774:151", "195774:154-195774:162",
"195774:164-195774:256", "195774:258-195774:276", "195774:279-195774:362", "195774:365-195774:466", "195774:469-195774:618",
"195774:620-195774:649", "195774:651-195774:830", "195775:1-195775:57", "195775:60-195775:100", "195775:103-195775:170",
"195776:1-195776:63", "195776:66-195776:283", "195776:286-195776:337", "195776:340-195776:399", "195776:401-195776:409",
"195776:411-195776:477", "195841:74-195841:85", "195868:1-195868:88", "195868:90-195868:107", "195868:110-195868:205",
"195915:1-195915:109", "195915:111-195915:275", "195915:278-195915:390", "195915:393-195915:417", "195915:419-195915:429",
"195915:432-195915:505", "195915:507-195915:747", "195915:749-195915:785", "195915:787-195915:828", "195915:830-195915:850",
"195916:1-195916:16", "195916:19-195916:68", "195916:71-195916:212", "195917:1-195917:4", "195918:1-195918:44",
"195918:46", "195918:49-195918:64", "195919:1-195919:15", "195923:1-195923:14", "195925:1-195925:12",
"195926:1", "195926:3-195926:19", "195926:21-195926:34", "195929:1-195929:29", "195930:1-195930:77",
"195930:80-195930:176", "195930:179-195930:526", "195930:529-195930:596", "195937:1-195937:28", "195937:31-195937:186",
"195937:188-195937:396", "195947:23-195947:62", "195947:64-195947:88", "195948:51-195948:116", "195948:119-195948:144",
"195948:147", "195948:150-195948:352", "195948:355-195948:369", "195948:372-195948:402", "195948:404-195948:500",
"195948:503-195948:540", "195948:543-195948:565", "195948:567-195948:602", "195948:605-195948:615", "195950:1-195950:71",
"195950:73-195950:138", "195950:141-195950:169", "195950:172-195950:332", "195950:335-195950:350", "195950:353-195950:382",
"195950:385-195950:421", "195950:424-195950:450", "195950:453-195950:483", "195950:485-195950:616", "195950:619-195950:715",
"195950:718-195950:787", "195950:789-195950:800", "195950:803-195950:829", "195950:831", "195950:833-195950:1587",
"195963:54-195963:58", "195970:44-195970:49", "195970:51-195970:85", "196019:54-196019:68", "196027:1-196027:55",
"196027:58-196027:119", "196027:121-196027:155", "196027:158-196027:186", "196046:12-196046:40", "196047:1-196047:64",
"196047:70-196047:75", "196048:1-196048:44", "196048:46-196048:48", "196197:58-196197:122", "196197:125-196197:179",
"196197:181-196197:311", "196197:313-196197:516", "196197:519-196197:562", "196199:1-196199:33", "196199:36-196199:83",
"196199:86-196199:118", "196199:121-196199:147", "196199:150-196199:237", "196199:239-196199:285", "196199:287-196199:534",
"196200:1-196200:68", "196202:3-196202:61", "196202:64-196202:108", "196203:1-196203:102", "196203:107-196203:117",
"196218:55-196218:199", "196218:201-196218:224", "196218:226-196218:393", "196218:396-196218:494", "196218:496-196218:741",
"196218:744-196218:752", "196218:754-196218:757", "196218:759-196218:820", "196239:1-196239:59", "196239:62-196239:154",
"196239:157-196239:272", "196239:274-196239:373", "196239:375-196239:432", "196239:435-196239:465", "196239:468-196239:647",
"196239:650-196239:706", "196239:709-196239:1025", "196249:63-196249:77", "196249:80-196249:99", "196250:1-196250:2",
"196250:5-196250:265", "196250:267-196250:426", "196252:1-196252:35", "196334:59-196334:111", "196334:113-196334:123",
"196334:126-196334:132", "196334:135-196334:167", "196334:170-196334:193", "196334:196-196334:257", "196334:259-196334:267",
"196334:270-196334:289", "196334:292-196334:342", "196349:65-196349:84", "196349:86-196349:154", "196349:157-196349:244",
"196349:246-196349:258", "196357:1-196357:4", "196359:1-196359:2", "196362:1-196362:88", "196363:1-196363:8",
"196363:11-196363:34", "196364:1-196364:93", "196364:96-196364:136", "196364:139-196364:365", "196364:368-196364:380",
"196364:382-196364:601", "196364:603-196364:795", "196364:798-196364:884", "196364:887-196364:1196", "196364:1199-196364:1200",
"196364:1203-196364:1299", "196437:1", "196437:3-196437:74", "196437:77-196437:169", "196438:1-196438:181",
"196438:184-196438:699", "196438:701-196438:1269", "196452:82-196452:112", "196452:114-196452:490", "196452:493-196452:586",
"196452:589-196452:618", "196452:622-196452:668", "196452:671-196452:716", "196452:718-196452:726", "196452:728-196452:956",
"196452:958-196452:1004", "196452:1007-196452:1091", "196453:1-196453:74", "196453:77-196453:145", "196453:147-196453:669",
"196453:673-196453:714", "196453:717-196453:799", "196453:802-196453:988", "196453:991-196453:1178", "196453:1180",
"196453:1182-196453:1248", "196453:1250-196453:1528", "196453:1531-196453:1647", "196495:114-196495:180", "196495:182-196495:272",
"196509:1-196509:68", "196531:62-196531:150", "196531:152-196531:253", "196531:256-196531:285", "196531:288-196531:302",
"196531:305-196531:422", "196531:425-196531:440", "198049:1-198049:11", "198049:14-198049:57", "198050:2-198050:155",
"198063:1-198063:37", "198063:40-198063:72", "198063:74-198063:124", "198063:127-198063:294", "198116:36-198116:52",
"198116:54-198116:55", "198116:58-198116:96", "198116:98-198116:112", "198207:1-198207:97", "198208:1-198208:92",
"198208:94-198208:134", "198208:137-198208:147", "198208:150-198208:209", "198210:1-198210:221", "198212:1-198212:574",
"198213:1-198213:107", "198215:1-198215:12", "198230:1-198230:33", "198230:36-198230:57", "198230:60-198230:235",
"198230:237-198230:324", "198230:326-198230:388", "198230:390-198230:459", "198230:462-198230:625", "198230:627-198230:651",
"198230:653-198230:805", "198230:808-198230:811", "198230:814-198230:948", "198230:950-198230:1090", "198230:1093-198230:1103",
"198230:1106-198230:1332", "198230:1335-198230:1380", "198249:1-198249:7", "198269:3-198269:198", "198271:1-198271:91",
"198271:93-198271:170", "198271:173-198271:299", "198271:301-198271:450", "198271:453-198271:513", "198271:516-198271:616",
"198271:619-198271:628", "198271:631-198271:791", "198271:793-198271:797", "198272:1-198272:185", "198272:188-198272:245",
"198272:248-198272:314", "198272:317-198272:433", "198272:436-198272:444", "198272:454-198272:620", "198346:44-198346:47",
"198372:57-198372:110", "198485:68-198485:109", "198485:112-198485:134", "198485:136-198485:181", "198485:184-198485:239",
"198487:1-198487:145", "198487:147-198487:514", "198487:517-198487:668", "198487:671-198487:733", "198487:736-198487:757",
"198487:760-198487:852", "198487:854-198487:994", "198487:997-198487:1434", "198487:1437-198487:1610", "198522:65-198522:144",
"198522:147-198522:208", "198941:102-198941:189", "198941:191-198941:220", "198941:222-198941:241", "198941:243-198941:249",
"198941:252-198941:284", "198954:108-198954:156", "198954:159-198954:277", "198955:1-198955:45", "198955:47-198955:50",
"198955:53-198955:220", "198955:223-198955:269", "198955:271-198955:284", "198955:286-198955:338", "198955:340-198955:580",
"198955:583-198955:742", "198955:744-198955:910", "198955:913-198955:946", "198955:949-198955:1162", "198955:1165-198955:1169",
"198955:1172-198955:1182", "198955:1185-198955:1188", "198955:1190-198955:1246", "198955:1249-198955:1304", "198955:1306-198955:1467",
"198955:1470-198955:1485", "198955:1487-198955:1552", "198969:58-198969:81", "198969:84-198969:247", "198969:249-198969:323",
"198969:325-198969:365", "198969:367-198969:413", "198969:416-198969:466", "198969:468-198969:643", "198969:646-198969:918",
"198969:920-198969:1011", "198969:1013-198969:1175", "198969:1178-198969:1236", "198969:1239-198969:1253", "199008:75-199008:93",
"199008:95-199008:121", "199008:124-199008:208", "199008:211-199008:331", "199008:333-199008:373", "199008:376-199008:482",
"199008:485-199008:605", "199008:608-199008:644", "199011:1-199011:11", "199011:13-199011:24", "199021:59-199021:88",
"199021:91-199021:128", "199021:130-199021:133", "199021:136-199021:309", "199021:311-199021:333", "199021:335-199021:410",
"199021:414-199021:469", "199021:471-199021:533", "199021:535-199021:563", "199021:565-199021:1223", "199021:1226-199021:1479",
"199021:1481-199021:1494", "199318:65-199318:138", "199319:1-199319:7", "199319:9-199319:223", "199319:226-199319:277",
"199319:280-199319:348", "199319:351-199319:358", "199319:360-199319:422", "199319:424-199319:490", "199319:492-199319:493",
"199319:496-199319:612", "199319:615-199319:642", "199319:645-199319:720", "199319:723-199319:728", "199319:730-199319:731",
"199319:734-199319:741", "199319:744-199319:752", "199319:754-199319:943", "199319:945-199319:997", "199336:1-199336:33",
"199336:36-199336:122", "199336:125-199336:231", "199336:234-199336:614", "199336:617-199336:789", "199336:791-199336:977",
"199356:95-199356:121", "199356:123-199356:168", "199356:171-199356:205", "199356:208-199356:231", "199409:25-199409:54",
"199409:56-199409:89", "199409:91-199409:204", "199409:206-199409:290", "199409:293-199409:583", "199409:586-199409:602",
"199409:604-199409:1014", "199409:1016-199409:1300", "199428:61-199428:197", "199428:200-199428:210", "199428:212-199428:382",
"199428:387-199428:414", "199428:417-199428:436", "199428:439-199428:530", "199428:533-199428:648", "199429:1-199429:28",
"199429:30-199429:36", "199429:39-199429:55", "199429:58-199429:101", "199429:103-199429:148", "199429:151-199429:154",
"199435:63-199435:106", "199435:109-199435:261", "199435:263-199435:579", "199435:582-199435:654", "199435:656-199435:696",
"199435:699-199435:1034", "199435:1037-199435:1144", "199435:1147-199435:1327", "199435:1330-199435:1411", "199435:1414-199435:1431",
"199435:1434-199435:1441", "199435:1444-199435:1487", "199435:1489-199435:1610", "199436:1-199436:113", "199436:116-199436:254",
"199436:257-199436:675", "199436:678-199436:748", "199564:1-199564:3", "199569:1-199569:2", "199569:5-199569:136",
"199569:139-199569:367", "199570:1-199570:17", "199571:1-199571:184", "199571:186-199571:360", "199571:363-199571:561",
"199572:1-199572:317", "199573:1-199573:22", "199574:1-199574:53", "199574:56-199574:153", "199574:156-199574:246",
"199608:60-199608:157", "199608:159-199608:209", "199608:211-199608:341", "199608:344-199608:390", "199608:392-199608:461",
"199608:464-199608:800", "199608:802-199608:1064", "199608:1067-199608:1392", "199608:1395-199608:1630", "199608:1633-199608:1904",
"199608:1907-199608:1962", "199608:1965-199608:2252", "199608:2255-199608:2422", "199698:72-199698:94", "199698:96-199698:127",
"199699:1-199699:154", "199699:157-199699:169", "199699:172-199699:410", "199699:412-199699:756", "199703:1-199703:94",
"199703:97-199703:482", "199703:485-199703:529", "199739:66-199739:133", "199751:103-199751:119", "199751:121-199751:127",
"199752:1-199752:141", "199752:144-199752:180", "199752:182-199752:186", "199752:188-199752:211", "199752:214-199752:322",
"199753:1-199753:59", "199754:1-199754:203", "199754:205-199754:325", "199754:328-199754:457", "199754:459-199754:607",
"199754:610-199754:613", "199754:615-199754:806", "199754:808-199754:998", "199804:78-199804:88", "199804:90-199804:181",
"199804:183-199804:235", "199804:238-199804:278", "199804:281-199804:290", "199804:292-199804:519", "199804:522-199804:575",
"199804:577-199804:628", "199804:631-199804:632", "199812:70-199812:141", "199812:144-199812:163", "199812:182-199812:211",
"199812:214-199812:471", "199812:474-199812:505", "199812:508-199812:557", "199812:560-199812:571", "199812:574-199812:623",
"199812:626-199812:751", "199812:754-199812:796", "199832:58-199832:62", "199832:65-199832:118", "199832:121-199832:139",
"199832:142-199832:286", "199833:1-199833:13", "199833:16-199833:103", "199833:105-199833:250", "199833:253-199833:493",
"199833:496-199833:794", "199833:797-199833:1032", "199833:1034-199833:1185", "199833:1188-199833:1239", "199834:1-199834:9",
"199834:11", "199834:14-199834:18", "199834:21-199834:54", "199834:56-199834:57", "199834:62-199834:65",
"199834:69-199834:284", "199834:286-199834:503", "199834:505-199834:942", "199862:59-199862:141", "199864:1-199864:87",
"199864:89", "199864:92-199864:103", "199864:106-199864:372", "199864:374-199864:385", "199864:388-199864:486",
"199867:1-199867:134", "199867:136-199867:172", "199867:174-199867:218", "199867:221-199867:320", "199868:1-199868:21",
"199875:70-199875:150", "199875:152-199875:334", "199876:1-199876:19", "199876:22-199876:95", "199876:97-199876:249",
"199876:252-199876:272", "199876:274-199876:340", "199876:343-199876:362", "199876:365-199876:376", "199877:1-199877:173",
"199877:175-199877:605", "199877:607-199877:701", "199877:703-199877:871", "199960:72-199960:139", "199960:141-199960:197",
"199960:204-199960:232", "199960:235-199960:363", "199960:365-199960:367", "199960:370-199960:380", "199960:383-199960:459",
"199960:461-199960:466", "199960:469-199960:485", "199961:1-199961:211", "199961:213-199961:287", "199967:60-199967:120",
"199967:122-199967:170", "199967:172-199967:198", "199973:73-199973:89", "200041:62-200041:83", "200041:85-200041:157",
"200041:162-200041:274", "200041:277-200041:318", "200041:321-200041:335", "200041:337-200041:386", "200041:388-200041:389",
"200041:392-200041:400", "200041:402-200041:568", "200041:571-200041:593", "200041:595-200041:646", "200041:649-200041:728",
"200041:731-200041:860", "200041:862-200041:930", "200041:932-200041:1096", "200042:1-200042:110", "200042:112-200042:536",
"200049:1-200049:177", "200075:76-200075:139", "200075:142-200075:232", "200075:256-200075:326", "200075:329-200075:422",
"200075:425-200075:431", "200075:434-200075:500", "200075:502-200075:605", "200091:67", "200091:70-200091:151",
"200091:154-200091:172", "200091:174-200091:187", "200091:190-200091:196", "200091:199-200091:201", "200091:204-200091:425",
"200091:428-200091:535", "200091:537-200091:607", "200091:610-200091:879", "200091:881-200091:943", "200091:946-200091:999",
"200091:1001-200091:1025", "200091:1027-200091:1132", "200091:1135-200091:1339", "200091:1341-200091:1433", "200091:1435-200091:1450",
"200091:1453-200091:1523", "200091:1526-200091:1664", "200091:1667-200091:1680", "200091:1683-200091:1710", "200152:74-200152:116",
"200160:52-200160:68", "200161:1-200161:97", "200161:100-200161:112", "200174:81-200174:84", "200177:1-200177:56",
"200178:1-200178:38", "200180:1-200180:18", "200186:1-200186:3", "200186:6-200186:24", "200188:1-200188:24",
"200188:27-200188:28", "200188:31-200188:76", "200188:79-200188:271", "200188:274-200188:352", "200190:1-200190:4",
"200190:6-200190:76", "200190:79-200190:143", "200190:146-200190:159", "200190:162-200190:256", "200190:258-200190:321",
"200190:324-200190:401", "200190:403-200190:453", "200190:456-200190:457", "200190:460-200190:565", "200190:567-200190:588",
"200190:591", "200190:593-200190:595", "200190:597-200190:646", "200190:649-200190:878", "200229:1-200229:33",
"200229:41-200229:219", "200229:222-200229:244", "200229:247-200229:290", "200229:293-200229:624", "200229:627-200229:629",
"200243:69-200243:103", "200243:106-200243:139", "200244:3-200244:304", "200244:307-200244:442", "200244:445-200244:507",
"200244:510-200244:619", "200245:1-200245:103", "200245:105-200245:128", "200245:131-200245:248", "200245:251-200245:357",
"200368:72-200368:180", "200369:1-200369:5", "200369:8-200369:61", "200369:64-200369:360", "200369:363-200369:439",
"200369:441-200369:578", "200369:580-200369:603", "200369:606-200369:684", "200369:686", "200381:8-200381:15",
"200381:18-200381:36", "200381:38-200381:89", "200381:91-200381:195", "200466:134-200466:274", "200473:96-200473:157",
"200473:159-200473:224", "200473:226-200473:304", "200473:306-200473:469", "200473:472-200473:524", "200473:527-200473:542",
"200473:545-200473:619", "200473:622-200473:688", "200473:691-200473:730", "200473:733-200473:738", "200473:740-200473:1324",
"200491:87-200491:107", "200491:110-200491:149", "200491:152-200491:157", "200491:160-200491:197", "200491:199-200491:237",
"200491:240-200491:270", "200491:273", "200491:276-200491:334", "200491:336-200491:360", "200491:363-200491:419",
"200515:97-200515:183", "200519:1-200519:111", "200519:114-200519:126", "200519:129-200519:136", "200519:138-200519:224",
"200519:227-200519:258", "200519:261-200519:350", "200519:353-200519:611", "200519:613-200519:747", "200525:77-200525:149",
"200525:151-200525:164", "200525:166-200525:190", "200525:193-200525:276", "200525:278-200525:311", "200525:314-200525:464",
"200525:467-200525:488", "200525:491-200525:674", "200525:676-200525:704", "200525:707-200525:755", "200525:757-200525:895",
"200525:898-200525:937", "200525:939-200525:990", "200532:1-200532:37", "200599:75-200599:129", "200599:132-200599:137",
"200600:1-200600:183", "200600:186-200600:299", "200600:302-200600:313", "200600:316-200600:324", "200600:327-200600:334",
"200600:336-200600:397", "200600:399-200600:417", "200600:420-200600:526", "200600:529-200600:591", "200600:594-200600:596",
"200600:598-200600:609", "200600:611-200600:660", "200600:663-200600:823", "200600:826-200600:900", "200600:902-200600:943",
"200600:945-200600:1139", "200961:1-200961:115", "200976:94-200976:164", "200990:75-200990:143", "200991:1-200991:42",
"200991:44", "200991:47-200991:80", "200991:83-200991:175", "200991:178-200991:181", "200991:184-200991:252",
"200991:255-200991:632", "200991:635-200991:916", "200991:918-200991:1017", "200991:1019-200991:1048", "200992:1-200992:405",
"200992:408-200992:434", "200992:436-200992:581", "201062:78-201062:268", "201097:83-201097:136", "201097:138-201097:245",
"201097:248-201097:300", "201097:303-201097:370", "201097:372-201097:429", "201097:432-201097:497", "201114:1-201114:14",
"201115:1-201115:73", "201159:70-201159:211", "201164:1-201164:8", "201164:10-201164:94", "201164:96-201164:125",
"201164:128-201164:178", "201164:180-201164:198", "201164:200-201164:271", "201164:274-201164:416", "201164:418",
"201168:1-201168:37", "201168:39-201168:275", "201168:278-201168:481", "201168:483-201168:558", "201168:560-201168:730",
"201173:1-201173:194", "201173:197-201173:586", "201174:1-201174:214", "201174:216-201174:263", "201174:265-201174:339",
"201174:342-201174:451", "201191:75-201191:98", "201191:100-201191:216", "201191:218-201191:389", "201191:392-201191:492",
"201191:494-201191:506", "201191:509-201191:585", "201191:587-201191:594", "201191:597-201191:607", "201191:609-201191:794",
"201191:796-201191:838", "201191:841-201191:974", "201191:977-201191:1105", "201191:1108-201191:1117", "201191:1120-201191:1382",
"201191:1385-201191:1386", "201193:1-201193:19", "201196:1-201196:238", "201196:241-201196:278", "201196:286-201196:299",
"201196:302-201196:338", "201196:341-201196:515", "201196:518-201196:720", "201196:723-201196:789", "201196:803-201196:841",
"201197:1-201197:23", "201202:1-201202:437", "201229:1-201229:5", "201229:8-201229:26", "201229:29-201229:73",
"201278:62-201278:163", "201278:166-201278:229", "201278:232-201278:256", "201278:259-201278:316", "201278:318-201278:595",
"201278:598-201278:938", "201278:942-201278:974", "201278:976-201278:1160", "201278:1163-201278:1304", "201278:1306-201278:1793",
"201278:1796-201278:1802", "201278:1805-201278:1906", "201278:1909-201278:1929", "201278:1932-201278:2174", "201554:70-201554:86",
"201554:88-201554:114", "201554:116-201554:126", "201602:76-201602:81", "201602:83-201602:194", "201602:196-201602:494",
"201602:496-201602:614", "201602:617-201602:635", "201611:87-201611:145", "201611:149-201611:182", "201611:184-201611:186",
"201613:1-201613:42", "201613:44-201613:49", "201613:53-201613:210", "201613:213-201613:215", "201613:218-201613:225",
"201613:228-201613:646", "201624:83-201624:92", "201624:95-201624:240", "201624:270", "201625:211-201625:312",
"201625:315-201625:348", "201625:351-201625:416", "201625:418-201625:588", "201625:591-201625:671", "201625:673-201625:758",
"201625:760-201625:791", "201625:793-201625:944", "201657:77-201657:93", "201657:95-201657:108", "201657:110-201657:118",
"201658:1-201658:19", "201658:21-201658:118", "201658:121-201658:136", "201658:139-201658:288", "201668:78-201668:157",
"201669:1-201669:9", "201669:12-201669:136", "201669:139-201669:141", "201669:143-201669:165", "201671:1-201671:120",
"201671:122-201671:174", "201671:177-201671:462", "201671:464-201671:482", "201671:485-201671:499", "201671:501-201671:545",
"201671:547-201671:571", "201671:574-201671:614", "201671:617-201671:766", "201671:768-201671:896", "201671:899-201671:911",
"201671:914-201671:1007", "201678:1-201678:120", "201679:1-201679:110", "201679:112-201679:241", "201679:244-201679:298",
"201679:302-201679:321", "201679:324-201679:461", "201679:463-201679:483", "201692:78-201692:81", "201692:83-201692:179",
"201705:65-201705:73", "201705:75-201705:109", "201705:111-201705:187", "201706:1-201706:62", "201707:1-201707:23",
"201707:26-201707:42", "201707:45-201707:115", "201707:118-201707:130", "201707:133-201707:160", "201707:163-201707:276",
"201707:279-201707:471", "201707:473-201707:511", "201707:514-201707:545", "201707:547-201707:570", "201707:572-201707:622",
"201707:625-201707:735", "201707:738-201707:806", "201707:809-201707:876", "201707:879-201707:964", "201708:1-201708:79",
"201718:58-201718:108", "201727:67-201727:185", "201729:6-201729:20", "201729:22-201729:75", "201729:77-201729:126",
"201729:129-201729:154", "201729:156-201729:216", "201729:219-201729:244", "201794:58-201794:94", "201802:68-201802:209",
"201802:211-201802:214", "201802:216-201802:220", "201802:223-201802:288", "201802:290-201802:296", "201816:1-201816:72",
"201816:74-201816:105", "201816:107-201816:157", "201817:1-201817:274", "201818:1", "201819:1-201819:94",
"201819:96-201819:241", "201824:1-201824:139", "201824:141-201824:176", "201824:179-201824:286", "201824:289-201824:492",
"202012:98-202012:121", "202012:126-202012:131", "202013:1-202013:2", "202013:5-202013:35", "202013:38-202013:57",
"202014:1-202014:5", "202014:8-202014:14", "202014:16-202014:18", "202014:20-202014:77", "202014:79-202014:102",
"202014:104-202014:174", "202014:177-202014:190", "202014:192-202014:196", "202016:1-202016:48", "202016:51-202016:134",
"202016:137-202016:177", "202016:179-202016:743", "202016:745-202016:831", "202016:834-202016:890", "202016:893-202016:896",
"202016:898-202016:932", "202016:934-202016:1010", "202044:84-202044:101", "202044:104-202044:266", "202044:268-202044:461",
"202044:463-202044:466", "202045:1-202045:30", "202045:33-202045:72", "202045:75-202045:528", "202045:531-202045:601",
"202045:603-202045:785", "202045:788-202045:809", "202045:822-202045:823", "202054:6-202054:266", "202054:268-202054:489",
"202054:492-202054:605", "202054:608-202054:631", "202060:76-202060:142", "202060:144-202060:154", "202060:156-202060:244",
"202060:246-202060:497", "202060:499-202060:642", "202060:644-202060:682", "202060:684-202060:743", "202060:746-202060:936",
"202074:66-202074:174", "202075:1-202075:18", "202075:21-202075:187", "202075:189-202075:214", "202075:217-202075:247",
"202075:250-202075:342", "202075:345-202075:406", "202075:409-202075:497", "202075:500-202075:537", "202075:539",
"202075:542-202075:560", "202075:562-202075:615", "202075:618-202075:628", "202084:83-202084:156", "202084:159-202084:177",
"202084:179-202084:180", "202084:182-202084:239", "202087:1-202087:25", "202087:28-202087:208", "202087:210-202087:357",
"202087:359-202087:652", "202087:655-202087:853", "202087:856-202087:1093", "202088:1-202088:286", "202093:1-202093:104",
"202093:107-202093:320", "202093:322-202093:360", "202116:59-202116:60", "202178:67-202178:78", "202178:80-202178:88",
"202178:91-202178:177", "202178:180-202178:186", "202178:188-202178:337", "202178:340-202178:377", "202178:379-202178:425",
"202178:428-202178:475", "202178:478-202178:548", "202178:551-202178:717", "202178:720-202178:965", "202178:967-202178:1444",
"202178:1447-202178:1505", "202178:1508-202178:1519", "202178:1522-202178:1555", "202205:94-202205:114", "202209:1-202209:48",
"202209:51-202209:142", "202237:39-202237:128", "202237:131", "202237:134-202237:219", "202237:222-202237:235",
"202237:238-202237:275", "202237:277-202237:289", "202237:291-202237:316", "202237:319-202237:419", "202237:422-202237:538",
"202237:540-202237:936", "202237:939-202237:950", "202237:952-202237:976", "202237:979-202237:1079", "202272:76-202272:112",
"202272:115-202272:141", "202272:144-202272:185", "202272:188-202272:205", "202272:208-202272:305", "202272:307-202272:313",
"202272:315-202272:371", "202272:436-202272:480", "202272:483-202272:555", "202272:558-202272:577", "202272:579-202272:683",
"202272:686-202272:705", "202272:707-202272:740", "202272:742-202272:890", "202272:937-202272:1295", "202272:1299-202272:1481",
"202299:68-202299:84", "202299:87-202299:141", "202299:143-202299:193", "202299:196-202299:358", "202299:361-202299:379",
"202299:382-202299:414", "202299:416-202299:452", "202299:455-202299:555", "202305:1-202305:89", "202305:92-202305:130",
"202305:133-202305:323", "202314:67-202314:104", "202314:107-202314:265", "202314:268-202314:278", "202328:46-202328:89",
"202328:92-202328:156", "202328:158-202328:276", "202328:278-202328:291", "202328:294-202328:434", "202328:437-202328:460",
"202328:463-202328:586", "202328:588-202328:610", "202328:612-202328:614", "202333:1-202333:235", "202389:81-202389:182",
"202389:185-202389:190", "202389:192-202389:199", "202469:87-202469:158", "202469:160-202469:174", "202469:177-202469:352",
"202472:1-202472:96", "202472:99-202472:112", "202477:1-202477:129", "202477:131-202477:150", "202478:1-202478:177",
"202478:180-202478:183", "202478:186-202478:219", "202478:222-202478:360", "202478:362-202478:506", "202478:509-202478:531",
"202478:534-202478:718", "202478:720-202478:927", "202478:929-202478:973", "202478:975-202478:1029", "202478:1031-202478:1186",
"202478:1189-202478:1212", "202478:1215-202478:1248", "202504:77-202504:96", "202504:99-202504:133", "202504:135-202504:182",
"202504:184-202504:211", "202504:213-202504:241", "202504:243-202504:392", "202504:395-202504:527", "202504:529-202504:617",
"202504:620-202504:715", "202504:718-202504:763", "202504:766-202504:1172", "202504:1174-202504:1247", "202504:1250-202504:1471",
"202504:1474-202504:1679", "202504:1682-202504:1704", "202972:1-202972:30", "202972:33-202972:184", "202972:186-202972:290",
"202972:292-202972:295", "202972:298-202972:371", "202972:374-202972:429", "202972:431-202972:544", "202973:1-202973:234",
"202973:237-202973:305", "202973:308-202973:437", "202973:439-202973:530", "202973:532-202973:541", "202973:544-202973:552",
"202973:555-202973:851", "202973:853-202973:1408", "203002:77-203002:128", "203002:130-203002:141", "203002:144-203002:207",
"203002:209-203002:267", "203002:270-203002:360", "203002:362-203002:501", "203002:504-203002:641", "203002:643-203002:669",
"203002:671", "203002:674-203002:717", "203002:720-203002:1034", "203002:1037-203002:1070", "203002:1073-203002:1370",
"203002:1372-203002:1392", "203002:1395-203002:1410", "203002:1413-203002:1596", "203709:1-203709:121", "203742:1-203742:29",
"203777:103-203777:113", "203830:82-203830:182", "203832:1-203832:11", "203833:1-203833:70", "203833:73-203833:128",
"203834:1-203834:40", "203835:1-203835:70", "203835:73-203835:358", "203853:122-203853:222", "203894:82-203894:272",
"203894:275-203894:477", "203894:480-203894:902", "203894:905-203894:1319", "203909:79-203909:113", "203909:116-203909:117",
"203909:120-203909:140", "203909:143-203909:382", "203912:1-203912:306", "203912:308-203912:566", "203912:569-203912:609",
"203912:611-203912:698", "203912:701-203912:820", "203912:823-203912:865", "203912:867-203912:1033", "203912:1035-203912:1321",
"203987:1-203987:9", "203987:12-203987:241", "203987:243-203987:339", "203987:342-203987:781", "203987:784-203987:1014",
"203992:1-203992:15", "203994:1-203994:56", "203994:59-203994:136", "203994:139-203994:304", "203994:306-203994:342",
"203994:344-203994:425", "204100:117-204100:139", "204101:1-204101:74", "204113:82-204113:96", "204113:98-204113:102",
"204113:105-204113:127", "204113:129-204113:191", "204113:194-204113:258", "204113:261-204113:327", "204113:329-204113:388",
"204113:390-204113:400", "204113:402-204113:583", "204113:585-204113:690", "204114:1-204114:358", "204238:23-204238:52",
"204238:55", "204250:92-204250:118", "204250:121-204250:177", "204250:179-204250:285", "204250:287-204250:336",
"204250:339-204250:400", "204250:403-204250:521", "204250:524-204250:543", "204250:546-204250:682", "204250:684-204250:801",
"204511:1-204511:56", "204541:5-204541:39", "204541:42", "204541:44-204541:139", "204541:142-204541:149",
"204541:151-204541:204", "204544:1-204544:11", "204544:13-204544:93", "204544:96-204544:195", "204544:197-204544:224",
"204544:226-204544:334", "204544:337-204544:426", "204552:1-204552:9", "204553:1-204553:51", "204553:53-204553:60",
"204553:63-204553:101", "204554:1-204554:5", "204554:7-204554:221", "204554:224-204554:455", "204554:458-204554:470",
"204554:472-204554:481", "204554:483-204554:514", "204555:1-204555:329", "204555:331-204555:334", "204563:91-204563:99",
"204563:102-204563:178", "204563:180-204563:219", "204563:222-204563:229", "204563:231-204563:364", "204563:366",
"204563:369-204563:470", "204563:473-204563:524", "204563:527-204563:571", "204564:1-204564:84", "204564:87-204564:89",
"204564:92-204564:159", "204564:161-204564:187", "204564:190-204564:191", "204564:193-204564:293", "204564:296-204564:315",
"204564:317-204564:340", "204564:343-204564:427", "204564:429-204564:434", "204564:437-204564:735", "204564:737-204564:855",
"204564:858-204564:1206", "204564:1209-204564:1248", "204564:1251-204564:1284", "204565:1-204565:48", "204566:1-204566:12",
"204567:1-204567:38", "204576:49-204576:192", "204576:195-204576:301", "204577:1-204577:46", "204577:49-204577:64",
"204577:67-204577:105", "204577:107-204577:170", "204577:173-204577:181", "204577:183-204577:193", "204577:196-204577:653",
"204577:656-204577:669", "204577:671-204577:740", "204577:742-204577:913", "204577:915-204577:1057", "204577:1059-204577:1115",
"204577:1117-204577:1282", "204599:73-204599:83", "204599:85-204599:94", "204599:97-204599:121", "204599:124-204599:125",
"204599:128-204599:173", "204599:175-204599:240", "204599:243-204599:245", "204599:248-204599:264", "204599:266-204599:292",
"204599:294-204599:334", "204601:1-204601:25", "204601:28-204601:62", "204601:65-204601:80", "204601:83-204601:89",
"204601:92-204601:290", "204601:292-204601:563", "204601:565-204601:591", "204601:593-204601:652", "204601:655-204601:780",
"204601:783-204601:812", "204601:814-204601:892", "204601:894-204601:984", "204601:986-204601:1003", "204601:1006-204601:1038",
"204601:1040-204601:1088", "204601:1091-204601:1102", "204601:1105-204601:1161", "204601:1164-204601:1250", "205086:95-205086:149",
"205111:88-205111:390", "205111:392-205111:441", "205111:444-205111:446", "205158:81-205158:289", "205158:292-205158:313",
"205158:315-205158:473", "205158:476-205158:591", "205158:594-205158:595", "205158:597-205158:612", "205158:615-205158:663",
"205158:665-205158:667", "205158:672-205158:685", "205158:687-205158:733", "205193:80-205193:109", "205193:111-205193:349",
"205193:352-205193:486", "205193:488-205193:650", "205193:652-205193:712", "205193:714-205193:902", "205217:1-205217:12",
"205217:16-205217:111", "205217:113-205217:171", "205217:174-205217:250", "205217:253-205217:318", "205233:94-205233:153",
"205236:1-205236:190", "205236:193-205236:207", "205236:209-205236:260", "205236:263-205236:331", "205236:334-205236:352",
"205238:1-205238:6", "205238:9-205238:199", "205238:202-205238:254", "205238:256-205238:304", "205238:306-205238:355",
"205238:358-205238:381", "205238:384-205238:596", "205238:598-205238:617", "205303:35-205303:54", "205303:90-205303:132",
"205303:135-205303:144", "205310:76-205310:306", "205310:309-205310:313", "205310:316", "205310:319-205310:321",
"205310:324-205310:457", "205310:460-205310:559", "205311:1-205311:85", "205311:88-205311:92", "205311:95-205311:183",
"205311:186-205311:395", "205311:397-205311:592", "205311:595-205311:910", "205311:913-205311:1260", "205339:71-205339:175",
"205339:178-205339:213", "205339:216-205339:230", "205339:233-205339:262", "205339:265-205339:404", "205344:1-205344:83",
"205344:86-205344:104", "205344:106-205344:359", "205344:362-205344:431", "205344:433-205344:949", "205344:951-205344:967",
"205344:969-205344:1127", "205344:1129-205344:1346", "205344:1348-205344:1586", "205515:82-205515:201", "205515:203-205515:216",
"205519:1-205519:47", "205519:50-205519:172", "205519:175-205519:367", "205519:370-205519:386", "205519:389-205519:472",
"205526:1-205526:269", "205526:272-205526:277", "205526:280-205526:332", "205614:1-205614:4", "205614:7-205614:40",
"205617:1-205617:29", "205617:32-205617:102", "205617:105-205617:123", "205617:125-205617:140", "205617:143-205617:264",
"205617:266-205617:448", "205617:451-205617:532", "205617:534-205617:547", "205618:1-205618:12", "205620:1-205620:175",
"205666:60-205666:119", "205666:122-205666:165", "205666:168-205666:259", "205666:261-205666:322", "205666:325-205666:578",
"205666:580-205666:594", "205666:597-205666:721", "205666:724-205666:739", "205667:1-205667:165", "205667:168-205667:282",
"205667:285-205667:318", "205667:321-205667:412", "205667:415-205667:689", "205667:692-205667:751", "205667:754-205667:774",
"205667:777-205667:1109", "205683:76-205683:82", "205683:85-205683:178", "205683:181-205683:198", "205683:201-205683:305",
"205690:1-205690:40", "205694:1-205694:205", "205694:208-205694:230", "205694:233-205694:347", "205694:350-205694:452",
"205694:455-205694:593", "205694:595-205694:890", "205718:49-205718:75", "205718:78-205718:97", "205718:100-205718:103",
"205718:105-205718:176", "205718:178-205718:338", "205718:341-205718:361", "205718:363-205718:524", "205718:527-205718:531",
"205718:534-205718:589", "205718:591-205718:694", "205774:1-205774:80", "205777:1-205777:8", "205781:1-205781:89",
"205781:91-205781:197", "205781:200-205781:502", "205826:80-205826:232", "205826:235-205826:303", "205826:306-205826:468",
"205833:84-205833:86", "205833:89-205833:121", "205833:123-205833:155", "205833:157-205833:165", "205833:167-205833:173",
"205833:176-205833:219", "205833:221-205833:267", "205833:270-205833:312", "205833:315-205833:346", "205833:350-205833:355",
"205833:360-205833:366", "205834:1-205834:12", "205834:14-205834:195", "205908:68-205908:200", "205908:202-205908:209",
"205921:22-205921:73", "205921:76-205921:268", "205921:271-205921:394", "205921:397-205921:401", "205921:410-205921:428",
"205921:431-205921:498", "205921:500-205921:571", "205921:574-205921:779", "205921:782-205921:853", "206066:89-206066:146",
"206088:86-206088:159", "206088:161-206088:178", "206088:181-206088:199", "206088:202-206088:286", "206102:83-206102:116",
"206102:120-206102:130", "206102:133-206102:208", "206102:211-206102:235", "206102:238-206102:246", "206102:249-206102:278",
"206102:281-206102:349", "206187:107-206187:169", "206187:172-206187:242", "206187:245-206187:288", "206187:290-206187:340",
"206187:343-206187:427", "206187:429-206187:435", "206187:437-206187:486", "206187:489-206187:569", "206187:571-206187:647",
"206187:649-206187:662", "206187:664-206187:708", "206188:1-206188:40", "206188:42-206188:55", "206199:1-206199:75",
"206199:77-206199:82", "206199:85-206199:114", "206207:82-206207:130", "206207:132-206207:176", "206207:179-206207:194",
"206207:196-206207:388", "206207:390-206207:419", "206207:422-206207:447", "206207:450-206207:569", "206207:572-206207:690",
"206208:1-206208:470", "206208:472-206208:518", "206210:11-206210:25", "206210:28-206210:275", "206210:277-206210:298",
"206210:300-206210:383", "206210:386-206210:466", "206243:62-206243:169", "206243:172-206243:196", "206243:199-206243:354",
"206243:357-206243:433", "206243:435-206243:448", "206243:451-206243:533", "206243:536-206243:554", "206243:557-206243:723",
"206243:726-206243:905", "206245:1-206245:62", "206246:1-206246:14", "206246:16-206246:237", "206246:240-206246:285",
"206246:288-206246:407", "206246:412-206246:676", "206246:678-206246:704", "206246:706-206246:785", "206246:787-206246:962",
"206246:965-206246:997", "206246:1000-206246:1198", "206246:1201-206246:1290", "206257:1-206257:29", "206258:1-206258:36",
"206258:39-206258:223", "206258:226-206258:249", "206302:1-206302:8", "206302:11-206302:33", "206302:36-206302:44",
"206302:47-206302:82", "206302:84-206302:108", "206302:110-206302:149", "206302:151-206302:186", "206302:189-206302:229",
"206302:231-206302:232", "206302:234-206302:241", "206302:243-206302:276", "206303:1-206303:19", "206303:23-206303:286",
"206304:1-206304:4", "206304:6-206304:62", "206331:91-206331:222", "206331:225-206331:312", "206389:88-206389:185",
"206389:187-206389:249", "206389:252-206389:272", "206389:275-206389:392", "206391:1-206391:55", "206391:57-206391:91",
"206401:69-206401:90", "206401:92-206401:194", "206401:197-206401:210", "206401:212-206401:249", "206401:251-206401:265",
"206401:267-206401:409", "206446:92-206446:141", "206446:143-206446:159", "206446:162-206446:205", "206446:208-206446:301",
"206446:304-206446:442", "206446:445", "206446:448-206446:474", "206446:476-206446:616", "206446:619-206446:872",
"206446:874-206446:910", "206446:912-206446:948", "206446:950-206446:989", "206446:992-206446:1030", "206446:1033-206446:1075",
"206446:1109-206446:1149", "206448:1-206448:143", "206448:145-206448:559", "206448:561-206448:1170", "206448:1173-206448:1231",
"206448:1235-206448:1237", "206466:24-206466:137", "206466:140-206466:277", "206466:280-206466:296", "206466:299-206466:303",
"206466:306-206466:405", "206466:407-206466:419", "206466:422-206466:477", "206466:480-206466:511", "206466:514-206466:676",
"206476:73-206476:129", "206476:133-206476:137", "206476:140-206476:141", "206476:143-206476:219", "206477:1-206477:14",
"206477:16-206477:31", "206477:33-206477:41", "206477:44-206477:51", "206477:53-206477:70", "206477:73-206477:75",
"206477:77-206477:89", "206477:91-206477:94", "206477:97-206477:115", "206477:118-206477:184", "206478:1-206478:27",
"206478:29-206478:136", "206478:139-206478:144", "206484:73-206484:95", "206484:98-206484:133", "206484:136-206484:163",
"206484:166-206484:186", "206484:189-206484:384", "206484:387-206484:463", "206484:465-206484:551", "206484:554",
"206484:556-206484:669", "206512:91-206512:123", "206512:125-206512:133", "206512:136-206512:161", "206512:163-206512:190",
"206512:193-206512:201", "206512:203-206512:212", "206512:214-206512:332", "206512:334-206512:584", "206512:587-206512:604",
"206512:607-206512:1005", "206512:1008-206512:1123", "206512:1126-206512:1163", "206512:1165-206512:1211", "206513:3-206513:39",
"206513:42-206513:188", "206513:191-206513:234", "206513:237-206513:238", "206513:241-206513:323", "206542:1-206542:115",
"206542:117-206542:165", "206542:168-206542:511", "206542:514-206542:547", "206542:550-206542:603", "206542:606-206542:668",
"206542:671-206542:727", "206542:730-206542:739", "206542:741-206542:833", "206550:77-206550:132", "206550:135-206550:144",
"206572:37-206572:47", "206573:2-206573:14", "206574:1-206574:87", "206575:1-206575:7", "206575:10",
"206575:12-206575:69", "206594:72-206594:107", "206594:110-206594:246", "206594:249-206594:281", "206595:1-206595:34",
"206595:37-206595:42", "206595:45-206595:193", "206596:1-206596:13", "206596:15-206596:220", "206596:222-206596:228",
"206596:231-206596:236", "206596:239-206596:292", "206596:295-206596:695", "206596:697-206596:728", "206596:730-206596:810",
"206598:1-206598:81", "206598:83-206598:103", "206598:105-206598:588", "206598:591-206598:657", "206598:659-206598:719",
"206605:1-206605:36", "206605:39-206605:78", "206744:49-206744:157", "206744:160-206744:192", "206744:195-206744:395",
"206744:398-206744:452", "206745:1-206745:81", "206745:84-206745:199", "206745:202-206745:224", "206745:227-206745:237",
"206745:240-206745:304", "206745:306-206745:318", "206745:321-206745:720", "206745:723-206745:796", "206745:799-206745:894",
"206745:897-206745:944", "206745:946-206745:1106", "206745:1108-206745:1524", "206745:1527-206745:1862", "206745:1988-206745:1996",
"206859:79-206859:210", "206859:212-206859:258", "206859:260-206859:323", "206859:325-206859:356", "206859:359-206859:609",
"206859:612-206859:681", "206859:684-206859:732", "206859:734-206859:768", "206859:771-206859:808", "206859:811-206859:827",
"206859:830-206859:848", "206866:1-206866:30", "206866:33-206866:113", "206866:115-206866:274", "206868:1-206868:3",
"206868:10-206868:16", "206869:1-206869:251", "206869:253-206869:271", "206869:274-206869:502", "206869:507-206869:520",
"206869:522-206869:566", "206869:568-206869:752", "206897:1-206897:34", "206897:38-206897:61", "206897:63-206897:102",
"206897:109", "206897:111-206897:112", "206897:114-206897:131", "206897:133-206897:137", "206901:1-206901:98",
"206906:1-206906:31", "206906:38-206906:94", "206906:96-206906:136", "206906:138-206906:139", "206906:142-206906:149",
"206906:151-206906:175", "206906:177-206906:206", "206940:1-206940:151", "206940:153", "206940:155-206940:298",
"206940:301-206940:382", "206940:384-206940:712", "206940:715-206940:803", "206940:805-206940:960", "206940:963-206940:1027",
"207099:83-207099:134", "207099:137-207099:172", "207099:175-207099:213", "207099:216-207099:314", "207099:316-207099:320",
"207099:323-207099:330", "207099:333-207099:367", "207099:370-207099:481", "207099:484-207099:602", "207099:605-207099:755",
"207099:757-207099:1046", "207099:1048-207099:1171", "207100:1-207100:91", "207100:94", "207214:57-207214:112",
"207214:114-207214:177", "207214:179-207214:181", "207214:184-207214:196", "207214:199-207214:220", "207214:223-207214:262",
"207214:265-207214:405", "207214:408-207214:482", "207214:485-207214:640", "207214:643-207214:708", "207214:718-207214:757",
"207214:759-207214:808", "207214:811-207214:829", "207217:1-207217:32", "207219:1-207219:112", "207220:1-207220:160",
"207221:1-207221:102", "207222:1-207222:17", "207222:20-207222:289", "207231:70-207231:84", "207231:86-207231:121",
"207231:123-207231:184", "207231:187-207231:189", "207231:192-207231:303", "207231:306-207231:354", "207231:357-207231:481",
"207231:484-207231:504", "207231:508-207231:549", "207231:552-207231:626", "207231:628-207231:690", "207231:693-207231:875",
"207231:878-207231:1000", "207231:1003-207231:1170", "207231:1173-207231:1187", "207231:1189-207231:1227", "207231:1229-207231:1415",
"207231:1418-207231:1445", "207231:1447-207231:1505", "207233:1-207233:119", "207233:121-207233:148", "207269:80-207269:394",
"207269:397-207269:436", "207269:439-207269:463", "207269:466-207269:551", "207269:568-207269:577", "207273:3-207273:877",
"207279:68-207279:138", "207279:141-207279:149", "207279:151-207279:237", "207279:240-207279:266", "207279:269-207279:307",
"207279:309-207279:416", "207279:498-207279:551", "207279:554-207279:640", "207279:643-207279:961", "207279:963-207279:1095",
"207279:1098-207279:1160", "207320:1-207320:110", "207320:112-207320:350", "207371:72-207371:117", "207371:120-207371:124",
"207372:1-207372:27", "207372:30-207372:113", "207372:116-207372:154", "207372:156-207372:174", "207372:176-207372:478",
"207372:480-207372:496", "207397:32-207397:77", "207397:80-207397:140", "207397:143-207397:179", "207398:1-207398:14",
"207398:16-207398:33", "207454:79-207454:95", "207454:98-207454:123", "207454:126-207454:259", "207454:261-207454:363",
"207454:365-207454:458", "207454:461-207454:498", "207454:501-207454:609", "207454:612-207454:632", "207454:635-207454:781",
"207454:784-207454:866", "207454:869-207454:974", "207454:977-207454:1064", "207454:1067-207454:1079", "207454:1081-207454:1321",
"207454:1323-207454:1464", "207454:1467-207454:1569", "207454:1571-207454:1604", "207454:1607-207454:1712", "207454:1714-207454:1988",
"207469:1-207469:31", "207469:34-207469:45", "207477:76-207477:104", "207477:107-207477:111", "207477:114-207477:147",
"207477:150-207477:295", "207477:298-207477:483", "207477:486-207477:494", "207477:497-207477:527", "207477:530-207477:563",
"207477:565-207477:570", "207487:50-207487:98", "207487:101-207487:311", "207487:313-207487:359", "207487:363-207487:468",
"207487:471-207487:472", "207488:1-207488:63", "207488:66-207488:92", "207488:95-207488:113", "207488:116-207488:198",
"207488:200-207488:250", "207488:252-207488:288", "207488:291-207488:365", "207488:368-207488:377", "207488:379-207488:440",
"207490:1-207490:48", "207490:51-207490:111", "207491:1-207491:176", "207491:179-207491:458", "207492:1-207492:20",
"207492:23-207492:298", "207515:79-207515:109", "207515:112-207515:132", "207515:134-207515:208", "207515:211-207515:225",
"207515:228-207515:320", "207515:322-207515:381", "207515:383-207515:498", "207515:500-207515:730", "207515:733-207515:849",
"207515:851-207515:954", "207515:957-207515:994", "207515:997-207515:1052", "207515:1055-207515:1143", "207515:1145-207515:1211",
"207517:1-207517:12", "207517:15-207517:57", "207518:1-207518:59", "207518:61-207518:83", "207882:22-207882:45",
"207883:1", "207883:3-207883:4", "207883:7-207883:75", "207884:1-207884:106", "207884:108-207884:183",
"207885:1-207885:90", "207886:1-207886:30", "207886:32-207886:90", "207886:92-207886:156", "207886:158-207886:166",
"207886:168-207886:171", "207889:1-207889:43", "207889:47-207889:57", "207889:60-207889:303", "207889:306-207889:442",
"207889:445", "207889:447-207889:551", "207889:553-207889:731", "207889:733-207889:907", "207889:910-207889:945",
"207898:1-207898:33", "207898:36-207898:57", "207898:60-207898:235", "207898:239-207898:257", "207898:260-207898:277",
"207905:75-207905:196", "207905:198-207905:281", "207905:284-207905:329", "207905:331-207905:402", "207905:404-207905:565",
"207905:568-207905:672", "207905:675-207905:805", "207905:807-207905:850", "207905:852-207905:861", "207905:864-207905:884",
"207905:886-207905:1180", "207905:1183-207905:1283", "207905:1285-207905:1331", "207905:1333-207905:1515", "207905:1518-207905:1734",
"207905:1737-207905:1796", "207920:84-207920:146", "207920:149-207920:241", "207920:243-207920:261", "207920:264-207920:291",
"207920:294-207920:486", "207920:489-207920:518", "207920:520-207920:598", "207920:600-207920:708", "207920:710-207920:826",
"207921:1-207921:37", "207921:40-207921:58", "207922:1-207922:69", "207922:71-207922:100", "207922:103-207922:126",
"207922:129-207922:242", "207922:274-207922:291", "207924:1-207924:52", "207924:54-207924:171", "207924:173-207924:178",
"207924:181-207924:339", "208307:2-208307:42", "208307:45", "208307:47-208307:70", "208307:72-208307:147",
"208307:150-208307:252", "208307:256-208307:259", "208307:262-208307:275", "208307:278-208307:342", "208307:345-208307:450",
"208307:453-208307:527", "208307:530-208307:583", "208307:586-208307:605", "208307:608-208307:616", "208307:618-208307:667",
"208307:670-208307:761", "208307:763-208307:798", "208307:800-208307:889", "208307:891-208307:893", "208307:896-208307:1055",
"208307:1057-208307:1205", "208307:1208-208307:1294", "208307:1297-208307:1328", "208339:77-208339:89", "208339:91-208339:122",
"208339:125-208339:208", "208339:211-208339:346", "208339:349-208339:363", "208341:1-208341:84", "208341:87-208341:117",
"208341:120-208341:513", "208341:515-208341:685", "208341:688-208341:693", "208341:695-208341:775", "208341:777-208341:824",
"208351:83-208351:97", "208351:100-208351:356", "208351:359-208351:367", "208351:369", "208352:1-208352:15",
"208352:17", "208352:19", "208353:1-208353:76", "208353:78-208353:269", "208353:271-208353:348",
"208357:1-208357:70", "208357:73-208357:507", "208390:72-208390:128", "208390:130-208390:169", "208391:52-208391:82",
"208391:84-208391:162", "208391:164-208391:216", "208391:219-208391:493", "208391:495-208391:498", "208391:500-208391:523",
"208391:526-208391:533", "208391:535-208391:588", "208391:591-208391:660", "208391:663-208391:869", "208427:49-208427:89",
"208427:92-208427:161", "208427:164", "208427:166-208427:173", "208427:175-208427:268", "208427:271-208427:312",
"208427:315", "208427:317-208427:335", "208427:337-208427:361", "208427:364-208427:402", "208427:404-208427:422",
"208427:425-208427:577", "208427:580-208427:647", "208428:1-208428:58", "208428:61-208428:68", "208428:70-208428:156",
"208428:159-208428:227", "208429:1-208429:56", "208429:59-208429:139", "208429:141-208429:159", "208429:162-208429:237",
"208429:240-208429:440", "208429:442-208429:452", "208429:455-208429:589", "208429:592-208429:712", "208429:715-208429:922",
"208487:2-208487:26", "208487:29-208487:159", "208487:161-208487:307", "208487:309-208487:459", "208487:462-208487:476",
"208487:479-208487:621", "208509:71-208509:232", "208538:2-208538:43", "208540:1-208540:26", "208540:29-208540:98",
"208541:1-208541:57", "208541:59-208541:173", "208541:175-208541:376", "208541:378-208541:413", "208551:119-208551:193",
"208551:195-208551:212", "208551:215-208551:300", "208551:303-208551:354", "208551:356-208551:554", "208551:557-208551:580",
"208686:73-208686:79", "208686:82-208686:181", "208686:183-208686:224", "208686:227-208686:243", "208686:246-208686:311",
"208686:313-208686:459" ) ),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_101.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_102.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_103.root')
)
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
19a7d178d64a161292e6a1ec89c749b9143c3dcb
|
f615b8ae24cc2e8cd8eb4605de95cd96aa1d84a5
|
/egohands_setup.py
|
b45d7c835e1b4f0e8aaa1e7693053ae2e085f4ce
|
[] |
no_license
|
tfygg/deeptraining_hands
|
7b98d279cfe90e2be76eb720fae9cc84847a9b08
|
7aaa8a16d7170c8da9bc5ec39b076352e414ed82
|
refs/heads/master
| 2021-09-12T15:46:55.771089
| 2018-04-18T07:07:39
| 2018-04-18T07:07:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,708
|
py
|
"""
THIS CODE IS TAKEN FROM VICTOR DIBIA, WHO ALSO WORKED ON THE SAME TOPIC
UNFORTUNATELY 2 MONTHS BEFORE I HAD THE IDEA ;)
BUT THIS PIECE OF CODE HERE IS PERFECT, SO HANDS DOWN
ALL I DID WAS ALTER IT A BIT TO MY NEEDS
SEE HIS REPO:
https://github.com/victordibia/handtracking
"""
import scipy.io as sio
import numpy as np
import os
import gc
import cv2
import time
import xml.etree.cElementTree as ET
import random
import shutil as sh
from shutil import copyfile
import zipfile
import six.moves.urllib as urllib
import csv
def save_csv(csv_path, csv_content):
    """Write *csv_content* (a list of rows, each row a list of cells) to
    *csv_path*, overwriting any existing file.

    newline='' is required when handing a file to csv.writer: the csv
    module does its own line-ending translation, and without it every row
    is followed by a blank line on Windows.
    """
    # Context manager guarantees the file is flushed and closed.
    with open(csv_path, 'w', newline='') as csvfile:
        wr = csv.writer(csvfile)
        for i in range(len(csv_content)):
            wr.writerow(csv_content[i])
def get_bbox_visualize(base_path, dir):
    """For one egohands folder: load polygons.mat, derive an axis-aligned
    bounding box for each hand polygon, draw the annotation for visual
    verification, and write one csv per image with rows of
    [filename, width, height, "hand", xmin, ymin, xmax, ymax].
    """
    image_path_array = []
    for root, dirs, filenames in sorted(os.walk(base_path + dir)):
        for f in filenames:
            if(f.split(".")[1] == "jpg"):
                img_path = base_path + dir + "/" + f
                image_path_array.append(img_path)

    # Sort so image order lines up with the polygon order in polygons.mat.
    image_path_array.sort()
    boxes = sio.loadmat(base_path + dir + "/polygons.mat")
    # there are 100 of these per folder in the egohands dataset
    polygons = boxes["polygons"][0]
    # first = polygons[0]
    # print(len(first))
    pointindex = 0

    for first in polygons:
        font = cv2.FONT_HERSHEY_SIMPLEX

        img_id = image_path_array[pointindex]
        img = cv2.imread(img_id)

        img_params = {}
        img_params["width"] = np.size(img, 1)
        img_params["height"] = np.size(img, 0)
        head, tail = os.path.split(img_id)
        img_params["filename"] = tail
        img_params["path"] = os.path.abspath(img_id)
        img_params["type"] = "train"
        pointindex += 1

        boxarray = []
        csvholder = []
        for pointlist in first:
            pst = np.empty((0, 2), int)
            max_x = max_y = min_x = min_y = 0
            findex = 0
            for point in pointlist:
                if(len(point) == 2):
                    x = int(point[0])
                    y = int(point[1])

                    # Seed the box with the first vertex, then widen it.
                    if(findex == 0):
                        min_x = x
                        min_y = y
                    findex += 1
                    max_x = x if (x > max_x) else max_x
                    min_x = x if (x < min_x) else min_x
                    max_y = y if (y > max_y) else max_y
                    min_y = y if (y < min_y) else min_y
                    # print(index, "====", len(point))

                    appeno = np.array([[x, y]])
                    pst = np.append(pst, appeno, axis=0)
                    cv2.putText(img, ".", (x, y), font, 0.7,
                                (255, 255, 255), 2, cv2.LINE_AA)

            hold = {}
            hold['minx'] = min_x
            hold['miny'] = min_y
            hold['maxx'] = max_x
            hold['maxy'] = max_y
            # Skip degenerate boxes (empty polygons leave all coords at 0).
            if (min_x > 0 and min_y > 0 and max_x > 0 and max_y > 0):
                boxarray.append(hold)
                labelrow = [tail,
                            np.size(img, 1), np.size(img, 0), "hand", min_x, min_y, max_x, max_y]
                csvholder.append(labelrow)

            cv2.polylines(img, [pst], True, (0, 255, 255), 1)
            cv2.rectangle(img, (min_x, max_y),
                          (max_x, min_y), (0, 255, 0), 1)

        csv_path = img_id.split(".")[0]
        # Only write the csv (and show the overlay) if it does not exist yet,
        # so re-runs do not duplicate work.
        if not os.path.exists(csv_path + ".csv"):
            cv2.putText(img, "DIR : " + dir + " - " + tail, (20, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
            cv2.imshow('Verifying annotation ', img)
            save_csv(csv_path + ".csv", csvholder)
            print("===== saving csv file for ", tail)
        cv2.waitKey(1)  # Change this to 1000 to see every single frame
def create_directory(dir_path):
    """Create *dir_path* (including missing parents) if it does not exist.

    exist_ok avoids the check-then-create race of the original
    os.path.exists() + os.makedirs() sequence when several processes run
    the pipeline at once.
    """
    os.makedirs(dir_path, exist_ok=True)
# combine all individual csv files for each image into a single csv file per folder.
def generate_label_files(image_dir):
    """Merge the per-image csv files inside each sub-folder of *image_dir*
    into one "<folder>_labels.csv" (with a header row), deleting the
    per-image csv files as they are consumed.
    """
    header = ['filename', 'width', 'height',
              'class', 'xmin', 'ymin', 'xmax', 'ymax']
    for root, dirs, filenames in os.walk(image_dir):
        for dir in dirs:
            csvholder = [header]
            for f in os.listdir(image_dir + dir):
                # endswith() is robust to file names containing extra dots,
                # unlike the original split(".")[1] test.
                if f.endswith(".csv"):
                    # Context manager closes the handle before os.remove.
                    with open(image_dir + dir + "/" + f, 'r') as csv_file:
                        for row in csv.reader(csv_file):
                            csvholder.append(row)
                    os.remove(image_dir + dir + "/" + f)
            save_csv(image_dir + dir + "_labels.csv", csvholder)
            print("Saved label csv for ", dir, image_dir +
                  dir + "/" + dir + "_labels.csv")
# Split data, copy to train/test folders
def split_data_test_eval_train(image_dir):
    """Move every image/csv pair found under *image_dir* into data/train or
    data/eval (every 10th image is held out for eval), delete the emptied
    source folders, then build the merged label csv files.
    """
    create_directory("data")
    create_directory("data/train")
    create_directory("data/eval")

    loop_index = 0
    """
    data_size = 4000
    data_sampsize = int(0.1 * data_size)
    random.seed(1)
    test_samp_array = random.sample(range(data_size), k=data_sampsize)
    """
    for root, dirs, filenames in os.walk(image_dir):
        for dir in dirs:
            for f in os.listdir(image_dir + dir):
                if(f.split(".")[1] == "jpg"):
                    loop_index += 1
                    #print('DEBUG: loop_index, f',loop_index, f)
                    #print('DEBUG: f.split(".")[0]',f.split(".")[0])
                    #if loop_index in test_samp_array:
                    # Deterministic 10% split: every 10th image goes to eval.
                    if not np.mod(loop_index,10):
                        os.rename(image_dir + dir +
                                  "/" + f, "data/eval/" + f)
                        os.rename(image_dir + dir +
                                  "/" + f.split(".")[0] + ".csv", "data/eval/" + f.split(".")[0] + ".csv")
                    else:
                        os.rename(image_dir + dir +
                                  "/" + f, "data/train/" + f)
                        os.rename(image_dir + dir +
                                  "/" + f.split(".")[0] + ".csv", "data/train/" + f.split(".")[0] + ".csv")
                    print(loop_index, image_dir + f)
            print("> done scanning director ", dir)
            # Only polygons.mat should remain; remove it and the empty folder.
            os.remove(image_dir + dir + "/polygons.mat")
            os.rmdir(image_dir + dir)

    print("Train/Eval content generation complete!")
    generate_label_files("data/")
def generate_csv_files(image_dir):
    """Generate per-image csv annotations for every sub-folder of
    *image_dir*, then split the results into train/eval folders.
    """
    for _root, subdirs, _files in os.walk(image_dir):
        for folder in subdirs:
            get_bbox_visualize(image_dir, folder)

    print("CSV generation complete!\nGenerating train/eval folders")
    split_data_test_eval_train("egohands/_LABELLED_SAMPLES/")
# rename image files so we can have them all in a train/test/eval folder.
def rename_files(image_dir):
    """Prefix every jpg in each sub-folder of *image_dir* with its folder
    name (so names stay unique once the files are pooled into data/), then
    kick off csv generation. Files already carrying the prefix are skipped.
    """
    print("Renaming files")
    loop_index = 0
    for root, dirs, filenames in sorted(os.walk(image_dir)):
        for dir in dirs:
            for f in os.listdir(image_dir + dir):
                if (dir not in f):
                    if(f.split(".")[1] == "jpg"):
                        loop_index += 1
                        old = image_dir + dir + "/" + f
                        new = image_dir + dir + "/" + dir + "_" + f
                        os.rename(old, new)
                    else:
                        # NOTE(review): this abandons the folder as soon as a
                        # non-jpg (e.g. polygons.mat) is encountered; since
                        # os.listdir order is arbitrary, later jpgs could be
                        # skipped — confirm this is intentional.
                        break

    generate_csv_files("egohands/_LABELLED_SAMPLES/")
def extract_folder(dataset_path):
    """Unpack the egohands zip archive into ./egohands (skipped when that
    directory already exists) and continue the pipeline by renaming the
    image files.

    Uses a context manager so the archive handle is closed even if
    extraction raises.
    """
    if not os.path.exists("egohands"):
        print("> Extracting Dataset files")
        with zipfile.ZipFile(dataset_path, 'r') as zip_ref:
            zip_ref.extractall("egohands")
        print("> Extraction complete")
    rename_files("egohands/_LABELLED_SAMPLES/")
def download_egohands_dataset(dataset_url, dataset_path):
    """Download the egohands archive to *dataset_path* unless it is already
    present, then extract and process it.

    Uses urllib.request.urlretrieve instead of the long-deprecated
    urllib.request.URLopener, and calls extract_folder once instead of
    duplicating the call in both branches.
    """
    print("\nTHIS CODE IS BASED ON VICTOR DIBIAs WORK\nSEE HIS REPO:\nhttps://github.com/victordibia/handtracking\n")
    if not os.path.exists(dataset_path):
        print("> downloading Egohands dataset (1.3GB)")
        urllib.request.urlretrieve(dataset_url, dataset_path)
        print("> download complete")
    else:
        print("Egohands dataset already downloaded.\nGenerating CSV files")
    extract_folder(dataset_path)
def create_label_map():
    """Write the single-class ('hand') object-detection label map to
    data/label_map.pbtxt, unless the file already exists.
    """
    label_map = "data/label_map.pbtxt"
    if not os.path.isfile(label_map):
        # Context manager guarantees the file is flushed and closed.
        with open(label_map, "w") as f:
            f.write("item {\n id: 1\n name: 'hand'\n}")
        print("> created ", label_map)
def final_finish():
    """Move the jpgs of each split into a data/<split>/images/ sub-folder
    and delete the extracted egohands working directory.
    """
    cwd = os.getcwd()
    for directory in ['train','eval']:
        src_dir = cwd+'/data/{}/'.format(directory)
        drc_dir = cwd+'/data/{}/images/'.format(directory)
        create_directory(drc_dir)
        for file in os.listdir(src_dir):
            if file.endswith(".jpg"):
                sh.move(src_dir+file,drc_dir+file)
    # All per-image data now lives under data/, so the extraction dir can go.
    sh.rmtree('egohands')
    #os.remove(EGO_HANDS_FILE)
    print('\n> creating the dataset complete\n> you can now start training\n> see howto_wiki for more information')
def main():
    """Entry point: download the egohands archive, generate annotations,
    write the label map and tidy up the directory layout.
    """
    archive_url = "http://vision.soic.indiana.edu/egohands_files/egohands_data.zip"
    archive_file = "egohands_data.zip"
    download_egohands_dataset(archive_url, archive_file)
    create_label_map()
    final_finish()


if __name__ == '__main__':
    main()
|
[
"gustav.zitzewitz@gmx.de"
] |
gustav.zitzewitz@gmx.de
|
9bf27b45951df9980c3560ab22c335077ee230ab
|
54dbcd8191ee7f6bef91f02038659f93f83c0bc2
|
/ilkapp/migrations/0026_hafta.py
|
820ada0083f41ff3f334e2b8ecece520b9c48a44
|
[] |
no_license
|
kizikli27/efesafa
|
4cb6cbf53980d3ecec54303dab7ba06a21869f5e
|
7d2e67b7b050540711eaa8540f524b3b22459339
|
refs/heads/main
| 2023-02-26T01:36:55.720380
| 2021-02-04T12:00:54
| 2021-02-04T12:00:54
| 333,233,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
# Generated by Django 3.1.4 on 2020-12-26 21:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.4: creates the `hafta` ("week") model
    # with an auto id and a nullable 10-char `gunler` ("days") column.

    dependencies = [
        ('ilkapp', '0025_auto_20201226_1547'),
    ]

    operations = [
        migrations.CreateModel(
            name='hafta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gunler', models.CharField(blank=True, max_length=10, null=True)),
            ],
        ),
    ]
|
[
"kizikli27@gmail.com"
] |
kizikli27@gmail.com
|
db15d9dc414241557854c5a2343c9b053d053fbe
|
279296caf6c066f018a2a6f8e95be9b675946837
|
/Leetcode/MedianOfTwoSortedArrays.py
|
aba0c2b112a1523eee0386f806a2adb6a4f5420a
|
[] |
no_license
|
GeneZC/Zero2All
|
0444680e83052c2842695f77d7c9895defbb9e4e
|
46e813690cf975d5de395b825d4506805c0bc9ab
|
refs/heads/master
| 2021-07-25T05:14:14.353793
| 2020-04-16T02:55:27
| 2020-04-16T02:55:27
| 147,045,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
class Solution:
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
len1 = len(nums1)
len2 = len(nums2)
pivot = (len1 + len2) // 2
odd = (len1 + len2) % 2
if len1 == 0:
if odd:
return nums2[pivot]
else:
return (nums2[pivot] + nums2[pivot-1]) / 2.0
if len2 == 0:
if odd:
return nums1[pivot]
else:
return (nums1[pivot] + nums1[pivot-1]) / 2.0
prev = []
p1, p2 = 0, 0
while True:
if len(prev) - 1 == pivot:
if odd:
return prev[-1]
else:
return (prev[-1] + prev[-2]) / 2.0
try:
num1 = nums1[p1]
except:
num1 = float('inf')
try:
num2 = nums2[p2]
except:
num2 = float('inf')
if num1 >= num2:
prev.append(num2)
p2 += 1
else:
prev.append(num1)
p1 += 1
|
[
"gene_zhangchen@163.com"
] |
gene_zhangchen@163.com
|
bd60963c288c16a724e71877fbabfd7921e405f8
|
334d0190164d92b53be2844a3afc2826d64b1a6d
|
/lib/python3.9/site-packages/theano/link/c/cutils.py
|
bbcd50fb6209c3ee3ecd25ecbcb4d4ad7c91b5d0
|
[] |
no_license
|
sou133688/BayesianStatics
|
f294d7c47cfa56374cf73b520529620dc6120f47
|
be9121429494cd8fd231594b029fc2f030d8335f
|
refs/heads/main
| 2023-08-21T15:57:32.980658
| 2021-10-01T00:01:13
| 2021-10-01T00:01:13
| 401,909,680
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,167
|
py
|
import errno
import os
import sys
from theano.compile.compilelock import lock_ctx
from theano.configdefaults import config
from theano.link.c import cmodule
# TODO These two lines may be removed in the future, when we are 100% sure
# no one has an old cutils_ext.so lying around anymore.
# (The module now lives in a cutils_ext/ package directory instead.)
if os.path.exists(os.path.join(config.compiledir, "cutils_ext.so")):
    os.remove(os.path.join(config.compiledir, "cutils_ext.so"))
def compile_cutils():
    """
    Do just the compilation of cutils_ext.
    """
    # C source for the extension. It is deliberately written against the
    # old Python-2 PyCObject API and rewritten below via string replace.
    code = """
        #include <Python.h>
        #include "theano_mod_helper.h"

        extern "C"{
        static PyObject *
        run_cthunk(PyObject *self, PyObject *args)
        {
          PyObject *py_cthunk = NULL;
          if(!PyArg_ParseTuple(args,"O",&py_cthunk))
            return NULL;

          if (!PyCObject_Check(py_cthunk)) {
            PyErr_SetString(PyExc_ValueError,
                            "Argument to run_cthunk must be a PyCObject.");
            return NULL;
          }
          void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);
          int (*fn)(void*) = (int (*)(void*))(ptr_addr);
          void* it = PyCObject_GetDesc(py_cthunk);
          int failure = fn(it);

          return Py_BuildValue("i", failure);
        }
        static PyMethodDef CutilsExtMethods[] = {
            {"run_cthunk", run_cthunk, METH_VARARGS|METH_KEYWORDS,
             "Run a theano cthunk."},
            {NULL, NULL, 0, NULL}        /* Sentinel */
        };"""

    # This is not the most efficient code, but it is written this way to
    # highlight the changes needed to make 2.x code compile under python 3.
    code = code.replace("<Python.h>", '"numpy/npy_3kcompat.h"', 1)
    code = code.replace("PyCObject", "NpyCapsule")
    code += """
    static struct PyModuleDef moduledef = {
        PyModuleDef_HEAD_INIT,
        "cutils_ext",
        NULL,
        -1,
        CutilsExtMethods,
    };

    PyMODINIT_FUNC
    PyInit_cutils_ext(void) {
        return PyModule_Create(&moduledef);
    }
    }
    """

    loc = os.path.join(config.compiledir, "cutils_ext")
    if not os.path.exists(loc):
        try:
            os.mkdir(loc)
        except OSError as e:
            # Another process may have created it between the check and now.
            assert e.errno == errno.EEXIST
            assert os.path.exists(loc), loc

    args = cmodule.GCC_compiler.compile_args(march_flags=False)
    cmodule.GCC_compiler.compile_str("cutils_ext", code, location=loc, preargs=args)
try:
    # See gh issue #728 for why these lines are here. Summary: compiledir
    # must be at the beginning of the path to avoid conflicts with any other
    # cutils_ext modules that might exist. An __init__.py file must be created
    # for the same reason. Note that these 5 lines may seem redundant (they are
    # repeated in compile_str()) but if another cutils_ext does exist then it
    # will be imported and compile_str won't get called at all.
    sys.path.insert(0, config.compiledir)
    location = os.path.join(config.compiledir, "cutils_ext")
    if not os.path.exists(location):
        try:
            os.mkdir(location)
        except OSError as e:
            # Tolerate a concurrent mkdir by another process.
            assert e.errno == errno.EEXIST
            assert os.path.exists(location), location
    if not os.path.exists(os.path.join(location, "__init__.py")):
        open(os.path.join(location, "__init__.py"), "w").close()

    try:
        from cutils_ext.cutils_ext import *  # noqa
    except ImportError:
        with lock_ctx():
            # Ensure no-one else is currently modifying the content of the compilation
            # directory. This is important to prevent multiple processes from trying to
            # compile the cutils_ext module simultaneously.
            try:
                # We must retry to import it as some other process could
                # have been compiling it between the first failed import
                # and when we receive the lock
                from cutils_ext.cutils_ext import *  # noqa
            except ImportError:
                compile_cutils()
                from cutils_ext.cutils_ext import *  # noqa
finally:
    # Undo the sys.path manipulation performed above.
    if sys.path[0] == config.compiledir:
        del sys.path[0]
|
[
"matsushu@ZaknoMacBook-Pro.local"
] |
matsushu@ZaknoMacBook-Pro.local
|
d232c87bbdf55880e841f21229d99d455e814b67
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/validPalindrome_20200803230832.py
|
1cd0f9b378eb36d51c8a024c40908243883ff3e9
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
import re
def palindrome(str):
    """Return True if *str* is a valid palindrome, considering only
    alphanumeric characters and ignoring case.

    The empty string is a palindrome (matching the original early-return).
    The original file was left mid-edit with a syntax error
    (`if cleanStr ==`); this completes the intended comparison.

    Note: the parameter name shadows the builtin `str`; it is kept to
    preserve the original call signature.
    """
    # Normalise: lowercase first, then drop everything that is not a-z/0-9.
    cleaned = re.sub(r"[^a-z0-9]", "", str.lower())
    # A palindrome reads the same forwards and backwards.
    return cleaned == cleaned[::-1]


# Demo input from the original script (LeetCode 125 example).
print(palindrome("A man, a plan, a canal: Panama"))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
616e381848f821b24e85189e7891488a8db2ba8e
|
9b8c94c2ed11c0868c8afb391a326472ea77e66a
|
/novel/qidian_comment.py
|
ebd85b928022f146c93f2e6fdd233265f11802d4
|
[] |
no_license
|
LJ147/cupspider
|
9ad755dc4210ccb5e8d785090f7d73722eae49c3
|
48571d1ceadab68b3893094c27d65665369fdd5e
|
refs/heads/master
| 2021-01-19T07:46:14.212333
| 2017-05-07T00:00:11
| 2017-05-07T00:00:11
| 87,569,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,839
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by LJ on 2017/3/25
import sys
import db_tool
import json
import mongoDB
import random
import requests
import time
# Python 2 only: reload re-exposes sys.setdefaultencoding (removed from the
# namespace during startup) so the default text encoding can be set to UTF-8.
reload(sys)
sys.setdefaultencoding("utf-8")
def store_comment(token, page, book_id):
    """Fetch one page of comments for *book_id* from qidian's ajax endpoint
    and store them in MongoDB (Python 2 code).
    """
    # book_id:1003354631 token:NZv1ty8GbjYLuCTm9PMpf7yONl12AgeFQ9BuDYBJ
    r = requests.get(
        "http://book.qidian.com/ajax/comment/info?_csrfToken={token}&pageIndex={index}&pageSize=15&orderBy= &bookId={book_id}".format(
            token=token, book_id=book_id, index=page))
    content = json.loads(r.content)
    content['book_id'] = book_id
    # Insert the full comment payload.
    try:
        mongoDB.insert_comment(content)
    except:
        # On failure, sleep a random interval (a crude stand-in for IP
        # rotation) and recurse into the next page.
        # NOTE(review): the recursion advances to page+1 and then execution
        # falls through to process the *current* payload as well — confirm
        # this double-processing is intended.
        rand = random.randint(3, 20)
        print 'sleep ' + str(rand) + "seconds "
        time.sleep(rand)
        store_comment(token, page + 1, book_id)
    try:
        comment_info_list = content['data']['commentInfo']
        for item in comment_info_list:
            comment = item.get("comment")
            if comment != u"":
                print str(item.get("nickName")) + " said :" + comment + " about book " + book_id
                # Only comments with non-empty text are stored separately.
                item['book_id'] = book_id
                mongoDB.insert_comment_with_content(item)
    except:
        # Response had no data/commentInfo structure.
        print "no comment"
def get_comment_amount(token, page, book_id):
    """Return the total comment count reported by qidian's ajax endpoint
    for *book_id*, or 0 when the response lacks the expected fields.
    """
    # book_id:1003354631 token:NZv1ty8GbjYLuCTm9PMpf7yONl12AgeFQ9BuDYBJ
    endpoint = "http://book.qidian.com/ajax/comment/info?_csrfToken={token}&pageIndex={index}&pageSize=15&orderBy= &bookId={book_id}"
    response = requests.get(endpoint.format(token=token, book_id=book_id, index=page))
    payload = json.loads(response.content)
    try:
        total = payload['data']['totalCnt']
    except:
        # Malformed or empty responses count as zero comments.
        total = 0
    return total
if __name__ == '__main__':
    page = 15
    page_size = 15
    select_amount = 100
    sql = "SELECT COUNT(url) FROM bookForQidian"
    url_count = int(db_tool.select_url(sql)[0].get('COUNT(url)'))
    # The database should contain more than 100 records.
    urls = db_tool.select_one_hundred()
    for url in urls:
        # get_token(url[0].decode('utf-8'))
        # book_id:1003354631 token:NZv1ty8GbjYLuCTm9PMpf7yONl12AgeFQ9BuDYBJ book.qidian.com/info/1000117983
        # Strip the "book.qidian.com/info/" prefix to get the numeric id.
        book_id = str(url[0][21:])
        comment_max = int(
            get_comment_amount(token="NZv1ty8GbjYLuCTm9PMpf7yONl12AgeFQ9BuDYBJ", page=page, book_id=book_id))
        # Walk every page of comments for this book.
        page_count = comment_max / page_size
        page = 1
        while (page < page_count):
            store_comment(token="NZv1ty8GbjYLuCTm9PMpf7yONl12AgeFQ9BuDYBJ", page=page, book_id=book_id)
            page = page + 1
    print "url is less than 100 or all the conmmends haves been stored"
|
[
"Alison@LJ-3.local"
] |
Alison@LJ-3.local
|
aa134928c752ba4aafcec27b5bfa762a3e9aaf3d
|
a2402496966e8467ec8dd81a4573d4d23d285193
|
/setup.py
|
b297f6b4fb14fb3469aafa798bff1371bedb9696
|
[] |
no_license
|
yudhaputrama/ftrigger
|
d7af5582c08104b3c6204eb4948920c22cfc7d12
|
844fc16f053be3232c21c1e571717915d6c91b69
|
refs/heads/master
| 2021-08-18T21:07:11.196709
| 2017-10-11T22:51:32
| 2017-10-11T22:51:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
from setuptools import find_packages
from setuptools import setup
install_requires = [
'confluent-kafka',
'docker',
'requests',
]
dependency_links = [
]
setup(
name='ftrigger',
version='0.1',
description='Triggers for FaaS functions',
author='King Chung Huang',
author_email='kchuang@ucalgary.ca',
url='https://github.com/ucalgary/ftrigger',
packages=find_packages(),
package_data={
},
install_requires=install_requires,
dependency_links=dependency_links,
entry_points="""
[console_scripts]
kafka-trigger=ftrigger.kafka:main
""",
zip_safe=True
)
|
[
"kinghuang@mac.com"
] |
kinghuang@mac.com
|
c8e1674f11b454b63e056013510f267b8012d7ee
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4085/codes/1601_820.py
|
eddecc99b1de0911e10f15980db5d2f52e5fcf8a
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
# Break a withdrawal amount into 50, 10 and 2 notes (greedy, largest first).
valor = float(input("escreva o valor do saque: "))
notas50, restante = divmod(valor, 50)
notas10, restante = divmod(restante, 10)
notas2 = restante // 2
print(int(notas50))
print(int(notas10))
print(int(notas2))
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
e38633f152ab74822515801a7ecd0548cf4bd9e9
|
078f7d560323916082ef0749eca40f4d7df1b39a
|
/sklearnserver/__main__.py
|
814dcc1443584f3c870d2b5878360c737a4fb198
|
[] |
no_license
|
wprazuch/imdb-movie-reviews
|
5be0220fdc6b56907a90fcd9fd1375016cfa3976
|
5f18f3f0a61a59a45587bb42090b01468fa87ec2
|
refs/heads/master
| 2023-01-05T22:09:58.096013
| 2020-10-27T07:27:39
| 2020-10-27T07:27:39
| 297,391,133
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
import argparse
import logging
import sys
import kfserving
from sklearnserver import SKLearnModel, SKLearnModelRepository
DEFAULT_MODEL_NAME = 'model'
DEFAULT_LOCAL_MODEL_DIR = '/tmp/model'  # NOTE(review): unused in this module

# Reuse kfserving's base CLI options and add the model-specific ones.
parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])
parser.add_argument('--model_dir', required=True, help='A URI pointer to the model binary')
parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,
                    help='The name that the model is server under')
args, _ = parser.parse_known_args()

if __name__ == '__main__':
    model = SKLearnModel(args.model_name, args.model_dir)
    print("Starting...")
    try:
        model.load()
    except Exception as e:
        # Keep serving even if the model fails to load: log the failure and
        # mark the model not-ready instead of crashing the server.
        ex_type, ex_value, _ = sys.exc_info()
        logging.error(f"fail to load model {args.model_name} from dir {args.model_dir}. "
                      f"exception type {ex_type}, exception msg: {ex_value}")
        model.ready = False
        print("Exception")
    print(model)
    kf_server = kfserving.KFServer()
    kf_server.register_model(model)
    kf_server.start([model])
|
[
"wojciechprazuch3@gmail.com"
] |
wojciechprazuch3@gmail.com
|
bc14ec5acd9e1d8d7585ac958b4c40c7f30579f3
|
7e22c340a8fde1a763d6b8c7bb19bc3032855ab1
|
/apps/user/api/api_rest.py
|
164844563b1e797a01b4f72f1b44810987a82d14
|
[] |
no_license
|
ivanAbregu/SKOL
|
af3c328d483f7b5077a3bde480e4a3fc49d992ab
|
60a53c2daa903e322763eff7a3ed7365a862c89f
|
refs/heads/master
| 2021-07-18T09:56:37.772524
| 2020-05-26T18:47:41
| 2020-05-26T18:47:41
| 174,054,858
| 1
| 0
| null | 2020-06-05T20:03:56
| 2019-03-06T02:20:30
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from ..models import User
from .serializers import UserWebModelSerializer
from .filters import UserFilter
from rest_framework.viewsets import ModelViewSet
from ..permissions import AdminAccessPermission
class UserViewSet(ModelViewSet):
    """
    retrieve:
    Return the given user.

    list:
    Return a list of all the existing users.

    create:
    Create a new user instance.
    """
    queryset = User.objects.all()
    serializer_class = UserWebModelSerializer
    # PATCH is intentionally excluded; updates must use PUT.
    http_method_names = ['get','put','post','delete']
    filter_class = UserFilter

    def get_queryset(self):
        # Currently returns the unfiltered queryset; the commented block
        # below is a disabled per-club restriction kept for reference.
        qs = super(UserViewSet, self).get_queryset()
        # if self.request.user:
        #     qs = qs.filter(club = self.request.user.club)
        # elif not self.request.user.is_superuser:
        #     qs = []
        return qs
|
[
"ivan.abregu@gmail.com"
] |
ivan.abregu@gmail.com
|
d45b04ebbf585fe633d6a67f062a7691a78c81b1
|
df8b2f9a7a7e0baf387ec402c917d9b5f7abb3d4
|
/apps/reports/migrations/0001_initial.py
|
632053fb8a22ed740d5fd9a969d37a99daa7f7ed
|
[] |
no_license
|
shaoyan163/TestTools
|
624a3de2ba56440f9659e748d223bf985d29a20a
|
768b081a328f99f2811217744106a8269fc47ef6
|
refs/heads/master
| 2021-05-24T09:19:21.773461
| 2020-04-12T15:04:21
| 2020-04-12T15:04:21
| 253,492,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
# Generated by Django 2.0.5 on 2020-04-12 13:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.5: creates the tb_reports table used to
    # persist test-run reports, including the rendered HTML source.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Reports',
            fields=[
                ('create_time', models.DateTimeField(auto_now_add=True, help_text='创建时间', verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, help_text='更新时间', verbose_name='更新时间')),
                ('is_delete', models.BooleanField(default=False, help_text='逻辑删除', verbose_name='逻辑删除')),
                ('id', models.AutoField(help_text='id主键', primary_key=True, serialize=False, verbose_name='id主键')),
                ('name', models.CharField(help_text='报告名称', max_length=200, unique=True, verbose_name='报告名称')),
                ('result', models.BooleanField(default=1, help_text='执行结果', verbose_name='执行结果')),
                ('count', models.IntegerField(help_text='用例总数', verbose_name='用例总数')),
                ('success', models.IntegerField(help_text='成功总数', verbose_name='成功')),
                ('html', models.TextField(blank=True, default='', help_text='报告HTML源码', null=True, verbose_name='报告HTML源码')),
                ('summary', models.TextField(blank=True, default='', max_length=200, null=True, verbose_name='报告详情')),
            ],
            options={
                'verbose_name': '测试报告',
                'verbose_name_plural': '测试报告',
                'db_table': 'tb_reports',
            },
        ),
    ]
|
[
"Yanyan.Shao@geely.com"
] |
Yanyan.Shao@geely.com
|
6b3cd0e1c37c73fd2aefdfe95c80ab2119abae57
|
bc5b0c07b74fdb5207355e9e07462a3921accd37
|
/models/qc.py
|
cefc827f7029d33ccecf6e9234465207e104d3be
|
[] |
no_license
|
yanuarpradanaa/pp_application
|
357cd4de789d08a65c3d25199facec53977cc6f5
|
8489d5fa704294f01a3b9477f014986f91dd3c94
|
refs/heads/main
| 2023-08-23T11:20:38.102188
| 2021-10-14T06:39:25
| 2021-10-14T06:39:25
| 417,014,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
from odoo import models, fields, api
class QcCustom(models.Model):
    """Extend the qc.inspection model with packaging attributes and
    sale-order / product references.
    """
    _inherit = 'qc.inspection'

    # Paper/carton attributes. Field labels are Indonesian:
    # jenis kertas = paper type, grammatur = grammage,
    # panjang/lebar/tinggi = length/width/height.
    jenis_kertas = fields.Char('Jenis Kertas')
    grammatur = fields.Char('Grammatur')
    panjang = fields.Integer('Panjang')
    lebar = fields.Integer('Lebar')
    tinggi = fields.Integer('Tinggi')
    qty_bundle = fields.Integer('Qty Per Bundle')
    description = fields.Text(string='Description')
    # Default sample quantity for an inspection.
    qty_sample = fields.Float(string='Qty Sample', default=1.00)
    # Links back to the originating sale order and product.
    so_ref = fields.Many2one('sale.order', 'SO Reference', store=True)
    product_ref = fields.Many2one('product.product', 'Product', store=True)
|
[
"54838397+yanuarpradanaa@users.noreply.github.com"
] |
54838397+yanuarpradanaa@users.noreply.github.com
|
3f5afa1622f3c0cc7cf7c470041cc47ccd58c026
|
7e2419c7ad5a78d22dce018506e4cf2590a75193
|
/default/mailParser.py
|
8d22e382108781cf1891c0588c95d61cf9629ed0
|
[] |
no_license
|
bgirschig/MOTOR
|
7f3387a3650900bd887e73b93965209416d7b80c
|
3559c41160d3cde66d4657555888510921754d13
|
refs/heads/main
| 2023-08-18T21:44:33.548671
| 2019-12-03T14:53:35
| 2019-12-03T14:53:35
| 158,950,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,295
|
py
|
import re
import logging
from datetime import datetime
from datetime import date
def pre_validate(mail_message):
    """Validations that can be performed before parsing mail body (eg. email is
    valid, message is not too old, etc...)

    Arguments:
        mail_message {MailMessage} -- The mail message, as received by the InboundMailHandler
    """
    # TODO: Implement this. Throw errors when not valid
    # Currently a no-op: every message is accepted.

    # message age
    # date_reg = r"\w{2,4},\s(\d{2})\s(\w{2,4})\s(\d{4})\s(\d{2}):(\d{2}):(\d{2})"

    # sender is an authorized email
    # Note: one client should be able to register multiple emails as authorized
def parse(mail_message):
    """parses email body for render requests

    Arguments:
        mail_message {MailMessage} -- The mail message, as received by the InboundMailHandler

    Returns:
        Dictionnary -- The parsed data
    """
    pre_validate(mail_message)

    # Concatenate every text/plain body part into one string.
    body_text = ''
    for _encoding, part in mail_message.bodies('text/plain'):
        body_text += part.decode()

    # Pull out every http(s) URL mentioned in the body.
    url_regex = r"https?:\/\/(?:www\.)?[\w.]+(?:\/(?:[\w\-\.]+))*\/?(?:[\?#].+)?"
    found = re.findall(url_regex, body_text)

    # Returned as a dict (not a bare list) so global or per-request settings
    # can be added later without refactoring callers.
    return {'requests': [{'url': url} for url in found]}
def stringify(mail_message):
    """returns a human-readable string representing the given mail_message

    Arguments:
        mail_message {MailMessage} -- the message to be stringified

    Returns:
        string -- the stringified message
    """
    missing = '--not defined--'
    # Header fields, falling back to a placeholder when absent.
    header_lines = [
        'sender: ' + getattr(mail_message, 'sender', missing),
        'subject: ' + getattr(mail_message, 'subject', missing),
        'to: ' + getattr(mail_message, 'to', missing),
        'date: ' + getattr(mail_message, 'date', missing),
    ]
    text = '\n'.join(header_lines)
    text += '\n-------------------- body:\n'
    for _encoding, part in mail_message.bodies('text/plain'):
        text += part.decode()
    text += '\n--------------------------'
    return text
|
[
"bastien.girschig@gmail.com"
] |
bastien.girschig@gmail.com
|
56706326bf27f9ae132e9d86bf660a6e0caba942
|
32da8be38c8a205f620b758dc7a135bfbee93d91
|
/dataframes.py
|
37f89a49ef99fc86cdb27862944d650934cdce56
|
[] |
no_license
|
ThomKaar/CSC369-Lab4
|
2343b7508f3e7aad44327789fddffb2394051df8
|
328bac7c94790d989ddcb8db42a2e4f1e9e953e8
|
refs/heads/master
| 2022-07-15T08:05:56.765243
| 2020-05-12T06:46:45
| 2020-05-12T06:46:45
| 259,461,213
| 0
| 0
| null | 2020-05-12T06:25:23
| 2020-04-27T21:32:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,178
|
py
|
# Thomas Karr
# Wesley Benica
# Lab 4 - CSC369 - Spring 2020
from datetime import date
import pandas as pd
from output_html import Query
def get_df(q: Query) -> pd.DataFrame:
    """Dispatch *q* to the frame builder matching its task type.

    'track' and 'ratio' tasks build tracking frames; 'stats' builds the
    (placeholder) statistics frame. Anything else raises ValueError.
    """
    task = q.task
    if 'track' in task:
        return get_track_df(q, task['track'])
    if 'ratio' in task:
        return get_track_df(q, 'ratio')
    if 'stats' in task:
        return get_stats_df(q)
    print("Something went wrong")
    raise ValueError
def get_track_df(q, track_var):
    """Route a tracking query to the aggregated or unaggregated builder.

    With aggregation, the target is the first county (when counties are
    given) or the aggregation value itself; otherwise the per-county or
    per-state frame is built directly.
    """
    data = q.data
    aggregation = data.get('aggregation')
    target = data.get('target')
    states = data.get('states')
    counties = data.get('counties')

    if aggregation:
        agg_target = target[0] if counties else aggregation
        return get_agg_track_df(q, target=agg_target)
    if counties:
        return get_unagg_track_df(q, track_var, counties=counties)
    return get_unagg_track_df(q, track_var, states=states)
def get_unagg_track_df(q: Query, track_var: str, counties=None, states=None):
    """Build a date-indexed frame with one column per county (or state),
    filled with each day's *track_var* value (missing values become 0).
    """
    data = q.data
    # One row per date present in the query result.
    idx = [int_to_date(datum['date']) for datum in data['data']]
    col = counties if counties else states
    df = pd.DataFrame(index=idx, columns=col)
    # Zero-fill so regions with no daily record still show 0.
    for col in df.columns:
        df[col].values[:] = 0
    field = 'county' if counties else 'state'
    for d in data['data']:
        for dd in d['daily_data']:
            # NOTE(review): chained indexing (df[col][row] = ...) can trigger
            # pandas' SettingWithCopy behavior — consider df.loc/df.at if
            # values ever fail to stick.
            df[dd[field]][int_to_date(d['date'])] = dd.get(track_var) or 0
    return df
def get_agg_track_df(q: Query, target):
    """Build a date-indexed frame from pre-aggregated query data.

    *target* is currently unused (see the TODO below).
    """
    data = q.data['data']
    # Whether the output table puts time on the rows or the columns.
    is_vert = q.output['table']['row'] == 'time'
    # TODO add target as a title?
    df = pd.DataFrame(data)
    df['date'] = df['date'].apply(lambda date_int: int_to_date(date_int))
    df = df.set_index('date')
    df.index.name = None
    # NOTE(review): when row == 'time' the frame is transposed — confirm the
    # orientation matches what the HTML renderer expects.
    if is_vert:
        df = df.transpose()
    return df
def get_stats_df(q):
    """Placeholder: statistics queries are not implemented yet, so this
    always returns an empty frame regardless of *q*.
    """
    placeholder = pd.DataFrame()
    return placeholder
def int_to_date(date_int: int) -> str:
    """Convert a YYYYMMDD integer (e.g. 20200512) to an 'MM/DD/YY' string.

    Raises ValueError (via datetime.date) for impossible month/day values.
    Rewritten with divmod and without the local variable that shadowed the
    builtin `format`.
    """
    year, rest = divmod(date_int, 10000)
    month, day = divmod(rest, 100)
    return date(year, month, day).strftime('%m/%d/%y')
|
[
"43890908+wbenica@users.noreply.github.com"
] |
43890908+wbenica@users.noreply.github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.