blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e260c0f266108fca49853195f3c3f8c7721266f
|
5f2b7f5eef576dfa575bac913c39c30607f946a1
|
/Assignment1.py
|
ed4eb2bcf302203f7f796407791ca5a42df62013
|
[] |
no_license
|
Kolokodess/switch_python_Ada
|
f9c3a6728b25ca3d1cb30ea20ffb77de6bd73cd8
|
42a42751cac75fab4fb01505294be6a3ef25268e
|
refs/heads/master
| 2021-06-18T11:07:38.416980
| 2017-04-04T08:41:46
| 2017-04-04T08:41:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
# 12-month minimum-payment simulation for a credit-card balance (Python 2).
# Prompts for the starting balance, annual interest rate and minimum payment
# rate, then prints a month-by-month breakdown and the yearly totals.
balance = float(raw_input ("Enter Balance:"))
Annual_interest = float(raw_input("Enter Annual Interest rate:"))
min_payment_rate = float(raw_input("Enter Minimum payment rate:"))
total_payment = 0
months = 1
while months <= 12:
    # m_m_p = minimum monthly payment (fixed fraction of current balance)
    m_m_p = min_payment_rate * balance
    # Interest accrued this month: one twelfth of the annual rate on the balance.
    monthly_interest = Annual_interest/12.0 * balance
    # Only the part of the payment beyond interest reduces the balance.
    principal = m_m_p - monthly_interest
    balance = balance - principal
    print "month:", months
    print "m_m_p:", m_m_p
    print "monthly_interest:", monthly_interest
    print "principal:", principal
    print "balance:", balance
    total_payment +=m_m_p
    months +=1
# Yearly summary, rounded to cents.
print "Result"
print "total_payment: $",round(total_payment,2)
print "Remaining balance: $", round(balance,2)
|
[
"ada.oyom@gmail.com"
] |
ada.oyom@gmail.com
|
1b70bccd5370036cb4520982e27696d6b98d1e47
|
10729b1d8e2761e49c56a6a308ee6b2f486d4076
|
/PropertyScraper/__main__.py
|
f60db63a37a55ba97aa880ef3f66de8d9c0edd6f
|
[] |
no_license
|
thebend/real
|
9042e4cd261bee306e3ffb81b26e55416851eb71
|
db2a32ac356fdd7d342aed138cac744e31e329a0
|
refs/heads/master
| 2021-06-11T00:10:48.589182
| 2021-03-04T07:08:34
| 2021-03-04T07:08:34
| 82,351,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
#pylint: disable=C0103
"""Run the property scraper with its default settings."""
import logging
from datetime import datetime
from PropertyScraper import Scraper

# Date-stamped file names, e.g. "2021-03-04.log" / "2021-03-04.db".
log_filename = '{:%Y-%m-%d}.log'.format(datetime.now())
db_filename = '{:%Y-%m-%d}.db'.format(datetime.now())

# Route the package logger's output into the dated log file.
logger = logging.getLogger('PropertyScraper')
logger.addHandler(logging.FileHandler(log_filename))

# Alternative entry points, kept commented out for reference:
# scraper = Scraper.Scraper(db_filename, rebuild=True)
# scraper.scrape_terramap()
scraper = Scraper.Scraper(db_filename)
# scraper.scrape_evalue_neighbours()
# scraper.scrape_ev_by_tmaddress()
# scraper.scrape_ev_by_tmcenter()
scraper.ev_center2ev_geo()
|
[
"Benjamin Davidson"
] |
Benjamin Davidson
|
b374191a7cf732d53d219ab1e5838ac5a74b3ab2
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/cherrypy/test/test_virtualhost.py
|
e9b88bd297cb6047933124c32d619fd6c0d22cc0
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760
| 2016-03-16T15:15:32
| 2016-03-16T15:15:32
| 56,389,750
| 1
| 0
| null | 2016-04-16T15:05:24
| 2016-04-16T15:05:24
| null |
UTF-8
|
Python
| false
| false
| 3,718
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\cherrypy\test\test_virtualhost.py
import os
# Absolute directory containing this test module; used below as the
# static-file root for the staticdir tool.
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
import cherrypy
from cherrypy.test import helper
class VirtualHostTest(helper.CPWebCase):
    """Functional tests for cherrypy.dispatch.VirtualHost Host-header routing."""

    def setup_server():
        # NOTE: no `self` — this is converted to a staticmethod right after
        # the definition, per the CPWebCase setup convention.
        class Root:
            # Application served for hosts not present in the hostmap.
            def index(self):
                return 'Hello, world'
            index.exposed = True

            def dom4(self):
                return 'Under construction'
            dom4.exposed = True

            def method(self, value):
                return 'You sent %s' % repr(value)
            method.exposed = True

        class VHost:
            # Per-domain sub-application; `sitename` identifies the instance.
            def __init__(self, sitename):
                self.sitename = sitename

            def index(self):
                return 'Welcome to %s' % self.sitename
            index.exposed = True

            def vmethod(self, value):
                return 'You sent %s' % repr(value)
            vmethod.exposed = True

            def url(self):
                # cherrypy.url should build the URL from the requested Host.
                return cherrypy.url('nextpage')
            url.exposed = True

            # Serve this test module's directory under /static.
            static = cherrypy.tools.staticdir.handler(section='/static', dir=curdir)

        root = Root()
        root.mydom2 = VHost('Domain 2')
        root.mydom3 = VHost('Domain 3')
        # Map incoming Host header values to mount prefixes / handlers.
        hostmap = {'www.mydom2.com': '/mydom2',
                   'www.mydom3.com': '/mydom3',
                   'www.mydom4.com': '/dom4'}
        cherrypy.tree.mount(root, config={'/': {'request.dispatch': cherrypy.dispatch.VirtualHost(**hostmap)},
                                          '/mydom2/static2': {'tools.staticdir.on': True,
                                                              'tools.staticdir.root': curdir,
                                                              'tools.staticdir.dir': 'static',
                                                              'tools.staticdir.index': 'index.html'}})
    setup_server = staticmethod(setup_server)

    def testVirtualHost(self):
        """Requests route by Host header; unknown hosts fall through to Root."""
        self.getPage('/', [('Host', 'www.mydom1.com')])
        self.assertBody('Hello, world')
        # Explicit path wins even when the Host would also match.
        self.getPage('/mydom2/', [('Host', 'www.mydom1.com')])
        self.assertBody('Welcome to Domain 2')
        self.getPage('/', [('Host', 'www.mydom2.com')])
        self.assertBody('Welcome to Domain 2')
        self.getPage('/', [('Host', 'www.mydom3.com')])
        self.assertBody('Welcome to Domain 3')
        self.getPage('/', [('Host', 'www.mydom4.com')])
        self.assertBody('Under construction')
        # Method dispatch with GET/POST params, on root and virtual hosts.
        self.getPage('/method?value=root')
        self.assertBody("You sent u'root'")
        self.getPage('/vmethod?value=dom2+GET', [('Host', 'www.mydom2.com')])
        self.assertBody("You sent u'dom2 GET'")
        self.getPage('/vmethod', [('Host', 'www.mydom3.com')], method='POST', body='value=dom3+POST')
        self.assertBody("You sent u'dom3 POST'")
        self.getPage('/vmethod/pos', [('Host', 'www.mydom3.com')])
        self.assertBody("You sent 'pos'")
        # cherrypy.url must reflect the virtual host that was requested.
        self.getPage('/url', [('Host', 'www.mydom2.com')])
        self.assertBody('%s://www.mydom2.com/nextpage' % self.scheme)

    def test_VHost_plus_Static(self):
        """Static serving works per-vhost, including index file and redirect."""
        # Root-level static dir, reached through a virtual host.
        self.getPage('/static/style.css', [('Host', 'www.mydom2.com')])
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/css;charset=utf-8')
        # Vhost-specific static dir configured under /mydom2/static2.
        self.getPage('/static2/dirback.jpg', [('Host', 'www.mydom2.com')])
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'image/jpeg')
        # Directory request serves the configured index.html.
        self.getPage('/static2/', [('Host', 'www.mydom2.com')])
        self.assertStatus('200 OK')
        self.assertBody('Hello, world\r\n')
        # Missing trailing slash redirects.
        self.getPage('/static2', [('Host', 'www.mydom2.com')])
        self.assertStatus(301)
|
[
"masaho.shiro@gmail.com"
] |
masaho.shiro@gmail.com
|
3cd88a93ec624282caf04872b0d591e54a297d80
|
bd12b2b84643023ff65734bc187d0c05cc540c4c
|
/scripts/compare_comparators.py
|
fe07f218b50049761eee72939c20fa091c4412af
|
[
"Apache-2.0"
] |
permissive
|
alexander-bzikadze/graph_diff
|
27bdc3c25c4b3a567bda8c2c967a74ccb8f412f9
|
c7d5510590d8f6999697a3e197d4e806c320e968
|
refs/heads/master
| 2021-05-16T13:03:25.026373
| 2018-05-01T18:16:35
| 2018-05-01T18:16:35
| 105,353,922
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
import logging
from graph_diff.graph import rnr_graph
from graph_diff.graph.graph_generator import GraphGenerator
from graph_diff.graph_comparison import generate_n_comparator_tests
from graph_diff.graph_diff_algorithm import GraphMapComparatorByNodeNum, GraphMapComparatorByEdgeNum
from graph_diff.graph_diff_algorithm.graph_map import *
# Number of random comparator test cases to generate.
NUMBER_OF_TESTS = 100
# Output directory for the generated comparison images.
DIRECTORY = "../comparator_png/"
# Comparators under test; the commented-out entries were tried previously
# and are kept for reference.
comparators = [
    # GraphMapComparatorByEdgeNumAndThenNodeNum(),
    # GraphMapComparatorByEdgeNumAndNodeNumSum(),
    # GraphMapComparatorByNodeNumAndThenEdgeNum(),
    GraphMapComparatorByNodeNum(),
    GraphMapComparatorByEdgeNum(),
    # GraphMapComparatorByEdgeDiffAndThenNodeDiff()
]
# NOTE(review): no logging configuration is performed in this script, so this
# message is dropped unless the root logger is configured elsewhere — confirm.
logging.info("Start comparator test with {0} tests".format(NUMBER_OF_TESTS))
generate_n_comparator_tests(n=NUMBER_OF_TESTS, comparators=comparators, directory=DIRECTORY)
class GeneratorMock(GraphGenerator):
    """Deterministic generator: yields two fixed graphs, then fails."""

    # Count of graphs handed out so far (class default, shadowed per instance).
    i = 0

    def generate_graph(self):
        """Return the first, then the second fixed graph; raise afterwards."""
        if self.i == 0:
            node_ids = [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2)]
        elif self.i == 1:
            node_ids = [(1, 1), (1, 2), (2, 3), (2, 1), (2, 2)]
        else:
            raise Exception("")
        graph = rnr_graph()
        for label, number in node_ids:
            graph.add_node(lr_node(label, number))
        self.i += 1
        return graph
# generate_n_comparator_tests(n=1, comparators=comparators, directory=DIRECTORY, graph_generator=GeneratorMock())
|
[
"alexander.bzikadze@gmail.com"
] |
alexander.bzikadze@gmail.com
|
7f893bc5ede151e3ba8385f0ff5bff7a0cfe4beb
|
497ead1ee1e09a2530aa771ae059989e341684d7
|
/python/cuml/dask/preprocessing/LabelEncoder.py
|
4c731de842b1d1109949e96d1b4ad9f7128da6cf
|
[
"Apache-2.0"
] |
permissive
|
xieliaing/cuml
|
193f5753696bbfd4de8e3eaef919c18da2fd1d1a
|
78092ddde28d5a810e45d6186f049c1309121408
|
refs/heads/master
| 2022-11-10T16:45:38.818055
| 2022-11-03T23:12:07
| 2022-11-03T23:12:07
| 159,592,316
| 0
| 0
|
Apache-2.0
| 2018-11-29T01:59:07
| 2018-11-29T01:59:07
| null |
UTF-8
|
Python
| false
| false
| 7,769
|
py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedTransformMixin
from cuml.dask.common.base import DelayedInverseTransformMixin
from toolz import first
from collections.abc import Sequence
from dask_cudf.core import DataFrame as dcDataFrame
from dask_cudf.core import Series as daskSeries
from cuml.common.exceptions import NotFittedError
from cuml.preprocessing import LabelEncoder as LE
class LabelEncoder(BaseEstimator,
                   DelayedTransformMixin,
                   DelayedInverseTransformMixin):
    """Distributed (dask) ordinal label encoder backed by cuml's LabelEncoder.

    Parameters
    ----------
    handle_unknown : {'error', 'ignore'}, default='error'
        Whether to raise on an unknown category during transform or inverse
        transform ('error'), or to encode it as null ('ignore').

    Examples
    --------
    .. code-block:: python

        >>> import cudf, dask_cudf
        >>> from cuml.dask.preprocessing import LabelEncoder
        >>> df = cudf.DataFrame({'cat_col': ['a', 'b', 'c', 'a', 'a']})
        >>> ddf = dask_cudf.from_cudf(df, npartitions=2)
        >>> le = LabelEncoder()
        >>> encoded = le.fit_transform(ddf.cat_col)  # 0, 1, 2, 0, 0
        >>> decoded = le.inverse_transform(encoded)  # back to strings

    Notes
    -----
    The unique classes are collected at the client during ``fit``; client
    memory use grows with the number of distinct classes.
    """

    def __init__(self, *, client=None, verbose=False, **kwargs):
        super().__init__(client=client,
                         verbose=verbose,
                         **kwargs)

    def fit(self, y):
        """Fit the encoder to the categories present in *y*.

        Parameters
        ----------
        y : dask_cudf.Series
            Series of categories to encode; elements need not be unique.

        Returns
        -------
        LabelEncoder
            This fitted instance, to allow method chaining.
        """
        # Unique classes are materialized on the client (see class Notes).
        classes = y.unique().compute()
        sample = first(y) if isinstance(y, Sequence) else y
        if isinstance(sample, (dcDataFrame, daskSeries)):
            self.datatype = 'cudf'
        else:
            self.datatype = 'cupy'
        local_model = LE(**self.kwargs).fit(y, _classes=classes)
        self._set_internal_model(local_model)
        return self

    def fit_transform(self, y, delayed=True):
        """Fit to *y* and transform it.

        Functionally equivalent to (but faster than)
        ``LabelEncoder().fit(y).transform(y)``.
        """
        return self.fit(y).transform(y, delayed=delayed)

    def transform(self, y, delayed=True):
        """Ordinally encode *y* using the categories learned in ``fit``.

        Parameters
        ----------
        y : dask_cudf.Series
            Keys to transform; values should match the fitted categories.
        delayed : bool (default = True)
            Whether to execute as a delayed task or eagerly.

        Returns
        -------
        encoded : dask_cudf.Series
            The ordinally encoded series.

        Raises
        ------
        NotFittedError
            If called before ``fit``.
        KeyError
            If a category appears that was not seen in ``fit``.
        """
        if self._get_internal_model() is None:
            raise NotFittedError(
                "This LabelEncoder instance is not fitted yet. Call 'fit' "
                "with appropriate arguments before using this estimator.")
        return self._transform(y,
                               delayed=delayed,
                               output_dtype='int32',
                               output_collection_type='cudf')

    def inverse_transform(self, y, delayed=True):
        """Map ordinal codes in *y* back to the original categories.

        Unknown codes are represented as ``None`` when the encoder was built
        with ``handle_unknown='ignore'``.

        Parameters
        ----------
        y : dask_cudf.Series
            Ordinal codes to invert.
        delayed : bool (default = True)
            Whether to execute as a delayed task or eagerly.

        Returns
        -------
        dask_cudf.Series
            Distributed series of the original category values.

        Raises
        ------
        NotFittedError
            If called before ``fit``.
        """
        if self._get_internal_model() is None:
            raise NotFittedError(
                "This LabelEncoder instance is not fitted yet. Call 'fit' "
                "with appropriate arguments before using this estimator.")
        return self._inverse_transform(y,
                                       delayed=delayed,
                                       output_collection_type='cudf')
|
[
"noreply@github.com"
] |
noreply@github.com
|
a0d3caee1fbf6c2afadd6139c75f0fb247dbe328
|
b24e45267a8d01b7d3584d062ac9441b01fd7b35
|
/Usuario/.history/views_20191102195546.py
|
879e6589a3c510e2404c8ff9b59bed87520c898f
|
[] |
no_license
|
slalbertojesus/merixo-rest
|
1707b198f31293ced38930a31ab524c0f9a6696c
|
5c12790fd5bc7ec457baad07260ca26a8641785d
|
refs/heads/master
| 2022-12-10T18:56:36.346159
| 2020-05-02T00:42:39
| 2020-05-02T00:42:39
| 212,175,889
| 0
| 0
| null | 2022-12-08T07:00:07
| 2019-10-01T18:56:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,630
|
py
|
from django.shortcuts import render

from rest_framework import serializers, status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework_simplejwt.tokens import RefreshToken

from .models import Usuario
from .serializers import UsuarioSerializer
# Response-payload keys / messages (Spanish) shared by the views below.
SUCCESS = 'exito'
ERROR = 'error'
DELETE_SUCCESS = 'eliminado'
UPDATE_SUCCESS = 'actualizado'
CREATE_SUCCESS = 'creado'
@api_view(['GET', ])
def api_detail_usuario_view(request, identificador):
    """Return the serialized Usuario matching *identificador*, or 404.

    Fix: the except clause referenced the local ``usuario``, which is unbound
    when ``objects.get`` raises, producing UnboundLocalError instead of a 404;
    the exception lives on the model class ``Usuario``.
    """
    try:
        usuario = Usuario.objects.get(identificador=identificador)
    except Usuario.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = UsuarioSerializer(usuario)
        return Response(serializer.data)
@api_view(['PUT',])
def api_update_usuario_view(request, identificador):
    """Update the Usuario matching *identificador* from the request payload.

    Returns ``{'exito': 'actualizado'}`` on success, the serializer errors
    with 400 on invalid data, or 404 if the user does not exist.

    Fix: ``except usuario.DoesNotExist`` read an unbound local when
    ``objects.get`` raised; it must be ``Usuario.DoesNotExist``.
    """
    try:
        usuario = Usuario.objects.get(identificador=identificador)
    except Usuario.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'PUT':
        serializer = UsuarioSerializer(usuario, data=request.data)
        data = {}
        if serializer.is_valid():
            serializer.save()
            data[SUCCESS] = UPDATE_SUCCESS
            return Response(data=data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE',])
def api_delete_usuario_view(request, identificador):
    """Delete the Usuario matching *identificador*.

    Returns ``{'exito': 'eliminado'}`` on success, or 404 if absent.

    Fix: ``except usuario.DoesNotExist`` read an unbound local when
    ``objects.get`` raised; it must be ``Usuario.DoesNotExist``.
    """
    try:
        usuario = Usuario.objects.get(identificador=identificador)
    except Usuario.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'DELETE':
        # Model.delete() returns a (count, per-model dict) tuple — truthy
        # when the delete went through.
        operation = usuario.delete()
        data = {}
        if operation:
            data[SUCCESS] = DELETE_SUCCESS
        return Response(data=data)
@api_view(['POST',])
@permission_classes([AllowAny,])
def api_create_usuario_view(request):
    """Create a new Usuario from the request payload.

    Returns the serialized user with 201 on success, or the validation
    errors with 400.

    Fix: the original built a ``data`` dict (response message, nombre,
    usuario) that was never returned — dead stores removed; the actual
    response (``serializer.data``) is unchanged.
    """
    if request.method == 'POST':
        serializer = UsuarioSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["POST"])
@permission_classes([AllowAny,])
def api_login(request):
usuario = request.data.get("usuario")
contraseña = request.data.get("contraseña")
if usuario is None or contraseña is None:
return Response({'error': 'No existen contraseña ni usuario'},
status=HTTP_400_BAD_REQUEST)
usuario = authenticate(usuario=usuario, contraseña=contraseña)
get_tokens_for_user(usuario)
return {
'refresh': str(token),
'access': str(token.access_token),
}
def for_user(cls, user):
    """
    Returns an authorization token for the given user that will be provided
    after authenticating the user's credentials.

    NOTE(review): this looks like a vendored copy of
    rest_framework_simplejwt's ``Token.for_user`` classmethod; ``api_settings``
    is not imported in this module, so calling it would raise NameError —
    confirm whether it is dead code and can be deleted.

    Fix: the stray module-level statement ``refresh =
    RefreshToken.for_user(user)`` that followed this function referenced an
    undefined ``user`` and crashed the module on import; it has been removed.
    """
    user_id = getattr(user, api_settings.USER_ID_FIELD)
    if not isinstance(user_id, int):
        user_id = str(user_id)
    token = cls()
    token[api_settings.USER_ID_CLAIM] = user_id
    return token
def authenticate(usuario, contraseña):
    """Look up a Usuario by name and password; raise ValidationError if absent.

    Fixes: ``Usuario.objects.get`` raises ``DoesNotExist`` rather than
    returning a falsy value, so the original ``if not usuario`` branch was
    unreachable; ``serializers`` was not imported (now added at module top);
    and ``ValidationError`` accepts no ``status`` kwarg (the bare
    ``HTTP_404_NOT_FOUND`` name was also undefined).

    NOTE(review): matching on a plain ``contraseña`` column implies passwords
    are stored unhashed — worth fixing at the model level.
    """
    try:
        return Usuario.objects.get(usuario=usuario, contraseña=contraseña)
    except Usuario.DoesNotExist:
        raise serializers.ValidationError({'error': 'Usuario no existe'})
|
[
"slalbertojesus@gmail.com"
] |
slalbertojesus@gmail.com
|
5e759521921a5fbee942af6ff03899975bbd0b35
|
84ab518741695c4cdaaaaad7aacd242a48542373
|
/practicePrograms2.py
|
2a9bca47df4a22c71dce1bbf0feda7d31fd35710
|
[] |
no_license
|
Aakashgarg743/Learn-Python
|
755818988dc391dc9cdea7091a6488fdb39b0b3d
|
b5e832146845ed140e63f7f2151af70d21e44003
|
refs/heads/master
| 2023-08-11T08:14:00.474551
| 2021-09-29T13:52:33
| 2021-09-29T13:52:33
| 409,439,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,188
|
py
|
# DICTIONARY
# Tiny word lookup: prompt for one of the advertised words, print its meaning.
# Fix: the key for 'functions' was misspelled 'funcitons', so the advertised
# word could never be found and always fell through to the error message.
user = input("Welcome To My Dictionary\n['python', 'pip', 'functions']\nEnter any word that are listed above to get the meaning....\n").lower()
dic = {"python": "it is a programming language..", "pip": "it is used to install packages", "functions": "it is a block of code that only runs when it is called..."}
if user in dic:
    print(dic[user])
else:
    print("You entered wrong input...")
# FAULTY - CALCULATOR
def add(num1, num2):
    """Faulty addition: the pair ("56", "9") prints a wrong hard-coded answer
    (and returns None); every other pair is summed as float or int, per the
    user's choice read from stdin."""
    if num1 == "56" and num2 == "9":
        # Deliberate fault of the "faulty calculator" exercise.
        print("77")
    else:
        choice = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n"))
        if choice == 1:
            return float(num1) + float(num2)
        return int(num1) + int(num2)
def sub(num1, num2):
    """Subtract num2 from num1, as float or int per the user's stdin choice."""
    choice = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n"))
    if choice == 1:
        return float(num1) - float(num2)
    return int(num1) - int(num2)
def mul(num1, num2):
    """Faulty multiplication: the pair ("45", "3") prints a wrong hard-coded
    answer (and returns None); other pairs multiply as float or int, per the
    user's choice read from stdin."""
    if num1 == "45" and num2 == "3":
        # Deliberate fault of the "faulty calculator" exercise.
        print("555")
    else:
        choice = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n"))
        if choice == 1:
            return float(num1) * float(num2)
        return int(num1) * int(num2)
def div(num1, num2):
    """Divide num1 by num2: true division for the decimal choice, floor
    division for the integer choice (choice read from stdin)."""
    choice = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n"))
    if choice == 1:
        return float(num1) / float(num2)
    return int(num1) // int(num2)
if __name__ == '__main__':
    # Operands are kept as strings on purpose: the faulty branches in
    # add()/mul() key on the exact strings "56"/"9" and "45"/"3".
    n1 = input("Enter 1st number...\n")
    n2 = input("Enter 2nd number...\n")
    # isdigit() restricts the calculator to non-negative integer input.
    if n1.isdigit() and n2.isdigit():
        inpu = int(input("What operation you want to perform\nType- \n1. Addition\n2. Subtraction\n3. Multiplication\n4. Division\n"))
        if inpu == 1:
            print(add(n1, n2))
        elif inpu == 2:
            print(sub(n1, n2))
        elif inpu == 3:
            print(mul(n1, n2))
        elif inpu == 4:
            print(div(n1, n2))
        else:
            print("Wrong Input...")
    else:
        print("You enter wrong input")
|
[
"91084902+Aakashgarg743@users.noreply.github.com"
] |
91084902+Aakashgarg743@users.noreply.github.com
|
9418bf8162cced953666e74e72750c54214a25e4
|
ca87c047f49a4aa893224466c4ea54e1801e0de2
|
/code/pywin32/excel/extract_excel_data.py
|
0659280bc294925a0cfacd0f9fb16e9b812f4540
|
[] |
no_license
|
jpereiran/jpereiran-blog
|
a46504871dfbd1a007090d4a39fe51ddced0032c
|
08385e2e8b0a0440d1fda81293f8692c923174a1
|
refs/heads/master
| 2021-10-11T06:51:28.190808
| 2021-10-04T00:19:49
| 2021-10-04T00:19:49
| 192,151,831
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,971
|
py
|
import win32com.client
import glob
import sys, io

# Launch Excel through COM; visibility is optional but useful for debugging.
excel = win32com.client.Dispatch('Excel.Application')
excel.Visible = True

# Collect every workbook in the target folder.
files = glob.glob("folder_path/*.xlsx")

# Redirect stdout to the report file so every print() below lands there.
orig_stdout = sys.stdout
bk = io.open("Answers_Report.txt", mode="w", encoding="utf-8")
sys.stdout = bk

# Extract the questionnaire answers from each workbook.
for file in files:
    # Print only the file name (the part after the backslash separator).
    print(file.split('\\')[1])
    wb_data = excel.Workbooks.Open(file)
    # Get the answers to the Q1A
    mission=wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C6")
    vision =wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C7")
    print("Question 1A")
    print("Mission:",mission)
    print("Vision:" ,vision)
    print()
    # Get the answers to the Q1B (objectives and their justifications)
    oe1=wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C14")
    ju1=wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("D14")
    oe2=wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C15")
    ju2=wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("D15")
    print("Question 1B")
    print("OEN1:",oe1, "- JUSTIF:",ju1)
    print("OEN2:",oe2, "- JUSTIF:",ju2)
    print()
    # Get the answers to the Q2A
    mision=wb_data.Worksheets("2a_MisionyVisionSI").Range("C6")
    vision=wb_data.Worksheets("2a_MisionyVisionSI").Range("C7")
    print("Question 2A")
    print("Mission SI:",mision)
    print("Vision SI:",vision)
    print()
    # Get the answers to the Q3A — one project per row, rows 5..12.
    print("Question 3A")
    for i in range(5,13):
        proy=wb_data.Worksheets("3a_ProySI").Range("B"+str(i))
        desc=wb_data.Worksheets("3a_ProySI").Range("D"+str(i))
        mcfr=wb_data.Worksheets("3a_ProySI").Range("E"+str(i))
        tipo=wb_data.Worksheets("3a_ProySI").Range("F"+str(i))
        print("\tProyect:",proy)
        print("\tDesc:",desc)
        print("\tMacFarlan:",mcfr,"- Tipo",tipo)
        print()
    # Close the workbook. NOTE(review): Close(True) passes SaveChanges=True,
    # i.e. it SAVES changes — the original comment said "without saving";
    # confirm which behavior was intended.
    wb_data.Close(True)

# Restore stdout and finish the report file.
sys.stdout = orig_stdout
bk.close()

# Create a new Excel file for the grading template.
wb_template = excel.Workbooks.Add()
# Headers of the template.
# NOTE(review): 'C1A' appears in both C1 and E1 — E1 was probably meant to be
# 'C1B'; confirm before relying on the template.
wb_template.Worksheets(1).Range("A1").Value = 'File'
wb_template.Worksheets(1).Range("B1").Value = 'Q1A'
wb_template.Worksheets(1).Range("C1").Value = 'C1A'
wb_template.Worksheets(1).Range("D1").Value = 'Q1B'
wb_template.Worksheets(1).Range("E1").Value = 'C1A'
wb_template.Worksheets(1).Range("F1").Value = 'Q2A'
wb_template.Worksheets(1).Range("G1").Value = 'C2A'
wb_template.Worksheets(1).Range("H1").Value = 'Q3A'
wb_template.Worksheets(1).Range("I1").Value = 'C3A'
# Add the path of each graded file into column A, rows 2..n+1.
for idx, arch in enumerate(files):
    wb_template.Worksheets(1).Range("A"+str(idx+2)).Value = arch.replace('\\','/')
# Save the grading template without the overwrite prompt (alerts disabled).
excel.DisplayAlerts = False
wb_template.SaveAs(r'folder_path\Grades_Template.xlsx')
# Close the file and the program.
wb_template.Close()
excel.DisplayAlerts = True
excel.Quit()
|
[
"noreply@github.com"
] |
noreply@github.com
|
fc94bfc8c1c5fa1df951fab64844fca2a07b5310
|
8cf593d60d02be4692bce873656c85466fc1d8e1
|
/cw7/Zad1.py
|
44641eb1ec798de123460d5e7fff7ebaa097f101
|
[] |
no_license
|
michals-lab/Python
|
502ad5202c962808d499e16545aea5ca73c3fe21
|
09e2c76368215e3345ee5130bf56ca626ffbbe38
|
refs/heads/master
| 2021-01-25T23:55:35.358748
| 2020-05-20T07:26:42
| 2020-05-20T07:26:42
| 243,231,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
"""Print the element-wise product of two 1x3 integer ranges."""
import numpy as nu

# Row vectors [0 1 2] and [6 7 8].
x = nu.arange(3).reshape(1, 3)
y = nu.arange(6, 9).reshape(1, 3)

# Element-wise (Hadamard) product: [[0 7 16]].
product = x * y
print(product)
|
[
"59738119+michals-lab@users.noreply.github.com"
] |
59738119+michals-lab@users.noreply.github.com
|
bb86890b77d314e21ced1d0d6e8ca9908fe3952a
|
baccbb4478c01c4c102cc1cfae56103f179a50d8
|
/scripts/handle_mysql.py
|
83cd4c7631baa42559ce110f8b02604fc0a58aa8
|
[] |
no_license
|
cheer1106/test_cheer
|
a09b5f11e85f06e0aa1c0fef69b6b30e069a38b1
|
6e29266daabdcf45a7a76da2a5653d6ef6b1c108
|
refs/heads/master
| 2020-09-29T08:27:03.935876
| 2019-12-10T10:44:30
| 2019-12-10T10:44:30
| 226,999,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,061
|
py
|
"""
============================
Author:cheer
# @time:2019-11-19 10:01
# @FileName: handle_mysql.py
# @Software: PyCharm
# @Cnblogs :https://www.cnblogs.com/*****
============================
"""
import pymysql
import random
from scripts.handle_yaml import do_yaml
'''
类封装的原则:不同功能写不同的方法
'''
class HandleMysql(object):
    """pymysql helper whose connection settings come from the project's YAML
    config (``do_yaml``).

    Design note (translated from the original): each distinct task gets its
    own method.
    """

    def __init__(self):
        # 1. Open the connection. (A variant with hard-coded credentials was
        # kept commented out in the original; settings now come from do_yaml.)
        self.conn = pymysql.connect(host=do_yaml.read_yaml('mysql', 'host'),  # MySQL server IP or domain
                                    user=do_yaml.read_yaml('mysql', 'user'),  # user name
                                    password=do_yaml.read_yaml('mysql', 'password'),  # password
                                    db=do_yaml.read_yaml('mysql', 'db'),  # database to connect to
                                    port=do_yaml.read_yaml('mysql', 'port'),  # port, 3306 by default
                                    charset='utf8',  # must be 'utf8', not 'utf-8'
                                    cursorclass=pymysql.cursors.DictCursor  # DictCursor: rows come back as dicts (a list of dicts for multi-row results); without it, tuples
                                    )
        # 2. Create the cursor object.
        self.cursor = self.conn.cursor()

    def run(self, sql, args=None, is_more=True):
        """Execute *sql* with *args* and commit; return all rows, or just one
        when ``is_more`` is False."""
        # 3. Run the SQL through the cursor.
        self.cursor.execute(sql, args)
        # 4. Commit on the connection.
        self.conn.commit()
        # 5. Return the result set.
        if is_more:
            return self.cursor.fetchall()
        else:
            return self.cursor.fetchone()

    # Officially recommended: always close cursor and connection.
    def close(self):
        self.cursor.close()
        self.conn.close()

    @staticmethod
    def create_mobile():
        """Randomly build an 11-digit phone number starting with '188'.

        NOTE(review): random.sample draws without replacement, so the 8
        generated digits are always distinct — confirm this restriction is
        acceptable.
        """
        return '188' + ''.join(random.sample('0123456789', 8))

    def is_existed_mobile(self, mobile):
        """Return True if *mobile* is already registered, else False.

        :param mobile: phone number to check for registration
        """
        # sql = "select * from member where mobile_phone = %s;"
        sql = do_yaml.read_yaml('mysql', 'select_user_sql')
        # Registered (query returned a row -> truthy): True; no match (None): False.
        if self.run(sql, args=[mobile], is_more=False):
            return True
        else:
            return False

    def create_not_exsited_mobile(self):
        """Randomly generate a phone number not present in the database yet."""
        while True:
            # Draw a candidate number.
            one_mobile = self.create_mobile()
            # Stop as soon as an unregistered number is found.
            if not self.is_existed_mobile(one_mobile):
                break
        return one_mobile
if __name__ == '__main__':
    # Translated note: don't create the object above the main guard, because
    # it gets closed here.
    do_mysql = HandleMysql()
    sql_1 = 'select * from member LIMIT 0,10;'
    # sql_2 = "select * from member where mobile_phone = '13888888889';"
    #
    # print(do_mysql.run(sql_1))
    print(do_mysql.run(sql_1))
    # print(do_mysql.create_not_exsited_mobile())
    do_mysql.close()
|
[
"1498053436@qq.com"
] |
1498053436@qq.com
|
b08be16b6f55bbb29dd93651676a710322f99cdd
|
2fcb5da42f0aff62c88189bd36fc5f61a40eb604
|
/vardautomation/timeconv.py
|
3b84b24deda8187b48a85d3ae7948559d45a7404
|
[
"MIT"
] |
permissive
|
tomato39/vardautomation
|
d45ec446a1cd06c2e7b7ec5378772953fa7b4caa
|
efa24d9420d6a6f732e8b0a846874a289a7cb095
|
refs/heads/master
| 2023-08-23T01:44:00.014196
| 2021-10-21T23:05:52
| 2021-10-21T23:09:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,647
|
py
|
"""Conversion time module"""
from fractions import Fraction
from .status import Status
class Convert:
"""Collection of methods to perform time conversion"""
@classmethod
def ts2f(cls, ts: str, fps: Fraction, /) -> int:
"""
Convert a timestamp hh:mm:ss.xxxx in number of frames
:param ts: Timestamp
:param fps: Framerate Per Second
:return: Frames
"""
s = cls.ts2seconds(ts)
f = cls.seconds2f(s, fps)
return f
@classmethod
def f2ts(cls, f: int, fps: Fraction, /, *, precision: int = 3) -> str:
"""
Convert frames in timestamp hh:mm:ss.xxxx
:param f: Frames
:param fps: Framerate Per Second
:param precision: Precision number, defaults to 3
:return: Timestamp
"""
s = cls.f2seconds(f, fps)
ts = cls.seconds2ts(s, precision=precision)
return ts
@classmethod
def seconds2ts(cls, s: float, /, *, precision: int = 3) -> str:
"""
Convert seconds in timestamp hh:mm:ss.xxx
:param s: Seconds
:param precision: Precision number, defaults to 3
:return: Timestamp
"""
m = s // 60
s %= 60
h = m // 60
m %= 60
return cls.composets(h, m, s, precision=precision)
@classmethod
def f2assts(cls, f: int, fps: Fraction, /) -> str:
"""
Convert frames to .ass timestamp hh:mm:ss.xx properly
by removing half of one frame per second of the specified framerate
:param f: Frames
:param fps: Framerate Per Second
:return: ASS timestamp
"""
s = cls.f2seconds(f, fps)
s -= fps ** -1 * 0.5
ts = cls.seconds2ts(max(0, s), precision=3)
return ts[:-1]
@classmethod
def assts2f(cls, assts: str, fps: Fraction, /) -> int:
"""
Convert .ass timestamp hh:mm:ss.xx to frames properly
by adding half of one frame per second of the specified framerate
:param assts: ASS timestamp
:param fps: Framerate Per Second
:return: Frames
"""
s = cls.ts2seconds(assts)
if s > 0:
s += fps ** -1 * 0.5
return cls.seconds2f(s, fps)
@staticmethod
def f2seconds(f: int, fps: Fraction, /) -> float:
"""
Convert frames to seconds
:param f: Frames
:param fps: Framerate Per Second
:return: Seconds
"""
if f == 0:
return 0.0
t = round(float(10 ** 9 * f * fps ** -1))
s = t / 10 ** 9
return s
@staticmethod
def ts2seconds(ts: str, /) -> float:
"""
Convert timestamp hh:mm:ss.xxxx to seconds
:param ts: Timestamp
:return: Seconds
"""
h, m, s = map(float, ts.split(':'))
return h * 3600 + m * 60 + s
@staticmethod
def seconds2f(s: float, fps: Fraction, /) -> int:
"""
Convert seconds to frames
:param s: Seconds
:param fps: Framerate Per Second
:return: Frames
"""
return round(s * fps)
@staticmethod
def samples2seconds(num_samples: int, sample_rate: int, /) -> float:
"""
Convert samples to seconds
:param num_samples: Samples
:param sample_rate: Playback sample rate
:return: Seconds
"""
return num_samples / sample_rate
@staticmethod
def seconds2samples(s: float, sample_rate: int, /) -> int:
"""
Convert seconds to samples
:param s: Seconds
:param sample_rate: Playback sample rate
:return: Samples
"""
return round(s * sample_rate)
@classmethod
def f2samples(cls, f: int, fps: Fraction, sample_rate: int) -> int:
    """
    Convert frames to audio samples, going through seconds.

    :param f: Frames
    :param fps: Framerate Per Second
    :param sample_rate: Playback sample rate
    :return: Samples
    """
    return cls.seconds2samples(cls.f2seconds(f, fps), sample_rate)
@classmethod
def samples2f(cls, num_samples: int, sample_rate: int, fps: Fraction) -> int:
    """
    Convert audio samples to frames, going through seconds.

    :param num_samples: Samples
    :param sample_rate: Playback sample rate
    :param fps: Framerate Per Second
    :return: Frame
    """
    return cls.seconds2f(cls.samples2seconds(num_samples, sample_rate), fps)
@staticmethod
def composets(h: float, m: float, s: float, /, *, precision: int = 3) -> str:
"""
Make a timestamp based on given hours, minutes and seconds
:param h: Hours
:param m: Minutes
:param s: Seconds
:param precision: Precision number, defaults to 3
:return: Timestamp
"""
if precision == 0:
out = f"{h:02.0f}:{m:02.0f}:{round(s):02}"
elif precision == 3:
out = f"{h:02.0f}:{m:02.0f}:{s:06.3f}"
elif precision == 6:
out = f"{h:02.0f}:{m:02.0f}:{s:09.6f}"
elif precision == 9:
out = f"{h:02.0f}:{m:02.0f}:{s:012.9f}"
else:
Status.fail(f'composets: the precision {precision} must be a multiple of 3 (including 0)')
return out
|
[
"ichunjo.le.terrible@gmail.com"
] |
ichunjo.le.terrible@gmail.com
|
3f1b20e6325128b26f23eed22db51edb5211804d
|
e1d942fc4d0099c4a5fe7cf10fdf7e710da2a147
|
/11/JackCompiler.py
|
354594cea58f8aa55a8c2273f48dda940a0413fb
|
[] |
no_license
|
AradAlon/Nand2Tetris
|
1c5856a6cf5734661b8e848a4e5fbea5381f4603
|
1ca9948b1495b0f16bfa5c89c4be50944fa2380e
|
refs/heads/master
| 2022-12-15T14:32:10.149762
| 2020-09-19T13:27:15
| 2020-09-19T13:27:15
| 296,870,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,213
|
py
|
import sys
import re
import glob
OUT_PATH = ''
# Token type tags used throughout the compiler (the XML tag names from
# the Jack grammar specification).
T_KEYWORD = 'keyword'
T_SYM = 'symbol'
T_NUM = 'integerConstant'
T_STR = 'stringConstant'
T_ID = 'identifier'


class JackCompiler:
    """Driver: compiles every given .jack file to its .vm output."""

    def __init__(self, jacks):
        self.analyze(jacks)

    def analyze(self, jacks):
        # Each file is compiled independently by its own engine.
        for jack in jacks:
            CompilationEngine(jack)


class JackTokenizer:
    """Splits a .jack source file into a list of (token, type) pairs.

    Comments (``//`` line comments and ``/* ... */`` block comments) are
    stripped first; the remaining text is matched against keyword,
    symbol, integer, string and identifier regexes, in that priority
    order.
    """

    def __init__(self, jack):
        # Read the whole file up front, strip comments, and tokenize.
        # Use a context manager so the handle is always closed (the
        # original leaked it on any exception before close()).
        with open(jack, 'r') as reader:
            one_liner = self.one_liner(reader)
        self.tokens = self.tokenize(one_liner)
        # Cursor into self.tokens; -1 means "before the first token".
        self.index = -1

    def one_liner(self, reader):
        """Collapse the source into a single comment-free line."""
        content = []
        line = reader.readline()
        while line:
            # Drop any trailing // comment, then surrounding whitespace.
            comment_index = line.find('//') if line.find('//') > -1 else len(line)
            line = line[:comment_index].strip()
            if not line:
                line = reader.readline()
                continue
            content.append(line)
            line = reader.readline()
        one_liner = ' '.join(content)
        # Remove /* ... */ block comments (non-greedy; may span lines
        # because the source has been joined into one string).
        one_liner = re.sub(r'/\*(.*?)\*/', '', one_liner).strip()
        return one_liner

    def tokenize(self, one_liner):
        """Return ``[(token, type), ...]`` for the comment-free source."""
        keywords = ['class', 'method', 'function', 'constructor', 'int', 'boolean',
                    'char', 'void', 'var', 'static', 'field', 'let', 'do', 'if',
                    'else', 'while', 'return', 'true', 'false', 'null', 'this']
        symbols = ['{','}','(',')','[',']','.',',',';','+','-','*','/','&','|','<','>','=','~']
        # Symbol translation table; currently unused (see the commented
        # lookup below) and an identity mapping as written.
        convert_symbols = {
            "<": '<',
            ">": '>',
            '"': '"',
            "&": '&',
        }
        tokens = []
        keyword_re = r'\b' + r'\b|\b'.join(keywords) + r'\b'
        sym_re = '['+re.escape(''.join(symbols))+']'
        num_re = r'\d+'
        str_re = r'"[^"\n]*"'
        id_re = r'[\w\-]+'
        # One big alternation; ordering gives keywords priority over
        # identifiers when classifying below.
        word = re.compile(keyword_re+'|'+sym_re+'|'+num_re+'|'+str_re+'|'+id_re)
        types = {
            T_KEYWORD: keyword_re,
            T_SYM: sym_re,
            T_NUM: num_re,
            T_STR: str_re,
            T_ID: id_re,
        }
        for token in word.findall(one_liner):
            for typ, reg in types.items():
                if re.match(reg, token) is not None:
                    if typ == T_STR:
                        # Store string constants without their quotes.
                        token = token.strip('"')
                    # if typ == T_SYM:
                    #     token = convert_symbols.get(token, token)
                    tokens.append((token, typ))
                    break
        return tokens

    @property
    def hasMoreTokens(self):
        """True while :meth:`advance` can still move forward."""
        return self.index < len(self.tokens) - 1

    def advance(self):
        # BUG FIX: the original line
        #     self.index += 1 if self.hasMoreTokens else self.index
        # parsed as `self.index += (1 if ... else self.index)`, so once
        # the stream was exhausted it *doubled* the index instead of
        # leaving it in place, putting currentToken out of range.
        if self.hasMoreTokens:
            self.index += 1

    @property
    def currentToken(self):
        """The (token, type) pair at the cursor, or None before the start."""
        return self.tokens[self.index] if self.index > -1 else None

    def nextToken(self, LL):
        """Look ahead LL tokens (None once the stream is exhausted)."""
        return self.tokens[self.index + LL] if self.hasMoreTokens else None
class CompilationEngine:
    """Recursive-descent parser and code generator: Jack tokens -> VM code.

    One engine compiles exactly one .jack file; construction drives the
    whole compilation and closes the output writer.
    """

    # Shared counter used to mint unique VM label names.
    label_count = 0
    # Binary Jack operators -> VM commands (* and / go through the Math OS class).
    convert_symbols = {'+':'add', '-':'sub', '*':'call Math.multiply 2', '/':'call Math.divide 2',
                       '<':'lt', '>':'gt', '=':'eq', '&':'and', '|':'or'}
    # Unary Jack operators -> VM commands.
    unary_convert_symbols = {'-':'neg', '~':'not'}

    def __init__(self, jack):
        self.jackTokens = JackTokenizer(jack)
        self.vm = VMWriter(jack)
        self.symbols = SymbolTable()
        self.compileClass()
        self.vm.close()

    def process(self, expected_typ, *args):
        """Consume the next token, verifying its type (and value, for
        keywords/symbols); raise ValueError on a grammar mismatch."""
        self.jackTokens.advance()
        val ,typ = self.jackTokens.currentToken
        if expected_typ != typ or ((expected_typ == T_KEYWORD or expected_typ == T_SYM) and val not in args):
            # NOTE(review): `text` is built but never attached to the error.
            text = '{}, ({} {})'.format(expected_typ, typ, val)
            raise ValueError()
        return typ, val

    def peek(self, expected_typ, *args, LL=1):
        """Look ahead LL tokens without consuming; True if they match."""
        val, typ = self.jackTokens.nextToken(LL)
        if expected_typ != typ or ((expected_typ == T_KEYWORD or expected_typ == T_SYM) and val not in args):
            return False
        return True

    @property
    def label(self):
        # Every access mints a fresh, file-unique label name.
        self.label_count += 1
        return 'label{}'.format(str(self.label_count))

    def vm_variable(self, action, name):
        """Push or pop the named variable via its segment (kind) and index."""
        kind, type, index = self.symbols.kind_type_index_of(name)
        if action == 'push':
            self.vm.write_push(kind, index)
        if action == 'pop':
            self.vm.write_pop(kind, index)

    def compileClass(self):
        # 'class' className '{' classVarDec* subroutineDec* '}'
        self.process(T_KEYWORD, 'class')
        _, self.current_class_name = self.process(T_ID)
        self.process(T_SYM, '{')
        self.compileClassVarDec()
        self.compileSubroutineDec()
        self.process(T_SYM, '}')

    def compileClassVarDec(self):
        # ('static' | 'field') type varName (',' varName)* ';'
        while self.peek(T_KEYWORD, 'static', 'field'):
            _, kind = self.process(T_KEYWORD, 'static', 'field')
            _, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID)
            _, name = self.process(T_ID)
            self.symbols.append_class_table(name, type, kind)
            while self.peek(T_SYM, ','):
                self.process(T_SYM, ',')
                _, name = self.process(T_ID)
                self.symbols.append_class_table(name, type, kind)
            self.process(T_SYM, ';')

    def compileSubroutineDec(self):
        # ('constructor' | 'function' | 'method') (type | 'void') name params body
        while self.peek(T_KEYWORD, 'constructor', 'function', 'method'):
            _, self.current_subroutine_type = self.process(T_KEYWORD, 'constructor', 'function', 'method')
            _, type = self.process(T_KEYWORD, 'void', 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'void', 'int', 'char', 'boolean') else self.process(T_ID)
            _, self.current_subroutine_name = self.process(T_ID)
            self.symbols.start_subroutine()
            if self.current_subroutine_type == 'method':
                # Methods receive `this` as implicit argument 0.
                self.symbols.append_subroutine_table('this', self.current_class_name, 'argument')
            self.compileParameterList()
            self.compileSubroutineBody()

    def compileParameterList(self):
        # '(' ((type varName) (',' type varName)*)? ')'
        self.process(T_SYM, '(')
        if self.peek(T_KEYWORD, 'int', 'char', 'boolean') or self.peek(T_ID):
            _, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID)
            _, name = self.process(T_ID)
            self.symbols.append_subroutine_table(name, type, 'argument')
            while self.peek(T_SYM, ','):
                self.process(T_SYM, ',')
                _, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID)
                _, name = self.process(T_ID)
                self.symbols.append_subroutine_table(name, type, 'argument')
        self.process(T_SYM, ')')

    def compileSubroutineBody(self):
        # '{' varDec* statements '}'; the function header must be emitted
        # only after all varDecs are known (local count).
        self.process(T_SYM, '{')
        self.compileVarDec()
        func_name = self.current_class_name+'.'+self.current_subroutine_name
        num_of_var = self.symbols.var_count('var')
        self.vm.write_function(func_name, num_of_var)
        self.this_pointer()
        self.compileStatements()
        self.process(T_SYM, '}')

    def this_pointer(self):
        """Set up the `this` pointer for methods and constructors."""
        if self.current_subroutine_type == 'method':
            # Methods: `this` arrives as argument 0.
            self.vm.write_push('argument', 0)
            self.vm.write_pop('pointer', 0)
        elif self.current_subroutine_type == 'constructor':
            # Constructors: allocate one word per field and anchor `this`.
            self.vm.write_push('constant', self.symbols.var_count('field'))
            self.vm.write_call('Memory.alloc', 1)
            self.vm.write_pop('pointer', 0)

    def compileVarDec(self):
        # 'var' type varName (',' varName)* ';'
        while self.peek(T_KEYWORD, 'var'):
            _, kind = self.process(T_KEYWORD, 'var')
            _, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID)
            _, name = self.process(T_ID)
            self.symbols.append_subroutine_table(name, type, kind)
            while self.peek(T_SYM, ','):
                self.process(T_SYM, ',')
                _, name = self.process(T_ID)
                self.symbols.append_subroutine_table(name, type, kind)
            self.process(T_SYM, ';')

    def compileStatements(self):
        # Dispatch on the leading keyword of each statement.
        while self.peek(T_KEYWORD, 'let', 'if', 'while', 'do', 'return'):
            if self.peek(T_KEYWORD, 'let'):
                self.compileLet()
            elif self.peek(T_KEYWORD, 'if'):
                self.compileIf()
            elif self.peek(T_KEYWORD, 'while'):
                self.compileWhile()
            elif self.peek(T_KEYWORD, 'do'):
                self.compileDo()
            elif self.peek(T_KEYWORD, 'return'):
                self.compileReturn()

    def compileLet(self):
        # 'let' varName ('[' expression ']')? '=' expression ';'
        self.process(T_KEYWORD, 'let')
        _, name = self.process(T_ID)
        if self.peek(T_SYM, '['):
            # Array element target: compute base + index first.
            self.vm_variable('push', name)
            self.process(T_SYM, '[')
            self.compileExpression()
            self.process(T_SYM, ']')
            self.vm.write_arithmetic('add')
            self.process(T_SYM, '=')
            self.compileExpression()
            self.process(T_SYM, ';')
            # Park the value, anchor `that` at the element, store.
            self.vm.write_pop('temp', 1)
            self.vm.write_pop('pointer', 1)
            self.vm.write_push('temp', 1)
            self.vm.write_pop('that', 0)
            return
        self.process(T_SYM, '=')
        self.compileExpression()
        self.process(T_SYM, ';')
        self.vm_variable('pop', name)

    def compileIf(self):
        # 'if' '(' expr ')' '{' stmts '}' ('else' '{' stmts '}')?
        self.process(T_KEYWORD, 'if')
        label = self.label
        self.compileCondition(label)
        if self.peek(T_KEYWORD, 'else'):
            self.process(T_KEYWORD, 'else')
            self.process(T_SYM, '{')
            self.compileStatements()
            self.process(T_SYM, '}')
        # End-of-if label jumped to from the then-branch.
        self.vm.write_label(label)

    def compileWhile(self):
        # 'while' '(' expr ')' '{' stmts '}' — loop back to `label`.
        self.process(T_KEYWORD, 'while')
        label = self.label
        self.vm.write_label(label)
        self.compileCondition(label)

    def compileCondition(self, label):
        """Shared if/while skeleton: test, negate, skip body on false,
        jump to `label` after the body."""
        self.process(T_SYM, '(')
        self.compileExpression()
        self.process(T_SYM, ')')
        self.vm.write_arithmetic('not')
        else_label = self.label
        self.vm.write_if(else_label)
        self.process(T_SYM, '{')
        self.compileStatements()
        self.process(T_SYM, '}')
        self.vm.write_goto(label)
        self.vm.write_label(else_label)

    def compileDo(self):
        # 'do' subroutineCall ';' — discard the call's return value.
        self.process(T_KEYWORD, 'do')
        self.compileSubroutineCall()
        self.vm.write_pop('temp', 0)
        self.process(T_SYM, ';')

    def compileReturn(self):
        # 'return' expression? ';' — void routines return constant 0.
        self.process(T_KEYWORD, 'return')
        if not self.peek(T_SYM, ';'):
            self.compileExpression()
        else:
            self.vm.write_push('constant', 0)
        self.process(T_SYM, ';')
        self.vm.write_return()

    def compileExpression(self):
        """Compile term (op term)*; returns 1 if an expression was
        present, 0 otherwise (used by compileExpressionList)."""
        if not self.is_term():
            return 0
        self.compileTerm()
        while self.peek(T_SYM, '+', '-', '*', '/', '&', '|', '<', '>', '='):
            _, op = self.process(T_SYM, '+', '-', '*', '/', '&', '|', '<', '>', '=')
            self.compileTerm()
            self.vm.write_arithmetic(self.convert_symbols[op])
        return 1

    def compileTerm(self):
        # term: constant | string | keyword constant | (expr) | unary op
        #       term | varName | varName[expr] | subroutineCall
        if self.peek(T_NUM):
            _, val = self.process(T_NUM)
            self.vm.write_push('constant', val)
        elif self.peek(T_STR):
            # Build the string at runtime via the String OS class.
            _, string = self.process(T_STR)
            self.vm.write_push('constant', len(string))
            self.vm.write_call('String.new', 1)
            for char in string:
                self.vm.write_push('constant', ord(char))
                self.vm.write_call('String.appendChar', 2)
        elif self.peek(T_KEYWORD, 'true', 'false', 'null', 'this'):
            _, word = self.process(T_KEYWORD, 'true', 'false', 'null', 'this')
            if word == 'this':
                self.vm.write_push('pointer', 0)
            elif word == 'true':
                # true is -1 (all bits set).
                self.vm.write_push('constant', 1)
                self.vm.write_arithmetic('neg')
            else:
                # false and null are both 0.
                self.vm.write_push('constant', 0)
        elif self.peek(T_SYM, '('):
            self.process(T_SYM, '(')
            self.compileExpression()
            self.process(T_SYM, ')')
        elif self.peek(T_SYM, '-', '~'):
            _, op = self.process(T_SYM, '-', '~')
            self.compileTerm()
            self.vm.write_arithmetic(self.unary_convert_symbols[op])
        elif self.peek(T_ID):
            # Disambiguate with two-token lookahead.
            if self.peek(T_SYM, '[', LL=2):
                # Array read: base + index, then dereference via `that`.
                _, name = self.process(T_ID)
                self.vm_variable('push', name)
                self.process(T_SYM, '[')
                self.compileExpression()
                self.process(T_SYM, ']')
                self.vm.write_arithmetic('add')
                self.vm.write_pop('pointer', 1)
                self.vm.write_push('that', 0)
            elif self.peek(T_SYM, '(', '.', LL=2):
                self.compileSubroutineCall()
            else:
                _, name = self.process(T_ID)
                self.vm_variable('push', name)

    def is_term(self):
        """True if the next token can start a term."""
        return (self.peek(T_NUM) or self.peek(T_STR) or
                self.peek(T_KEYWORD, 'true', 'false', 'null', 'this') or
                self.peek(T_ID) or self.peek(T_SYM, '(', '-', '~'))

    def compileSubroutineCall(self):
        # name '(' args ')' | (className | varName) '.' name '(' args ')'
        num_of_args = 0
        _, obj_name = self.process(T_ID)
        if self.peek(T_SYM, '.'):
            self.process(T_SYM, '.')
            _, type, _ = self.symbols.kind_type_index_of(obj_name)
            if type:
                # Method call on a variable: push the object as arg 0 and
                # dispatch on its declared type's class.
                num_of_args += 1
                self.vm_variable('push', obj_name)
                obj_name = type
            _, func_name = self.process(T_ID)
            name = '{}.{}'.format(obj_name, func_name)
        else:
            # Bare call: a method on the current object (`this` is arg 0).
            self.vm.write_push('pointer', 0)
            num_of_args += 1
            name = '{}.{}'.format(self.current_class_name, obj_name)
        self.process(T_SYM, '(')
        num_of_args += self.compileExpressionList()
        self.process(T_SYM, ')')
        self.vm.write_call(name, num_of_args)

    def compileExpressionList(self):
        """Compile (expression (',' expression)*)?; return the count."""
        num_of_args = self.compileExpression()
        while self.peek(T_SYM, ','):
            self.process(T_SYM, ',')
            self.compileExpression()
            num_of_args += 1
        return num_of_args
class SymbolTable:
    """Tracks class-level (static/field) and subroutine-level
    (argument/var) identifiers, mapping each name to its kind, type and
    running index within that kind."""

    def __init__(self):
        # kind -> ordered list of {'name': ..., 'type': ...} records
        self.class_table = {
            "field": [],
            "static": [],
        }
        # One table per compiled subroutine; only the last one is live.
        self.subroutine_tables = []

    def append_class_table(self, name, type, kind):
        """Register a static/field variable of the class."""
        self.class_table[kind].append({'name': name, 'type': type})

    def append_subroutine_table(self, name, type, kind):
        """Register an argument/var of the current subroutine."""
        self.subroutine_tables[-1][kind].append({'name': name, 'type': type})

    def start_subroutine(self):
        """Open a fresh (empty) scope for the next subroutine."""
        self.subroutine_tables.append({"argument": [], "var": []})

    def var_count(self, kind):
        """Number of identifiers of the given kind in the relevant scope."""
        if kind in ('field', 'static'):
            return len(self.class_table[kind])
        return len(self.subroutine_tables[-1][kind])

    def kind_type_index_of(self, name):
        """Resolve a name: class scope first, then the current subroutine.

        Returns (kind, type, index) or (None, None, None) if unknown.
        """
        for table in (self.class_table, self.subroutine_tables[-1]):
            for kind, records in table.items():
                for position, record in enumerate(records):
                    if record['name'] == name:
                        return kind, record['type'], position
        return None, None, None
class VMWriter:
    """Emits VM commands to ``<name>.vm`` next to the source .jack file."""

    # Jack-level segment names that map to different VM segment names.
    _SEGMENT_ALIASES = {'field': 'this', 'var': 'local'}

    def __init__(self, jack):
        self.file = open(jack.replace('.jack','.vm'), 'w')

    def write(self, line):
        """Write one VM command followed by a newline."""
        self.file.write(line + '\n')

    def write_push(self, segment, index):
        segment = self._SEGMENT_ALIASES.get(segment, segment)
        self.write('push {} {}'.format(segment, str(index)))

    def write_pop(self, segment, index):
        segment = self._SEGMENT_ALIASES.get(segment, segment)
        self.write('pop {} {}'.format(segment, str(index)))

    def write_arithmetic(self, command):
        # Arithmetic/logic commands are emitted verbatim.
        self.write('{}'.format(command))

    def write_label(self, label):
        self.write('label {}'.format(label))

    def write_goto(self, label):
        self.write('goto {}'.format(label))

    def write_if(self, label):
        self.write('if-goto {}'.format(label))

    def write_call(self, name, num_of_args):
        self.write('call {} {}'.format(name, str(num_of_args)))

    def write_function(self, name, num_of_locals):
        self.write('function {} {}'.format(name, str(num_of_locals)))

    def write_return(self):
        self.write('return')

    def close(self):
        """Flush and close the output .vm file."""
        self.file.close()
if __name__ == "__main__":
    # CLI entry point: expects a single argument, either one .jack file
    # or a folder containing .jack files.
    path_or_file = sys.argv[1]
    if not path_or_file.endswith('.jack'):
        # Folder mode: remember the folder path for output bookkeeping.
        name = path_or_file.split('\\')[-1]
        OUT_PATH = path_or_file
    num_of_arg = len(sys.argv) - 1
    if num_of_arg != 1:
        print("expected 1 argument - file or folder, got {} argument/s".format(num_of_arg))
        sys.exit()
    jacks = glob.glob(path_or_file+'/*.jack') or [path_or_file]
    # NOTE(review): `jacks` can never be [] here — the `or [path_or_file]`
    # fallback above guarantees at least one entry, so this guard is
    # effectively dead code. Confirm intent.
    if jacks == []:
        print("no jack files in folder")
        sys.exit()
    trans = JackCompiler(jacks)
|
[
"noreply@github.com"
] |
noreply@github.com
|
ae8caa3e5755b5b934074980647e9b8a044a2e9a
|
2d930aadf19b2ad6ea49725099d2f37475cd57f8
|
/test/functional/wallet-dump.py
|
c3f723a19bbd46584fb33bce6dba37487abcdcbe
|
[
"MIT"
] |
permissive
|
stratton-oakcoin/oakcoin
|
ea83774c9f6ea64adb8832770e6219ffb31edef6
|
fe53193a50bd3674211448f1dcc39c6f9f042bb2
|
refs/heads/master
| 2021-01-20T13:22:05.877005
| 2017-05-07T10:09:57
| 2017-05-07T10:09:57
| 90,477,972
| 1
| 2
| null | 2017-05-07T10:09:57
| 2017-05-06T16:58:05
|
C++
|
UTF-8
|
Python
| false
| false
| 4,770
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dumpwallet RPC."""
from test_framework.test_framework import OakcoinTestFramework
from test_framework.util import (start_nodes, start_node, assert_equal, oakcoind_processes)
def read_dump(file_name, addrs, hd_master_addr_old):
    """
    Read the given dump, count the addrs that match, count change and reserve.
    Also check that the old hd_master is inactive
    """
    found_addr = found_addr_chg = found_addr_rsv = 0
    hd_master_addr_ret = None
    with open(file_name, encoding='utf8') as inputfile:
        for line in inputfile:
            # Skip comment lines and anything too short to be a key record.
            if line.startswith("#") or len(line) <= 10:
                continue
            # Each record is "<key> <time> <keytype> # addr=... hdkeypath=...".
            key_label, comment = line.split("#")
            keytype = key_label.split(" ")[2]
            if len(comment) <= 1:
                continue
            addr_keypath = comment.split(" addr=")[1]
            addr = addr_keypath.split(" ")[0]
            keypath = None
            if keytype == "inactivehdmaster=1":
                # The old master key must still be present, but inactive.
                assert hd_master_addr_old == addr
            elif keytype == "hdmaster=1":
                # A fresh master key must have been generated.
                assert hd_master_addr_old != addr
                hd_master_addr_ret = addr
            else:
                keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
            # Classify the key against the known addresses.
            for addr_obj in addrs:
                if addr_obj['address'] == addr and addr_obj['hdkeypath'] == keypath and keytype == "label=":
                    found_addr += 1
                    break
                elif keytype == "change=1":
                    found_addr_chg += 1
                    break
                elif keytype == "reserve=1":
                    found_addr_rsv += 1
                    break
    return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(OakcoinTestFramework):
    """Functional test for the dumpwallet RPC, unencrypted and encrypted."""

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = False
        self.num_nodes = 1
        # Oversized keypool so the dump contains a predictable key count.
        self.extra_args = [["-keypool=90"]]

    def setup_network(self, split=False):
        # Use 1 minute timeout because the initial getnewaddress RPC can take
        # longer than the default 30 seconds due to an expensive
        # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
        # the test often takes even longer.
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60)

    def run_test (self):
        tmpdir = self.options.tmpdir

        # generate 20 addresses to compare against the dump
        test_addr_count = 20
        addrs = []
        for i in range(0,test_addr_count):
            addr = self.nodes[0].getnewaddress()
            vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
            addrs.append(vaddr)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()

        # dump unencrypted wallet
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")

        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
            read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
        assert_equal(found_addr, test_addr_count)  # all keys must be in the dump
        assert_equal(found_addr_chg, 50)  # 50 blocks where mined
        assert_equal(found_addr_rsv, 90*2)  # 90 keys plus 100% internal keys

        # encrypt wallet, restart, unlock and dump
        self.nodes[0].encryptwallet('test')
        # encryptwallet stops the node; wait for it to exit, then restart.
        oakcoind_processes[0].wait()
        self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
        self.nodes[0].walletpassphrase('test', 10)
        # Should be a no-op:
        self.nodes[0].keypoolrefill()
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")

        found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
            read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
        assert_equal(found_addr, test_addr_count)
        assert_equal(found_addr_chg, 90*2 + 50)  # old reserve keys are marked as change now
        assert_equal(found_addr_rsv, 90*2)
if __name__ == '__main__':
    # Run via the shared test framework's main() harness.
    WalletDumpTest().main ()
|
[
"s.matthew.english@gmail.com"
] |
s.matthew.english@gmail.com
|
ecf24d7d6ee11295a51d53aedecf26dcf7c0a36a
|
f57a425d2bfe242f59bfccefb844c06f6a924bb9
|
/LinearRegression.py
|
d8742adbfb5eb3ca9eda275cfd5df8ec9b9b7d75
|
[] |
no_license
|
MostafaZegoo/NLP_Project
|
a48208d4626e1b2254f96757714db77f14ae2880
|
363942bea9d297c9af47e28a5fd35fcf8860dda2
|
refs/heads/master
| 2020-03-12T23:19:17.713877
| 2018-08-21T08:29:59
| 2018-08-21T08:29:59
| 130,863,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,706
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,confusion_matrix,classification_report
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LinearRegression
import string
# Fix the RNG so results are reproducible across runs.
np.random.seed(123456)

# Load the UCI news-aggregator dataset; CATEGORY is one of b/t/e/m
# (business / sci-tech / entertainment / health).
news_df = pd.read_csv("uci-news-aggregator.csv", sep = ",")
# Map the four category letters onto integer labels 1..4.
news_df['CATEGORY'] = news_df.CATEGORY.map({ 'b': 1, 't': 2, 'e': 3, 'm': 4 })
# Lower-case each headline and strip all punctuation in a single pass.
news_df['TITLE'] = news_df.TITLE.map(lambda x: x.lower().translate(str.maketrans('','', string.punctuation)))
# Bag-of-words features over the cleaned titles, English stop words removed.
vectorizer = CountVectorizer(stop_words='english')
x = vectorizer.fit_transform(news_df['TITLE'])
encoder = LabelEncoder()
y = encoder.fit_transform(news_df['CATEGORY'])
# split into train and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33,random_state=42)
# Instantiate the classifier: clf
# NOTE(review): LinearRegression is a regressor; wrapped in
# OneVsRestClassifier its real-valued outputs are argmax'd into labels.
# Confirm this is intentional rather than LogisticRegression.
clf = OneVsRestClassifier(LinearRegression())
# Fit the classifier to the training data
clf.fit(x_train, y_train)
#clf.score(x_test,y_test)
y_pred=clf.predict(x_test)
# NOTE(review): with four classes, confusion_matrix returns a 4x4 matrix,
# so this unpacking binds each *row* of the matrix — the scalar-sounding
# names tn/fn/tp/fp are misleading, and the plots below plot whole rows.
# Verify intent.
tn,fn,tp,fp=confusion_matrix(y_test,y_pred)
print(confusion_matrix(y_test,y_pred))
print("===================================")
print(classification_report(y_test,y_pred))
print("===================================")
print("Accuracy score:",accuracy_score(y_test,y_pred))
print("===================================")
print(clf.predict(vectorizer.transform(["nescafe is a product from nestle"])))
plt.plot(tn)
plt.plot(tp)
plt.plot(fn)
plt.plot(fp)
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
3753ecbbf592ed7d15df03a2549a45b42ac22766
|
224a906e91c7c1cc8778466ef785060871ede67d
|
/name_to_job.py
|
4d1e39731dce00448b3ebe524814d3cbaed30fb8
|
[] |
no_license
|
Pavanisoma/Salary-Prediction-from-Name-Team-Competition-
|
353c7fda0df873e7e41d8283929c9ed49fdc97e9
|
1ddf618cbc4df41171a1b87698c0a7b45f0eb574
|
refs/heads/master
| 2021-08-08T21:10:57.862710
| 2020-06-14T19:28:51
| 2020-06-14T19:28:51
| 191,850,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,272
|
py
|
import numpy as np
from keras.utils import to_categorical
import keras.backend as K
import matplotlib.pyplot as plt
import random
from tqdm import tqdm
import nltk
import os
import time
import tensorflow as tf
import csv
from nltk.stem.wordnet import WordNetLemmatizer
from nltk import word_tokenize
tf.enable_eager_execution()
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
def pre_processed_job_title(title):
    """Lower-case and lemmatize a job title, wrapping it in <start>/<end>
    sequence markers for the seq2seq model."""
    lemmatizer = WordNetLemmatizer()
    words = [lemmatizer.lemmatize(token) for token in word_tokenize(title.lower())]
    return '<start> ' + ' '.join(words) + ' <end>'
def pre_processed_name(first_name, last_name):
    """Strip commas and quotes from both name parts, lower-case them, and
    wrap the full name in <start>/<end> sequence markers."""
    def clean(part):
        return part.replace(',', '').replace('"', '').replace('\'', '').lower()
    return '<start> ' + clean(first_name) + ' ' + clean(last_name) + ' <end>'
def load_dataset(file="processed_bayarea.csv"):
    """Read (job, last name, first name) rows from the csv (header
    skipped) and return the cleaned name list, job list and pairs."""
    name_list, job_list, name_job_pairs = [], [], []
    with open(file, 'r') as csvfile:
        csvreader = csv.reader(csvfile)
        next(csvreader)  # skip the header row
        for row in csvreader:
            # Columns: row[0]=job title, row[1]=last name, row[2]=first name.
            name = pre_processed_name(row[2], row[1])
            job = pre_processed_job_title(row[0])
            job_list.append(job)
            name_list.append(name)
            name_job_pairs.append([name, job])
    return name_list, job_list, name_job_pairs
class LanguageIndex():
    """Builds word<->index vocabulary maps over a list of phrases."""

    def __init__(self, lang):
        self.lang = lang
        self.word2idx = {}
        self.idx2word = {}
        self.vocab = set()

    def create_index(self):
        """Populate vocab (sorted), word2idx and idx2word in place."""
        for phrase in self.lang:
            self.vocab.update(phrase.split(' '))
        self.vocab = sorted(self.vocab)
        # Index 0 is reserved for padding.
        self.word2idx['<pad>'] = 0
        self.word2idx.update({word: i + 1 for i, word in enumerate(self.vocab)})
        self.idx2word = {index: word for word, index in self.word2idx.items()}
def max_length(tensor):
    """Length of the longest sequence in the batch."""
    return max(map(len, tensor))
def load_sequence_data():
    """Load the csv, index both 'languages' (names and job titles) and
    return post-padded id tensors plus the vocabularies and max lengths."""
    # creating cleaned input, output pairs
    name_list, job_list, pairs = load_dataset()

    # index language using the class defined above
    inp_lang = LanguageIndex([name for name, job in pairs])
    targ_lang = LanguageIndex([job for name, job in pairs])
    inp_lang.create_index()
    targ_lang.create_index()

    # Vectorize the input and target languages
    # name -> list of word ids
    input_tensor = [[inp_lang.word2idx[s] for s in name.split(' ')] for name, job in pairs]
    # job title -> list of word ids
    target_tensor = [[targ_lang.word2idx[s] for s in job.split(' ')] for name, job in pairs]

    # Calculate max_length of input and output tensor
    # Here, we'll set those to the longest sentence in the dataset
    max_length_inp, max_length_tar = max_length(input_tensor), max_length(target_tensor)

    # Padding the input and output tensor to the maximum length
    # (post-padding with id 0 == '<pad>').
    input_tensor = tf.keras.preprocessing.sequence.pad_sequences(input_tensor,
                                                                 maxlen=max_length_inp,
                                                                 padding='post')
    target_tensor = tf.keras.preprocessing.sequence.pad_sequences(target_tensor,
                                                                  maxlen=max_length_tar,
                                                                  padding='post')
    return input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_tar
def gru(units):
    """Build a GRU layer, using the fused CuDNN kernel when a GPU is
    available. Both variants return full sequences and the final state,
    so they are interchangeable for the encoder/decoder below."""
    if tf.test.is_gpu_available():
        return tf.keras.layers.CuDNNGRU(units,
                                        return_sequences=True,
                                        return_state=True,
                                        recurrent_initializer='glorot_uniform')
    else:
        # CPU fallback; sigmoid recurrent activation matches CuDNNGRU's
        # fixed configuration so weights stay compatible.
        return tf.keras.layers.GRU(units,
                                   return_sequences=True,
                                   return_state=True,
                                   recurrent_activation='sigmoid',
                                   recurrent_initializer='glorot_uniform')
class Encoder(tf.keras.Model):
    """Embedding + GRU encoder producing per-token outputs and the final
    hidden state."""

    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = gru(self.enc_units)

    def call(self, x, hidden):
        # x: token ids -> embeddings; hidden seeds the GRU state.
        x = self.embedding(x)
        output, state = self.gru(x, initial_state = hidden)
        return output, state

    def initialize_hidden_state(self):
        # Fresh all-zero state for a new batch.
        return tf.zeros((self.batch_sz, self.enc_units))
class Decoder(tf.keras.Model):
    """GRU decoder with additive (Bahdanau-style) attention over the
    encoder outputs."""

    def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
        super(Decoder, self).__init__()
        self.batch_sz = batch_sz
        self.dec_units = dec_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = gru(self.dec_units)
        # Final projection to vocabulary logits.
        self.fc = tf.keras.layers.Dense(vocab_size)
        # used for attention
        self.W1 = tf.keras.layers.Dense(self.dec_units)
        self.W2 = tf.keras.layers.Dense(self.dec_units)
        self.V = tf.keras.layers.Dense(1)

    def call(self, x, hidden, enc_output):
        # Broadcast the decoder state across the encoder time axis so it
        # can be combined position-wise with enc_output.
        hidden_with_time_axis = tf.expand_dims(hidden, 1)
        # Additive attention score: one scalar per encoder position.
        score = self.V(tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis)))
        attention_weights = tf.nn.softmax(score, axis=1)
        # Weighted sum of encoder outputs -> context vector.
        context_vector = attention_weights * enc_output
        context_vector = tf.reduce_sum(context_vector, axis=1)
        x = self.embedding(x)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        # passing the concatenated vector to the GRU
        output, state = self.gru(x)
        # Flatten the (single) time step before the dense projection.
        output = tf.reshape(output, (-1, output.shape[2]))
        x = self.fc(output)
        return x, state, attention_weights

    def initialize_hidden_state(self):
        return tf.zeros((self.batch_sz, self.dec_units))
def loss_function(real, pred):
    """Sparse softmax cross-entropy, masked so padding positions (id 0,
    i.e. '<pad>') contribute nothing to the loss."""
    # mask is 0 where the target is padding, 1 elsewhere.
    mask = 1 - np.equal(real, 0)
    loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask
    # NOTE(review): the mean is taken over all positions, padded ones
    # included — confirm that is the intended normalization.
    return tf.reduce_mean(loss_)
def evaluate(last_name, first_name, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
    """Greedy-decode a job title for one name.

    Returns (result string, preprocessed input sentence, attention plot
    matrix of shape (max_length_targ, max_length_inp)).
    """
    attention_plot = np.zeros((max_length_targ, max_length_inp))
    # NOTE(review): hard-coded hidden size — must match the `units` used
    # when the encoder/decoder were built (see main()).
    units = 1024
    sentence = pre_processed_name(first_name, last_name)
    # Map known words to ids; out-of-vocabulary words are silently dropped.
    inputs = []
    for i in sentence.split(' '):
        if i in inp_lang.word2idx:
            inputs.append(inp_lang.word2idx[i])
    inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post')
    inputs = tf.convert_to_tensor(inputs)
    result = ''
    hidden = [tf.zeros((1, units))]
    enc_out, enc_hidden = encoder(inputs, hidden)
    dec_hidden = enc_hidden
    # Seed decoding with the <start> token.
    dec_input = tf.expand_dims([targ_lang.word2idx['<start>']], 0)
    for t in range(max_length_targ):
        predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
        # storing the attention weights to plot later on
        attention_weights = tf.reshape(attention_weights, (-1, ))
        attention_plot[t] = attention_weights.numpy()
        # Greedy choice: most likely next word.
        predicted_id = tf.argmax(predictions[0]).numpy()
        result += targ_lang.idx2word[predicted_id] + ' '
        if targ_lang.idx2word[predicted_id] == '<end>':
            return result, sentence, attention_plot
        # the predicted ID is fed back into the model
        dec_input = tf.expand_dims([predicted_id], 0)
    return result, sentence, attention_plot
def plot_attention(attention, sentence, predicted_sentence):
    """Render the attention matrix with input words on the x axis and
    predicted words on the y axis."""
    fig = plt.figure(figsize=(10, 10))
    axes = fig.add_subplot(1, 1, 1)
    axes.matshow(attention, cmap='viridis')
    label_font = {'fontsize': 14}
    # Leading '' aligns labels with matshow's tick positions.
    axes.set_xticklabels([''] + sentence, fontdict=label_font, rotation=90)
    axes.set_yticklabels([''] + predicted_sentence, fontdict=label_font)
    plt.show()
def translate(last_name, first_name, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
    """Predict a job title for the given name and return the decoded
    string (attention plot and input sentence are discarded)."""
    result, _sentence, _attention = evaluate(
        last_name, first_name, encoder, decoder,
        inp_lang, targ_lang, max_length_inp, max_length_targ)
    return result
def main():
    """Train the attention seq2seq model with teacher forcing, checkpoint
    every other epoch, then print two sample predictions."""
    input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_targ = load_sequence_data()
    # Creating training and validation sets using an 80-20 split
    input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
    BUFFER_SIZE = len(input_tensor_train)
    BATCH_SIZE = 64
    N_BATCH = BUFFER_SIZE//BATCH_SIZE
    embedding_dim = 256
    # NOTE: must stay in sync with the hard-coded `units` in evaluate().
    units = 1024
    vocab_inp_size = len(inp_lang.word2idx)
    vocab_tar_size = len(targ_lang.word2idx)
    dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
    encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
    decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
    optimizer = tf.train.AdamOptimizer()
    checkpoint_dir = './training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                     encoder=encoder,
                                     decoder=decoder)
    EPOCHS = 10
    for epoch in range(EPOCHS):
        start = time.time()
        hidden = encoder.initialize_hidden_state()
        total_loss = 0
        for (batch, (inp, targ)) in enumerate(dataset):
            loss = 0
            with tf.GradientTape() as tape:
                enc_output, enc_hidden = encoder(inp, hidden)
                dec_hidden = enc_hidden
                dec_input = tf.expand_dims([targ_lang.word2idx['<start>']] * BATCH_SIZE, 1)
                # Teacher forcing - feeding the target as the next input
                for t in range(1, targ.shape[1]):
                    # passing enc_output to the decoder
                    predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
                    loss += loss_function(targ[:, t], predictions)
                    # using teacher forcing
                    dec_input = tf.expand_dims(targ[:, t], 1)
            # Per-timestep average loss for reporting.
            batch_loss = (loss / int(targ.shape[1]))
            total_loss += batch_loss
            variables = encoder.variables + decoder.variables
            gradients = tape.gradient(loss, variables)
            optimizer.apply_gradients(zip(gradients, variables))
            if batch % 100 == 0:
                print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
                                                             batch,
                                                             batch_loss.numpy()))
        # saving (checkpoint) the model every 2 epochs
        if (epoch + 1) % 2 == 0:
            checkpoint.save(file_prefix = checkpoint_prefix)
        print('Epoch {} Loss {:.4f}'.format(epoch + 1,
                                            total_loss / N_BATCH))
        print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
    print(tf.train.latest_checkpoint(checkpoint_dir))
    # Reload the best/latest checkpoint before sampling predictions.
    checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
    print(translate('chang', 'shih yu', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ))
    print(translate('bui', 'xuan loc', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ))
if __name__ == '__main__':
    # Train the model and print two sample predictions.
    main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
31e5d88aad90549955249b4cb57b003d157e5527
|
620ca56701bce0add202f3cbe7c62036e4b1e359
|
/Course_3/Week_1/validations2.py
|
77d004fd5a9ff1e3999c8772722fd59fa072d73d
|
[
"MIT"
] |
permissive
|
gpastor3/Google-ITAutomation-Python
|
1f52dbff0b8f0832ab3fea4ac9c468c667363e1a
|
6027750a33e8df883d762223bb0c4a5a95395bc0
|
refs/heads/main
| 2023-04-05T00:29:10.902116
| 2021-02-04T02:08:06
| 2021-02-04T02:08:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 928
|
py
|
#!/usr/bin/env python3
"""
This script is used for course notes.
Author: Erick Marin
Date: 01/06/2020
"""
def validate_user(username, minlen):
    """Decide whether a proposed username is acceptable.

    A valid username is a str of at least ``minlen`` alphanumeric
    characters whose first character is not a digit.

    Args:
        username: candidate name; must be exactly of type str.
        minlen: minimum allowed length, at least 1.

    Returns:
        True if the username passes all checks, False otherwise.

    Raises:
        TypeError: if username is not a string.
        ValueError: if minlen is smaller than 1.
    """
    # Bad argument *types* are programming errors, so they raise instead
    # of merely returning False.
    if type(username) != str:
        raise TypeError("username must be a string")
    if minlen < 1:
        raise ValueError("minlen must be at least 1")
    # All content checks simply disqualify the name.  Short-circuiting
    # guarantees username[0] is only read once the length check passed.
    return (
        len(username) >= minlen
        and username.isalnum()
        and not username[0].isnumeric()
    )
|
[
"emarin.iot@gmail.com"
] |
emarin.iot@gmail.com
|
22ff9336b110cd98c8003d9035ac0470e51ce429
|
1cd3305944de3d5b76ed91c9e0ac7e26b82f47ff
|
/2019/src/j4_s1.py
|
c8c5708d08518a425e322647be8957ec162d4269
|
[
"Apache-2.0"
] |
permissive
|
coachlivinglegend/CCC
|
4b0b6d4bbe031de88275a2834a12ae74fa7bc54e
|
6f98e81c7fef38bf70e68188db38863cc0cba2f4
|
refs/heads/master
| 2023-04-18T00:41:23.522774
| 2021-05-04T12:55:02
| 2021-05-04T12:55:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
import collections
import itertools
import functools
import math
import re
import bisect
import random
# Competitive-programming I/O helpers: r* read from stdin, w* format
# values for output.
def rint():
    return int(input())

def rstr():
    return input()

def rints():
    return list(map(int, input().split()))

def rstrs():
    return input().split()

def wmat(mat, sep):
    # Render a matrix: rows joined by newlines, cells by `sep`.
    return '\n'.join(sep.join(map(str, row)) for row in mat)

def warr(arr, sep):
    return sep.join(map(str, arr))

def wl(sep, *arr):
    return sep.join(map(str, arr))

def ctoi(x):
    # 'a'-based letter -> 0-based index.
    return ord(x) - ord('a')

def itoc(x):
    # 0-based index -> 'a'-based letter.
    return chr(x + ord('a'))

# grid[h][v] is the 2x2 board after h horizontal and v vertical flips
# (only parity matters).
grid = [
    [[[1, 2], [3, 4]], [[2, 1], [4, 3]]],
    [[[3, 4], [1, 2]], [[4, 3], [2, 1]]],
]
def main():
    """Read a flip sequence and print the resulting 2x2 grid.

    Each 'H' in the input toggles the horizontal flip state; any other
    character toggles the vertical one.  Only the parity of each count
    matters, so counting replaces the per-character toggle loop.
    """
    moves = rstr()
    h_parity = moves.count('H') % 2
    v_parity = (len(moves) - moves.count('H')) % 2
    print(wmat(grid[h_parity][v_parity], ' '))


if __name__ == '__main__':
    main()
|
[
"kylexie186@gmail.com"
] |
kylexie186@gmail.com
|
c4cc3eae8ce8dc40427cfc6263c0d8d9207e33ce
|
e2590e0a78046a22131b69c76ebde21bf042cdd1
|
/ABC201_300/ABC275/A.py
|
6bc5a95d16891d1502a3adf5fbd2ff8aa0b3a6a3
|
[] |
no_license
|
masato-sso/AtCoderProblems
|
b8e23941d11881860dcf2942a5002a2b19b1f0c8
|
fbc02e6b7f8c6583e5a4e5187463e0001fc5f4d8
|
refs/heads/main
| 2023-01-22T23:57:58.509585
| 2023-01-21T14:07:47
| 2023-01-21T14:07:47
| 170,867,816
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
# ABC275 A: print the 1-based position of the first highest mountain.
N = int(input())  # count is consumed but the list length suffices below
H = list(map(int, input().split()))
# list.index returns the first occurrence, matching the original
# break-on-first-match loop.
print(H.index(max(H)) + 1)
|
[
"masato@seijinnoMacBook-Pro-2.local"
] |
masato@seijinnoMacBook-Pro-2.local
|
b01d1738f1683a3b3cff5b6198a8926953464429
|
084916df5eff5e2a3f19ac5d86c401b467cb3969
|
/assignment1/bfs-wikipedia.py
|
484c57e34cfa312faa7ec9728cc7539fd575ab0e
|
[] |
no_license
|
cvalenzuela/NOC_Intelligence-Learning
|
2850b87c2ead8be97be193375129eee2ea16c0e8
|
9074fae4f1e20d7f93ef6e1ef9f75b92374d2f51
|
refs/heads/master
| 2021-01-23T04:20:26.702674
| 2017-04-30T03:50:19
| 2017-04-30T03:50:19
| 86,189,486
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,938
|
py
|
# coding: utf-8
# An implementation of bfs based on Grokking Algorithms chapter 6 and NOC-S17-2-Intelligence-Learning bfs examples.
# Using a Breadth-First Algorithm (BFS), this script will search for the shortest way to get from one article in Wikipedia to a specific word or term used in another article.
# Cristóbal Valenzuela
# -------------
from collections import deque
import wikipedia
import time
import sys
wikipedia.set_lang("en")
print '''
-----
This script will search for the most efficient
way to get from one article in Wikipedia to a
another article that has a specific term in it.
-----
'''
begin = raw_input("Ready? (y/n): ")
if begin != 'y':
print 'Well....Hasta la vista'
sys.exit()
else:
pass
start_article = raw_input("Enter the name of a Wikipedia article from where to start (ie: 'Paul Cezanne'): ")
search_term = raw_input("Enter a term or word of search for (ie: 'Gertrude Stein'): ")
print('''
Awesome!, I will search for the most efficient way to start from the %s Wikipedia article
and find the path to another article that talks about %s.
''' % (start_article, search_term))
raw_input("This may take me a while, so chill...Ok?")
#start_article = "New York University" # starting point
#search_term = 'Chile' # end point
# define the graph and starting point
graph = {}
# create a Node class
class Node:
    # One vertex of the BFS graph: a Wikipedia article plus the title of
    # the article it was reached from (parent), used to rebuild the path.
    def __init__(self, name, parent):
        self.name = name
        self.parent = parent
        # Placeholders until get_article() succeeds; note these are the
        # *string* 'None', not the None singleton.
        self.article = 'None'
        self.content = 'None'
        self.title = 'None'
        self.links = 'None'
    def get_article(self):
        # Fetch the page from Wikipedia and cache its content, title and
        # outgoing links.  Failures are reported but swallowed, so the
        # attributes keep their placeholder values on error.
        try:
            self.article = wikipedia.page(self.name)
            self.content = self.article.content
            self.title = self.article.title
            self.links = self.article.links
        except:
            print 'Sorry! Something happend!'
            print 'Try again with another article!'
            #sys.exit()
# initialize the root node
root_node = Node(start_article, None)
root_node.get_article()
graph[root_node.title] = []
# Every Node ever created, kept so the parent chain can be walked when
# the target is found.
all_nodes = []
for article in root_node.links:
    graph[root_node.title].append(Node(article, root_node.title)) # add all of the first article links to the graph
#print root_node.links
def spinning_cursor():
    """Endlessly yield the four frames of a text spinner: | / - \\ ."""
    frames = '|/-\\'
    index = 0
    while True:
        yield frames[index]
        index = (index + 1) % len(frames)
# this is main loop
def search(name):
    # Breadth-first search outward from root_node until some reachable
    # article's text contains search_term, then print the parent chain
    # back to the start article.
    #
    # Uses module-level state: graph, all_nodes, root_node, search_term,
    # start_article.  Returns True on success, False when the queue is
    # exhausted.  NOTE(review): the `name` parameter is never used.
    print('''
    Searching how to get from %s to %s.
    ''' % (root_node.title, search_term))
    spinner = spinning_cursor()
    search_queue = deque() # create a new queue (double-ended queue)
    search_queue += graph[root_node.title] # add all of the root node urls to the search queue
    searched = [] # this array keeps track of which article we have already search for, so we dont search twice.
    while search_queue: # while the queue isn't empty
        # fancy spinner to wait
        sys.stdout.write(spinner.next())
        sys.stdout.flush()
        time.sleep(0.1)
        sys.stdout.write('\b')
        article = search_queue.popleft() # grabs the first article off the queue
        if not article.title in searched: # only search this article if we haven't already search for
            try:
                article.get_article() # get the content from wikipedia
                #print article.title
                if search_term in article.content: # check if article contains the search_term we are looking for
                    all_nodes.append(article)
                    print 'I found it!'
                    print 'Here is the first shortest path I discovered:'
                    # Walk parent links from the hit back to the root to
                    # reconstruct the path.
                    current = article.title
                    path = []
                    while current != root_node.title:
                        for article in all_nodes:
                            if article.title == current:
                                path.append(article.title)
                                current = article.parent
                    path.append(start_article)
                    print('''
                    %s ---> the %s wikipedia article talks about %s!
                    ''' % (' ---> '.join(path[::-1]), path[0], search_term))
                    #print ' ---> '.join(path[::-1]) + ' ---> ' + path[0] + ' has ' + search_term + ' in it!'
                    print 'cool!'
                    return True
                else:
                    # Not a hit: expand this article's links into the graph
                    # and enqueue them for later.
                    graph[article.title] = []
                    all_nodes.append(article)
                    for name in article.links:
                        graph[article.title].append(Node(name, article.title)) # add all of the first article links to the graph
                    search_queue += graph[article.title]
                searched.append(article.title) # mark this article as searched.
            except:
                pass
    print 'wops!'
    return False # if we reach here, the term was not find in the max of x iterations
# start the search
search(start_article)
|
[
"cvalenzuela@nyu.edu"
] |
cvalenzuela@nyu.edu
|
16468fa7074d1375000d5ddc4377969b545f6089
|
2e9589362c3f53841c101de62e714a5bac3d8096
|
/dataset_generator/word_embeddings/document_featurizer.py
|
b2abcd83bcf17e7f6052f59c79b1425136fb9bf4
|
[] |
no_license
|
nikhilsu/CitationRecommender
|
8b61bd44c3884de010d698dd013938d6cc13a6dc
|
44666e57664980ab6476182aa9a572b7ab68fa07
|
refs/heads/master
| 2023-04-11T01:07:05.379559
| 2021-05-21T14:31:53
| 2021-05-31T06:15:45
| 183,876,181
| 0
| 0
| null | 2023-03-25T01:08:58
| 2019-04-28T07:59:19
|
Python
|
UTF-8
|
Python
| false
| false
| 4,672
|
py
|
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from sklearn.feature_extraction.text import CountVectorizer
from tqdm import tqdm
class DocumentFeaturizer(object):
    """Turns raw documents into fixed-size index/feature arrays.

    Fits a bounded-vocabulary CountVectorizer on the training split, then
    maps document titles/abstracts to padded sequences of 1-based word
    indices and derives pairwise query/candidate features for ranking.
    """
    # Words excluded from the vocabulary when fitting the vectorizer.
    STOPWORDS = {
        'abstract', 'about', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'for',
        'from', 'how', 'in', 'is', 'it', 'of', 'on', 'or', 'that', 'the',
        'this', 'to', 'was', 'what', 'when', 'where', 'who', 'will', 'with',
        'the', 'we', 'our', 'which'
    }
    def __init__(self, raw_dataset, opts):
        # opts supplies max_abstract_len / max_title_len / max_features /
        # train_split and receives the computed n_features back.
        self.raw_dataset = raw_dataset
        self.max_abstract_len = opts.max_abstract_len
        self.max_title_len = opts.max_title_len
        title_abstract_of_training_data = self.raw_dataset.fetch_collated_training_text(opts.train_split)
        # Drop terms appearing in more than 90% of documents.
        max_df_frac = 0.90
        self.count_vectorizer = CountVectorizer(
            max_df=max_df_frac,
            max_features=opts.max_features,
            stop_words=self.STOPWORDS
        )
        self.count_vectorizer.fit(tqdm(title_abstract_of_training_data, desc='Building Count-Vectorizer'))
        # Indices start at 1 so that 0 is free to act as the padding value.
        self.word_to_index = dict((word, index + 1) for index, word in enumerate(self.count_vectorizer.vocabulary_))
        self.n_features = 1 + len(self.word_to_index)
        opts.n_features = self.n_features
    def __index_of_word(self, word):
        # Vocabulary index of `word`, or None for out-of-vocabulary words.
        return self.word_to_index[word] if word in self.word_to_index else None
    def __word_to_index_features(self, document):
        # Map each word sequence in `document` to its list of known
        # vocabulary indices, silently dropping unknown words.
        x_indexes = []
        for words in document:
            indexes = []
            for word in words:
                index = self.__index_of_word(word)
                if index:
                    indexes.append(index)
            x_indexes.append(indexes)
        return x_indexes
    def __extract_textual_features(self, text, max_len):
        # Pad/truncate the index sequence for one text to length max_len.
        # NOTE(review): assumes `text` is an iterable of word tokens (it is
        # wrapped in a list before conversion) -- confirm against callers.
        return np.asarray(pad_sequences(self.__word_to_index_features([text]), max_len)[0], dtype=np.int32)
    @staticmethod
    def __extract_citation_features(documents):
        # Log-scaled incoming-citation counts, clipped so counts of 0/1
        # both map to log(1) = 0.
        return np.log([max(doc['in_citation_count'] - 1, 0) + 1 for doc in documents])
    @staticmethod
    def __extract_common_types_features(d_qs, candidates):
        # For each (query, candidate) row, left-pack the word indices that
        # appear in both sequences; remaining positions stay 0.
        common_types = [np.intersect1d(d_q, candidate) for (d_q, candidate) in zip(d_qs, candidates)]
        common_types_features = np.zeros_like(d_qs)
        for i, intersection in enumerate(common_types):
            common_types_features[i, :len(intersection)] = intersection
        return common_types_features
    @staticmethod
    def __extract_sim_scores(d_qs, candidates, candidate_selector):
        # Per-pair cosine similarity as computed by the candidate selector.
        return np.asarray(
            [candidate_selector.cosine_similarity(d_q, candidate) for (d_q, candidate) in zip(d_qs, candidates)])
    def featurize_documents(self, documents):
        # Batch-convert documents into padded title/abstract index arrays.
        features = {
            'title':
                np.asarray([self.__extract_textual_features(doc['title'], self.max_title_len) for doc in documents]),
            'abstract':
                np.asarray(
                    [self.__extract_textual_features(doc['abstract'], self.max_abstract_len) for doc in documents])
        }
        return features
    def extract_features(self, d_qs, candidates, candidate_selector=None):
        # Build the model input dict for (query, candidate) pairs.  When a
        # candidate_selector is supplied, additional NNRank-only features
        # (overlap, citation counts, similarity scores) are included.
        for_nn_rank = candidate_selector is not None
        d_q_features = self.featurize_documents(d_qs)
        candidate_features = self.featurize_documents(candidates)
        features = {
            'query-title-text':
                d_q_features['title'],
            'query-abstract-text':
                d_q_features['abstract'],
            'candidate-title-text':
                candidate_features['title'],
            'candidate-abstract-text':
                candidate_features['abstract']
        }
        if for_nn_rank:
            citation_features = DocumentFeaturizer.__extract_citation_features(candidates)
            common_title = DocumentFeaturizer.__extract_common_types_features(d_q_features['title'],
                                                                              candidate_features['title'])
            common_abstract = DocumentFeaturizer.__extract_common_types_features(d_q_features['abstract'],
                                                                                 candidate_features['abstract'])
            similarity_score_features = DocumentFeaturizer.__extract_sim_scores(d_qs, candidates, candidate_selector)
            features['query-candidate-common-title'] = common_title
            features['query-candidate-common-abstract'] = common_abstract
            features['candidate-citation-count'] = citation_features
            features['similarity-score'] = similarity_score_features
        return features
|
[
"nikhilsulegaon@gmail.com"
] |
nikhilsulegaon@gmail.com
|
87542af4bb98ec1f4f2dd18363ced2a123b396b2
|
f0eb4d12fdac429d5620c0823af0b0be54d9ae3a
|
/KerasTracker/QualitativeResultsFigure.py
|
2c9149b99f691939d72647c667e16923e340162a
|
[
"Apache-2.0"
] |
permissive
|
felixVil/LDASegment
|
817cf7a5b8d101c7879b293d464c0428e37a776b
|
25f59c9f43c76e64c0a1e4131fa3c12bab60b716
|
refs/heads/master
| 2023-03-28T03:23:20.419209
| 2021-03-30T23:10:35
| 2021-03-30T23:10:35
| 322,999,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,118
|
py
|
from UtilFunctions import *
import os
import numpy as np
def find_result_per_sequence_tracker_ind(sequence, tracker, ind):
    """Read the polygon a tracker reported for one frame of a sequence.

    Looks up line `ind` of the VOT 'baseline' result file
    `<results_path>/<tracker>/baseline/<sequence>/<sequence>_001.txt`
    and returns it as a numpy array of floats (comma-separated values).
    Relies on the module-level `results_path`.
    """
    result_sequence_path = os.path.join(results_path, tracker, 'baseline')
    result_filename = '%s_001.txt' % sequence
    result_filepath = os.path.join(result_sequence_path, sequence, result_filename)
    # 'with' guarantees the handle is closed even on error (the original
    # opened the file and only closed it on the success path).
    with open(result_filepath, 'r') as file_id:
        lines = file_id.readlines()
    polygon_line = lines[ind]
    # Bug fix: str.replace returns a new string; the original discarded
    # the result, so the trailing newline was never actually stripped.
    polygon_line = polygon_line.replace('\n', '')
    polygon_arr = np.array([float(element) for element in polygon_line.split(',')])
    return polygon_arr
# --- Script body: draw every tracker's polygon on selected frames and ---
# --- save a tightly cropped overlay image per (sequence, frame).      ---
sequence_path = 'D:/Another_D/E_backup/my homework/BGU Computer Vision thesis/vot-toolkit-master-2019/vot-workspace/sequences'
results_path = 'D:/Another_D/E_backup/my homework/BGU Computer Vision thesis/results_on_tracker_qualitatively_evaluated'
overlay_images_path = 'overlay_images'
if not os.path.exists(overlay_images_path):
    os.makedirs(overlay_images_path)
# Per-sequence frames of interest ('inds', 0-based) and polygon line width.
sequences_dict = {'zebrafish1': {'inds' : [14, 31, 57], 'width': 2},
                  'fish1': {'inds': [143, 278, 316], 'width': 2},
                  'gymnastics2': {'inds': [178, 194, 206], 'width': 9},
                  'book': {'inds': [43, 82, 104], 'width': 2},
                  'conduction1':{'inds': [42, 187], 'width': 2},
                  'dinosaur': {'inds': [220, 277], 'width' : 9}}
# RGBA drawing color per tracker.
color_dict = {'SiamMask':(255, 255, 255, 128), 'UPDT': (255, 0, 255, 128), 'ATOM':(255, 0, 0, 128), 'LADCF': (0, 0, 255, 128), 'LDATrackerDenseNetDilate':(0, 255, 0, 128)}
for sequence in sequences_dict.keys():
    line_width = sequences_dict[sequence]['width']
    poi_inds = sequences_dict[sequence]['inds']
    frames_folder = os.path.join(sequence_path, sequence, 'color')
    for ind in poi_inds:
        poly_arrays = []
        # VOT frame files are 1-based, hence ind + 1.
        frames_file = os.path.join(frames_folder, '%08d.jpg' % (ind + 1))
        overlay_image_file = os.path.join(overlay_images_path, '%s_%08d.jpg' % (sequence, ind + 1))
        for tracker in color_dict.keys():
            poly_array = find_result_per_sequence_tracker_ind(sequence, tracker, ind)
            if len(poly_array) < 4:
                continue # tracker is during failure.
            elif len(poly_array) == 4:
                #polygon is a standard axis aligned rectangle.
                poly_array = convert_rect_to_real_poly(poly_array)
            poly_arrays.append(poly_array)
            draw_beatiful_polygon(poly_array, frames_file, overlay_image_file, color_dict[tracker], line_width)
            # Subsequent trackers draw on top of the overlay produced so far.
            frames_file = overlay_image_file
        # Crop the finished overlay tightly around all drawn polygons.
        img_overlay = read_image(overlay_image_file)
        crop_rect = create_tight_rect_around_locations(poly_arrays, img_overlay.shape)
        img_overlay_cropped = img_overlay[crop_rect[0]:crop_rect[1], crop_rect[2]:crop_rect[3]]
        img_overlay_cropped_pil = Image.fromarray(img_overlay_cropped, 'RGB')
        overlay_cropped_filename = 'cropped_%s_%08d.png' % (sequence, ind + 1)
        overlay_cropped_filepath = os.path.join(overlay_images_path, overlay_cropped_filename)
        img_overlay_cropped_pil.save(overlay_cropped_filepath, "PNG")
|
[
"felixvil@post.bgu.ac.il"
] |
felixvil@post.bgu.ac.il
|
4cf799ae31dfe4802a0d9299a2f9c9087c10afe6
|
0add969034a82912bc6e19abc427abe883ee65bb
|
/theta_en_time_polar.py
|
a9683111bde6bafb250a54492723f599975e5624
|
[] |
no_license
|
Michael-Gong/New_LPI_python_script
|
eefd162fdbbc3c614c66e2b157ea5296e3bc8492
|
9de109c6f19aa60bdeaf102e9a1ec0baff5669ad
|
refs/heads/master
| 2020-03-28T16:06:09.631550
| 2020-02-01T08:21:17
| 2020-02-01T08:21:17
| 148,659,608
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,511
|
py
|
#%matplotlib inline
#import sdf
import matplotlib
import matplotlib as mpl
#mpl.style.use('https://raw.githubusercontent.com/Michael-Gong/DLA_project/master/style')
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
from optparse import OptionParser
import os
from mpl_toolkits.mplot3d import Axes3D
import random
from mpl_toolkits import mplot3d
from matplotlib import rc
import matplotlib.transforms as mtransforms
import sys
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
# Font settings: `font` for axis labels (28 pt), `font2` for small text.
font = {'family' : 'monospace',
        'color' : 'black',
        'weight' : 'normal',
        'size' : 28,
        }
font2 = {'family' : 'monospace',
        'color' : 'black',
        'weight' : 'normal',
        'size' : 15,
        }
font_size = 28
font_size_2 = 15
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
# Build 'white-bottom' variants of jet/viridis/rainbow: prepend a quarter
# of the range that fades from white into each map's first color.
upper = matplotlib.cm.jet(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
    lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_jet = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
upper = matplotlib.cm.viridis(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
    lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_viridis = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
upper = matplotlib.cm.rainbow(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
    lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_rainbow = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
def pxpy_to_energy(gamma, weight):
    """Histogram particle weights into 200 uniform energy bins.

    Bins span [0, 20000) in steps of 100.  Returns a tuple
    (bin_centers, summed_weights); values at or above 20000 are dropped.
    """
    n_bins = 200
    centers = np.linspace(50, 19950, n_bins)
    edges = np.linspace(0, 20000.0, n_bins + 1)
    totals = np.zeros_like(centers)
    for k in range(n_bins):
        in_bin = (edges[k] <= gamma) & (gamma < edges[k + 1])
        totals[k] = sum(weight[in_bin])
    return (centers, totals)
def theta_to_grid(theta, weight):
    """Histogram weights into 240 one-degree bins covering [-120, 120).

    Returns a tuple (bin_centers, summed_weights); angles outside the
    range are dropped.
    """
    n_bins = 240
    centers = np.linspace(-119.5, 119.5, n_bins)
    edges = np.linspace(-120, 120, n_bins + 1)
    totals = np.zeros_like(centers)
    for k in range(n_bins):
        totals[k] = sum(weight[(edges[k] <= theta) & (theta < edges[k + 1])])
    return (centers, totals)
if __name__ == "__main__":
    # For each run directory, load particle momenta, histogram energy vs.
    # angle per timestep, and save one polar heat-map PNG per timestep.
    part_number = 50000
    from_path = './p50000_no_T150/'
    # Timesteps per particle, inferred from total line count of one file.
    nsteps = int(sum(1 for line in open(from_path+'t_tot_s.txt'))/part_number)
    ntheta = 270
    ngg = 120
    from_path_list = ['./p50000_no_T150/','./p50000_rr_T150/','./p50000_qe_T150/']
    #from_path_list = ['./Data_qe_T500_p50000_try/']
    for i in range(np.size(from_path_list)):
        from_path = from_path_list[i] #'./Data_qe_T050_p50000/'
        to_path = from_path
        # Time in laser periods; px/py reshaped to (particle, timestep).
        t0 = np.loadtxt(from_path+'t_tot_s.txt')/2/np.pi
        px0 = np.loadtxt(from_path+'px_tot_s.txt')
        py0 = np.loadtxt(from_path+'py_tot_s.txt')
        t0 = np.reshape(t0,(part_number,nsteps))
        px0 = np.reshape(px0,(part_number,nsteps))
        py0 = np.reshape(py0,(part_number,nsteps))
        # Energy in GeV (gamma * 0.51e-3); 0.51e-3 is m_e c^2 in GeV.
        gg0 = (px0**2+py0**2+1)**0.5*0.51e-3
        ww0 = np.zeros_like(gg0)+1
        # Overwrites the previous line: weights are energies, not counts.
        ww0 = np.zeros_like(gg0)+gg0
        theta0 = np.arctan2(py0,px0)
        theta_edges = np.linspace(-np.pi,np.pi, ntheta +1)
        gg_edges = np.linspace(0.1, 6, ngg +1)
        theta_edges_1 = np.linspace(-np.pi,np.pi,ntheta)
        gg_edges_1 = np.linspace(0.1, 6, ngg)
        # One polar figure per timestep.
        for n in range(np.size(t0[0,:])):
            H, _, _ = np.histogram2d(gg0[:,n], theta0[:,n], [gg_edges, theta_edges], weights=gg0[:,n])
            print('Max H:',np.max(H))
            Theta, R = np.meshgrid(theta_edges_1,gg_edges_1)
            H_temp = np.sum(H[:,:]*R,0)
            print('averaged |theta|=',np.sum(H_temp*abs(theta_edges_1))/np.sum(H_temp)/np.pi*180)
            fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection='polar'))
            ax.set_facecolor('whitesmoke')
            levels = np.logspace(1,5, 101)
            # Mask near-empty cells so they render as background.
            H[H<0.01] = np.nan
            img=ax.pcolormesh(Theta, R, H, norm=colors.LogNorm(vmin=0.01, vmax=1e3), cmap='viridis')
            # cax = fig.add_axes([0.68,0.97,0.25,0.02])
            # cbar=fig.colorbar(img,cax=cax, ticks=[1e3,1e5],orientation='horizontal')
            # cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(), fontsize=font_size_2)
            # cbar.set_label(r'dI/d$\theta$dE [A.U.]',fontdict=font2)
            # ax.tick_params(axis="y", pad=25)
            ax.tick_params(axis="x", pad=10)
            # ax.set_xticks([])
            # Only the last run of each group of three keeps angle labels.
            if (i%3 != 2):
                ax.set_xticklabels([])
            #ax.set_xlim(10,50)
            #ax.set_ylim(0.,1.)
            ax.set_xlabel(r'$\theta\ [^o]$',fontdict=font)
            # ax.set_rlim(1e-1,1e3)
            # ax.set_rmax(1e3)
            l_r = np.array([0,1,2,3])
            ax.set_rticks(l_r+1)
            ax.set_yticklabels([])
            # ax.set_yticklabels(['$10^%d$' % x for x in (l_r+1)])
            ax.set_rlim(0, 6)
            ax.set_rlabel_position(90)
            # ax.set_rscale('log')
            # ax.set_rscale('log')
            # ax.set_thetamin(-90)
            # ax.set_thetamax(90)
            # ax.set_yticklabels([0.1,1,10,100,1000])
            ax.set_xticklabels([0,90,180,270])
            #ax.set_theta_zero_location('N')
            # ax.set_ylabel(r'$\theta\ [^o]$',fontdict=font)
            ax.tick_params(axis='x',labelsize=font_size)
            ax.tick_params(axis='y',labelsize=font_size_2)
            #ax.set_title('proton_angular_time='+str(time1), va='bottom', y=1., fontsize=20)
            # plt.text(-100,650,' t = '++' fs',fontdict=font)
            ax.grid(True,linestyle='--',linewidth=1.5,color='grey')
            #plt.pcolormesh(x, y, ex.T, norm=mpl.colors.Normalize(vmin=0,vmax=100,clip=True), cmap=cm.cubehelix_r)
            # plt.axis([x.min(), x.max(), y.min(), y.max()])
            #### manifesting colorbar, changing label and axis properties ####
            # cbar=plt.colorbar(pad=0.01)#ticks=[np.min(ex), -eee/2, 0, eee/2, np.min()])
            # cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=font_size)
            # cbar.set_label('dN/dE [A.U.]',fontdict=font)
            # a0=200.0
            # alpha=np.linspace(-3.5,0.5,501)
            # plt.xlabel(r'$\theta$'+' [degree]',fontdict=font)
            # plt.ylabel('time [fs]',fontdict=font)
            # plt.xticks([-135,-90,-45,0,45,90,135],fontsize=font_size);
            #plt.yticks([0,500,1000,1500],fontsize=font_size);
            # plt.title(r'$dN/d\theta$'+' for no RR', fontsize=font_size)
            # plt.xlim(-120,120)
            # plt.ylim(0,1650)
            #plt.title('electron at y='+str(round(y[n,0]/2/np.pi,4)),fontdict=font)
            plt.subplots_adjust(top=0.90, bottom=0.11, left=0.1, right=0.93, hspace=0.10, wspace=0.05)
            fig = plt.gcf()
            fig.set_size_inches(6., 6.)
            #fig.set_size_inches(5, 4.5)
            fig.savefig(to_path+'theta_en_dist_'+to_path[7:-1]+'_'+str(n).zfill(4)+'.png',format='png',dpi=160)
            plt.close("all")
|
[
"noreply@github.com"
] |
noreply@github.com
|
e3cc6b9117ff7d7c9fee0eba2bd19618379ed048
|
1ab7fff33be75efb4b725cd6c3ba5566c29bed93
|
/tutorial/tutorial/urls.py
|
1245d82edb7947719fca3d3ca4448a39e0087e7a
|
[] |
no_license
|
Anjali-Del/Anj
|
5b0ea6b5bc2b9c17653014d830e2526ac215ce1b
|
c361bc29b3da6700c51967590cb5f3abeb66881c
|
refs/heads/master
| 2021-01-15T17:41:27.574646
| 2015-07-30T05:11:00
| 2015-07-30T05:11:00
| 38,672,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 904
|
py
|
"""tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
# `patterns()` is deprecated since Django 1.8 (removed in 1.10); a plain
# list of url() instances is the supported form and behaves identically
# here, since the old prefix argument was the empty string anyway.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^', include('snippets.urls')),
]
|
[
"anjali@delhivery.com"
] |
anjali@delhivery.com
|
b17d3ad44bb4ae3b8a6f3fb5f6c5bbe92883ca46
|
9292bd4bd9589e08fa8277069b20abc0e6f9fd7d
|
/Clustering/app.py
|
585d9a1b10894213868e017694d0e98d33e6322b
|
[] |
no_license
|
Leonidesguerra/final_project
|
53f416b872677a98ff823c6bddf1fb86ac8f3fc6
|
68c9f6b5e13aad89c6528b6b15c16261f33098d1
|
refs/heads/main
| 2023-06-15T21:29:28.365502
| 2021-06-29T03:03:28
| 2021-06-29T03:03:28
| 377,657,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,139
|
py
|
import numpy as np
from numpy.core.fromnumeric import reshape
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session, session
from sqlalchemy import create_engine, func
# from config import DATABASE_URI
from flask import Flask, jsonify, render_template, redirect
#################################################
# Database Setup
#################################################
app = Flask(__name__)
# engine = sqlalchemy.create_engine(DATABASE_URI)
# NOTE(review): credentials are hard-coded here; consider restoring the
# commented-out config-based DATABASE_URI above.
rds_connection_string = "postgres:imadlefl@localhost:5432/Agriculture_JB"
engine = create_engine(f'postgresql://{rds_connection_string}')
@app.route("/")
def home():
    # Landing page.
    return render_template("index.html")
@app.route("/visualizations")
def visualizations():
    # Static visualizations page.
    return render_template("Agricultura_HTML.html")
@app.route("/mexican_states")
def perimeter():
    # Mexican states map page.
    return render_template("mexican_states.html")
@app.route("/toppais")
def toppais():
    """JSON: production value summed per crop (first 20 grouped rows)."""
    rows = engine.execute(
        "SELECT cultivo, SUM(valorproduccion) FROM agr2017 GROUP BY cultivo LIMIT 20")
    payload = [{'cultivo': crop, 'sum': total} for crop, total in rows]
    return jsonify(payload)
@app.route("/estadocrop")
def estadocrop():
    """JSON: production value summed per (estado, cultivo) pair."""
    rows = engine.execute(
        "SELECT estado, cultivo, SUM(valorproduccion) FROM agr2017 GROUP BY estado, cultivo;")
    payload = [
        {'estado': row[0], 'cultivo': row[1], 'sum': row[2]}
        for row in rows
    ]
    return jsonify(payload)
@app.route("/mapa")
def mapa():
    """JSON: per-municipality crop totals with averaged coordinates."""
    rows = engine.execute(
        "SELECT estado, municipio, cultivo, SUM(valorproduccion), AVG(latitud), AVG(longitud), MAX(altitud) FROM agr2017 GROUP BY estado, municipio , cultivo;")
    # Column order matches the SELECT list above.
    keys = ('estado', 'municipio', 'cultivo', 'sum', 'lat', 'lng', 'alt')
    payload = [dict(zip(keys, row)) for row in rows]
    return jsonify(payload)
@app.route("/clustering_map")
def clus_map():
    """JSON: clustering results (location, crop, cluster id, yield)."""
    rows = engine.execute(
        "SELECT latitud, longitud, cultivo, estado, clusters ,rendimiento FROM clustering;")
    # Column order matches the SELECT list above.
    keys = ('latitud', 'longitud', 'cultivo', 'estado', 'clusters', 'rendimiento')
    payload = [dict(zip(keys, row)) for row in rows]
    return jsonify(payload)


if __name__ == '__main__':
    app.run(debug=True)
|
[
"leonidesguerra@gmail.com"
] |
leonidesguerra@gmail.com
|
361e4d07975ca9bfa13fde5395e05cfab57a2474
|
847b39a71c85aeea7e3812f15f9bd5811edbec4d
|
/main2.py
|
b9ce4d61cef2890774a5093914c407f43e1f4fa5
|
[] |
no_license
|
arsalansaad/webcrawler
|
559eee2c95e0e4e3699e7788958e57f57c6378ed
|
2c70b769ce572d010bb1314303ad786de2304bac
|
refs/heads/master
| 2021-01-19T15:02:52.725367
| 2017-08-21T10:00:27
| 2017-08-21T10:00:27
| 100,939,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
import requests
from bs4 import BeautifulSoup
# Fetch the Hindustan Times editorials listing and print each headline
# followed by the href of every link inside it.
url = "http://www.hindustantimes.com/editorials/"
sourcecode = requests.get(url).text
soup = BeautifulSoup(sourcecode, "html.parser")
for link in soup.findAll("div",{ "class": "media-heading headingfour"}):
    print(link.text)
    for item in link.findAll('a'):
        print(item.get('href'))
# for link in soup.findAll("div",class_="media-heading headingfour"):
#     print(link.get('href'))
|
[
"arsalansaad.iitkgp@gmail.com"
] |
arsalansaad.iitkgp@gmail.com
|
eb160d82373fb3bb62f2083ae5cbdbcf702d1379
|
738ae0290d91596086810298eb3ced56967d45d2
|
/python-cmd/scrabble.py
|
6f1cbea6c0101940856302143274ae5de62a01cf
|
[] |
no_license
|
kkredit/hs-projects
|
6e7a8732331a23eacd154b4c0c611adc8795a0a6
|
97edcedf8116db57791f6b8c4666329f694d13b5
|
refs/heads/master
| 2021-01-09T09:36:43.379173
| 2016-06-01T21:58:15
| 2016-06-01T21:58:15
| 60,213,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,442
|
py
|
# scrabble.py
# A program to propose possible words in scrabble situations
# (mostly) Kevin Kredit
from string import *#split,lower
WORDLIST_FILENAME = "words.txt"
####################################NOT MINE##################################
def load_words():
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.
    """
    print ("Loading word list from file...")
    # 'with' guarantees the handle is closed; the original opened the file
    # and never closed it.  The word list is expected on the first line
    # (readline semantics preserved).
    with open(WORDLIST_FILENAME, 'r') as inFile:
        line = inFile.readline()
    wordlist = line.split()
    print (" ", len(wordlist), "words loaded.")
    return wordlist
# Word list loaded once at import time and searched by search() below.
dictionary = load_words()
###############################MINE############################################
# Lowercase alphabet used for ordinal letter comparisons in search().
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def search(word, start=0, finish=len(dictionary), x=1):
    """Recursively binary-search the module-level `dictionary` for `word`.

    Assumes the list is ordered first by word length, then alphabetically
    within a length (NOTE(review): verify words.txt matches this order).
    Returns True if the word is found, False otherwise.  `x` only tracks
    the recursion depth.
    """
    # Small windows are scanned linearly.
    if finish - start < 5:
        return word in dictionary[start:finish]
    # Bug fix: use integer floor division throughout.  The original used
    # plain '/' when narrowing by length, producing float bounds that
    # break list indexing/slicing on Python 3.
    mid = (start + finish) // 2
    tword = dictionary[mid]
    if len(tword) < len(word):
        start = mid
    elif len(tword) > len(word):
        finish = mid
    else:  # same length: compare letter by letter
        unfound, n = True, 0
        while unfound:
            if alphabet.index(tword[n]) < alphabet.index(word[n]):
                start, unfound = mid, False
            elif alphabet.index(tword[n]) > alphabet.index(word[n]):
                finish, unfound = mid, False
            elif n + 1 == len(word):
                return True  # every letter matched
            n += 1
    return search(word, start, finish, x + 1)
#######################################NOT MINE#########################
def anagram(word):
    """Return every permutation of `word` as a list of strings.

    Built recursively: each permutation of the tail has the head letter
    inserted at every possible position (duplicates are kept).
    """
    if not word:
        return ['']
    return [tail[:pos] + word[0] + tail[pos:]
            for tail in anagram(word[1:])
            for pos in range(len(tail) + 1)]
#######################################MINE################################
def combinations(word, wnum, numletters):
    """Return all subwords of `word` obtained by deleting letters.

    `wnum` is the current length (pass len(word) initially) and
    `numletters` the target length; recursion removes one letter per
    level until the target is reached.  Duplicates are kept.
    """
    if wnum == numletters:
        return [word]
    return [shorter
            for pos in range(len(word))
            for shorter in combinations(word[:pos] + word[pos + 1:],
                                        wnum - 1, numletters)]
def main():
    """Interactive driver: read scrambled letters, then print every
    dictionary word formable from them, stepping down one target length
    per round while the user keeps answering 'y'."""
    letters = input('\nWhat are the scrambled letters: ').lower()
    printed, numletters, more = [], len(letters), 'yes'
    # Bug fix: `found` must exist even when the while loop never runs
    # (e.g. a single-letter input), otherwise the final check raised
    # NameError.
    found = 0
    while more[0].lower() == 'y' and numletters > 1:
        found = 0
        for word in combinations(letters, len(letters), numletters):
            for w in anagram(word):
                # Print each dictionary word once across all rounds.
                if (w not in printed) and search(w):
                    if not found:
                        print ('\nPossible', numletters, 'letter words:\n')
                    print (w)
                    printed.append(w)
                    found += 1
        print ('\nTotal:', found)
        if numletters == 2:
            break
        numletters -= 1
        more = input(str('\nWould you like ' + str(numletters) +
                         ' letter combinations? (y/n) '))
    if not found:
        print ('\nThose letters do not form any words.')
    if input('\nAgain? ')[0].lower() == 'y':
        main()


if __name__=='__main__':main()
|
[
"k.kredit.us@ieee.org"
] |
k.kredit.us@ieee.org
|
bb7be13aa1ae689ed05a4e1ef6b48ef41a63abf7
|
34edc8b21515817caa87aedeb07b87515c33ebd0
|
/shipping/serializers.py
|
c02da959ef30fc9d803c7ff4b5f9b8d0607690d0
|
[] |
no_license
|
waelbeso/Ftrina
|
b20c277030132b195af621d9e739040d42943a9b
|
449868f8c095bb920a2aef2e2dc4cb80de8ec82a
|
refs/heads/master
| 2022-09-06T16:34:40.391965
| 2018-05-27T12:19:05
| 2018-05-27T12:19:05
| 134,336,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,464
|
py
|
from shipping.models import Model,Zone
from rest_framework import serializers
from shop.models import WareHouse,Shop
from rest_framework.validators import UniqueTogetherValidator
class ModelSerializer(serializers.ModelSerializer):
    """DRF serializer for shipping Model objects.

    `name` carries a JSON payload validated by ShippingValidator, which
    enforces per-shop uniqueness of the model name for both create
    ('new') and update flows.
    """
    def ShippingValidator(value):
        # NOTE(review): `value` appears to be a dict with 'method',
        # 'name', 'shop' and (for updates) 'pk' keys -- confirm against
        # the client payload.
        import json
        if "update" in value['method']:
            if value['name']:
                try:
                    Model.objects.get(name=value['name'],shop=value['shop'])
                except Model.DoesNotExist:
                    # No clash with an existing model: accept.
                    return
                shop_shipping_model = Model.objects.get(name=value['name'],shop=value['shop'])
                # Updating a model to its own current name is allowed.
                if str(shop_shipping_model.id) in value['pk']:
                    return
                raise serializers.ValidationError('You have Shipping Model with this name.')
            raise serializers.ValidationError('Name is required.')
        if "new" in value['method']:
            if value['name']:
                try:
                    Model.objects.get(name=value['name'],shop=value['shop'])
                except Model.DoesNotExist:
                    # Name unused in this shop: accept.
                    return
                raise serializers.ValidationError('You have Shipping Model with this name.')
            raise serializers.ValidationError('Name is required.')
    name = serializers.JSONField(required=True, validators=[ShippingValidator])
    shop = serializers.PrimaryKeyRelatedField(queryset=Shop.objects.filter(),read_only=False)
    ware_house = serializers.PrimaryKeyRelatedField(required=True,queryset=WareHouse.objects.filter(),read_only=False)
    class Meta:
        model = Model
        fields = ('id', 'name','shop','ware_house','zone')
    def update(self, instance, validated_data):
        ''' We did not update the shop record '''
        # Resolve the warehouse PK and update only name + warehouse.
        ware_house = WareHouse.objects.get(pk=validated_data.pop('ware_house'))
        instance.name = validated_data.get('name', instance.name)
        instance.ware_house = ware_house
        instance.save()
        return instance
class ZoneSerializer(serializers.ModelSerializer):
    """Serializer for a shipping Zone: the price a shipping Model
    charges for a (country, province) destination."""
    model = serializers.PrimaryKeyRelatedField(queryset=Model.objects.filter(), read_only=False)
    country = serializers.CharField(required=True)
    province = serializers.CharField(required=True)
    # NOTE(review): coerce_to_string=None (not a bool) presumably defers to
    # DRF's COERCE_DECIMAL_TO_STRING setting -- confirm this is intended.
    price = serializers.DecimalField(max_digits=19, decimal_places=2, coerce_to_string=None, max_value=None, min_value=None)
    price_currency = serializers.CharField(max_length=None, min_length=None, allow_blank=False)

    class Meta:
        model = Zone
        fields = ('id', 'model', 'country', 'province', 'price', 'price_currency')
        # A model may define at most one price per (country, province).
        validators = [
            UniqueTogetherValidator(
                queryset=Zone.objects.all(),
                fields=('model', 'country', 'province')
            )
        ]
|
[
"waelabbas@live.com"
] |
waelabbas@live.com
|
3ce6b9f20d08c14c582b9278fa91e5bb702c29b2
|
e3472add507c7fc16d013c2e318ca4e28158a13a
|
/tcc_tf/deterministic_alignment.py
|
4e8c87dc31aabe92912c8f4e9dd78be57da37059
|
[
"Apache-2.0"
] |
permissive
|
JiaHeng-DLUT/tcc_Temporal_Cycle_Consistency_Loss.pytorch
|
ebd5a9eba26a2332d81743c95a460eef2c690cb4
|
61490f457b406366f847822962f607e4c3d3e1bd
|
refs/heads/main
| 2022-12-27T20:33:20.462667
| 2020-10-11T11:12:05
| 2020-10-11T11:12:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,375
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deterministic alignment between all pairs of sequences in a batch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from .losses import classification_loss
from .losses import regression_loss
def pairwise_l2_distance(embs1, embs2):
  """Squared L2 distance between every row of embs1 and every row of embs2.

  Args:
    embs1: Tensor of shape [M, D].
    embs2: Tensor of shape [N, D].

  Returns:
    Tensor of shape [M, N] with non-negative squared distances.
  """
  sq_norms1 = tf.reshape(tf.reduce_sum(tf.square(embs1), 1), [-1, 1])
  sq_norms2 = tf.reshape(tf.reduce_sum(tf.square(embs2), 1), [1, -1])
  cross_terms = tf.matmul(embs1, embs2, False, True)
  # ||a - b||^2 = ||a||^2 + ||b||^2 - 2ab; clamp at zero because floating
  # point rounding in the matmul can yield tiny negative values.
  return tf.maximum(sq_norms1 + sq_norms2 - 2.0 * cross_terms, 0.0)
def get_scaled_similarity(embs1, embs2, similarity_type, temperature):
  """Pairwise similarities between rows of embs1 and rows of embs2, scaled.

  The raw similarity (dot product for 'cosine', negated squared L2 distance
  for 'l2') is divided first by the embedding size and then by the softmax
  temperature.

  Args:
    embs1: Tensor, embeddings of shape [M, D].
    embs2: Tensor, embeddings of shape [N, D].
    similarity_type: String, either 'l2' or 'cosine'.
    temperature: Float, scales logits before the softmax.

  Returns:
    Tensor of shape [M, N] with the scaled similarities.

  Raises:
    ValueError: If similarity_type is neither 'l2' nor 'cosine'.
  """
  if similarity_type == 'cosine':
    raw = tf.matmul(embs1, embs2, transpose_b=True)
  elif similarity_type == 'l2':
    raw = -1.0 * pairwise_l2_distance(embs1, embs2)
  else:
    raise ValueError('similarity_type can either be l2 or cosine.')
  # Normalizing by the embedding size keeps the logits in a stable range
  # for optimization; the temperature controls how soft the alignment is.
  channels = tf.cast(tf.shape(embs1)[1], tf.float32)
  raw = raw / channels
  raw = raw / temperature
  return raw
def align_pair_of_sequences(embs1,
                            embs2,
                            similarity_type,
                            temperature):
  """Cycle one embedding sequence through another and build logits/labels.

  Each frame of embs1 is matched softly into embs2 and then cycled back
  onto embs1; a consistent alignment cycles every frame back to itself.

  Args:
    embs1: Tensor, embeddings of shape [M, D] (the starting sequence).
    embs2: Tensor, embeddings of shape [N, D].
    similarity_type: String, either 'l2' or 'cosine'.
    temperature: Float, scales logits before the softmax.

  Returns:
    logits: Tensor, pre-softmax similarity scores after cycling back.
    labels: Tensor, one-hot ground truth; the starting index is 1.
  """
  num_frames = tf.shape(embs1)[0]
  # Forward pass: similarity of every embs1 frame to every embs2 frame.
  forward_sim = get_scaled_similarity(embs1, embs2, similarity_type,
                                      temperature)
  # Soft nearest neighbor of each embs1 frame inside embs2.
  soft_nn = tf.matmul(tf.nn.softmax(forward_sim, axis=1), embs2)
  # Backward pass: cycle the soft neighbors back onto embs1.
  backward_sim = get_scaled_similarity(soft_nn, embs1, similarity_type,
                                       temperature)
  # Ground truth: frame i should cycle back to index i.
  one_hot_labels = tf.one_hot(tf.range(num_frames), num_frames)
  return backward_sim, one_hot_labels
def compute_deterministic_alignment_loss(embs,
                                         steps,
                                         seq_lens,
                                         num_steps,
                                         batch_size,
                                         loss_type,
                                         similarity_type,
                                         temperature,
                                         label_smoothing,
                                         variance_lambda,
                                         huber_delta,
                                         normalize_indices):
  """Compute cycle-consistency loss for all steps in each sequence.

  This aligns each pair of videos in the batch except with itself.
  When aligning it also matters which video is the starting video. So for N
  videos in the batch, we have N * (N-1) alignments happening.
  For example, a batch of size 3 has 6 pairs of sequence alignments.

  Args:
    embs: Tensor, sequential embeddings of the shape [N, T, D] where N is the
      batch size, T is the number of timesteps in the sequence, D is the size
      of the embeddings.
    steps: Tensor, step indices/frame indices of the embeddings of the shape
      [N, T] where N is the batch size, T is the number of the timesteps.
    seq_lens: Tensor, Lengths of the sequences from which the sampling was
      done. This can provide additional information to the alignment loss.
    num_steps: Integer/Tensor, Number of timesteps in the embeddings.
    batch_size: Integer, Size of the batch.
    loss_type: String, This specifies the kind of loss function to use.
      Currently supported loss functions: 'classification', 'regression_mse',
      'regression_mse_var', 'regression_huber'.
    similarity_type: String, Currently supported similarity metrics: 'l2',
      'cosine'.
    temperature: Float, temperature scaling used to scale the similarity
      distributions calculated using the softmax function.
    label_smoothing: Float, Label smoothing argument used in
      tf.keras.losses.categorical_crossentropy function and described in this
      paper https://arxiv.org/pdf/1701.06548.pdf.
    variance_lambda: Float, Weight of the variance of the similarity
      predictions while cycling back. If this is high then the low variance
      similarities are preferred by the loss while making this term low
      results in high variance of the similarities (more uniform/random
      matching).
    huber_delta: float, Huber delta described in tf.keras.losses.huber_loss.
    normalize_indices: Boolean, If True, normalizes indices by sequence
      lengths. Useful for ensuring numerical instabilities doesn't arise as
      sequence indices can be large numbers.

  Returns:
    loss: Tensor, Scalar loss tensor that imposes the chosen variant of the
      cycle-consistency loss.
  """
  labels_list = []
  logits_list = []
  steps_list = []
  seq_lens_list = []

  # The four lists are appended to in lockstep; row k of each concatenated
  # tensor below must describe the same alignment.
  for i in range(batch_size):
    for j in range(batch_size):
      # We do not align the sequence with itself.
      if i != j:
        logits, labels = align_pair_of_sequences(embs[i],
                                                 embs[j],
                                                 similarity_type,
                                                 temperature)
        logits_list.append(logits)
        labels_list.append(labels)
        # Replicate sequence i's step indices and length once per row of
        # this alignment's logits so the regression losses can index them.
        steps_list.append(tf.tile(steps[i:i+1], [num_steps, 1]))
        seq_lens_list.append(tf.tile(seq_lens[i:i+1], [num_steps]))

  # Stack all N*(N-1) alignments into single tensors.
  logits = tf.concat(logits_list, axis=0)
  labels = tf.concat(labels_list, axis=0)
  steps = tf.concat(steps_list, axis=0)
  seq_lens = tf.concat(seq_lens_list, axis=0)

  if loss_type == 'classification':
    loss = classification_loss(logits, labels, label_smoothing)
  elif 'regression' in loss_type:
    loss = regression_loss(logits, labels, num_steps, steps, seq_lens,
                           loss_type, normalize_indices, variance_lambda,
                           huber_delta)
  else:
    raise ValueError('Unidentified loss_type %s. Currently supported loss '
                     'types are: regression_mse, regression_huber, '
                     'classification.' % loss_type)

  return loss
|
[
"noreply@github.com"
] |
noreply@github.com
|
4122d8dfdf03cb8b82f3ada1eac86eba2d701a0f
|
72e76a8eeb3afbbd2d77eb79047410e3944947c5
|
/datasets.py
|
ac3d0e16e9df457d922d542baa0547f0f858d3d7
|
[] |
no_license
|
Tirthraj93/Topic-Modelling-and-Clustering
|
f6a042141ed54f65ce00cd7c51dc138e72ba2f5a
|
4a7edaa3845cf18f6bbd57fee740a6bd40c9cbfe
|
refs/heads/master
| 2020-04-17T05:31:55.423827
| 2016-08-30T20:48:15
| 2016-08-30T20:48:15
| 66,976,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
from lda.utils import ldac2dtm
def load_ldac(file_path):
    """Load a document-term matrix from a file in LDA-C format.

    Uses a context manager so the file handle is closed deterministically;
    the original opened the file inline and leaked the handle.
    NOTE(review): assumes ldac2dtm consumes the stream eagerly before the
    ``with`` block exits -- confirm against lda.utils.
    """
    with open(file_path) as ldac_file:
        return ldac2dtm(ldac_file, offset=0)
def load_vocab(file_path):
    """Read a whitespace-separated vocabulary file into a tuple of tokens."""
    with open(file_path) as vocab_file:
        tokens = vocab_file.read().split()
    return tuple(tokens)
def load_titles(file_path):
    """Return one stripped title per line of the file, as a tuple.

    Iterates the file object directly instead of materializing every line
    via readlines(), avoiding a throwaway list for large files.
    """
    with open(file_path) as f:
        return tuple(line.strip() for line in f)
|
[
"noreply@github.com"
] |
noreply@github.com
|
0c33b6b087134d2d154945f70cb86c1e18641dbc
|
f78bf2e853fd07aba9fb64617e7a3f6d169a5854
|
/MANUFACTURER.py
|
a8e8270ea55eb738a2c8c8d40e3236483b84a9e1
|
[] |
no_license
|
daemonluo/ua-parser-python
|
b321e4581300bd6d59912a9e5e620d7d6637722a
|
a92fc61e44a11f24c834c754812425be2df44c40
|
refs/heads/master
| 2021-06-11T22:51:12.775335
| 2016-09-01T14:04:56
| 2016-09-01T14:04:56
| 67,136,467
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115,269
|
py
|
#coding=utf8
import re
# Manufacturer-name constants shared by the model lookup tables below.
# NOTE(review): the original lines ended with stray trailing commas (and two
# semicolons) -- a leftover from the PHP source this was ported from -- which
# made each constant a 1-tuple such as ('Samsung',) instead of a plain
# string. The trailing punctuation is removed so each constant is a string.
STRINGS_SAMSUNG = 'Samsung'
STRINGS_SHARP = 'Sharp'
STRINGS_SONY_ERICSSON = 'Sony Ericsson'
STRINGS_MOTOROLA = 'Motorola'
STRINGS_LG = 'LG'
STRINGS_HUAWEI = 'Huawei'
STRINGS_HTC = 'HTC'
STRINGS_COOLPAD = 'Coolpad'
STRINGS_ASUS = 'Asus'
STRINGS_ACER = 'Acer'
STRINGS_BASED = ' based device'
# Samsung TouchWiz feature-phone devices, keyed by vendor token and then by
# the model string seen in the user agent. Values are
# [manufacturer constant, marketing name].
TOUCHWIZ_MODELS = {
    'SAMSUNG': {
        'GT-S3370C': [STRINGS_SAMSUNG, 'Corby 3G'],
        'GT-S3650': [STRINGS_SAMSUNG, 'Corby'],
        'GT-S3653': [STRINGS_SAMSUNG, 'Corby'],
        'GT-S3850': [STRINGS_SAMSUNG, 'Corby II'],
        'GT-S5230': [STRINGS_SAMSUNG, 'Star'],
        'GT-S5230W': [STRINGS_SAMSUNG, 'Star'],
        'GT-S5233': [STRINGS_SAMSUNG, 'Star'],
        'GT-S5260': [STRINGS_SAMSUNG, 'Star II'],
        'GT-S5560': [STRINGS_SAMSUNG, 'Marvel'],
        'GT-S5620': [STRINGS_SAMSUNG, 'Monte'],
        'GT-S7550': [STRINGS_SAMSUNG, 'Blue Earth'],
        'GT-S8000': [STRINGS_SAMSUNG, 'Jet'],
        'GT-S8003': [STRINGS_SAMSUNG, 'Jet'],
        'SGH-F480': [STRINGS_SAMSUNG, 'Tocco'],
        'SGH-T528g': [STRINGS_SAMSUNG, 'Straight Talk'],
        'GT-B3410': [STRINGS_SAMSUNG, 'Star Qwerty'],
        'GT-B5310': [STRINGS_SAMSUNG, 'Corby Pro'],
        'GT-B7722': [STRINGS_SAMSUNG, 'Star Duos'],
        'GT-C6712': [STRINGS_SAMSUNG, 'Star II Duos']
    }
}
# Samsung Bada (Wave family) devices, keyed by vendor token and then by the
# model string. Values are [manufacturer constant, marketing name].
BADA_MODELS = {
    'SAMSUNG': {
        # NOTE(review): key contains a space after 'GT-' -- presumably a
        # verbatim UA quirk; confirm before normalizing.
        'GT- S5250': [STRINGS_SAMSUNG, 'Wave 525'],
        'GT-S5250': [STRINGS_SAMSUNG, 'Wave 525'],
        'GT-S5253': [STRINGS_SAMSUNG, 'Wave 525'],
        'GT-S5330': [STRINGS_SAMSUNG, 'Wave 533'],
        'GT-S5380': [STRINGS_SAMSUNG, 'Wave Y'],
        'GT-S5380D': [STRINGS_SAMSUNG, 'Wave Y'],
        'GT-S5380K': [STRINGS_SAMSUNG, 'Wave Y'],
        'GT-S5750E': [STRINGS_SAMSUNG, 'Wave 575'],
        'GT-S5753E': [STRINGS_SAMSUNG, 'Wave 575'],
        'GT-S7230B': [STRINGS_SAMSUNG, 'Wave 723'],
        'GT-S7230E': [STRINGS_SAMSUNG, 'Wave 723'],
        'GT-S7233E': [STRINGS_SAMSUNG, 'Wave 723'],
        'GT-S7250': [STRINGS_SAMSUNG, 'Wave M'],
        'GT-S7250D': [STRINGS_SAMSUNG, 'Wave M'],
        'GT-S8500': [STRINGS_SAMSUNG, 'Wave'],
        'GT-S8500C': [STRINGS_SAMSUNG, 'Wave'],
        'GT-S8500R': [STRINGS_SAMSUNG, 'Wave'],
        'GT-S8500T': [STRINGS_SAMSUNG, 'Wave'],
        'GT-S8530': [STRINGS_SAMSUNG, 'Wave II'],
        'GT-S8600': [STRINGS_SAMSUNG, 'Wave 3'],
        'SHW-M410': [STRINGS_SAMSUNG, 'Wave 3']
    }
}
# Tizen reference device, keyed by vendor token then model string.
TIZEN_MODELS = {
    'SAMSUNG': {
        'GT-I9500': [STRINGS_SAMSUNG, 'GT-I9500']
    }
}

# Brew OS handsets, keyed directly by model string (no vendor level).
BREW_MODELS = {
    'Coolpad D508': [STRINGS_COOLPAD, 'D508'],
    'Coolpad E600': [STRINGS_COOLPAD, 'E600'],
    'SCH-F839': [STRINGS_SAMSUNG, 'SCH-F839']
}
# Windows Mobile devices, keyed directly by model string (no vendor level).
# Values are [manufacturer constant or literal name, marketing name].
WINDOWS_MOBILE_MODELS = {
    'DX900': [STRINGS_ACER, 'Tempo DX900'],
    'F900': [STRINGS_ACER, 'Tempo F900'],
    'Coolpad F800': [STRINGS_COOLPAD, 'F800'],
    'garmin-asus-Nuvifone-M10': ['Garmin-Asus', 'Nuvifone M10'],
    'HP iPAQ 510': ['HP', 'iPAQ 510'],
    'HD mini T5555': [STRINGS_HTC, 'HD mini'],
    'HTC HD mini': [STRINGS_HTC, 'HD mini'],
    'HTC HD mini T5555': [STRINGS_HTC, 'HD mini'],
    'HTC HD2': [STRINGS_HTC, 'HD2'],
    'HTC HD2 T8585': [STRINGS_HTC, 'HD2'],
    'HD2 T8585': [STRINGS_HTC, 'HD2'],
    'T-Mobile LEO': [STRINGS_HTC, 'HD2'],
    'dopodT5588': [STRINGS_HTC, 'Hengshan'],
    'HTC Mega-T3333': [STRINGS_HTC, 'Mega'],
    'HTC Snap S521': [STRINGS_HTC, 'Snap'],
    'HTC Touch2 T3320': [STRINGS_HTC, 'Touch 2'],
    'HTC Touch2 T3333': [STRINGS_HTC, 'Touch 2'],
    'HTC Touch2 T3335': [STRINGS_HTC, 'Touch 2'],
    'HTC P3700': [STRINGS_HTC, 'Touch Diamond'],
    'HTC Touch Diamond2 T5353': [STRINGS_HTC, 'Touch Diamond 2'],
    'HTC Touch HD T8282': [STRINGS_HTC, 'Touch HD'],
    'HTC Touch HD T8283': [STRINGS_HTC, 'Touch HD'],
    'HTC Touch HD2 T8585': [STRINGS_HTC, 'Touch HD2'],
    'HTC Touch Pro2 T7373': [STRINGS_HTC, 'Touch Pro 2'],
    'T7380': [STRINGS_HTC, 'Touch Pro 2'],
    'HTC TyTN II': [STRINGS_HTC, 'TyTN II'],
    'GT-B7300': [STRINGS_SAMSUNG, 'Omnia Lite'],
    'GT-B7610': [STRINGS_SAMSUNG, 'Omnia Pro'],
    'GT-i8000': [STRINGS_SAMSUNG, 'Omnia 2'],
    'GT-I8000': [STRINGS_SAMSUNG, 'Omnia 2'],
    'GT-I8000U': [STRINGS_SAMSUNG, 'Omnia 2'],
    'M1i': [STRINGS_SONY_ERICSSON, 'M1i Aspen']
}
# Windows Phone devices, keyed by the vendor token reported in the user
# agent and then by model string. Values are
# [manufacturer constant or literal name, marketing name].
WINDOWS_PHONE_MODELS = {
    'Acer': {
        'Allegro': [STRINGS_ACER, 'Allegro'],
        'M310': [STRINGS_ACER, 'Allegro']
    },
    'Asus': {
        'Galaxy6': [STRINGS_ASUS, 'Galaxy 6']
    },
    'DELL': {
        'Venue Pro': ['Dell', 'Venue Pro']
    },
    'FujitsuToshibaMobileCommun': {
        'IS12T': ['Fujitsu Toshiba', 'IS12T']
    },
    'HTC': {
        '7 Mozart': [STRINGS_HTC, '7 Mozart'],
        '7 Mozart T8698': [STRINGS_HTC, '7 Mozart'],
        'T8697': [STRINGS_HTC, '7 Mozart'],
        'T8698': [STRINGS_HTC, '7 Mozart'],
        'PD67100': [STRINGS_HTC, '7 Mozart'],
        'Mozart T8698': [STRINGS_HTC, '7 Mozart'],
        'Mozart': [STRINGS_HTC, '7 Mozart'],
        'USCCHTC-PC93100': [STRINGS_HTC, 'Arrive'],
        # NOTE(review): trailing space in 'Gold ' reproduced verbatim --
        # confirm whether it is intentional.
        'Gold': [STRINGS_HTC, 'Gold '],
        'HD2': [STRINGS_HTC, 'HD2'],
        'HD7': [STRINGS_HTC, 'HD7'],
        'HD7 T9292': [STRINGS_HTC, 'HD7'],
        'T9295': [STRINGS_HTC, 'HD7'],
        'T9296': [STRINGS_HTC, 'HD7'],
        'HD7 Infinity': [STRINGS_HTC, 'HD7'],
        'T7575': [STRINGS_HTC, '7 Pro'],
        '7 Pro T7576': [STRINGS_HTC, '7 Pro'],
        'mwp6985': [STRINGS_HTC, 'Trophy'],
        '7 Trophy T8686': [STRINGS_HTC, 'Trophy'],
        '7 Trophy': [STRINGS_HTC, 'Trophy'],
        'PC40100': [STRINGS_HTC, 'Trophy'],
        'Touch-IT Trophy': [STRINGS_HTC, 'Trophy'],
        'Radar': [STRINGS_HTC, 'Radar'],
        'Radar 4G': [STRINGS_HTC, 'Radar'],
        'Radar C110e': [STRINGS_HTC, 'Radar'],
        'Mazaa': [STRINGS_HTC, 'Mazaa'],
        'Mondrian': [STRINGS_HTC, 'Mondrian'],
        'Schubert': [STRINGS_HTC, 'Schubert'],
        '7 Schubert T9292': [STRINGS_HTC, 'Schubert'],
        'Spark': [STRINGS_HTC, 'Spark'],
        'T8788': [STRINGS_HTC, 'Surround'],
        'TITAN X310e': [STRINGS_HTC, 'Titan'],
        'X310e': [STRINGS_HTC, 'Titan'],
        'PI39100': [STRINGS_HTC, 'Titan'],
        'PI86100': [STRINGS_HTC, 'Titan II'],
        'Ultimate': [STRINGS_HTC, 'Ultimate']
    },
    'LG': {
        'GW910': [STRINGS_LG, 'Optimus 7'],
        'LG E-900': [STRINGS_LG, 'Optimus 7 E900'],
        'LG-E900': [STRINGS_LG, 'Optimus 7 E900'],
        'LG-E900h': [STRINGS_LG, 'Optimus 7 E900'],
        'LG-C900': [STRINGS_LG, 'Optimus 7Q'],
        'LG-C900B': [STRINGS_LG, 'Quantum'],
        'LG-C900k': [STRINGS_LG, 'Quantum']
    },
    # Two vendor-token spellings for Nokia appear in the wild.
    'nokia': {
        'SeaRay': ['Nokia', 'Lumia 800'],
        '800C': ['Nokia', 'Lumia 800']
    },
    'NOKIA': {
        '710': ['Nokia', 'Lumia 710'],
        'Nokia 710': ['Nokia', 'Lumia 710'],
        'Lumia 710': ['Nokia', 'Lumia 710'],
        'Lumia 719': ['Nokia', 'Lumia 719'],
        'Lumia 800': ['Nokia', 'Lumia 800'],
        '800': ['Nokia', 'Lumia 800'],
        'Lumia 900': ['Nokia', 'Lumia 900'],
        'XXX': ['Nokia', 'prototype']
    },
    'SAMSUNG': {
        'GT-I8350': [STRINGS_SAMSUNG, 'Omnia W'],
        'GT-I8350T': [STRINGS_SAMSUNG, 'Omnia W'],
        'SGH-i677': [STRINGS_SAMSUNG, 'Focus Flash'],
        'SGH-i707': [STRINGS_SAMSUNG, 'Taylor'],
        'SGH-i917': [STRINGS_SAMSUNG, 'Omnia 7'],
        'SGH-I917': [STRINGS_SAMSUNG, 'Omnia 7'],
        # NOTE(review): trailing '.' in this key looks suspicious --
        # confirm against a UA corpus before changing it.
        'SGH-i917.': [STRINGS_SAMSUNG, 'Focus'],
        'SGH-i917R': [STRINGS_SAMSUNG, 'Focus'],
        'SGH-i937': [STRINGS_SAMSUNG, 'Focus S'],
        'OMNIA7': [STRINGS_SAMSUNG, 'Omnia 7'],
        'OMINA7': [STRINGS_SAMSUNG, 'Omnia 7'],
        'Taylor': [STRINGS_SAMSUNG, 'Taylor']
    },
    'TOSHIBA': {
        'TSUNAGI': ['Toshiba', 'Tsunagi']
    }
}
ANDROID_MODELS = {
'Android': [None, None],
'google sdk': [None, None],
'sdk': [None, None],
'generic': [None, None],
'generic x86': [None, None],
'amd brazos': ['AMD', 'Fusionbased device'],
'Amlogic M1 reference board': ['Amlogic', 'M1 reference board'],
'AML8726M': ['Amlogic', 'AML8726-Mbased device'],
'vexpress a9': ['ARM', 'Versatile Express development platform'],
'bcm7231': ['Broadcom', 'BCM7231based device', 'television'],
'bcm7425': ['Broadcom', 'BCM7425based device', 'television'],
'bcm7429': ['Broadcom', 'BCM7429based device', 'television'],
'imx50 rdp': ['Freescale', 'i.MX50based device'],
'imx51 bbg': ['Freescale', 'i.MX51based device'],
'imx53 loco': ['Freescale', 'i.MX53based device'],
'imx53 mp204f3': ['Freescale', 'i.MX53based device'],
'imx53 smd': ['Freescale', 'i.MX53based device'],
'imx53 yeagle': ['Freescale', 'i.MX53based device'],
'imx6q': ['Freescale', 'i.MX6Qbased device'],
'ODROID-A': ['Hardkernel', 'ODROID-A developer tablet', 'tablet'],
'mfld dv10': ['Intel', 'Medfieldbased device'],
'mfld dv20': ['Intel', 'Medfieldbased device'],
'mfld lw00': ['Intel', 'Medfieldbased device'],
'mfld pr2': ['Intel', 'Medfieldbased device'],
'mfld pr3': ['Intel', 'Medfieldbased device'],
'berlin bg2': ['Marvell', 'Armada 1000based device', 'television'],
'MStar Amber3': ['MStar', 'Amber3based device'],
'Konka Amber3': ['MStar', 'Amber3based device'],
'mt5396': ['Mediatek', 'MT5396based device', 'television'],
'bird75v2': ['Mediatek', 'MT6575based device'],
'eagle75v1 2': ['Mediatek', 'MT6575based device'],
'MBX DVBT reference board (c03ref)': ['MXB', 'DVBT reference board', 'television'],
'NS2816': ['Nufront', 'NuSmart 2816based device'],
'Ventana': ['nVidia', 'Tegra Ventana development kit'],
'Cardhu': ['nVidia', 'Tegra 3based device'],
'Panda': ['Pandaboard', 'Development Kit'],
'pandaboard': ['Pandaboard', 'Development Kit'],
'PandaBoard': ['Pandaboard', 'Development Kit'],
'MSM': ['Qualcomm', 'Snapdragonbased device'],
'msm7227 ffa': ['Qualcomm', 'Snapdragon S1based device'],
'msm7627 surf': ['Qualcomm', 'Snapdragon S1based device'],
'msm7627a': ['Qualcomm', 'Snapdragon S1based device'],
'msm7627a sku1': ['Qualcomm', 'Snapdragon S1based device'],
'msm7627a sku3': ['Qualcomm', 'Snapdragon S1based device'],
'msm7630 fusion': ['Qualcomm', 'Snapdragon S2based device'],
'msm7630 surf': ['Qualcomm', 'Snapdragon S2based device'],
'msm8660 cougar': ['Qualcomm', 'Snapdragon S3based device'],
'msm8660 surf': ['Qualcomm', 'Snapdragon S3based device'],
'msm8960': ['Qualcomm', 'Snapdragon S4based device'],
'rk2808sdk': ['Rockchip', 'RK2808based device'],
'RK2818': ['Rockchip', 'RK2818based device'],
'rk2818sdk': ['Rockchip', 'RK2818based device'],
'Android-for-Rockchip-2818': ['Rockchip', 'RK2818based device'],
'rk29sdk': ['Rockchip', 'RK29based device'],
'Rk29sdk': ['Rockchip', 'RK29based device'],
'rk30sdk': ['Rockchip', 'RK30based device'],
's3c6410': ['Samsung', 'S3C6410based device'],
'smdk6410': ['Samsung', 'S3C6410based device'],
'SMDKC110': ['Samsung', 'Exynos 3110based device'],
'SMDKV210': ['Samsung', 'Exynos 4210based device'],
'S5PV210': ['Samsung', 'Exynos 4210based device'],
'sec smdkc210': ['Samsung', 'Exynos 4210based device'],
'SMDK4x12': ['Samsung', 'Exynos 4212 or 4412based device'],
'smp86xx': ['Sigma', 'SMP86xxbased device', 'television'],
'sv8860': ['Skyviia', 'SV8860based device', 'television'],
'ste u8500': ['ST Ericsson', 'Novathor U8500based device'],
'Telechips M801 Evaluation Board': ['Telechips', 'M801based device', 'television'],
'Telechips TCC8900 Evaluation Board': ['Telechips', 'TCC8900based device', 'television'],
'TCC8920 STB EV': ['Telechips', 'TCC8920based device', 'television'],
'OMAP': ['Texas Instruments', 'OMAPbased device'],
'OMAP SS': ['Texas Instruments', 'OMAPbased device'],
'LogicPD Zoom2': ['Texas Instruments', 'OMAPbased device'],
'omap3evm': ['Texas Instruments', 'OMAP3based device'],
'Omap5sevm': ['Texas Instruments', 'OMAP5based device'],
'pnx8473 kiryung': ['Trident', 'PNX8473based device', 'television'],
'crespo': ['Google', 'Nexus S'],
'Crespo': ['Google', 'Nexus S'],
'Crespo4G': ['Google', 'Nexus S'],
'Passion': ['Google', 'Nexus One'],
'Bravo': ['HTC', 'Desire'],
'dream': ['HTC', 'Dream'],
'Vogue': ['HTC', 'Touch'],
'Vendor Optimus': ['LG', 'Optimus'],
'Stingray': ['Motorola', 'XOOM', 'tablet'],
'Wingray': ['Motorola', 'XOOM', 'tablet'],
'maguro': ['Samsung', 'Galaxy Nexus'],
'Maguro': ['Samsung', 'Galaxy Nexus'],
'Toro-VZW': ['Samsung', 'Galaxy Nexus'],
'blaze': ['Texas Instruments', 'Blaze Tablet', 'tablet'],
'Blaze': ['Texas Instruments', 'Blaze Tablet', 'tablet'],
'Blaze Tablet': ['Texas Instruments', 'Blaze Tablet', 'tablet'],
'BlueStacks': ['BlueStacks', 'App Player', 'desktop'],
'youwave custom': ['Youwave', 'Android on PC', 'desktop'],
'A100': ['Acer', 'Iconia Tab A100', 'tablet'],
'A101': ['Acer', 'Iconia Tab A101', 'tablet'],
'A200': ['Acer', 'Iconia Tab A200', 'tablet'],
'A500': ['Acer', 'Iconia Tab A500', 'tablet'],
'A501': ['Acer', 'Iconia Tab A501', 'tablet'],
'A510': ['Acer', 'Iconia Tab A510', 'tablet'],
'A511': ['Acer', 'Iconia Tab A511', 'tablet'],
'A700': ['Acer', 'Iconia Tab A700', 'tablet'],
'Acer A800': ['Acer', 'Iconia Tab A800', 'tablet'],
'E110': ['Acer', 'beTouch E110'],
'E120': ['Acer', 'beTouch E120'],
'E130': ['Acer', 'beTouch E130'],
'E140': ['Acer', 'beTouch E140'],
'E210': ['Acer', 'beTouch E210'],
'E310': ['Acer', 'Liquid mini'],
'E320': ['Acer', 'Liquid Express'],
'E330': ['Acer', 'Liquid Glow'],
'E400': ['Acer', 'beTouch E400'],
'G100W': ['Acer', 'G100W'],
'S100': ['Acer', 'Liquid'],
'S110': ['Acer', 'Stream'],
'S120': ['Acer', 'Liquid mt'],
'S300': ['Acer', 'Iconia Smart'],
'S500': ['Acer', 'CloudMobile'],
'TD600': ['Acer', 'beTouch TD600'],
'Liquid': ['Acer', 'Liquid'],
'Liquid E': ['Acer', 'Liquid E'],
'Liquid Mt': ['Acer', 'Liquid mt'],
'Liquid MT': ['Acer', 'Liquid mt'],
'Liquid Metal': ['Acer', 'Liquid mt'],
'Stream': ['Acer', 'Stream'],
'N700': ['aigo', 'N700', 'tablet'],
'M801': ['aigo', 'M801', 'tablet'],
'Novo7': ['Ainovo', 'Novo7', 'tablet'],
'Novo7 Aurora': ['Ainovo', 'Novo7 Aurora', 'tablet'],
'Novo7 Advanced': ['Ainovo', 'Novo7 Advanced', 'tablet'],
'Novo7 Advanced2': ['Ainovo', 'Novo7 Advanced 2', 'tablet'],
'Novo7 Basic': ['Ainovo', 'Novo7 Basic', 'tablet'],
'Novo7 ELF': ['Ainovo', 'Novo7 Elf', 'tablet'],
'Novo7 PALADIN': ['Ainovo', 'Novo7 Paladin', 'tablet'],
'Novo8 Advanced': ['Ainovo', 'Novo8 Advanced', 'tablet'],
'one touch 890': ['Alcatel', 'One Touch 890'],
'one touch 890D': ['Alcatel', 'One Touch 890'],
'one touch 891': ['Alcatel', 'One Touch 891'],
'ONE TOUCH 903': ['Alcatel', 'One Touch 903SHV-E170K'],
'one touch 906': ['Alcatel', 'One Touch 906'],
'one touch 908': ['Alcatel', 'One Touch 908'],
'one touch 908F': ['Alcatel', 'One Touch 908'],
'one touch 908S': ['Alcatel', 'One Touch 908'],
'one touch 910': ['Alcatel', 'One Touch 910'],
'one touch 918': ['Alcatel', 'One Touch 918'],
'one touch 918D': ['Alcatel', 'One Touch 918'],
'ONE TOUCH 918D': ['Alcatel', 'One Touch 918'],
'one touch 918M': ['Alcatel', 'One Touch 918'],
'one touch 918N': ['Alcatel', 'One Touch 918'],
'one touch 980': ['Alcatel', 'One Touch 980'],
'one touch 980A': ['Alcatel', 'One Touch 980'],
'one touch 981A': ['Alcatel', 'One Touch 981'],
'one touch 986': ['Alcatel', 'One Touch 986'],
'one touch 990': ['Alcatel', 'One Touch 990'],
'one touch 990A': ['Alcatel', 'One Touch 990'],
'one touch 991': ['Alcatel', 'One Touch 991'],
'one touch 991D': ['Alcatel', 'One Touch 991'],
'ONE TOUCH 993': ['Alcatel', 'One Touch 993'],
'one touch 995': ['Alcatel', 'One Touch 995'],
'Telenor OneTouch': ['Alcatel', 'One Touch 990'],
'OT 918': ['Alcatel', 'One Touch 918'],
'Venture': ['Alcatel', 'Venture'],
'Allwinner A10': ['AllWinner', 'A10', 'tablet'],
'97FC': ['AllWinner', 'A10 97FC', 'tablet'],
'Kindle Fire': ['Amazon', 'Kindle Fire', 'tablet'],
'Amazon Kindle Fire': ['Amazon', 'Kindle Fire', 'tablet'],
'AMD120': ['AnyDATA', 'AnyTAB AMD120', 'tablet'],
'MW0811': ['AOC', 'Breeze MW0811', 'tablet'],
'MW0821 V2.0': ['AOC', 'Breeze MW0821', 'tablet'],
'MW0922': ['AOC', 'Breeze MW0922', 'tablet'],
'Apanda A60': ['Apanda', 'A60'],
'apanda-A60': ['Apanda', 'A60'],
'A80KSC': ['Archos', 'Arnova 8', 'tablet'],
'AN7CG2': ['Archos', 'Arnova 7', 'tablet'],
'A101B': ['Archos', 'Arnova 10', 'tablet'],
'AN10BG2DT': ['Archos', 'Arnova 10 B', 'tablet'],
'AN10G2': ['Archos', 'Arnova 10 G2', 'tablet'],
'A32': ['Archos', '32', 'media'],
'A35DE': ['Archos', '35 Smart Home Phone'],
'A43': ['Archos', '43', 'media'],
'Archos5': ['Archos', '5', 'media'],
'A70H': ['Archos', '7', 'tablet'],
'A70HB': ['Archos', '7', 'tablet'],
'A70BHT': ['Archos', '7', 'tablet'],
'A70CHT': ['Archos', '7C', 'tablet'],
'A70S': ['Archos', '70', 'tablet'],
'A7EB': ['Archos', '70B', 'tablet'],
'ARCHOS 70it2': ['Archos', '70 IT 2', 'tablet'],
'ARCHOS 80G9': ['Archos', '80 G9', 'tablet'],
'ARCHOS 101G9': ['Archos', '101 G9', 'tablet'],
'A101IT': ['Archos', '101 IT', 'tablet'],
'ASTRI': ['ASTRI', 'e-reader', 'ereader'],
'eeepc': ['Asus', 'Eee Pc'],
'asus laptop': ['Asus', 'Eee Pc'],
'ME171': ['Asus', 'Eee Pad MeMO', 'tablet'],
'Slider SL101': ['Asus', 'Eee Pad Slider', 'tablet'],
'EPAD': ['Asus', 'Eee Pad Transformer', 'tablet'],
'TF101': ['Asus', 'Eee Pad Transformer', 'tablet'],
'Transformer TF101': ['Asus', 'Eee Pad Transformer', 'tablet'],
'Transformer TF101G': ['Asus', 'Eee Pad Transformer', 'tablet'],
'TF201': ['Asus', 'Eee Pad Transformer Prime', 'tablet'],
'Transformer Prime TF201': ['Asus', 'Eee Pad Transformer Prime', 'tablet'],
'Transformer Prime': ['Asus', 'Eee Pad Transformer Prime', 'tablet'],
'Transformer Pad TF300T': ['Asus', 'Transformer Pad 300', 'tablet'],
'ASUS Transformer TF300T': ['Asus', 'Transformer Pad 300', 'tablet'],
'ASUS Transformer Pad TF300T': ['Asus', 'Transformer Pad 300', 'tablet'],
'ASUS Transformer Pad TF300TG': ['Asus', 'Transformer Pad 300', 'tablet'],
'ASUS Transformer Pad TF700T': ['Asus', 'Transformer Pad Infinity 700', 'tablet'],
'ASUS Transformer Pad TF700K': ['Asus', 'Transformer Pad Infinity 700', 'tablet'],
'ASUS Transformer TF700K': ['Asus', 'Transformer Pad Infinity 700', 'tablet'],
'PadFone': ['Asus', 'Padfone', 'tablet'],
'OMS TTD': ['Asus', 'Eee Pc T10'],
'ASUS T20': ['Asus', 'Eee Pc T20'],
'ETBW11AA': ['Asus', 'Tough'],
'AUX V900': ['AUX', 'V900'],
'M910A': ['AUX', 'M910'],
'PICOpad-QGN': ['Axioo', 'Picopad QGN', 'tablet'],
'NOOK': ['Barnes & Noble', 'NOOK', 'ereader'],
'NookColor': ['Barnes & Noble', 'NOOK Color', 'ereader'],
'NOOK BNRV200': ['Barnes & Noble', 'NOOK Color', 'ereader'],
'NOOK BNRV300': ['Barnes & Noble', 'NOOK Color', 'ereader'],
'NookTablet': ['Barnes & Noble', 'NOOK Tablet', 'ereader'],
'Nook Tablet': ['Barnes & Noble', 'NOOK Tablet', 'ereader'],
'NOOK BNTV250': ['Barnes & Noble', 'NOOK Tablet', 'ereader'],
'NOOK BNTV250A': ['Barnes & Noble', 'NOOK Tablet', 'ereader'],
'BNTV250': ['Barnes & Noble', 'NOOK Tablet', 'ereader'],
'BNTV250A': ['Barnes & Noble', 'NOOK Tablet', 'ereader'],
'NOOK Slate': ['Barnes & Noble', 'NOOK Tablet', 'ereader'],
'BenWee 5100': ['BenWee', '5100'],
'CA907AAC0G': ['Besta', 'CA907AAC0G'],
'BM999': ['Bmorn', 'BM999', 'tablet'],
'V11': ['Bmorn', 'V11', 'tablet'],
'V99': ['Bmorn', 'V99', 'tablet'],
'bq DaVinci': ['bq', 'DaVinci', 'tablet'],
'CT704': ['Carrefour', 'CT704', 'tablet'],
'CT1002': ['Carrefour', 'CT1002', 'tablet'],
'Camangi-Mangrove7': ['Camangi', 'Mangrove 7', 'tablet'],
'WS171': ['Camangi', 'WebStation', 'tablet'],
'IS11CA': ['Casio', 'GzOne IS11CA'],
'C771': ['Casio', 'GzOne Commando'],
'CAT NOVA': ['Cat', 'NOVA', 'tablet'],
'ARMM3V': ['chinaleap', 'ARMM3V', 'tablet'],
'CIUS-7': ['Cisco', 'Cius', 'tablet'],
'CIUS-7-AT': ['Cisco', 'Cius', 'tablet'],
'CSL Spice MI300': ['CSL', 'Spice MI300'],
'CSL-MI410': ['CSL', 'Spice MI410'],
'MID1024': ['Coby', 'Kyros MID1024', 'tablet'],
'MID1125': ['Coby', 'Kyros MID1125', 'tablet'],
'MID1126': ['Coby', 'Kyros MID1126', 'tablet'],
'MID7010': ['Coby', 'Kyros MID7010', 'tablet'],
'MID7012': ['Coby', 'Kyros MID7012', 'tablet'],
'MID7015': ['Coby', 'Kyros MID7015', 'tablet'],
'MID7015A': ['Coby', 'Kyros MID7015', 'tablet'],
'MID7016': ['Coby', 'Kyros MID7016', 'tablet'],
'MID7020': ['Coby', 'Kyros MID7020', 'tablet'],
'MID7022': ['Coby', 'Kyros MID7022', 'tablet'],
'MID7024': ['Coby', 'Kyros MID7024', 'tablet'],
'MID7025': ['Coby', 'Kyros MID7025', 'tablet'],
'MID7127': ['Coby', 'Kyros MID7127', 'tablet'],
'MID8024': ['Coby', 'Kyros MID8024', 'tablet'],
'MID8125': ['Coby', 'Kyros MID8125', 'tablet'],
'MID8127': ['Coby', 'Kyros MID8127', 'tablet'],
'Z71': ['Commtiva', 'Z71'],
'V-T100': ['Commtiva', 'V-T100'],
'FIH-FB0': ['Commtiva', 'HD700'],
'Coolpad D510': ['Coolpad', 'D510'],
'Coolpad 8020': ['Coolpad', '8020'],
'D530': ['Coolpad', 'D530'],
'Coolpad D530': ['Coolpad', 'D530'],
'D539': ['Coolpad', 'D539'],
'Coolpad D539': ['Coolpad', 'D539'],
'E239': ['Coolpad', 'E239'],
'Coolpad E239': ['Coolpad', 'E239'],
'Coolpad N930': ['Coolpad', 'N930'],
'N930': ['Coolpad', 'N930'],
'Coolpad W706': ['Coolpad', 'W706'],
'Coolpad W706+': ['Coolpad', 'W706'],
'Coolpad W708': ['Coolpad', 'W708'],
'W711': ['Coolpad', 'W711'],
'Coolpad 5010': ['Coolpad', '5010'],
'Coolpad 5210': ['Coolpad', '5210'],
'Coolpad 5820': ['Coolpad', '5820'],
'5832': ['Coolpad', '5832'],
'Coolpad 5832': ['Coolpad', '5832'],
'5855': ['Coolpad', '5855'],
'Coolpad 5860': ['Coolpad', '5860'],
'Coolpad 5860+': ['Coolpad', '5860'],
'Coolpad 5860s': ['Coolpad', '5860'],
'5860': ['Coolpad', '5860'],
'5860A': ['Coolpad', '5860'],
'Coolpad 5870': ['Coolpad', '5870'],
'5870': ['Coolpad', '5870'],
'Coolpad 7005': ['Coolpad', '7005'],
'7260': ['Coolpad', '7260'],
'Coolpad 7019': ['Coolpad', '7019'],
'Coolpad 7260': ['Coolpad', '7260'],
'Coolpad 8013': ['Coolpad', '8013'],
'Coolpad 8809': ['Coolpad', '8809'],
'Coolpad 8810': ['Coolpad', '8810'],
'8810': ['Coolpad', '8810'],
'8150': ['Coolpad', '8150'],
'Coolpad 8150D': ['Coolpad', '8150'],
'Coolpad 8811': ['Coolpad', '8811'],
'Coolpad 9900': ['Coolpad', '9900'],
'Coolpad 8050': ['Coolpad', '8050'],
'ZiiO7': ['Creative', 'ZiiO 7', 'tablet'],
'ZiiLABS ZiiO7': ['Creative', 'ZiiO 7', 'tablet'],
'ZiiLABS ZiiO10 ': ['Creative', 'ZiiO 10', 'tablet'],
'CUBE K8GT A': ['Cube', 'K8GT A', 'tablet'],
'CUBE K8GT B': ['Cube', 'K8GT B', 'tablet'],
'K8GT C': ['Cube', 'K8GT C', 'tablet'],
'K8GT H': ['Cube', 'K8GT H', 'tablet'],
'CUBE K8GT H': ['Cube', 'K8GT H', 'tablet'],
'K8GT W': ['Cube', 'K8GT W', 'tablet'],
'CUBE U8GT': ['Cube', 'U8GT', 'tablet'],
'CUBE U9GT': ['Cube', 'U9GT', 'tablet'],
'CUBE U9GT 2': ['Cube', 'U9GT 2', 'tablet'],
'Cube U9GT2': ['Cube', 'U9GT 2', 'tablet'],
'U9GT': ['Cube', 'U9GT', 'tablet'],
'U9GT2 From moage.com': ['Cube', 'U9GT 2', 'tablet'],
'N90 From moage.com': ['Cube', 'U9GT 2', 'tablet'],
'U9GT S': ['Cube', 'U9GT S', 'tablet'],
'U9GT S A': ['Cube', 'U9GT SA', 'tablet'],
'U9GTS A': ['Cube', 'U9GT SA', 'tablet'],
'U10GT 2': ['Cube', 'U10GT 2', 'tablet'],
'U10GT S': ['Cube', 'U10GT S', 'tablet'],
'U30GT-H': ['Cube', 'U30GT H', 'tablet'],
'CUBE Q7PRO': ['Cube', 'Q7 Pro', 'tablet'],
'CUBE Q7PRO J': ['Cube', 'Q7 Pro', 'tablet'],
'Cydle M7 (v0005.04.03.12.ko)': ['Cydle', 'M7 MultiPAD', 'tablet'],
'Dell Aero': ['Dell', 'Aero'],
'Dell M01M': ['Dell', 'Mini 5', 'tablet'],
'Dell Streak': ['Dell', 'Streak', 'tablet'],
'001DL': ['Dell', 'Streak', 'tablet'],
'101DL': ['Dell', 'Streak Pro', 'tablet'],
'GS01': ['Dell', 'Streak Pro', 'tablet'],
'Dell Streak Pro': ['Dell', 'Streak Pro', 'tablet'],
'streak7': ['Dell', 'Streak 7', 'tablet'],
'Dell Streak 7': ['Dell', 'Streak 7', 'tablet'],
'Dell Streak 10 Pro': ['Dell', 'Streak 10 Pro', 'tablet'],
'Dell V04B': ['Dell', 'Streak V04B', 'tablet'],
'Dell Venue': ['Dell', 'Venue'],
'Dell XCD35': ['Dell', 'XCD35'],
'XCD35': ['Dell', 'XCD35'],
'iDx7': ['Digma', 'iDx7', 'tablet'],
'iDx10': ['Digma', 'iDx10', 'tablet'],
'iDx10 3G': ['Digma', 'iDx10', 'tablet'],
'DM009SH': ['Disney Mobile', 'DM009SH'],
'DM010SH': ['Disney Mobile', 'DM010SH'],
'DM012SH': ['Disney Mobile', 'DM012SH'],
'F-08D': ['Disney Mobile', 'F-08D'],
'P-05D': ['Disney Mobile', 'P-05D'],
'Tablet-P27': ['DracoTek', 'P27 Tablet', 'tablet'],
'edgejr': ['EnTourage', 'Pocket eDGe', 'tablet'],
'l97D': ['EPad', 'l97D', 'tablet'],
'M4301': ['Eston', 'MID M4301', 'media'],
'P10AN': ['Exper', 'Easypad P10AN', 'tablet'],
'FIH-F0X': ['FIH', 'F0X'],
'Fly IQ260': ['Fly', 'IQ260 BlackBird'],
'ISW11F': ['Fujitsu', 'Arrows Z'],
'ISW13F': ['Fujitsu', 'Arrows Z'],
'IS12F': ['Fujitsu', 'Arrows ES'],
'F-01D': ['Fujitsu', 'Arrows Tab LTE', 'tablet'],
'F-03D': ['Fujitsu', 'Arrows Kiss'],
'F-05D': ['Fujitsu', 'Arrows X LTE'],
'F-07D': ['Fujitsu', 'Arrows μ'],
'F-10D': ['Fujitsu', 'Arrows X F-10D'],
'F-12C': ['Fujitsu', 'Globetrotter'],
'f12arc': ['Fujitsu', 'F12arc'],
'M532': ['Fujitsu', 'Stylistic M532', 'tablet'],
'Garminfone': ['Garmin-Asus', 'Garminfone'],
'Garmin-Asus A10': ['Garmin-Asus', 'Nuvifone A10'],
'Garmin-Asus A50': ['Garmin-Asus', 'Nuvifone A50'],
'TPA60W': ['Gateway', 'TPA60W', 'tablet'],
'Geeksphone ZERO': ['Geeksphone', 'ZERO'],
'gemei G2': ['Gemei', 'G2', 'tablet'],
'Gemei G2': ['Gemei', 'G2', 'tablet'],
'gemei G3': ['Gemei', 'G3', 'tablet'],
'Gemei G9': ['Gemei', 'G9', 'tablet'],
'GSmart G1317D': ['Gigabyte', 'GSmart G1317D'],
'Gigabyte TB100': ['Gigabyte', 'TB100', 'tablet'],
'GN100': ['Gionee', 'GN100'],
'GN105': ['Gionee', 'GN105'],
'GN106': ['Gionee', 'GN106'],
'GN200': ['Gionee', 'GN200'],
'GN205': ['Gionee', 'GN205'],
'GN700W': ['Gionee', 'GN700W'],
'GN708W': ['Gionee', 'GN708W'],
'Google Ion': ['Google', 'Ion'],
'Nexus One': ['Google', 'Nexus One'],
'NexusOne': ['Google', 'Nexus One'],
'HTC Nexus One': ['Google', 'Nexus One'],
'Nexus S': ['Google', 'Nexus S'],
'Google Nexus S': ['Google', 'Nexus S'],
'Nexus S 4G': ['Google', 'Nexus S 4G'],
'Dooderbutt-4.0.3-v1': ['Google', 'Nexus S 4G'],
'Nexus 7': ['Google', 'Nexus 7', 'tablet'],
'Haier HW-W910': ['Haier', 'HW-W910'],
'SN10T1': ['HANNspree', 'HANNSpad SN10T1', 'tablet'],
'SN10T2': ['HANNspree', 'HANNSpad SN10T2', 'tablet'],
'HannsComb': ['HANNspree', 'HANNSpad', 'tablet'],
'X1': ['HCL', 'ME X1', 'tablet'],
'MID Serails': ['Herotab', 'C8', 'tablet'],
'MID Serials': ['Herotab', 'C8', 'tablet'],
'COSMO DUO': ['Hiscreen', 'Cosmo DUO', 'tablet'],
'HS-U8': ['Hisense', 'U8'],
'HS-T92': ['Hisense', 'T92'],
'HS-E860': ['Hisense', 'E860'],
'HS-E910': ['Hisense', 'E910'],
'HS-E926': ['Hisense', 'E926'],
'HS-EG900': ['Hisense', 'EG900'],
'HS-ET919': ['Hisense', 'ET919'],
'EG968B': ['Hisense', 'EG968B'],
'HKPHONE H8-3G': ['HKPhone', 'H8 3G'],
'HOSIN U2': ['Hosin', 'U2'],
'Touchpad': ['HP', 'TouchPad', 'tablet'],
'HP Touchpad': ['HP', 'TouchPad', 'tablet'],
'cm tenderloin': ['HP', 'TouchPad', 'tablet'],
'aokp tenderloin': ['HP', 'TouchPad', 'tablet'],
'HTC Amaze 4G': ['HTC', 'Amaze 4G'],
'HTC Ruby': ['HTC', 'Amaze 4G'],
'HTC Amaze 4G(Ruby)': ['HTC', 'Amaze 4G'],
'Amaze 4G': ['HTC', 'Amaze 4G'],
'HTC Aria': ['HTC', 'Aria'],
'HTC Aria A6380': ['HTC', 'Aria'],
'HTC Liberty A6380': ['HTC', 'Aria'],
'HTC Liberty': ['HTC', 'Aria'],
'HTC A6366': ['HTC', 'Aria'],
'HTC Bee': ['HTC', 'Bee'],
'HTC ChaCha': ['HTC', 'ChaCha'],
'HTC ChaCha A810e': ['HTC', 'ChaCha'],
'HTC ChaChaCha A810e': ['HTC', 'ChaCha'],
'HTC A810e': ['HTC', 'ChaCha'],
'HTC A9188': ['HTC', 'Tianxi'],
'HTC Bravo': ['HTC', 'Desire'],
'HTC Desire': ['HTC', 'Desire'],
'HTC Desire A8181': ['HTC', 'Desire'],
'HTC Desire A8183': ['HTC', 'Desire'],
'HTC Desire Beats A8181': ['HTC', 'Desire'],
'HTC Desire CDMA': ['HTC', 'Desire'],
'HTC Desire SMS': ['HTC', 'Desire'],
'HTC Desire S.M.S': ['HTC', 'Desire'],
'HTC Desire C': ['HTC', 'Desire C'],
'HTC DesireHD': ['HTC', 'Desire HD'],
'HTC DesireHD A9191': ['HTC', 'Desire HD'],
'HTC DesireHD A9192': ['HTC', 'Desire HD'],
'HTC Desire HD A9191': ['HTC', 'Desire HD'],
'HTC A9191': ['HTC', 'Desire HD'],
'HTC A9191 for AT&T': ['HTC', 'Desire HD'],
'HTC A9192': ['HTC', 'Desire HD'],
'HTC Desire HD': ['HTC', 'Desire HD'],
'HTC Desire HD with Beats Audio': ['HTC', 'Desire HD'],
'HTC Desire S': ['HTC', 'Desire S'],
'HTC DesireS': ['HTC', 'Desire S'],
'HTC DesiresS': ['HTC', 'Desire S'],
'HTC DesireS S510e': ['HTC', 'Desire S'],
'HTC DesireS S510b': ['HTC', 'Desire S'],
'HTC Desire S S510e': ['HTC', 'Desire S'],
'HTC S510e': ['HTC', 'Desire S'],
'HTC Desire Saga': ['HTC', 'Desire S'],
'HTC Desire V': ['HTC', 'Desire V'],
'HTC T328w': ['HTC', 'Desire V'],
'HTC Desire VC': ['HTC', 'Desire VC'],
'HTC T328d': ['HTC', 'Desire VC'],
'HTC T328t': ['HTC', 'Desire VT'],
'HTC Desire Z': ['HTC', 'Desire Z'],
'HTC DesireZ': ['HTC', 'Desire Z'],
'HTC DesireZ A7272': ['HTC', 'Desire Z'],
'HTC Desire Z A7272': ['HTC', 'Desire Z'],
'HTC Vision': ['HTC', 'Desire Z'],
'HTC A7275': ['HTC', 'Desire Z'],
'HTC Dream': ['HTC', 'Dream'],
'HTC S710d': ['HTC', 'Droid Incredible 2'],
'HTC Incredible 2': ['HTC', 'Droid Incredible 2'],
'HTC X515d': ['HTC', 'EVO 3D'],
'HTC X515m': ['HTC', 'EVO 3D'],
'HTC X515C': ['HTC', 'EVO 3D'],
'HTC Evo 3D': ['HTC', 'EVO 3D'],
'HTC EVO 3D': ['HTC', 'EVO 3D'],
'HTC EVO 3D GSM': ['HTC', 'EVO 3D'],
'HTC EVO 3D X515a': ['HTC', 'EVO 3D'],
'HTC EVO 3D GSM X515m': ['HTC', 'EVO 3D'],
'HTC EVO 3D X515m': ['HTC', 'EVO 3D'],
'HTC EVO 3D X515M': ['HTC', 'EVO 3D'],
'HTC EVO3D X515a': ['HTC', 'EVO 3D'],
'HTC EVO3D X515m': ['HTC', 'EVO 3D'],
'HTC Evo 3D X515m': ['HTC', 'EVO 3D'],
'HTC Evo 3D with Beats Audio X515m': ['HTC', 'EVO 3D'],
'HTC Evo 4G': ['HTC', 'EVO 4G'],
'HTC EVO 4G': ['HTC', 'EVO 4G'],
'HTC X515E': ['HTC', 'EVO 4G+'],
'HTC EVO 4G+ For Sprint': ['HTC', 'EVO 4G+'],
'HTC EVO 4G++ For Sprint': ['HTC', 'EVO 4G+'],
'HTC C715c': ['HTC', 'EVO Design 4G'],
'HTC Design 4G': ['HTC', 'EVO Design 4G'],
'HTC EVO design 4G': ['HTC', 'EVO Design 4G'],
'HTC EVO Design 4G': ['HTC', 'EVO Design 4G'],
'HTC Evo Shift': ['HTC', 'EVO Shift'],
'HTC EVO Shift 4G': ['HTC', 'EVO Shift'],
'HTC A310e': ['HTC', 'Explorer'],
'HTC Explorer': ['HTC', 'Explorer'],
'HTC Explorer A310b': ['HTC', 'Explorer'],
'HTC Explorer A310e': ['HTC', 'Explorer'],
'HTC P510e': ['HTC', 'Flyer', 'tablet'],
'HTC Flyer': ['HTC', 'Flyer', 'tablet'],
'HTC Flyer P510e': ['HTC', 'Flyer', 'tablet'],
'HTC Flyer P512': ['HTC', 'Flyer', 'tablet'],
'HTC Flyer P512 NA': ['HTC', 'Flyer', 'tablet'],
'HTC P515E': ['HTC', 'Flyer 4G', 'tablet'],
'HTC Gratia A6380': ['HTC', 'Gratia'],
'HTC HD': ['HTC', 'HD'],
'HTC HD2': ['HTC', 'HD2'],
'HTC HD2 T8585': ['HTC', 'HD2'],
'HTC HD2(Leo)': ['HTC', 'HD2'],
'HTC HD7': ['HTC', 'HD7'],
'HTC T9299+': ['HTC', 'HD7'],
'HTC HD7 for Sprint': ['HTC', 'HD7'],
'HTC HD7 4G T9299 For AT&T': ['HTC', 'HD7'],
'HTC HD7 4G T9299+ For AT&T': ['HTC', 'HD7'],
'HTC T9299+ For AT&T': ['HTC', 'HD7'],
'HTC HD7S T9399+': ['HTC', 'HD7s'],
'HTC HD7S T9899+': ['HTC', 'HD7s'],
'HTC T9899+ For AT&T': ['HTC', 'HD7s'],
'VitMod ExtraLite 1.6.5.fullodex for HTC HD7 Pro': ['HTC', 'HD7 Pro'],
'HTC Hero': ['HTC', 'Hero'],
'HTC HERO': ['HTC', 'Hero'],
'HTC Hero CDMA': ['HTC', 'Hero'],
'HTC HERO CDMA': ['HTC', 'Hero'],
'HTC HERO200': ['HTC', 'Hero 200'],
'HTC Hero S': ['HTC', 'Hero S'],
'HTC IMAGIO': ['HTC', 'Imagio'],
'HTC Incredible': ['HTC', 'Incredible'],
'HTC Incredible S710E': ['HTC', 'Incredible S'],
'HTC S710e': ['HTC', 'Incredible S'],
'HTC Incredible S': ['HTC', 'Incredible S'],
'HTC Incredible S S710e': ['HTC', 'Incredible S'],
'HTC Incredible S s710e': ['HTC', 'Incredible S'],
'HTC IncredibleS S710e': ['HTC', 'Incredible S'],
'HTC Incredible S with Beats Audio': ['HTC', 'Incredible S'],
'HTC Vivo': ['HTC', 'Incredible S'],
'HTC Innovation': ['HTC', 'Innovation'],
'HTC Inspire 4G': ['HTC', 'Inspire 4G'],
'HTC HD7 Inspire 4G For Vodafone': ['HTC', 'Inspire 4G'],
'HTC P715a': ['HTC', 'Jetstream', 'tablet'],
'HTC Legend': ['HTC', 'Legend'],
'HTC Magic': ['HTC', 'Magic'],
'HTC Sapphire': ['HTC', 'Magic'],
'HTC Lexikon': ['HTC', 'Merge'],
'HTC One S': ['HTC', 'One S'],
'HTC Z520e': ['HTC', 'One S'],
'HTC One V': ['HTC', 'One V'],
'HTC T320e': ['HTC', 'One V'],
'HTC One X': ['HTC', 'One X'],
'HTC S720e': ['HTC', 'One X'],
'HTC Endeavour-LS': ['HTC', 'One X'],
'HTC One XL': ['HTC', 'One XL'],
'HTC X710a': ['HTC', 'Raider 4G'],
'HTC Raider': ['HTC', 'Raider 4G'],
'HTC Raider X710e': ['HTC', 'Raider 4G'],
'HTC Raider X710s': ['HTC', 'Raider 4G'],
'HTC Raider 4G X710e': ['HTC', 'Raider 4G'],
'HTC PH39100': ['HTC', 'Raider 4G'],
'HTC Holiday': ['HTC', 'Raider 4G'],
'HTC Velocity 4G X710s': ['HTC', 'Raider 4G'],
'HTC Rezound': ['HTC', 'Rezound'],
'HTC Rhyme S510b': ['HTC', 'Rhyme'],
'HTC S510b': ['HTC', 'Rhyme'],
'HTC Bliss': ['HTC', 'Rhyme'],
'HTC Bliss S510b': ['HTC', 'Rhyme'],
'HTC Salsa C510e': ['HTC', 'Salsa'],
'HTC C510e': ['HTC', 'Salsa'],
'HTC Z710a': ['HTC', 'Sensation'],
'HTC Z710e': ['HTC', 'Sensation'],
'HTC Z710t': ['HTC', 'Sensation'],
'HTC Sensation': ['HTC', 'Sensation'],
'HTC Sensation Z710': ['HTC', 'Sensation'],
'HTC Sensation Z710a': ['HTC', 'Sensation'],
'HTC Sensation Z710e': ['HTC', 'Sensation'],
'HTC Sensation Z710E': ['HTC', 'Sensation'],
'HTC Sensation Z710e For AT&T': ['HTC', 'Sensation'],
'HTC Sensation Z710e with Beats Audio': ['HTC', 'Sensation'],
'HTC Sensation with Beats Audio Z710e': ['HTC', 'Sensation'],
'HTC Sensation with Beats Audio': ['HTC', 'Sensation'],
'HTC Sensation Taste': ['HTC', 'Sensation'],
'HTC Pyramid': ['HTC', 'Sensation'],
'HTC Pyramid Z710a': ['HTC', 'Sensation'],
'HTC Pyramid Z710e': ['HTC', 'Sensation'],
'HTC Sensation 4G': ['HTC', 'Sensation'],
'HTC Sensation 4G with Beats Audio': ['HTC', 'Sensation'],
'HTC Sensation G14': ['HTC', 'Sensation'],
'HTC Sensation G14 for AT&T': ['HTC', 'Sensation'],
'HTC G14 sensation': ['HTC', 'Sensation'],
'HTC Z715e': ['HTC', 'Sensation XE'],
'HTC Sensation Z715e': ['HTC', 'Sensation XE'],
'HTC SensationXE Beats': ['HTC', 'Sensation XE'],
'HTC SensationXE Beats Z715a': ['HTC', 'Sensation XE'],
'HTC SensationXE Beats Z715e': ['HTC', 'Sensation XE'],
'HTC Sensation XE': ['HTC', 'Sensation XE'],
'HTC Sensation XE Z715e': ['HTC', 'Sensation XE'],
'HTC SensationXE Z715e': ['HTC', 'Sensation XE'],
'HTC Sensation XE Beats': ['HTC', 'Sensation XE'],
'HTC SensationXE with Beats Audio': ['HTC', 'Sensation XE'],
'HTC Sensation XE with Beats Audio': ['HTC', 'Sensation XE'],
'HTC Sensation XE with Beats Audio Z715a': ['HTC', 'Sensation XE'],
'HTC Sensation Juredroid XE Beats Audio': ['HTC', 'Sensation XE'],
'HTC Sensation XE with Beats Audio Z715e': ['HTC', 'Sensation XE'],
'HTC Sensation XE With Beats Audio Z715e': ['HTC', 'Sensation XE'],
'HTC Sensation 4G XE with Beats Audio': ['HTC', 'Sensation XE'],
'HTC Sensation with Beats Audio Z715e': ['HTC', 'Sensation XE'],
'HTC X315E': ['HTC', 'Sensation XL'],
'HTC SensationXL Beats X315b': ['HTC', 'Sensation XL'],
'HTC SensationXL Beats X315e': ['HTC', 'Sensation XL'],
'HTC Sensation XL with Beats Audio X315b': ['HTC', 'Sensation XL'],
'HTC Sensation XL with Beats Audio X315e': ['HTC', 'Sensation XL'],
'HTC Runnymede': ['HTC', 'Sensation XL'],
'HTC G21': ['HTC', 'Sensation XL'],
'HTC PH06130': ['HTC', 'Status'],
'HTC Status': ['HTC', 'Status'],
'HTC Tattoo': ['HTC', 'Tattoo'],
'HTC TATTOO A3288': ['HTC', 'Tattoo'],
'HTC click': ['HTC', 'Tattoo'],
'HTC X310e': ['HTC', 'Titan'],
'HTC T7373': ['HTC', 'Touch Pro II'],
'HTC ThunderBolt': ['HTC', 'ThunderBolt'],
'HTC Mecha': ['HTC', 'ThunderBolt'],
'HTC Velocity 4G': ['HTC', 'Velocity 4G'],
'HTC Wildfire': ['HTC', 'Wildfire'],
'HTC Wildfire A3333': ['HTC', 'Wildfire'],
'HTC A3366': ['HTC', 'Wildfire'],
'HTC A3380': ['HTC', 'Wildfire'],
'HTC WildfireS': ['HTC', 'Wildfire S'],
'HTC Wildfire S': ['HTC', 'Wildfire S'],
'Htc Wildfire s': ['HTC', 'Wildfire S'],
'HTC Wildfire S A510e': ['HTC', 'Wildfire S'],
'HTC Wildfire S A510b': ['HTC', 'Wildfire S'],
'HTC WildfireS A510e': ['HTC', 'Wildfire S'],
'HTC WildfireS A510b': ['HTC', 'Wildfire S'],
'htc wildfire s a510e': ['HTC', 'Wildfire S'],
'HTC Wildfire S A515c': ['HTC', 'Wildfire S'],
'HTC A510a': ['HTC', 'Wildfire S'],
'HTC A510e': ['HTC', 'Wildfire S'],
'HTC A510c': ['HTC', 'Wildfire S'],
'HTCX06HT': ['HTC', 'Desire'],
'HTC A6390': ['HTC', 'A6390'],
'HTC A8180': ['HTC', 'A8180'],
'HTC PG762': ['HTC', 'PG762'],
'HTC S715e': ['HTC', 'S715e'],
'HTC S720t': ['HTC', 'S720t'],
'HTC Z510d': ['HTC', 'Z510d'],
'HTC Z560e': ['HTC', 'Z560e'],
'HTC VLE U': ['HTC', 'One S'],
'HTC VLE#U': ['HTC', 'One S'],
'HTC VIE U': ['HTC', 'One S'],
'HTC EVA UL': ['HTC', 'One V'],
'HTC ENR U': ['HTC', 'One X'],
'ENR U': ['HTC', 'One X'],
'EndeavorU': ['HTC', 'One X'],
'Liberty': ['HTC', 'Aria'],
'Desire': ['HTC', 'Desire'],
'Desire A8181': ['HTC', 'Desire'],
'desire hd': ['HTC', 'Desire HD'],
'Desire HD': ['HTC', 'Desire HD'],
'Dedire HD': ['HTC', 'Desire HD'],
'Desire Hd (ace)': ['HTC', 'Desire HD'],
'Desire S': ['HTC', 'Desire S'],
'DesireS': ['HTC', 'Desire S'],
'Desire Saga': ['HTC', 'Desire S'],
'Desire Z': ['HTC', 'Desire Z'],
'Dream': ['HTC', 'Dream'],
'Droid Incredible': ['HTC', 'Droid Incredible'],
'EVO': ['HTC', 'EVO'],
'Evo HD2': ['HTC', 'EVO HD'],
'Evo 3D Beats X515m': ['HTC', 'EVO 3D'],
'Evo 3D GSM': ['HTC', 'EVO 3D'],
'EVO 3D X515m': ['HTC', 'EVO 3D'],
'EVO3D X515m': ['HTC', 'EVO 3D'],
'Evo 4G': ['HTC', 'EVO 4G'],
'EVO 4G': ['HTC', 'EVO 4G'],
'photon': ['HTC', 'HD mini'],
'GinDream/GinMagic': ['HTC', 'Dream'],
'HD2': ['HTC', 'HD2'],
'HD7 Pro': ['HTC', 'HD7 Pro'],
'Hero': ['HTC', 'Hero'],
'HERO CDMA': ['HTC', 'Hero'],
'HERO200': ['HTC', 'Hero 200'],
'Incredible': ['HTC', 'Droid Incredible'],
'Incredible 2': ['HTC', 'Droid Incredible 2'],
'Incredible S': ['HTC', 'Incredible S'],
'IncredibleS S710e': ['HTC', 'Incredible S'],
'IncredibleS': ['HTC', 'Incredible S'],
'Inspire HD': ['HTC', 'Inspire 4G'],
'Inspire 4G': ['HTC', 'Inspire 4G'],
'Legend': ['HTC', 'Legend'],
'NexusHD2': ['HTC', 'HD2'],
'Nexus HD2': ['HTC', 'HD2'],
'Docomo HT-03A': ['HTC', 'Magic'],
'MIUI.us Sensation 4G': ['HTC', 'Sensation 4G'],
'SiRF Dream': ['HTC', 'Dream'],
'Pyramid': ['HTC', 'Sensation'],
'Sensation': ['HTC', 'Sensation'],
'Sensation Z710e': ['HTC', 'Sensation'],
'Sensation 4G': ['HTC', 'Sensation'],
'Sensation 4g': ['HTC', 'Sensation'],
'TripNiCE Pyramid': ['HTC', 'Sensation'],
'SensationXE Beats Z715e': ['HTC', 'Sensation XE'],
'SensationXL Beats X315e': ['HTC', 'Sensation XL'],
'Click': ['HTC', 'Tattoo'],
'Wildfire': ['HTC', 'Wildfire'],
'Wildfire S': ['HTC', 'Wildfire S'],
'Wildfire S A510e': ['HTC', 'Wildfire S'],
'Sprint APX515CKT': ['HTC', 'EVO 3D'],
'Sprint APA9292KT': ['HTC', 'EVO 4G'],
'Sprint APA7373KT': ['HTC', 'EVO Shift 4G'],
'Sprint APC715CKT': ['HTC', 'EVO Design 4G'],
'A3380': ['HTC', 'Wildfire'],
'A6277': ['HTC', 'Hero'],
'a7272': ['HTC', 'Desire Z'],
'A7272+(HTC DesireZ)': ['HTC', 'Desire Z'],
'S31HT': ['HTC', 'Aria'],
'S710d': ['HTC', 'Droid Incredible 2'],
'S710D': ['HTC', 'Droid Incredible 2'],
'X06HT': ['HTC', 'Desire'],
'001HT': ['HTC', 'Desire HD'],
'X325a': ['HTC', 'One X'],
'Z520m': ['HTC', 'One S'],
'Z710': ['HTC', 'Sensation'],
'Z710e': ['HTC', 'Sensation'],
'T9199h': ['HTC', 'T9199h'],
'HTC S610d': ['HTC', 'S610d'],
'ADR6200': ['HTC', 'Droid Eris'],
'ADR6300': ['HTC', 'Droid Incredible'],
'ADR6325VW': ['HTC', 'Merge'],
'ADR6330VW': ['HTC', 'Rhyme'],
'ADR6350': ['HTC', 'Droid Incredible 2'],
'ADR6400L': ['HTC', 'Thunderbolt 4G'],
'ADR6400L 4G': ['HTC', 'Thunderbolt 4G'],
'ADR6410LVW 4G': ['HTC', 'Fireball'],
'ADR6425LVW': ['HTC', 'Rezound'],
'ADR6425LVW 4G': ['HTC', 'Rezound'],
'Coquettish Red': ['HTC', 'Rezound'],
'PB99400': ['HTC', 'Droid Incredible'],
'pcdadr6350': ['HTC', 'Droid Incredible 2'],
'PC36100': ['HTC', 'EVO 4G'],
'PG06100': ['HTC', 'EVO Shift 4G'],
'PG41200': ['HTC', 'EVO View 4G', 'tablet'],
'PG86100': ['HTC', 'EVO 3D'],
'PG8610000': ['HTC', 'EVO 3D'],
'PH44100': ['HTC', 'EVO Design 4G'],
'PJ83100': ['HTC', 'One X'],
'ISW11HT': ['HTC', 'EVO 4G'],
'ISW12HT': ['HTC', 'EVO 3D'],
'ISW13HT': ['HTC', 'J'],
'USCCADR6275US Carrier ID 45': ['HTC', 'Desire'],
'USCCADR6285US': ['HTC', 'Hero S'],
'USCCADR6325US Carrier ID 45': ['HTC', 'Merge'],
'MediaPad': ['Huawei', 'MediaPad', 'tablet'],
'Huawei MediaPad': ['Huawei', 'MediaPad', 'tablet'],
'HUAWEI MediaPad': ['Huawei', 'MediaPad', 'tablet'],
'Huawei S7-312u': ['Huawei', 'MediaPad', 'tablet'],
'MediaPad 10 FHD': ['Huawei', 'MediaPad', 'tablet'],
'Huawei C8500': ['Huawei', 'C8500'],
'Huawei C8500S': ['Huawei', 'C8500'],
'Huawei C8600': ['Huawei', 'C8600'],
'Huawei C8650': ['Huawei', 'C8650'],
'Huawei C8650+': ['Huawei', 'C8650'],
'Huawei C8800': ['Huawei', 'IDEOS X5'],
'Huawei C8810': ['Huawei', 'Ascend G300'],
'Huawei C8812': ['Huawei', 'Ascend C8812'],
'Huawei C8812E': ['Huawei', 'Ascend C8812'],
'Huawei C8825D': ['Huawei', 'Ascend C8825D'],
'Huawei C8860E': ['Huawei', 'Honor'],
'Huawei M835': ['Huawei', 'M835'],
'Huawei M860': ['Huawei', 'Ascend'],
'Huawei M921': ['Huawei', 'M921'],
'Huawei S8520': ['Huawei', 'S8520'],
'Huawei S8600': ['Huawei', 'S8600'],
'Huawei T8300': ['Huawei', 'T8300'],
'Huawei T8600': ['Huawei', 'T8600'],
'Huawei T8830': ['Huawei', 'T8830'],
'T8830': ['Huawei', 'T8830'],
'T8620': ['Huawei', 'T8620'],
'Huawei T8828': ['Huawei', 'T8828'],
'Huawei U8220': ['Huawei', 'U8220'],
'Huawei u8500': ['Huawei', 'IDEOS X2'],
'Huawei U8815': ['Huawei', 'Ascend G300'],
'Huawei U8825D': ['Huawei', 'Ascend G330D'],
'Huawei U8850': ['Huawei', 'Vision'],
'Huawei U8652': ['Huawei', 'Sonic'],
'Huawei U8800-51': ['Huawei', 'IDEOS X5'],
'Huawei U8818': ['Huawei', 'Ascend G300'],
'Huawei U9000': ['Huawei', 'Ascend X'],
'Huawei IDEOS U8500': ['Huawei', 'IDEOS X2'],
'Huawei IDEOS U8650': ['Huawei', 'Sonic'],
'Huawei IDEOS X3': ['Huawei', 'IDEOS X3'],
'Huawei Ideos X5': ['Huawei', 'IDEOS X5'],
'Huawei Ideos X5 1.12.9(ret4rt)': ['Huawei', 'IDEOS X5'],
'Huawei SONIC': ['Huawei', 'Sonic'],
'Huawei 8100-9': ['Huawei', 'U8100'],
'FUSIONideos': ['Huawei', 'IDEOS'],
'Gnappo Ideos': ['Huawei', 'IDEOS'],
'Ideos': ['Huawei', 'IDEOS'],
'IDEOS X5': ['Huawei', 'IDEOS X5'],
'Ideos S7': ['Huawei', 'IDEOS S7', 'tablet'],
'IDEOS S7': ['Huawei', 'IDEOS S7', 'tablet'],
'IDEOS S7 Slim': ['Huawei', 'IDEOS S7', 'tablet'],
'Huawei S7': ['Huawei', 'IDEOS S7', 'tablet'],
'SONIC': ['Huawei', 'Sonic'],
'Kyivstar Aqua': ['Huawei', 'Sonic'],
'Lucky Ultra Sonic U8650': ['Huawei', 'Sonic'],
'Turkcell T20': ['Huawei', 'Sonic'],
'MTC 950': ['Huawei', 'U8160'],
'MTC 955': ['Huawei', 'Sonic'],
'MTC Evo': ['Huawei', 'C8500'],
'MTC Android': ['Huawei', 'U8110'],
'S31HW': ['Huawei', 'Pocket WiFi S'],
'S41HW': ['Huawei', 'Pocket WiFi S II'],
'007HW': ['Huawei', 'Vision'],
'UM840': ['Huawei', 'Evolution'],
'M860': ['Huawei', 'Ascend'],
'M865': ['Huawei', 'Ascend II'],
'M886': ['Huawei', 'Glory'],
'C8150': ['Huawei', 'IDEOS'],
'c8500': ['Huawei', 'C8500'],
'C8500': ['Huawei', 'C8500'],
'C8500S': ['Huawei', 'C8500'],
'C8600': ['Huawei', 'C8600'],
'c8650': ['Huawei', 'C8650'],
'C8650': ['Huawei', 'C8650'],
'c8800': ['Huawei', 'C8800'],
'C8800': ['Huawei', 'C8800'],
'c8810': ['Huawei', 'Ascend G300C'],
'C8812': ['Huawei', 'Ascend C8812'],
'S8600': ['Huawei', 'S8600'],
'U8100': ['Huawei', 'U8100'],
'U8110': ['Huawei', 'U8110'],
'u8120': ['Huawei', 'U8120'],
'U8120': ['Huawei', 'U8120'],
'U8180': ['Huawei', 'IDEOS X1'],
'U8220': ['Huawei', 'Pulse'],
'U8300': ['Huawei', 'U8300'],
'U8350': ['Huawei', 'Boulder'],
'U8150': ['Huawei', 'IDEOS'],
'U8160': ['Huawei', 'U8160'],
'U8500': ['Huawei', 'IDEOS X2'],
'U8500 HiQQ': ['Huawei', 'U8500 HiQQ Edition'],
'U8510': ['Huawei', 'IDEOS X3'],
'u8650': ['Huawei', 'Sonic'],
'U8650': ['Huawei', 'Sonic'],
'U8650-1': ['Huawei', 'Sonic'],
'U8660': ['Huawei', 'Sonic'],
'u8800': ['Huawei', 'IDEOS X5'],
'U8800': ['Huawei', 'IDEOS X5'],
'U8800+': ['Huawei', 'IDEOS X5'],
'U8800X': ['Huawei', 'IDEOS X5'],
'U8800pro': ['Huawei', 'IDEOS X5 Pro'],
'U8800PRO': ['Huawei', 'IDEOS X5 Pro'],
'U8800Pro': ['Huawei', 'IDEOS X5 Pro'],
'u8800pro': ['Huawei', 'IDEOS X5 Pro'],
'U8800 Pro': ['Huawei', 'IDEOS X5 Pro'],
'U8818': ['Huawei', 'Ascend G300'],
'U8850': ['Huawei', 'Vision'],
'u8860': ['Huawei', 'Honor'],
'U8860': ['Huawei', 'Honor'],
'U9000': ['Huawei', 'Ascend X'],
'U9200': ['Huawei', 'Ascend P1'],
'U9200-1': ['Huawei', 'Ascend P1'],
'U9500': ['Huawei', 'Ascend D1'],
'U9501L': ['Huawei', 'Ascend D LTE'],
'U9510': ['Huawei', 'Ascend D quad'],
'U9510E': ['Huawei', 'Ascend D quad'],
'Comet': ['Huawei', 'Comet'],
'GS02': ['Huawei', 'Honor'],
'GS03': ['Huawei', 'Ascend P1'],
'DroniX-0.5': ['Huawei', 'U8180'],
'MTS-SP101': ['Huawei', 'C8511'],
'TSP21': ['Huawei', 'U8110'],
'HYUNDAI H6': ['Hyundai', 'Storm H6'],
'iBall Slide i7011': ['iBall', 'Slide i7011'],
'NetTAB RUNE': ['IconBit', 'NetTab Rune', 'tablet'],
'D70W': ['Icoo', 'D70W', 'tablet'],
'D80': ['Icoo', 'D80', 'tablet'],
'INFOBAR A01': ['iida', 'INFOBAR A01'],
'M009F': ['Infotmic', 'M009F'],
'AZ210A': ['Intel', 'AZ210A'],
'AZ210B': ['Intel', 'AZ210B'],
'AZ510': ['Intel', 'AZ510'],
'greenridge': ['Intel', 'Green Ridge', 'tablet'],
'INQ Cloud Touch': ['INQ', 'Cloud Touch'],
'ILT-MX100': ['iRiver', 'Tab', 'tablet'],
'IVIO_DE38': ['Ivio', 'DE38'],
'JY-G2': ['Jiayu', 'G2'],
'JXD S601WIFI': ['JXD', 'S601 WIFI', 'media'],
'A2': ['KakaTech', 'A2'],
'D91': ['KK', 'D91', 'tablet'],
'K080': ['Kobo', 'K080', 'ereader'],
'A106': ['koobee', 'A160'],
'KPT A9': ['KPT', 'A9'],
'EV-S100': ['Kttech', 'Take EV-S100'],
'KM-S120': ['Kttech', 'Take 2 KM-S120'],
'KM-S200': ['TAKE', 'Janus KM-S200'],
'KM-S220': ['Kttech', 'Take Tachy KM-S220'],
'Kyobo mirasol eReader': ['Kyobo', 'eReader', 'ereader'],
'ISW11K': ['Kyocera', 'Digno'],
'JC-KSP8000': ['Kyocera', 'Echo'],
'KSP8000': ['Kyocera', 'Echo'],
'Zio': ['Kyocera', 'Zio'],
'C5155': ['Kyocera', 'C5155'],
'C5170': ['Kyocera', 'C5170'],
'M9300': ['Kyocera', 'M9300'],
'E800': ['K-Touch', 'E800'],
'W606': ['K-Touch', 'W606'],
'K-Touch T619': ['K-Touch', 'T619'],
'K-Touch W619': ['K-Touch', 'W619'],
'K-Touch W650': ['K-Touch', 'W650'],
'W700': ['K-Touch', 'W700'],
'W800': ['K-Touch', 'W800'],
'W806': ['K-Touch', 'W806'],
'W808': ['K-Touch', 'W808'],
'W810': ['K-Touch', 'W810'],
'X900': ['Lava', 'XOLO X900'],
'Lenovo A798t': ['Lenovo', 'A798t'],
'LENOVO-Lenovo-A288t': ['Lenovo', 'LePhone A288'],
'ThinkPad Tablet': ['Lenovo', 'ThinkPad Tablet', 'tablet'],
'K1': ['Lenovo', 'IdeaPad K1', 'tablet'],
'Ideapad S10-3T': ['Lenovo', 'IdeaPad S10-3T', 'tablet'],
'S2005A-H': ['Lenovo', 'S2005A'],
'IdeaTab S2007A-D': ['Lenovo', 'IdeaTab S2007A', 'tablet'],
'IdeaTabV2007A': ['Lenovo', 'IdeaTab V2007A', 'tablet'],
'IdeaTabV2007A-D-I': ['Lenovo', 'IdeaTab V2007A', 'tablet'],
'IdeaTabV2010A': ['Lenovo', 'IdeaTab V2010A', 'tablet'],
'IdeaTab A2107A-H': ['Lenovo', 'IdeaTab V2107A', 'tablet'],
'A1 07': ['Lenovo', 'LePad', 'tablet'],
'lepad 001b': ['Lenovo', 'LePad', 'tablet'],
'lepad 001n': ['Lenovo', 'LePad', 'tablet'],
'3GC101': ['Lenovo', 'LePhone 3GC101'],
'Lenovo 3GC101': ['Lenovo', 'LePhone 3GC101'],
'3GW100': ['Lenovo', 'LePhone 3GW100'],
'Lenovo 3GW100': ['Lenovo', 'LePhone 3GW100'],
'3GW101': ['Lenovo', 'LePhone 3GW101'],
'Lenovo 3GW101': ['Lenovo', 'LePhone 3GW101'],
'Lephone 3GW101': ['Lenovo', 'LePhone 3GW101'],
'Lenovo A1-32AB0': ['Lenovo', 'LePhone A1-32AB0'],
'Lenovo S1-37AH0': ['Lenovo', 'LePhone S1-37AH0'],
'S1 37AHO': ['Lenovo', 'LePhone S1-37AH0'],
'Lenovo S2-38AH0': ['Lenovo', 'LePhone S2-38AH0'],
'Lenovo S2-38AT0': ['Lenovo', 'LePhone S2-38AT0'],
'Lenovo A288t': ['Lenovo', 'LePhone A288'],
'Lenovo A366t': ['Lenovo', 'LePhone A366'],
'Lenovo A390e': ['Lenovo', 'LePhone A390'],
'Lenovo A500': ['Lenovo', 'LePhone A500'],
'Lenovo A520': ['Lenovo', 'LePhone A520'],
'Lenovo A560e': ['Lenovo', 'A560'],
'Lenovo A668t': ['Lenovo', 'LePhone A668'],
'Lenovo A698t': ['Lenovo', 'LePhone A698'],
'Lenovo A750': ['Lenovo', 'LePhone A750'],
'Lenovo A780': ['Lenovo', 'LePhone A780'],
'Lenovo A789': ['Lenovo', 'LePhone A789'],
'Lenovo A790e': ['Lenovo', 'LePhone A790'],
'Lenovo P70': ['Lenovo', 'LePhone P70'],
'Lenovo P700': ['Lenovo', 'LePhone P700'],
'Lenovo S850e': ['Lenovo', 'S850'],
'Lenovo S880': ['Lenovo', 'S880'],
'Lenovo K860': ['Lenovo', 'K860'],
'A30t': ['Lenovo', 'A30t'],
'Lenovo A60': ['Lenovo', 'A60'],
'Lenovo A65': ['Lenovo', 'A65'],
'Lenovo A66t': ['Lenovo', 'A66t'],
'Lenovo A68e': ['Lenovo', 'A68e'],
'Lenovo K800': ['Lenovo', 'K800'],
'IDEA TV T100': ['Lenovo', 'IDEA TV', 'television'],
'IDEA TV K91': ['Lenovo', 'IDEA TV', 'television'],
'TC970': ['Le Pan', 'TC970', 'tablet'],
'LePanII': ['Le Pan', 'II', 'tablet'],
'LG-C555': ['LG', 'Optimus Chat'],
'LG-C555-parrot': ['LG', 'Optimus Chat'],
'LG-C660h': ['LG', 'Optimus Pro'],
'LG-C729': ['LG', 'DoublePlay'],
'LG-C800G': ['LG', 'Eclypse'],
'LG-CX670': ['LG', 'Optimus 3G'],
'LG-E400': ['LG', 'Optimus L3'],
'LG-E400f': ['LG', 'Optimus L3'],
'LG-E510': ['LG', 'Optimus Hub'],
'LG-E510f': ['LG', 'Optimus Hub'],
'LG-E510g': ['LG', 'Optimus Hub'],
'LG-E610': ['LG', 'Optimus L5'],
'LG-E612': ['LG', 'Optimus L5'],
'LG-E612g': ['LG', 'Optimus L5'],
'LG-E615F': ['LG', 'E615'],
'LG-E617G': ['LG', 'E617'],
'LG-E720': ['LG', 'Optimus Chic'],
'LG-E720b': ['LG', 'Optimus Chic'],
'LG-E730': ['LG', 'Optimus Sol'],
'LG-E970': ['LG', 'Shine'],
'LG-F100L': ['LG', 'Optimus Vu'],
'LG-F100S': ['LG', 'Optimus Vu'],
'LG-F120K': ['LG', 'Optimus LTE Tag'],
'LG-F120L': ['LG', 'Optimus LTE Tag'],
'LG-F120S': ['LG', 'Optimus LTE Tag'],
'LG-F160K': ['LG', 'Optimus LTE II'],
'LG-F160L': ['LG', 'Optimus LTE II'],
'LG-F160S': ['LG', 'Optimus LTE II'],
'LG-F180L': ['LG', 'F180L'],
'LG-GT540': ['LG', 'Optimus'],
'LG-GT540f': ['LG', 'Optimus'],
'LG-GT540 Swift': ['LG', 'Optimus'],
'LG-GW620': ['LG', 'GW620'],
'LG-KH5200': ['LG', 'Andro-1'],
'LG-KU3700': ['LG', 'Optimus One'],
'LG-KU5400': ['LG', 'PRADA 3.0'],
'LG-KU5900': ['LG', 'Optimus Black'],
'LG-L40G': ['LG', 'L40G'],
'LG-LG855': ['LG', 'Marquee'],
'LG-LS670': ['LG', 'Optimus S'],
'LG-LS696': ['LG', 'Optimus Elite'],
'LG-LS840': ['LG', 'Viper 4G'],
'LG-LS855': ['LG', 'Marquee'],
'LG-LS860': ['LG', '\'Cayenne\''],
'LG-LS970': ['LG', '\'Eclipse\''],
'LG-LU3000': ['LG', 'Optimus Mach'],
'LG-LU3100': ['LG', 'Optimus Chic'],
'LG-LU3700': ['LG', 'Optimus One'],
'LG-LU5400': ['LG', 'PRADA 3.0'],
'LG-LU6200': ['LG', 'Optimus Q2'],
'LG-lu6200': ['LG', 'Optimus Q2'],
'LG-LU6500': ['LG', 'Optimus Note'],
'LG-LU6800': ['LG', 'Optimus Big'],
'LG-LU8300': ['LG', 'Optimus Pad LTE'],
'LG-LW690': ['LG', 'Optimus C'],
'LG-LW770': ['LG', 'LW770'],
'LG-MS690': ['LG', 'Optimus M'],
'LG-MS770': ['LG', 'MS770'],
'LG-MS840': ['LG', 'Connect 4G'],
'LG-MS910': ['LG', 'Esteem'],
'LG-MS695': ['LG', 'Optimus M+'],
'LG P350': ['LG', 'Optimus Me'],
'LG-P350': ['LG', 'Optimus Me'],
'LG-P350f': ['LG', 'Optimus Me'],
'LG-P350g': ['LG', 'Optimus Me'],
'LG-P355': ['LG', 'P355'],
'LG-P500': ['LG', 'Optimus One'],
'LG-P500h': ['LG', 'Optimus One'],
'LG-P500h-parrot': ['LG', 'Optimus One'],
'LG-P503': ['LG', 'Optimus One'],
'LG-P504': ['LG', 'Optimus One'],
'LG-P505': ['LG', 'Phoenix'],
'LG-P505R': ['LG', 'Phoenix'],
'LG-P506': ['LG', 'Thrive'],
'LG-P509': ['LG', 'Optimus T'],
'LG-P690': ['LG', 'Optimus Net'],
'LG-P693': ['LG', 'P693'],
'LG-P698': ['LG', 'Optimus Net'],
'LG-P698f': ['LG', 'Optimus Net'],
'LG-P700': ['LG', 'Optimus L7'],
'LG-P705': ['LG', 'Optimus L7'],
'LG-P705f': ['LG', 'Optimus L7'],
'LG-P705g': ['LG', 'Optimus L7'],
'LG-P708g': ['LG', 'P708'],
'LG-P720': ['LG', 'Optimus Chic'],
'LG-P720h': ['LG', 'Optimus Chic'],
'LG-P725': ['LG', 'Optimus 3D Max'],
'LG-P760': ['LG', 'P760'],
'LG-P769': ['LG', 'P769'],
'LG-P860': ['LG', 'P860'],
'LG-P870': ['LG', 'P870'],
'LG-P870F': ['LG', 'P870'],
'LG-P880': ['LG', 'X3'],
'LG-P880g': ['LG', 'X3'],
'LG-P895': ['LG', 'P895'],
'LG-P920': ['LG', 'Optimus 3D'],
'LG-P920h': ['LG', 'Optimus 3D'],
'LG-P925': ['LG', 'Thrill'],
'LG-P925g': ['LG', 'Thrill'],
'LG-P930': ['LG', 'Nitro HD'],
'LG-P936': ['LG', 'Optimus LTE'],
'LG-P940': ['LG', 'PRADA 3.0'],
'LG-P970': ['LG', 'Optimus Black'],
'LG-P970h': ['LG', 'Optimus Black'],
'LG-P990': ['LG', 'Optimus 2X Speed'],
'LG-P990h': ['LG', 'Optimus 2X Speed'],
'LG-P990hN': ['LG', 'Optimus 2X Speed'],
'LG-P990H': ['LG', 'Optimus 2X Speed'],
'LG-P993': ['LG', 'Optimus 2X'],
'LG-SU540': ['LG', 'PRADA 3.0'],
'LG-SU640': ['LG', 'Optimus LTE'],
'LG-SU660': ['LG', 'Optimus 2X'],
'LG-SU760': ['LG', 'Optimus 3D'],
'LG-SU760-Kust': ['LG', 'Optimus 3D'],
'LG-SU870': ['LG', 'Optimus 3D Cube'],
'LG-SU880': ['LG', 'Optimus EX'],
'LG-US670': ['LG', 'Optimus U'],
'LG-US730': ['LG', 'US730'],
'LG-V900': ['LG', 'Optimus Pad', 'tablet'],
'LG-V905R': ['LG', 'Optimus G-Slate', 'tablet'],
'LG-V909': ['LG', 'Optimus G-Slate', 'tablet'],
'LG-VM670': ['LG', 'Optimus V'],
'LG-VM696': ['LG', 'Optimus Elite'],
'LG-VM701': ['LG', 'Optimus Slider'],
'LG-VS660': ['LG', 'Vortex'],
'LG-VS700': ['LG', 'Enlighten'],
'LG-VS740': ['LG', 'Ally'],
'LG-VS840': ['LG', 'Connect 4G'],
'LG-VS910': ['LG', 'Revolution'],
'lgp-970': ['LG', 'Optimus Black'],
'E900': ['LG', 'Optimus 7'],
'GT540': ['LG', 'Optimus GT540'],
'GW620': ['LG', 'GW620'],
'KU9500': ['LG', 'Optimus Z'],
'LGC660': ['LG', 'Optimus Pro'],
'LGL45C': ['LG', 'Optimus Net'],
'LGL55C': ['LG', 'Optimus Q'],
'LU2300': ['LG', 'Optimus Q'],
'LS670': ['LG', 'Optimus S'],
'P940': ['LG', 'PRADA 3.0'],
'P990': ['LG', 'Optimus 2X Speed'],
'USCC-US730': ['LG', 'US730'],
'USCC-US760': ['LG', 'Genesis'],
'VM670': ['LG', 'Optimus V'],
'VS840 4G': ['LG', 'Connect 4G'],
'VS900-4G': ['LG', 'VS900'],
'VS910 4G': ['LG', 'Revolution 4G'],
'VS920 4G': ['LG', 'Spectrum 4G'],
'VS930 4G': ['LG', 'VS930'],
'VS950 4G': ['LG', 'VS950'],
'L-01D': ['LG', 'Optimus LTE'],
'L-02D': ['LG', 'PRADA phone'],
'L-04C': ['LG', 'Optimus Chat'],
'L-05D': ['LG', 'Optimus it'],
'L-06C': ['LG', 'Optimus Pad', 'tablet'],
'L-06D': ['LG', 'Optimus Vu'],
'L-07C': ['LG', 'Optimus Bright'],
'LG-Eve': ['LG', 'Eve'],
'LG-Optimus One P500': ['LG', 'Optimus One'],
'LG-Optimus 2X': ['LG', 'Optimus 2X'],
'LG-GT540 Optimus': ['LG', 'Optimus'],
'LG-Optimus Black': ['LG', 'Optimus Black'],
'Ally': ['LG', 'Ally'],
'Optimus': ['LG', 'Optimus'],
'Optimus Me': ['LG', 'Optimus Me'],
'optimus me p350': ['LG', 'Optimus Me'],
'Optimus 2X': ['LG', 'Optimus 2X'],
'Optimus 2x': ['LG', 'Optimus 2X'],
'IS11LG': ['LG', 'Optimus X'],
'Vortex': ['LG', 'Vortex'],
'LDK-ICK v1.4': ['LG', 'Esteem'],
'T6': ['Malata', 'Zpad T6', 'tablet'],
'Malata SMBA1002': ['Malata', 'Tablet SMB-A1002', 'tablet'],
'STM712HCZ': ['Mediacom', 'SmartPad 712c', 'tablet'],
'STM803HC': ['Mediacom', 'SmartPad 810c', 'tablet'],
'Mediacom 810C': ['Mediacom', 'SmartPad 810c', 'tablet'],
'Smartpad810c': ['Mediacom', 'SmartPad 810c', 'tablet'],
'SmartPad810c': ['Mediacom', 'SmartPad 810c', 'tablet'],
'MP810C': ['Mediacom', 'SmartPad 810c', 'tablet'],
'MP907C': ['Mediacom', 'SmartPad 907c', 'tablet'],
'MTK6516': ['Mediatek', 'MTK6516'],
'LIFETAB S9512': ['Medion', 'Lifetab S9512', 'tablet'],
'LIFETAB P9514': ['Medion', 'Lifetab P9514', 'tablet'],
'MD LIFETAB P9516': ['Medion', 'Lifetab P9516', 'tablet'],
'MEDION LIFE P4310': ['Medion', 'Life P4310'],
'M8': ['Meizu', 'M8'],
'M9': ['Meizu', 'M9'],
'M040': ['Meizu', 'M040'],
'M9-unlocked': ['Meizu', 'M9'],
'meizu m9': ['Meizu', 'M9'],
'MEIZU M9': ['Meizu', 'M9'],
'MEIZU MX': ['Meizu', 'MX'],
'M030': ['Meizu', 'MX M030'],
'M031': ['Meizu', 'MX M031'],
'M032': ['Meizu', 'MX M032'],
'Slidepad': ['Memup', 'Slidepad', 'tablet'],
'A45': ['Micromax', 'A45 Punk'],
'Micromax A50': ['Micromax', 'A50 Ninja'],
'Micromax A60': ['Micromax', 'Andro A60'],
'Micromax A70': ['Micromax', 'Andro A70'],
'P300(Funbook)': ['Micromax', 'Funbook P300', 'tablet'],
'AT735': ['Moinstone', 'AT735', 'tablet'],
'A853': ['Motorola', 'Milestone'],
'A953': ['Motorola', 'Milestone 2'],
'A1680': ['Motorola', 'MOTO A1680'],
'ET1': ['Motorola', 'ET1 Enterprise Tablet', 'tablet'],
'MB200': ['Motorola', 'CLIQ'],
'MB300': ['Motorola', 'BACKFLIP'],
'MB501': ['Motorola', 'CLIQ XT'],
'MB502': ['Motorola', 'CHARM'],
'MB511': ['Motorola', 'FLIPOUT'],
'MB520': ['Motorola', 'BRAVO'],
'MB525': ['Motorola', 'DEFY'],
'MB525+': ['Motorola', 'DEFY'],
'MB525 for me': ['Motorola', 'DEFY'],
'MB526': ['Motorola', 'DEFY+'],
'MB611': ['Motorola', 'CLIQ 2'],
'MB612': ['Motorola', 'XPRT'],
'MB632': ['Motorola', 'PRO+'],
'MB855': ['Motorola', 'PHOTON 4G'],
'MB860': ['Motorola', 'ATRIX'],
'MB861': ['Motorola', 'ATRIX'],
'mb861': ['Motorola', 'ATRIX'],
'MB865': ['Motorola', 'ATRIX 2'],
'MB870': ['Motorola', 'Droid X2'],
'MB886': ['Motorola', 'DINARA'],
'ME501': ['Motorola', 'CLIQ XT'],
'ME511': ['Motorola', 'FLIPOUT'],
'me525': ['Motorola', 'MOTO ME525'],
'Me525': ['Motorola', 'MOTO ME525'],
'ME525': ['Motorola', 'MOTO ME525'],
'ME525+': ['Motorola', 'MOTO ME525'],
'ME600': ['Motorola', 'BACKFLIP'],
'ME632': ['Motorola', 'PRO+'],
'ME722': ['Motorola', 'Milestone 2'],
'ME811': ['Motorola', 'Droid X'],
'ME860': ['Motorola', 'ATRIX'],
'ME863': ['Motorola', 'Milestone 3'],
'ME865': ['Motorola', 'ATRIX 2'],
'MT620': ['Motorola', 'MOTO MT620'],
'MT620t': ['Motorola', 'MOTO MT620'],
'MT716': ['Motorola', 'MOTO MT716'],
'MT810': ['Motorola', 'MOTO MT810'],
'MT870': ['Motorola', 'MOTO MT870'],
'MT917': ['Motorola', 'MT917'],
'MZ505': ['Motorola', 'XOOM Family Edition', 'tablet'],
'MZ600': ['Motorola', 'XOOM 4G LTE', 'tablet'],
'MZ601': ['Motorola', 'XOOM 3G', 'tablet'],
'MZ602': ['Motorola', 'XOOM 4G LTE', 'tablet'],
'MZ603': ['Motorola', 'XOOM 3G', 'tablet'],
'MZ604': ['Motorola', 'XOOM WiFi', 'tablet'],
'MZ605': ['Motorola', 'XOOM 3G', 'tablet'],
'MZ606': ['Motorola', 'XOOM WiFi', 'tablet'],
'MZ607': ['Motorola', 'XOOM 2 WiFi Media Edition', 'tablet'],
'MZ609': ['Motorola', 'Droid XYBOARD 8.2', 'tablet'],
'MZ609 4G': ['Motorola', 'Droid XYBOARD 8.2', 'tablet'],
'MZ615': ['Motorola', 'XOOM 2 WiFi', 'tablet'],
'MZ617': ['Motorola', 'Droid XYBOARD 10.1', 'tablet'],
'MZ617 4G': ['Motorola', 'Droid XYBOARD 10.1', 'tablet'],
'WX435': ['Motorola', 'TRIUMPH WX435'],
'WX445': ['Motorola', 'CITRUS WX445'],
'XT300': ['Motorola', 'SPICE'],
'XT301': ['Motorola', 'MOTO XT301'],
'XT311': ['Motorola', 'FIRE'],
'XT316': ['Motorola', 'MOTO XT316'],
'XT319': ['Motorola', 'MOTO XT319'],
'XT390': ['Motorola', 'MOTO XT390'],
'XT320': ['Motorola', 'DEFY Mini'],
'XT321': ['Motorola', 'DEFY Mini'],
'XT500': ['Motorola', 'MOTO XT500'],
'xt-500': ['Motorola', 'MOTO XT500'],
'XT502': ['Motorola', 'QUENCH XT5'],
'XT530': ['Motorola', 'FIRE XT'],
'XT531': ['Motorola', 'FIRE XT'],
'XT532': ['Motorola', 'XT532'],
'XT535': ['Motorola', 'DEFY'],
'XT550': ['Motorola', 'XT550'],
'XT556': ['Motorola', 'XT556'],
'XT603': ['Motorola', 'ADMIRAL'],
'XT610': ['Motorola', 'Droid Pro'],
'XT615': ['Motorola', 'MOTO XT615'],
'XT626': ['Motorola', 'MOTO XT626'],
'XT681': ['Motorola', 'MOTO XT681'],
'XT682': ['Motorola', 'Droid 3'],
'XT685': ['Motorola', 'MOTO XT685'],
'XT687': ['Motorola', 'ATRIX TV'],
'XT701': ['Motorola', 'XT701'],
'XT702': ['Motorola', 'MOTO XT702'],
'XT711': ['Motorola', 'MOTO XT711'],
'XT720': ['Motorola', 'Milestone'],
'XT875': ['Motorola', 'Droid Bionic'],
'XT800': ['Motorola', 'MOTO XT800'],
'XT800+': ['Motorola', 'MOTO XT800'],
'XT800W': ['Motorola', 'MOTO Glam'],
'XT806': ['Motorola', 'MOTO XT806'],
'XT860': ['Motorola', 'Milestone 3'],
'XT862': ['Motorola', 'Droid 3'],
'XT882': ['Motorola', 'MOTO XT882'],
'XT883': ['Motorola', 'Milestone 3'],
'XT889': ['Motorola', 'XT889'],
'XT897': ['Motorola', 'Droid 4'],
'XT901': ['Motorola', 'RAZR'],
'XT910': ['Motorola', 'RAZR'],
'XT910K': ['Motorola', 'RAZR'],
'XT910S': ['Motorola', 'RAZR'],
'XT910 4G': ['Motorola', 'RAZR'],
'XT912': ['Motorola', 'Droid RAZR'],
'XT923': ['Motorola', 'Droid RAZR HD'],
'XT925': ['Motorola', 'Droid RAZR HD'],
'XT926': ['Motorola', 'Droid RAZR'],
'XT926 4G': ['Motorola', 'Droid RAZR'],
'XT928': ['Motorola', 'XT928'],
'Atrix 2': ['Motorola', 'ATRIX 2'],
'Atrix 4g': ['Motorola', 'ATRIX 4G'],
'Atrix 4G': ['Motorola', 'ATRIX 4G'],
'Atrix 4G ME860': ['Motorola', 'ATRIX 4G'],
'CLIQ': ['Motorola', 'CLIQ'],
'CLIQ XT': ['Motorola', 'CLIQ XT'],
'CLIQ2': ['Motorola', 'CLIQ 2'],
'Corvair': ['Motorola', 'Corvair', 'tablet'],
'DEFY': ['Motorola', 'DEFY'],
'Defy+': ['Motorola', 'DEFY+'],
'Defy Plus': ['Motorola', 'DEFY+'],
'Devour': ['Motorola', 'Devour'],
'Dext': ['Motorola', 'Dext'],
'Droid': ['Motorola', 'Droid'],
'DROID': ['Motorola', 'Droid'],
'DROID2': ['Motorola', 'Droid 2'],
'DROID2 GLOBAL': ['Motorola', 'Droid 2'],
'DROID2 Global': ['Motorola', 'Droid 2'],
'Droid2Global': ['Motorola', 'Droid 2'],
'DROID 2': ['Motorola', 'Droid 2'],
'DROID3': ['Motorola', 'Droid 3'],
'DROID4': ['Motorola', 'Droid 4'],
'DROID4 4G': ['Motorola', 'Droid 4'],
'DROID Pro': ['Motorola', 'Droid Pro'],
'DROID BIONIC': ['Motorola', 'Droid Bionic'],
'DROID BIONIC 4G': ['Motorola', 'Droid Bionic'],
'DROID BIONIC XT875 4G': ['Motorola', 'Droid Bionic'],
'DROIDRAZR': ['Motorola', 'Droid RAZR'],
'Droid Razr': ['Motorola', 'Droid RAZR'],
'DROID RAZR': ['Motorola', 'Droid RAZR'],
'DROID RAZR 4G': ['Motorola', 'Droid RAZR'],
'DROID SPYDER': ['Motorola', 'Droid RAZR'],
'DROID RAZR HD': ['Motorola', 'Droid RAZR HD'],
'DROID RAZR HD 4G': ['Motorola', 'Droid RAZR HD'],
'DroidX': ['Motorola', 'Droid X'],
'DROIDX': ['Motorola', 'Droid X'],
'droid x': ['Motorola', 'Droid X'],
'Droid X': ['Motorola', 'Droid X'],
'DROID X': ['Motorola', 'Droid X'],
'DROID X2': ['Motorola', 'Droid X2'],
'Electrify': ['Motorola', 'Electrify'],
'Milestone XT720': ['Motorola', 'Milestone'],
'Milestone Xt720': ['Motorola', 'Milestone'],
'Milestone': ['Motorola', 'Milestone'],
'A853 Milestone': ['Motorola', 'Milestone'],
'Milestone X': ['Motorola', 'Milestone X'],
'Milestone X2': ['Motorola', 'Milestone X2'],
'MotoroiX': ['Motorola', 'Droid X'],
'Moto Backflip': ['Motorola', 'BACKFLIP'],
'RAZR': ['Motorola', 'RAZR'],
'Triumph': ['Motorola', 'TRIUMPH'],
'Opus One': ['Motorola', 'i1'],
'Photon': ['Motorola', 'PHOTON'],
'Photon 4G': ['Motorola', 'PHOTON 4G'],
'XOOM': ['Motorola', 'XOOM', 'tablet'],
'Xoom': ['Motorola', 'XOOM', 'tablet'],
'XOOM 2': ['Motorola', 'XOOM 2', 'tablet'],
'XOOM 2 ME': ['Motorola', 'XOOM 2', 'tablet'],
'XOOM MZ606': ['Motorola', 'XOOM WiFi', 'tablet'],
'ISW11M': ['Motorola', 'PHOTON'],
'IS12M': ['Motorola', 'RAZR'],
'MOTWX435KT': ['Motorola', 'TRIUMPH'],
'X3-Ice MIUI XT720 Memorila Classics': ['Motorola', 'Milestone'],
'NABI-A': ['Nabi', 'Kids tablet', 'tablet'],
'Newpad': ['Newsmy', 'Newpad', 'tablet'],
'Newpad-K97': ['Newsmy', 'Newpad K97', 'tablet'],
'Newpad P9': ['Newsmy', 'Newpad P9', 'tablet'],
'M-PAD N8': ['Newsmy', 'M-pad N8', 'tablet'],
'LT-NA7': ['NEC', 'LT-NA7'],
'N-01D': ['NEC', 'MEDIAS PP N-01D'],
'N-04C': ['NEC', 'MEDIAS N-04C'],
'N-04D': ['NEC', 'MEDIAS LTE N-04D'],
'N-05D': ['NEC', 'MEDIAS ES N-05D'],
'N-06C': ['NEC', 'MEDIAS WP N-06C'],
'N-06D': ['NEC', 'MEDIAS Tab N-06D', 'tablet'],
'N-07D': ['NEC', 'MEDIAS X N-07D'],
'101N': ['NEC', 'MEDIAS CH Softbank 101N'],
'IS11N': ['NEC', 'MEDIAS BR IS11N'],
'Nexian NX-A890': ['Nexian', 'Journey'],
'NX-A891': ['Nexian', 'Ultra Journey'],
'M726HC': ['Nextbook', 'Premium 7', 'ereader'],
'NXM726HN': ['Nextbook', 'Premium 7', 'ereader'],
'NXM803HD': ['Nextbook', 'Premium 8', 'ereader'],
'DATAM803HC': ['Nextbook', 'Premium 8', 'ereader'],
'NXM901': ['Nextbook', 'Next 3', 'ereader'],
'NGM Vanity Smart': ['NGM', 'Vanity Smart'],
'Nokia N9': ['Nokia', 'N9'],
'Nokia N900': ['Nokia', 'N900'],
'Lumia800': ['Nokia', 'Lumia 800'],
'Lumia 900': ['Nokia', 'Lumia 900'],
'Notion Ink ADAM': ['Notion Ink', 'ADAM', 'tablet'],
'P4D SIRIUS': ['Nvsbl', 'P4D SIRIUS', 'tablet'],
'P4D Sirius': ['Nvsbl', 'P4D SIRIUS', 'tablet'],
'EFM710A': ['Oblio', 'Mint 7x', 'tablet'],
'ODYS-Xpress': ['Odys', 'Xpress', 'tablet'],
'Olivetti Olipad 100': ['Olivetti', 'Olipad 100', 'tablet'],
'OP110': ['Olivetti', 'Olipad 110', 'tablet'],
'ONDA MID': ['Onda', 'MID', 'tablet'],
'VX580A': ['Onda', 'VX580A', 'tablet'],
'VX610A': ['Onda', 'VX610A', 'tablet'],
'TQ150': ['Onda', 'TQ150'],
'N2T': ['ONN', 'N2T', 'tablet'],
'Renesas': ['Opad', 'Renesas', 'tablet'],
'renesas emev': ['Opad', 'Renesas', 'tablet'],
'X903': ['Oppo', 'Find Me X903'],
'X905': ['Oppo', 'Find 3 X905'],
'R805': ['Oppo', 'R805'],
'R801': ['Oppo', 'R801'],
'R811': ['Oppo', 'R811'],
'X909': ['Oppo', 'X909'],
'OPPOR801': ['Oppo', 'R801'],
'OPPOX905': ['Oppo', 'Find 3 X905'],
'OPPOX907': ['Oppo', 'Find 3 X907'],
'X907': ['Oppo', 'Find 3 X907'],
'X9015': ['Oppo', 'Find X9015'],
'OPPOX9017': ['Oppo', 'Finder X9017'],
'OPPOU701': ['Oppo', 'OPPOU701'],
'OPPOR807': ['Oppo', 'Real R807'],
'OPPOR805': ['Oppo', 'Real R805'],
'R807': ['Oppo', 'Real R807'],
'OPPOT703': ['Oppo', 'T703'],
'P-01D': ['Panasonic', 'P-01D'],
'P-02D': ['Panasonic', 'Lumix Phone'],
'P-04D': ['Panasonic', 'Eluga'],
'P-07C': ['Panasonic', 'P-07C'],
'dL1': ['Panasonic', 'Eluga dL1'],
'101P': ['Panasonic', 'Lumix Phone'],
'JT-H580VT': ['Panasonic', 'BizPad 7', 'tablet'],
'JT-H581VT': ['Panasonic', 'BizPad 10', 'tablet'],
'FZ-A1A': ['Panasonic', 'Toughpad', 'tablet'],
'pandigital9hr': ['Pandigital', '9HR', 'tablet'],
'pandigital9hr2': ['Pandigital', '9HR2', 'tablet'],
'pandigitalopc1': ['Pandigital', 'OPC1', 'tablet'],
'pandigitalopp1': ['Pandigital', 'OPP1', 'tablet'],
'pandigitalp1hr': ['Pandigital', 'p1hr', 'tablet'],
'IM-A600S': ['Pantech', 'SIRIUS �±'],
'IM-A630K': ['Pantech', 'SKY Izar'],
'IM-A690L': ['Pantech', 'SKY'],
'IM-A690S': ['Pantech', 'SKY'],
'IM-A710K': ['Pantech', 'SKY Vega Xpress'],
'IM-A720L': ['Pantech', 'SKY Vega Xpress'],
'IM-A725L': ['Pantech', 'SKY Vega X+'],
'IM-A730s': ['Pantech', 'SKY Vega S'],
'IM-A730S': ['Pantech', 'SKY Vega S'],
'IM-A750K': ['Pantech', 'SKY Mirach A'],
'IM-A760S': ['Pantech', 'SKY Vega Racer'],
'IM-A770K': ['Pantech', 'SKY Vega Racer'],
'IM-A780L': ['Pantech', 'SKY Vega Racer'],
'IM-A800S': ['Pantech', 'SKY Vega LTE'],
'IM-A810K': ['Pantech', 'SKY Vega LTE M'],
'IM-A810S': ['Pantech', 'SKY Vega LTE M'],
'IM-A820L': ['Pantech', 'SKY Vega LTE EX'],
'IM-A830K': ['Pantech', 'SKY Vega Racer 2'],
'IM-A830L': ['Pantech', 'SKY Vega Racer 2'],
'IM-A830S': ['Pantech', 'SKY Vega Racer 2'],
'IM-A840S': ['Pantech', 'SKY Vega S5'],
'IM-A850K': ['Pantech', 'IM-A850K'],
'IM-T100K': ['Pantech', 'SKY Vega No. 5', 'tablet'],
'IS06': ['Pantech', 'SIRIUS �±'],
'ADR8995': ['Pantech', 'Breakout'],
'ADR8995 4G': ['Pantech', 'Breakout'],
'ADR910L 4G': ['Pantech', 'ADR910L'],
'PantechP4100': ['Pantech', 'Element', 'tablet'],
'PantechP8000': ['Pantech', 'Crossover'],
'PantechP8010': ['Pantech', 'P8010'],
'PantechP9060': ['Pantech', 'Pocket'],
'PantechP9070': ['Pantech', 'Burst'],
'SKY IM-A600S': ['Pantech', 'SIRIUS �±'],
'SKY IM-A630K': ['Pantech', 'SKY Izar'],
'SKY IM-A650S': ['Pantech', 'SKY Vega'],
'IS11PT': ['Pantech', 'Mirach IS11PT'],
'PAT712W': ['Perfeo', 'PAT712W', 'tablet'],
'X7G': ['Pearl', 'Touchlet X7G', 'tablet'],
'FWS810': ['PHICOMM', 'FWS810'],
'Philips PI5000': ['Philips', 'PI5000', 'tablet'],
'PI7000': ['Philips', 'PI7000', 'tablet'],
'Philips W626': ['Philips', 'W626'],
'Philips W632': ['Philips', 'W632'],
'MOMO': ['Ployer', 'MOMO', 'tablet'],
'MOMO15': ['Ployer', 'MOMO15', 'tablet'],
'PocketBook A7': ['PocketBook', 'A7', 'tablet'],
'PocketBook A10': ['PocketBook', 'A10', 'tablet'],
'Mobii 7': ['Point Of View', 'Mobii 7', 'tablet'],
'PMP3384BRU': ['Prestigio', 'Multipad 3384', 'tablet'],
'TB07FTA': ['Positivo', 'TB07FTA', 'tablet'],
'QW TB-1207': ['Qware', 'Pro3', 'tablet'],
'W6HD ICS': ['Ramos', 'W6HD', 'tablet'],
'w10': ['Ramos', 'W10', 'tablet'],
'W10': ['Ramos', 'W10', 'tablet'],
'w10 v2.0': ['Ramos', 'W10 v2.0', 'tablet'],
'W10 V2.0': ['Ramos', 'W10 v2.0', 'tablet'],
'T11AD': ['Ramos', 'T11AD', 'tablet'],
'T11AD.FE': ['Ramos', 'T11AD', 'tablet'],
'PlayBook': ['RIM', 'BlackBerry PlayBook', 'tablet'],
'RBK-490': ['Ritmix', 'RBK-490', 'tablet'],
'A8HD': ['Saayi', 'Dropad A8HD', 'tablet'],
'GT-S7568': ['Samsung', 'S7568'],
'Galaxy Nexus': ['Samsung', 'Galaxy Nexus'],
'GT-B5330': ['Samsung', 'GT-B5330'],
'GT-B5510': ['Samsung', 'Galaxy Y Pro'],
'GT-B5510B': ['Samsung', 'Galaxy Y Pro'],
'GT-B5510L': ['Samsung', 'Galaxy Y Pro'],
'GT-B5512': ['Samsung', 'Galaxy Y Pro Duos'],
'GT-B7510': ['Samsung', 'Galaxy Pro'],
'GT-B7510L': ['Samsung', 'Galaxy Pro'],
'GT-I5500': ['Samsung', 'Galaxy 5'],
'GT-I5500B': ['Samsung', 'Galaxy 5'],
'GT-I5500L': ['Samsung', 'Galaxy 5'],
'GT-I5500M': ['Samsung', 'Galaxy 5'],
'GT-I5500-MR3': ['Samsung', 'Galaxy 5'],
'GT-I5503': ['Samsung', 'Galaxy 5'],
'GT-I5508': ['Samsung', 'Galaxy 5'],
'GT-I5510': ['Samsung', 'Galaxy 551'],
'GT-I5510L': ['Samsung', 'Galaxy 551'],
'GT-I5510M': ['Samsung', 'Galaxy 551'],
'GT-I5510T': ['Samsung', 'Galaxy 551'],
'GT-I5700': ['Samsung', 'Galaxy Spica'],
'GT-I5700L': ['Samsung', 'Galaxy Spica'],
'GT-I5800': ['Samsung', 'Galaxy Apollo'],
'GT-I5800D': ['Samsung', 'Galaxy Apollo'],
'GT-I5800L': ['Samsung', 'Galaxy Apollo'],
'GT-I5801': ['Samsung', 'Galaxy Apollo'],
'GT-I6500U': ['Samsung', 'Saturn'],
'GT-I8000': ['Samsung', 'Omnia 2'],
'GT-I8150': ['Samsung', 'Galaxy W'],
'GT-I8150B': ['Samsung', 'Galaxy W'],
'GT-I8160': ['Samsung', 'Galaxy Ace 2'],
'GT-I8160L': ['Samsung', 'Galaxy Ace 2'],
'GT-I8160P': ['Samsung', 'Galaxy Ace 2'],
'GT-I8320': ['Samsung', 'H1'],
'GT-I8520': ['Samsung', 'Galaxy Beam'],
'GT-I8530': ['Samsung', 'Galaxy Beam'],
'GT-I8250': ['Samsung', 'Galaxy Beam'],
'GT-i9000': ['Samsung', 'Galaxy S'],
'GT-I9000': ['Samsung', 'Galaxy S'],
'GT-I9000B': ['Samsung', 'Galaxy S'],
'GT-I9000M': ['Samsung', 'Galaxy S Vibrant'],
'GT-I9000T': ['Samsung', 'Galaxy S'],
'GT-I9001': ['Samsung', 'Galaxy S Plus'],
'GT-I9003': ['Samsung', 'Galaxy SL'],
'GT-I9003L': ['Samsung', 'Galaxy SL'],
'GT-I9008': ['Samsung', 'Galaxy S'],
'GT-I9008L': ['Samsung', 'Galaxy S'],
'GT-I9010': ['Samsung', 'Galaxy S Giorgio Armani'],
'GT-I9018': ['Samsung', 'Galaxy GT-I9018'],
'GT-I9070': ['Samsung', 'Galaxy S Advance'],
'GT-I9070P': ['Samsung', 'Galaxy S Advance'],
'GT-I9082': ['Samsung', 'Galaxy Grand DUOS'],
'GT-I9088': ['Samsung', 'Galaxy S'],
'GT-i9100': ['Samsung', 'Galaxy S II'],
'GT-I9100': ['Samsung', 'Galaxy S II'],
'GT-I9100G': ['Samsung', 'Galaxy S II'],
'GT-I9100M': ['Samsung', 'Galaxy S II'],
'GT-I9100T': ['Samsung', 'Galaxy S II'],
'GT-I9100P': ['Samsung', 'Galaxy S II'],
'GT-I9103': ['Samsung', 'Galaxy R'],
'GT-I9108': ['Samsung', 'Galaxy S II'],
'GT-I9210': ['Samsung', 'Galaxy S II LTE'],
'GT-I9210T': ['Samsung', 'Galaxy S II LTE'],
'GT-I9220': ['Samsung', 'Galaxy Note'],
'GT-I9228': ['Samsung', 'Galaxy Note'],
'GT-I9250': ['Samsung', 'Galaxy Nexus'],
'GT-I9250 EUR XX': ['Samsung', 'Galaxy Nexus'],
'GT-I9260': ['Samsung', 'Galaxy Premier'],
'GT-I9300': ['Samsung', 'Galaxy S III'],
'GT-I9300T': ['Samsung', 'Galaxy S III'],
'GT-I9303T': ['Samsung', 'Galaxy S III'],
'GT-I9308': ['Samsung', 'Galaxy S III'],
'GT-I9500': ['Samsung', 'Galaxy GT-I9500'],
'GT-I9800': ['Samsung', 'Galaxy GT-I9800'],
'GT-N7000': ['Samsung', 'Galaxy Note'],
'GT-N7000B': ['Samsung', 'Galaxy Note'],
'GT-N7100': ['Samsung', 'Galaxy Note II'],
'GT-N7102': ['Samsung', 'Galaxy Note II'],
'GT-N8000': ['Samsung', 'Galaxy Note 10.1'],
'GT-N8010': ['Samsung', 'Galaxy Note 10.1'],
'GT-P1000': ['Samsung', 'Galaxy Tab', 'tablet'],
'GT-P1000L': ['Samsung', 'Galaxy Tab', 'tablet'],
'GT-P1000M': ['Samsung', 'Galaxy Tab', 'tablet'],
'GT-P1000N': ['Samsung', 'Galaxy Tab', 'tablet'],
'GT-P1000T': ['Samsung', 'Galaxy Tab', 'tablet'],
'GT-P1000 Tablet': ['Samsung', 'Galaxy Tab', 'tablet'],
'GT-P1010': ['Samsung', 'Galaxy Tab', 'tablet'],
'GT-P3100': ['Samsung', 'Galaxy Tab 2 (7.0)', 'tablet'],
'GT-P3100B': ['Samsung', 'Galaxy Tab 2 (7.0)', 'tablet'],
'GT-P3110': ['Samsung', 'Galaxy Tab 2 (7.0)', 'tablet'],
'GT-P3113': ['Samsung', 'Galaxy Tab 2 (7.0)', 'tablet'],
'GT-P5100': ['Samsung', 'Galaxy Tab 2 (10.1)', 'tablet'],
'GT-P5110': ['Samsung', 'Galaxy Tab 2 (10.1)', 'tablet'],
'GT-P5113': ['Samsung', 'Galaxy Tab 2 (10.1)', 'tablet'],
'GT-P6200': ['Samsung', 'Galaxy Tab 7.0 Plus', 'tablet'],
'GT-P6200L': ['Samsung', 'Galaxy Tab 7.0 Plus', 'tablet'],
'GT-P6201': ['Samsung', 'Galaxy Tab 7.0 Plus N', 'tablet'],
'GT-P6210': ['Samsung', 'Galaxy Tab 7.0 Plus', 'tablet'],
'GT-P6211': ['Samsung', 'Galaxy Tab 7.0 Plus N', 'tablet'],
'GT-P6800': ['Samsung', 'Galaxy Tab 7.7', 'tablet'],
'GT-P6810': ['Samsung', 'Galaxy Tab 7.7', 'tablet'],
'GT-P7100': ['Samsung', 'Galaxy Tab 10.1V', 'tablet'],
'GT-P7300': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'GT-P7300B': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'GT-P7310': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'GT-P7320': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'GT-P7320T': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'GT-P7500': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'GT-P7500D': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'GT-P7500R': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'GT-P7500V': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'GT-P7501': ['Samsung', 'Galaxy Tab 10.1N', 'tablet'],
'GT-P7510': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'GT-P7511': ['Samsung', 'Galaxy Tab 10.1N', 'tablet'],
'GT-S5300': ['Samsung', 'Galaxy Pocket'],
'GT-S5360': ['Samsung', 'Galaxy Y'],
'GT-S5360B': ['Samsung', 'Galaxy Y'],
'GT-S5360L': ['Samsung', 'Galaxy Y'],
'GT-S5363': ['Samsung', 'Galaxy Y'],
'GT-S5367': ['Samsung', 'Galaxy Y TV'],
'GT-S5368': ['Samsung', 'GT-S5368'],
'GT-S5369': ['Samsung', 'Galaxy Y'],
'GT-S5570': ['Samsung', 'Galaxy Mini'],
'GT-S5570B': ['Samsung', 'Galaxy Mini'],
'GT-S5570I': ['Samsung', 'Galaxy Mini'],
'GT-S5570L': ['Samsung', 'Galaxy Mini'],
'GT-S5578': ['Samsung', 'Galaxy Mini'],
'GT-S5660': ['Samsung', 'Galaxy Gio'],
'GT-S5660M': ['Samsung', 'Galaxy Gio'],
'GT-S5660V': ['Samsung', 'Galaxy Gio'],
'GT-S5670': ['Samsung', 'Galaxy Fit'],
'GT-S5670B': ['Samsung', 'Galaxy Fit'],
'GT-S5670L': ['Samsung', 'Galaxy Fit'],
'GT-S5690': ['Samsung', 'Galaxy Xcover'],
'GT-S5690L': ['Samsung', 'Galaxy Xcover'],
'GT-S5820': ['Samsung', 'Galaxy Ace'],
'GT-S5830': ['Samsung', 'Galaxy Ace'],
'GT-S5830B': ['Samsung', 'Galaxy Ace'],
'GT-S5830C': ['Samsung', 'Galaxy Ace'],
'GT-S5830D': ['Samsung', 'Galaxy Ace'],
'GT-S5830D-parrot': ['Samsung', 'Galaxy Ace'],
'GT-S5830i': ['Samsung', 'Galaxy Ace'],
'GT-S5830L': ['Samsung', 'Galaxy Ace'],
'GT-S5830M': ['Samsung', 'Galaxy Ace'],
'GT-S5830T': ['Samsung', 'Galaxy Ace'],
'GT-S5838': ['Samsung', 'Galaxy Ace'],
'GT-S5839i': ['Samsung', 'Galaxy Ace'],
'GT-S6102': ['Samsung', 'Galaxy Y Duos'],
'GT-S6102B': ['Samsung', 'Galaxy Y Duos'],
'GT-S6500': ['Samsung', 'Galaxy Mini 2'],
'GT-S6500D': ['Samsung', 'Galaxy Mini 2'],
'GT-S6702': ['Samsung', 'GT-S6702'],
'GT-S6802': ['Samsung', 'Galaxy Ace Duos'],
'GT-S7500': ['Samsung', 'Galaxy Ace Plus'],
'GT-S7500L': ['Samsung', 'Galaxy Ace Plus'],
'GT-S7500W': ['Samsung', 'Galaxy Ace Plus'],
'GT-T959': ['Samsung', 'Galaxy S Vibrant'],
'SCH-i509': ['Samsung', 'Galaxy Y'],
'SCH-i559': ['Samsung', 'Galaxy Pop'],
'SCH-i569': ['Samsung', 'Galaxy Gio'],
'SCH-i579': ['Samsung', 'Galaxy Ace'],
'SCH-i589': ['Samsung', 'Galaxy Ace Duos'],
'SCH-i705 4G': ['Samsung', 'Galaxy Tab 2 (7.0)', 'tablet'],
'SCH-i809': ['Samsung', 'SCH-i809'],
'SCH-i889': ['Samsung', 'Galaxy Note'],
'SCH-i909': ['Samsung', 'Galaxy S'],
'SCH-i919': ['Samsung', 'SCH-i919'],
'SCH-i929': ['Samsung', 'SCH-i929'],
'SCH-I100': ['Samsung', 'Gem'],
'SCH-I110': ['Samsung', 'Illusion'],
'SCH-I400': ['Samsung', 'Continuum'],
'SCH-I405': ['Samsung', 'Stratosphere'],
'SCH-I405 4G': ['Samsung', 'Stratosphere'],
'SCH-I500': ['Samsung', 'Fascinate'],
'SCH-I510': ['Samsung', 'Stealth V'],
'SCH-I510 4G': ['Samsung', 'Droid Charge'],
'SCH-I515': ['Samsung', 'Galaxy Nexus'],
'SCH-I535': ['Samsung', 'Galaxy S III'],
'SCH-I535 4G': ['Samsung', 'Galaxy S III'],
'SCH-I619': ['Samsung', 'SCH-I619'],
'SCH-I699': ['Samsung', 'SCH-I699'],
'SCH-I779': ['Samsung', 'SCH-I779'],
'SCH-I800': ['Samsung', 'Galaxy Tab 7.0', 'tablet'],
'SCH-I815': ['Samsung', 'Galaxy Tab 7.7', 'tablet'],
'SCH-I815 4G': ['Samsung', 'Galaxy Tab 7.7', 'tablet'],
'SCH-I905': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'SCH-I905 4G': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'SCH-I909': ['Samsung', 'Galaxy S'],
'SCH-I915': ['Samsung', 'SCH-I915'],
'SCH-I939': ['Samsung', 'Galaxy S III'],
'SCH-M828C': ['Samsung', 'Galaxy Precedent'],
'SCH-M828Carray(9096483449)': ['Samsung', 'Galaxy Precedent'],
'SCH-R530U': ['Samsung', 'Galaxy S III'],
'SCH-R680': ['Samsung', 'Repp'],
'SCH-R720': ['Samsung', 'Admire'],
'SCH-R730': ['Samsung', 'Transfix'],
'SCH-R760': ['Samsung', 'Galaxy S II'],
'SCH-R820': ['Samsung', 'SCH-R820'],
'SCH-R880': ['Samsung', 'Acclaim'],
'SCH-R910': ['Samsung', 'Galaxy Indulge 4G'],
'SCH-R915': ['Samsung', 'Galaxy Indulge'],
'SCH-R920': ['Samsung', 'Galaxy Attain 4G'],
'SCH-R930': ['Samsung', 'Galaxy S Aviator'],
'SCH-R940': ['Samsung', 'Galaxy S Lightray'],
'SCH-S720C': ['Samsung', 'Galaxy Proclaim'],
'SCH-S735C': ['Samsung', 'SCH-S735'],
'SCH-W899': ['Samsung', 'SCH-W899'],
'SCH-W999': ['Samsung', 'SCH-W999'],
'SGH-I547': ['Samsung', 'SGH-I547'],
'SGH-I717': ['Samsung', 'Galaxy Note'],
'SGH-I717D': ['Samsung', 'Galaxy Note'],
'SGH-I717M': ['Samsung', 'Galaxy Note'],
'SGH-I717R': ['Samsung', 'Galaxy Note'],
'SGH-I727': ['Samsung', 'Galaxy S II Skyrocket'],
'SGH-i727R': ['Samsung', 'Galaxy S II'],
'SGH-I727R': ['Samsung', 'Galaxy S II'],
'SGH-I747': ['Samsung', 'Galaxy S III'],
'SGH-I747M': ['Samsung', 'Galaxy S III'],
'SGH-I748': ['Samsung', 'Galaxy S III'],
'SGH-I757': ['Samsung', 'Galaxy S II Skyrocket HD'],
'SGH-I777': ['Samsung', 'Galaxy S II'],
'SGH-I9777': ['Samsung', 'Galaxy S II'],
'SGH-I896': ['Samsung', 'Captivate'],
'SGH-I897': ['Samsung', 'Captivate'],
'SGH-I927': ['Samsung', 'Captivate Glide'],
'SGH-I927R': ['Samsung', 'Captivate Glide'],
'SGH-I957': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'SGH-I957D': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'SGH-I957M': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'SGH-I957R': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'SGH-I987': ['Samsung', 'Galaxy Tab 7.0', 'tablet'],
'SGH-I997': ['Samsung', 'Infuse 4G'],
'SGH-I997R': ['Samsung', 'Infuse 4G'],
'SGH-I9000': ['Samsung', 'Galaxy S'],
'SGH-S730G': ['Samsung', 'SGH-S730'],
'SGH-T499': ['Samsung', 'Dart'],
'SGH-T499V': ['Samsung', 'Galaxy Mini'],
'SGH-T499Y': ['Samsung', 'Galaxy Mini'],
'SGH-T589': ['Samsung', 'Gravity Smart'],
'SGH-T589R': ['Samsung', 'Gravity Smart'],
'SGH-T679': ['Samsung', 'Exhibit II 4G'],
'SGH-T679M': ['Samsung', 'Exhibit II 4G'],
'SGH-T759': ['Samsung', 'Exhibit 4G'],
'SGH-T769': ['Samsung', 'Galaxy S Blaze 4G'],
'SGH-T839': ['Samsung', 'T-Mobile Sidekick'],
'SGH-T849': ['Samsung', 'Galaxy Tab 7.0', 'tablet'],
'SGH-T859': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'SGH-T869': ['Samsung', 'Galaxy Tab 7.0 Plus', 'tablet'],
'SGH-T879': ['Samsung', 'Galaxy Note'],
'SGH-T959': ['Samsung', 'Vibrant'],
'SGH-T959D': ['Samsung', 'Galaxy S Fascinate 3G+'],
'SGH-T959P': ['Samsung', 'Galaxy S Fascinate 4G'],
'SGH-T959V': ['Samsung', 'Galaxy S 4G'],
'SGH-T989': ['Samsung', 'Galaxy S II'],
'SGH-T989D': ['Samsung', 'Galaxy S II X'],
'SGH-T999': ['Samsung', 'Galaxy S Blaze 4G'],
'SGH-T999V': ['Samsung', 'Galaxy S Blaze 4G'],
'SHV-E120K': ['Samsung', 'Galaxy S II HD LTE'],
'SHV-E120L': ['Samsung', 'Galaxy S II HD LTE'],
'SHV-E120S': ['Samsung', 'Galaxy S II HD LTE'],
'SHV-E110S': ['Samsung', 'Galaxy S II LTE'],
'SHV-E140S': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'SHV-E150S': ['Samsung', 'Galaxy Tab 7.7', 'tablet'],
'SHV-E160K': ['Samsung', 'Galaxy Note'],
'SHV-E160L': ['Samsung', 'Galaxy Note LTE'],
'SHV-E160S': ['Samsung', 'Galaxy Note LTE'],
'SHV-E170K': ['Samsung', 'SHV-E170K'],
'SHV-E170L': ['Samsung', 'SHV-E170L'],
'SHV-E210K': ['Samsung', 'Galaxy S III'],
'SHV-E210L': ['Samsung', 'Galaxy S III'],
'SHV-E210S': ['Samsung', 'Galaxy S III'],
'SHW-M100S': ['Samsung', 'Galaxy A'],
'SHW-M110S': ['Samsung', 'Galaxy S'],
'SHW-M130L': ['Samsung', 'Galaxy U'],
'SHW-M130K': ['Samsung', 'Galaxy K'],
'SHW-M180K': ['Samsung', 'Galaxy Tab', 'tablet'],
'SHW-M180L': ['Samsung', 'Galaxy Tab', 'tablet'],
'SHW-M180S': ['Samsung', 'Galaxy Tab', 'tablet'],
'SHW-M180W': ['Samsung', 'Galaxy Tab', 'tablet'],
'SHW-M185S': ['Samsung', 'Galaxy Tab', 'tablet'],
'SHW-M190S': ['Samsung', 'Galaxy S Hoppin'],
'SHW-M220L': ['Samsung', 'Galaxy Neo'],
'SHW-M240S': ['Samsung', 'Galaxy Ace'],
'SHW-M250K': ['Samsung', 'Galaxy S II'],
'SHW-M250L': ['Samsung', 'Galaxy S II'],
'SHW-M250S': ['Samsung', 'Galaxy S II'],
'SHW-M300W': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'SHW-M305W': ['Samsung', 'Galaxy Tab 8.9', 'tablet'],
'SHW-M340S': ['Samsung', 'Galaxy M Style'],
'SHW-M380K': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'SHW-M380S': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'SHW-M380W': ['Samsung', 'Galaxy Tab 10.1', 'tablet'],
'SHW-M440S': ['Samsung', 'Galaxy S III'],
'SMT-i9100': ['Samsung', 'SMT-I9100', 'tablet'],
'SPH-D600': ['Samsung', 'Conquer 4G'],
'SPH-D700': ['Samsung', 'Epic 4G'],
'SPH-D705': ['Samsung', 'Epic 4G 2'],
'SPH-D710': ['Samsung', 'Epic 4G Touch'],
'SPH-L700': ['Samsung', 'Galaxy Nexus'],
'SPH-L710': ['Samsung', 'Galaxy S III'],
'SPH-M820': ['Samsung', 'Galaxy Prevail'],
'SPH-M820-BST': ['Samsung', 'Galaxy Prevail'],
'SPH-M580': ['Samsung', 'Replenish'],
'SPH-M900': ['Samsung', 'Moment'],
'SPH-M910': ['Samsung', 'Intercept'],
'SPH-M920': ['Samsung', 'Transform'],
'SPH-M930': ['Samsung', 'Transform Ultra'],
'SPH-M930BST': ['Samsung', 'Transform Ultra'],
'SPH-P100': ['Samsung', 'Galaxy Tab', 'tablet'],
'YP-GB1': ['Samsung', 'Galaxy Player', 'media'],
'YP-GB70': ['Samsung', 'Galaxy Player 70', 'media'],
'YP-GB70D': ['Samsung', 'Galaxy Player 70 Plus', 'media'],
'YP-GS1': ['Samsung', 'Galaxy S WiFi 3.6', 'media'],
'YP-G1': ['Samsung', 'Galaxy S WiFi 4.0', 'media'],
'YP-GI1': ['Samsung', 'Galaxy S WiFi 4.2', 'media'],
'YP-G50': ['Samsung', 'Galaxy Player', 'media'],
'YP-G70': ['Samsung', 'Galaxy S WiFi 5.0', 'media'],
'GT9100': ['Samsung', 'Galaxy S II'],
'I897': ['Samsung', 'Captivate'],
'I7500': ['Samsung', 'Galaxy'],
'I9000': ['Samsung', 'Galaxy S'],
'T959': ['Samsung', 'Galaxy S Vibrant'],
'Captivate-I897': ['Samsung', 'Captivate'],
'Galaxy': ['Samsung', 'Galaxy'],
'Galaxy Note': ['Samsung', 'Galaxy Note'],
'GalaxyS': ['Samsung', 'Galaxy S'],
'Galaxy S II': ['Samsung', 'Galaxy S II'],
'Galaxy X': ['Samsung', 'Galaxy X'],
'Galaxy Spica': ['Samsung', 'Galaxy Spica'],
'GALAXY Tab': ['Samsung', 'Galaxy Tab', 'tablet'],
'GALAXY NEXUS': ['Samsung', 'Galaxy Nexus'],
'Vibrantmtd': ['Samsung', 'Vibrant'],
'SC-01C': ['Samsung', 'Galaxy Tab', 'tablet'],
'SC-01D': ['Samsung', 'Galaxy Tab 10.1 LTE', 'tablet'],
'SC-02B': ['Samsung', 'Galaxy S'],
'SC-02C': ['Samsung', 'Galaxy S II'],
'SC-02D': ['Samsung', 'Galaxy Tab 7.0 Plus', 'tablet'],
'SC-03D': ['Samsung', 'Galaxy S II LTE'],
'SC-04D': ['Samsung', 'Galaxy Nexus'],
'SC-05D': ['Samsung', 'Galaxy Note LTE'],
'SC-06D': ['Samsung', 'Galaxy S III'],
'ISW11SC': ['Samsung', 'Galaxy S II WiMAX'],
'GT-S7562': ['Samsung', 'GT-S7562'],
'GT-S7562i': ['Samsung', 'GT-S7562i'],
'A01SH': ['Sharp', 'A01SH'],
'IS01': ['Sharp', 'IS01'],
'IS03': ['Sharp', 'IS03'],
'IS05': ['Sharp', 'IS05'],
'IS11SH': ['Sharp', 'Aquos IS11SH'],
'IS12SH': ['Sharp', 'Aquos IS12SH'],
'IS13SH': ['Sharp', 'Aquos IS13SH'],
'IS14SH': ['Sharp', 'Aquos IS14SH'],
'ISW16SH': ['Sharp', 'Aquos ISW16SH'],
'EB-W51GJ': ['Sharp', 'EB-W51GJ'],
'SBM003SH': ['Sharp', 'Galapagos'],
'SBM005SH': ['Sharp', 'Galapagos'],
'SBM006SH': ['Sharp', 'Aquos'],
'SBM007SH': ['Sharp', 'Aquos 007SH'],
'SBM009SH': ['Sharp', 'Aquos 009SH'],
'SBM102SH': ['Sharp', 'Aquos 102SH'],
'SBM103SH': ['Sharp', 'Aquos 103SH'],
'SBM104SH': ['Sharp', 'Aquos 104SH'],
'SBM107SH': ['Sharp', 'Aquos 107SH'],
'SBM107SHB': ['Sharp', 'Aquos 107SH'],
'SH-01D': ['Sharp', 'Aquos SH-01D'],
'SH-02D': ['Sharp', 'Aquos slider SH-02D'],
'SH-03C': ['Sharp', 'Lynx 3D'],
'SH-06D': ['Sharp', 'Aquos SH-06D'],
'SH-09D': ['Sharp', 'Aquos Zeta SH-09D'],
'SH-10B': ['Sharp', 'Lynx'],
'SH-12C': ['Sharp', 'Aquos'],
'SH-13C': ['Sharp', 'Aquos f SH-13C'],
'SH80F': ['Sharp', 'Aquos SH80F'],
'SH72x8U': ['Sharp', 'SH72x8U'],
'SH8118U': ['Sharp', 'SH8118U'],
'SH8128U': ['Sharp', 'SH8128U'],
'SH8158U': ['Sharp', 'SH8158U'],
'SH8188U': ['Sharp', 'SH8188U'],
'SH8268U': ['Sharp', 'SH8268U'],
'INFOBAR C01': ['Sharp', 'INFOBAR C01'],
'SPX-5': ['Simvalley', 'SPX-5'],
'SPX-5 3G': ['Simvalley', 'SPX-5 3G'],
'SmartQ G7': ['SmartQ', 'G7', 'tablet'],
'SmartQT7': ['SmartQ', 'T7', 'tablet'],
'SmartQT10': ['SmartQ', 'T10', 'tablet'],
'SmartQT15': ['SmartQ', 'T15', 'tablet'],
'SmartQT19': ['SmartQ', 'T19', 'tablet'],
'SmartQT20': ['SmartQ', 'T20', 'tablet'],
'OMS1 6': ['Sony Ericsson', 'A8i'],
'E10a': ['Sony Ericsson', 'Xperia X10 Mini'],
'E10i': ['Sony Ericsson', 'Xperia X10 Mini'],
'E10iv': ['Sony Ericsson', 'Xperia X10 Mini'],
'E15': ['Sony Ericsson', 'Xperia X8'],
'E15a': ['Sony Ericsson', 'Xperia X8'],
'E15i': ['Sony Ericsson', 'Xperia X8'],
'E15iv': ['Sony Ericsson', 'Xperia X8'],
'E15i-o': ['Sony Ericsson', 'Xperia X8'],
'E16i': ['Sony Ericsson', 'W8 Walkman'],
'LT11i': ['Sony Ericsson', 'Xperia Neo V'],
'LT15': ['Sony Ericsson', 'Xperia Arc'],
'LT15a': ['Sony Ericsson', 'Xperia Arc'],
'LT15i': ['Sony Ericsson', 'Xperia Arc'],
'LT15iv': ['Sony Ericsson', 'Xperia Arc'],
'LT15i-o': ['Sony Ericsson', 'Xperia Arc'],
'LT18a': ['Sony Ericsson', 'Xperia Arc S'],
'LT18i': ['Sony Ericsson', 'Xperia Arc S'],
'LT18iv': ['Sony Ericsson', 'Xperia Arc S'],
'LT18i-o': ['Sony Ericsson', 'Xperia Arc S'],
'LT22i': ['Sony', 'Xperia P'],
'LT26i': ['Sony', 'Xperia S'],
'LT26ii': ['Sony', 'Xperia S'],
'LT26i-o': ['Sony', 'Xperia S'],
'LT28at': ['Sony', 'Xperia Ion'],
'LT28h': ['Sony', 'Xperia Ion'],
'LT28i': ['Sony', 'Xperia Ion'],
'LT29i': ['Sony', 'Xperia GX'],
'SonyLT29i': ['Sony', 'Xperia GX'],
'SonyLT30a': ['Sony', 'Xperia Mint'],
'SonyLT30p': ['Sony', 'Xperia Mint'],
'MK16a': ['Sony Ericsson', 'Xperia Pro'],
'MK16i': ['Sony Ericsson', 'Xperia Pro'],
'MT11a': ['Sony Ericsson', 'Xperia Neo V'],
'MT11i': ['Sony Ericsson', 'Xperia Neo V'],
'MT11iv': ['Sony Ericsson', 'Xperia Neo V'],
'MT11i-o': ['Sony Ericsson', 'Xperia Neo V'],
'MT15a': ['Sony Ericsson', 'Xperia Neo'],
'MT15i': ['Sony Ericsson', 'Xperia Neo'],
'MT15iv': ['Sony Ericsson', 'Xperia Neo'],
'MT15i-o': ['Sony Ericsson', 'Xperia Neo'],
'MT25i': ['Sony', 'Xperia Neo L'],
'MT27i': ['Sony', 'Xperia Sola'],
'R800a': ['Sony Ericsson', 'Xperia Play'],
'R800i': ['Sony Ericsson', 'Xperia Play'],
'R800iv': ['Sony Ericsson', 'Xperia Play'],
'R800at': ['Sony Ericsson', 'Xperia Play'],
'R800x': ['Sony Ericsson', 'Xperia Play'],
'SK17a': ['Sony Ericsson', 'Xperia Mini Pro'],
'SK17i': ['Sony Ericsson', 'Xperia Mini Pro'],
'SK17iv': ['Sony Ericsson', 'Xperia Mini Pro'],
'SK17i-o': ['Sony Ericsson', 'Xperia Mini Pro'],
'ST15a': ['Sony Ericsson', 'Xperia Mini'],
'ST15i': ['Sony Ericsson', 'Xperia Mini'],
'ST17a': ['Sony Ericsson', 'Xperia Active'],
'ST17i': ['Sony Ericsson', 'Xperia Active'],
'ST18a': ['Sony Ericsson', 'Xperia Ray'],
'ST18i': ['Sony Ericsson', 'Xperia Ray'],
'ST18iv': ['Sony Ericsson', 'Xperia Ray'],
'ST18av': ['Sony Ericsson', 'Xperia Ray'],
'SonyST21': ['Sony', '\'Tapioca\''],
'SonyST21i': ['Sony', '\'Tapioca\''],
'SonyST21a2': ['Sony', '\'Tapioca\''],
'ST21': ['Sony', '\'Tapioca\''],
'ST21i': ['Sony', '\'Tapioca\''],
'SonyST23i': ['Sony', '\'Tapioca DS\''],
'ST25i': ['Sony', 'Xperia U'],
'ST27i': ['Sony', 'Xperia Go'],
'U20a': ['Sony Ericsson', 'Xperia X10 Mini Pro'],
'U20i': ['Sony Ericsson', 'Xperia X10 Mini Pro'],
'U20iv': ['Sony Ericsson', 'Xperia X10 Mini Pro'],
'WT13i': ['Sony Ericsson', 'Mix Walkman'],
'WT18i': ['Sony Ericsson', 'Walkman'],
'WT19a': ['Sony Ericsson', 'Live with Walkman'],
'WT19i': ['Sony Ericsson', 'Live with Walkman'],
'WT19iv': ['Sony Ericsson', 'Live with Walkman'],
'X8': ['Sony Ericsson', 'Xperia X8'],
'X10': ['Sony Ericsson', 'Xperia X10'],
'X10a': ['Sony Ericsson', 'Xperia X10'],
'X10i': ['Sony Ericsson', 'Xperia X10'],
'X10iv': ['Sony Ericsson', 'Xperia X10'],
'X10S': ['Sony Ericsson', 'Xperia X10'],
'X10mini': ['Sony Ericsson', 'Xperia X10 Mini'],
'X10 Mini': ['Sony Ericsson', 'Xperia X10 Mini'],
'X10 Mini Pro': ['Sony Ericsson', 'Xperia X10 Mini Pro'],
'Z1i': ['Sony Ericsson', 'Xperia Play'],
'S51SE': ['Sony Ericsson', 'Xperia Mini'],
'IS11S': ['Sony Ericsson', 'Xperia Acro'],
'IS12S': ['Sony Ericsson', 'Xperia Acro HD'],
'SO-01B': ['Sony Ericsson', 'Xperia X10'],
'SO-01C': ['Sony Ericsson', 'Xperia Arc'],
'SO-01D': ['Sony Ericsson', 'Xperia Play'],
'SO-02C': ['Sony Ericsson', 'Xperia Acro'],
'SO-02D': ['Sony Ericsson', 'Xperia NX'],
'SO-03C': ['Sony Ericsson', 'Xperia Ray'],
'SO-03D': ['Sony Ericsson', 'Xperia Acro HD'],
'SO-04D': ['Sony', 'Xperia GX'],
'SO-05D': ['Sony', 'Xperia SX'],
'XPERIA X8': ['Sony Ericsson', 'Xperia X8'],
'Xperia X8': ['Sony Ericsson', 'Xperia X8'],
'Xperia X10': ['Sony Ericsson', 'Xperia X10'],
'Xperia ray': ['Sony Ericsson', 'Xperia Ray'],
'Xperia Ray': ['Sony Ericsson', 'Xperia Ray'],
'Xperia Arc': ['Sony Ericsson', 'Xperia Arc'],
'Xperia Mini': ['Sony Ericsson', 'Xperia Mini'],
'Xperia neo': ['Sony Ericsson', 'Xperia Neo'],
'Xperia Neo': ['Sony Ericsson', 'Xperia Neo'],
'XPERIA NEO': ['Sony Ericsson', 'Xperia Neo'],
'Xperia NeoV': ['Sony Ericsson', 'Xperia Neo V'],
'Xperia Neo V': ['Sony Ericsson', 'Xperia Neo V'],
'Xperia Play': ['Sony Ericsson', 'Xperia Play'],
'Sony Ericsson Xperia X1': ['Sony Ericsson', 'Xperia X1'],
'SonyHayabusa': ['Sony', 'Xperia Ion'],
'Hayabusa': ['Sony', 'Xperia Ion'],
'nozomi': ['Sony', 'Xperia S'],
'Sony Tablet P': ['Sony', 'Tablet P', 'tablet'],
'Sony Tablet S': ['Sony', 'Tablet S', 'tablet'],
'NWZ-Z1000Series': ['Sony', 'Walkman Z', 'media'],
'NW-Z1000Series': ['Sony', 'Walkman Z', 'media'],
'Spice Mi280': ['Spice', 'Mi-280'],
'Spice Mi300': ['Spice', 'Mi-300'],
'Spice Mi-310': ['Spice', 'Mi-310'],
'Spice Mi-425': ['Spice', 'Mi-425'],
'SPICE Mi-720': ['Spice', 'Mi-720'],
'A7272+': ['Star', 'A7272+'],
'e1109 v73 gq1002 ctp': ['Star', 'X18i'],
'TS1004T': ['Surf 3Q', 'TS1004T', 'tablet'],
'SYTABEX7-2': ['Sylvania', 'SYTABEX7', 'tablet'],
'TCL A860': ['TCL', 'A860'],
'TCL A906': ['TCL', 'A906'],
'TCL A909': ['TCL', 'A909'],
'TCL A919': ['TCL', 'A919'],
'TCL A990': ['TCL', 'A990'],
'TCL A996': ['TCL', 'A996'],
'TCL A998': ['TCL', 'A998'],
'TCL GENESEE E708': ['TCL', 'Genesee E708'],
'A10t(5DM3)': ['Teclast', 'A10T', 'tablet'],
'P72': ['Teclast', 'P72', 'tablet'],
'P76TI': ['Teclast', 'P76Ti', 'tablet'],
'P81HD': ['Teclast', 'P81HD', 'tablet'],
'P85(R8A1)': ['Teclast', 'P85', 'tablet'],
'T720 SE': ['Teclast', 'T720', 'tablet'],
'T760 from moage.com': ['Teclast', 'T760', 'tablet'],
'tegav2': ['Tegatech', 'TEGA v2', 'tablet'],
'TM-7025': ['teXet', 'TM-7025', 'tablet'],
'MoFing': ['Thomson', 'MoFing', 'tablet'],
'Ultimate10': ['Tomtec', 'Ultimate10', 'tablet'],
'Thl V7': ['THL', 'V7'],
'ThL V7': ['THL', 'V7'],
'ThL V8': ['THL', 'V8'],
'ThL V9': ['THL', 'V9'],
'ThL V11': ['THL', 'V11'],
'TSB CLOUD COMPANION;TOSHIBA AC AND AZ': ['Toshiba', 'Dynabook AZ', 'desktop'],
'TOSHIBA AC AND AZ': ['Toshiba', 'Dynabook AZ', 'desktop'],
'TOSHIBA FOLIO AND A': ['Toshiba', 'Folio 100', 'tablet'],
'T-01C': ['Toshiba', 'Regza T-01C'],
'T-01D': ['Toshiba', 'Regza T-01D'],
'IS04': ['Toshiba', 'Regza IS04'],
'IS11T': ['Toshiba', 'Regza IS11T'],
'AT1S0': ['Toshiba', 'Regza AT1S0'],
'Tostab03': ['Toshiba', 'Regza AT100', 'tablet'],
'AT100': ['Toshiba', 'Regza AT100', 'tablet'],
'AT200': ['Toshiba', 'Regza AT200', 'tablet'],
'AT470': ['Toshiba', 'Regza AT470', 'tablet'],
'AT570': ['Toshiba', 'Regza AT570', 'tablet'],
'AT830': ['Toshiba', 'Regza AT830', 'tablet'],
'Folio 100': ['Toshiba', 'Folio 100', 'tablet'],
'folio100': ['Toshiba', 'Folio 100', 'tablet'],
'THRiVE': ['Toshiba', 'THRiVE', 'tablet'],
'Fantastic T3': ['TWM', 'Fantastic T3'],
'M70014': ['United Star Technology', 'M70014', 'tablet'],
'PS47': ['Velocity Micro', 'Cruz PS47', 'tablet'],
'T301': ['Velocity Micro', 'Cruz T301', 'tablet'],
'Vibo-A688': ['FIH', 'Vibo A688'],
'Videocon-V7500': ['Videocon', 'V7500'],
'GTablet': ['ViewSonic', 'gTablet', 'tablet'],
'GtabComb': ['ViewSonic', 'gTablet', 'tablet'],
'TeamDRH ICS for GTablet': ['ViewSonic', 'gTablet', 'tablet'],
'ViewPad7': ['ViewSonic', 'ViewPad 7', 'tablet'],
'ViewPad 10e': ['ViewSonic', 'ViewPad 10e', 'tablet'],
'VTAB1008': ['Vizio', 'VTAB1008', 'tablet'],
'VTAB3010': ['Vizio', 'VTAB3010', 'tablet'],
'VOTO W5300': ['VOTO', 'W5300'],
'xPAD-70': ['WayteQ', 'xPAD-70', 'tablet'],
'xTAB-70': ['WayteQ', 'xTAB-70', 'tablet'],
'WellcoM-A99': ['WellcoM', 'A99'],
'N12': ['Window', 'N12', 'tablet'],
'N12R': ['Window', 'N12R', 'tablet'],
'N50': ['Window', 'N50', 'tablet'],
'N50DT': ['Window', 'N50DT', 'tablet'],
'N50GT': ['Window', 'N50GT', 'tablet'],
'N50GT A': ['Window', 'N50GT-A', 'tablet'],
'N70': ['Window', 'N70', 'tablet'],
'N70 DUAL CORE': ['Window', 'N70 Dual Core', 'tablet'],
'N80': ['Window', 'N80', 'tablet'],
'N90': ['Window', 'N90', 'tablet'],
'N90 DUAL CORE2 V12': ['Window', 'N90 Dual Core', 'tablet'],
'N612': ['Wishway', 'N612'],
'AT-AS43D': ['Wolfgang', 'AT-AS43D'],
'M12': ['Wopad', 'M12', 'tablet'],
'WM8650': ['WonderMedia', 'WM8650', 'tablet'],
'MI-ONE': ['Xiaomi', 'MI-ONE'],
'MI-ONE C1': ['Xiaomi', 'MI-ONE C1'],
'MI-ONE Plus': ['Xiaomi', 'MI-ONE Plus'],
'MI 1S': ['Xiaomi', 'MI-ONE Plus'],
'MI 1SC': ['Xiaomi', 'MI-ONE 1SC'],
'mione plus': ['Xiaomi', 'MI-ONE Plus'],
'MI-TWO': ['Xiaomi', 'MI-TWO'],
'MI 2': ['Xiaomi', 'MI-TWO'],
'MI 2S': ['Xiaomi', 'MI-TWO Plus'],
'MI 2SC': ['Xiaomi', 'MI-TWO Plus'],
'Q07CL01': ['XVision', 'Q07', 'tablet'],
'N6': ['Yarvik', '210 Tablet', 'tablet'],
'EMR1879': ['Yidong', 'EMR1879', 'tablet'],
'yusun W702': ['Yusun', 'W702'],
'YX-YUSUN E80': ['Yusun', 'E80'],
'zt180': ['Zenithink', 'ZT-180', 'tablet'],
'Jaguar7': ['ZiiLabs', 'Jaguar 7', 'tablet'],
'Ziss Ranger HD': ['Ziss', 'Ranger HD'],
'ZTE Libra': ['ZTE', 'Libra'],
'ZTE-T T9': ['ZTE', 'Light Tab T9', 'tablet'],
'V9': ['ZTE', 'Light Tab V9', 'tablet'],
'V9e+': ['ZTE', 'Light Tab 2', 'tablet'],
'V9A': ['ZTE', 'Light Tab 2', 'tablet'],
'Light Tab 2W': ['ZTE', 'Light Tab 2', 'tablet'],
'Light Tab 2': ['ZTE', 'Light Tab 2', 'tablet'],
'V9C': ['ZTE', 'Light Tab 3', 'tablet'],
'V55': ['ZTE', 'Optik', 'tablet'],
'Acqua': ['ZTE', 'Acqua'],
'Blade': ['ZTE', 'Blade'],
'Blade-V880': ['ZTE', 'Blade'],
'ZTE-U V880': ['ZTE', 'Blade'],
'Blade-opda': ['ZTE', 'Blade'],
'ZTE-BLADE': ['ZTE', 'Blade'],
'ZTE Blade': ['ZTE', 'Blade'],
'ZTE V880': ['ZTE', 'Blade'],
'ZTE-U(V)880+': ['ZTE', 'Blade'],
'V880': ['ZTE', 'Blade'],
'a5': ['ZTE', 'Blade'],
'Blade2': ['ZTE', 'Blade 2'],
'Blade S': ['ZTE', 'Blade S'],
'X500': ['ZTE', 'Score'],
'ZTE-X500': ['ZTE', 'Score'],
'Skate': ['ZTE', 'Skate'],
'ZTE Skate': ['ZTE', 'Skate'],
'ZTE-Skate': ['ZTE', 'Skate'],
'ZTE-SKATE': ['ZTE', 'Skate'],
'ZTE-V960': ['ZTE', 'Skate'],
'ZTE-U V960': ['ZTE', 'Skate'],
'ZTE Racer': ['ZTE', 'Racer'],
'ZTE-RACER': ['ZTE', 'Racer'],
'MTC 916': ['ZTE', 'Racer'],
'Racer': ['ZTE', 'Racer'],
'RacerII': ['ZTE', 'Racer 2'],
'RACERII': ['ZTE', 'Racer 2'],
'ZTE Roamer': ['ZTE', 'Roamer'],
'N860': ['ZTE', 'Warp'],
'N880': ['ZTE', 'Blade'],
'ZTE-T U802': ['ZTE', 'T-U802'],
'ZTE-T U806': ['ZTE', 'T-U806'],
'ZTE-T U812': ['ZTE', 'T-U812'],
'ZTE-T U830': ['ZTE', 'T-U830'],
'ZTE-T U880': ['ZTE', 'T-U880'],
'ZTE T U880': ['ZTE', 'T-U880'],
'ZTE-TU880': ['ZTE', 'T-U880'],
'ZTE-TU900': ['ZTE', 'T-U900'],
'ZTE-T U960': ['ZTE', 'T-U960'],
'ZTE-TU960s': ['ZTE', 'T-U960'],
'ZTE-T U960s': ['ZTE', 'T-U960'],
'ZTE U N720': ['ZTE', 'U-N720'],
'ZTE-U V856': ['ZTE', 'U-V856'],
'ZTE-U V857': ['ZTE', 'U-V857'],
'ZTE-U V881': ['ZTE', 'U-V881'],
'ZTE-U X850': ['ZTE', 'U-X850'],
'ZTE-U X876': ['ZTE', 'U-X876'],
'ZTE-X876': ['ZTE', 'U-X876'],
'ZTE-C R750': ['ZTE', 'C-R750'],
'ZTE-C N600': ['ZTE', 'C-N600'],
'ZTE-C N600+': ['ZTE', 'C-N600'],
'ZTE-C N606': ['ZTE', 'C-N606'],
'ZTE-C N700': ['ZTE', 'C-N700'],
'ZTE-C N760': ['ZTE', 'C-N760'],
'ZTE-C N880': ['ZTE', 'C-N880'],
'ZTE-C N880S': ['ZTE', 'C-N880'],
'ZTE-C N880s': ['ZTE', 'C-N880'],
'ZTE-C X500': ['ZTE', 'C-X500'],
'ZTE-C X920': ['ZTE', 'C-X920'],
'ZXY-ZTE-C X920': ['ZTE', 'C-X920'],
'ZTE GV821': ['ZTE', 'G-V821'],
'ZTE N880E': ['ZTE', 'N880E'],
'ZTE-N880E': ['ZTE', 'N880E'],
'MIUI N880S': ['ZTE', 'N880S'],
'ZTE N882E': ['ZTE', 'N882E'],
'ZTE N855D': ['ZTE', 'N855D'],
'ZTE-N910': ['ZTE', 'N910'],
'E810': ['ZTE', 'E810'],
'u880': ['ZTE', 'U880'],
'ZTE U880E': ['ZTE', 'U880E'],
'U880': ['ZTE', 'U880'],
'ZTE U970': ['ZTE', 'U970'],
'ZTE V768': ['ZTE', 'V768'],
'ZTE-V856': ['ZTE', 'V856'],
'ZTE V877b': ['ZTE', 'V877'],
'ZTE V889D': ['ZTE', 'V889'],
'ZTE-Z990': ['ZTE', 'Z990'],
'ZTEU790': ['ZTE', 'U790'],
'003Z': ['ZTE', 'Softbank 003Z'],
'008Z': ['ZTE', 'Softbank 008Z'],
'009Z': ['ZTE', 'Softbank Star7'],
'i-mobile i691': ['i-Mobile', 'i691'],
'i-mobile i695': ['i-Mobile', 'i695'],
'i-mobile i858': ['i-Mobile', 'i858'],
'i-mobile 3G 8500': ['i-Mobile', '3G 8500'],
'i-mobile I-Note': ['i-Mobile', 'i-Note', 'tablet'],
'Optimus Boston': ['Optimus', 'Boston'],
'Optimus San Francisco': ['Optimus', 'San Francisco'],
'Optimus Monte Carlo': ['Optimus', 'Monte Carlo'],
'Orange Boston': ['Orange', 'Boston'],
'Orange Monte Carlo': ['Orange', 'Monte Carlo'],
'San Francisco': ['Orange', 'San Francisco'],
'San Francisco for Orange': ['Orange', 'San Francisco'],
'Orange San Francisco': ['Orange', 'San Francisco'],
'MOVE': ['T-Mobile', 'MOVE'],
'T-Mobile G1': ['T-Mobile', 'G1'],
'T-Mobile G2': ['T-Mobile', 'G2'],
'T-Mobile G2 Touch': ['T-Mobile', 'G2'],
'LG-P999': ['T-Mobile', 'G2x'],
'LG-E739': ['T-Mobile', 'myTouch'],
'T-Mobile myTouch 3G': ['T-Mobile', 'myTouch 3G'],
'T-Mobile myTouch 3G Slide': ['T-Mobile', 'myTouch 3G Slide'],
'T-Mobile Espresso': ['T-Mobile', 'myTouch 3G Slide'],
'HTC myTouch 3G Slide': ['T-Mobile', 'myTouch 3G Slide'],
'T-Mobile myTouch 4G': ['T-Mobile', 'myTouch 4G'],
'HTC Glacier': ['T-Mobile', 'myTouch 4G'],
'HTC Panache': ['T-Mobile', 'myTouch 4G'],
'myTouch4G': ['T-Mobile', 'myTouch 4G'],
'My Touch 4G': ['T-Mobile', 'myTouch 4G'],
'HTC Mytouch 4G': ['T-Mobile', 'myTouch 4G'],
'HTC My Touch 4G': ['T-Mobile', 'myTouch 4G'],
'HTC mytouch4g': ['T-Mobile', 'myTouch 4G'],
'HTC myTouch 4G Slide': ['T-Mobile', 'myTouch 4G Slide'],
'myTouch 4G Slide': ['T-Mobile', 'myTouch 4G Slide'],
'T-Mobile myTouch Q': ['T-Mobile', 'myTouch Q'],
'LG-C800': ['T-Mobile', 'myTouch Q'],
'Pulse Mini': ['T-Mobile', 'Pulse Mini'],
'Vodafone 845': ['Vodafone', '845 Nova'],
'Vodafone 858': ['Vodafone', '858 Smart'],
'Vodafone 945': ['Vodafone', '945'],
'Vodafone Smart II': ['Vodafone', 'Smart II'],
'SmartTab10': ['Vodafone', 'SmartTab 10', 'tablet'],
'SCH-N719': ['Samsung', 'Galaxy Note II'],
'Coolpad 8190': ['Coolpad', '8190'],
'U705T': ['Oppo', 'Ulike2'],
'Coolpad 8020+': ['Coolpad', '8020'],
'Huawei Y310-5000': ['Huawei', 'Y310'],
'GT-S7572': ['Samsung', 'Galaxy Trend Duos II'],
'Lenovo A278t': ['Lenovo', 'A278t'],
'Lenovo A690': ['Lenovo', 'A690'],
'GT-I8262D': ['Samsung', 'LePhone I8262D'],
'Lenovo A278t': ['Lenovo', 'A278t'],
'MI 2C': ['Xiaomi', 'MI-TWO'],
'Coolpad 8070': ['Coolpad', '8070'],
'R813T': ['Oppo', 'R813T'],
'ZTE U930': ['ZTE', 'U930'],
'Lenovo A360': ['Lenovo', 'LePhone A360'],
'SCH-N719': ['Samsung', 'Galaxy Note II'],
'Coolpad 8010': ['Coolpad', '8010'],
'LENOVO-Lenovo-A288t': ['Lenovo', 'A288t'],
'U701T': ['Oppo', 'U701T'],
'ZTEU795': ['Coolpad', 'U795'],
'Haier-HT-I617': ['Haier', 'I617'],
'ZTEU880s': ['ZTE', 'T-U880'],
'GT-S6352': ['Samsung', 'GT-S6352'],
'GT-S7568': ['Samsung', 'GT-S7568'],
'K-Touch T619+': ['K-Touch', 'T619'],
'MI 2A': ['Xiaomi', 'MI-TWO A'],
'GT-N7108': ['Samsung', 'Galaxy Note II'],
'K-Touch T621': ['K-Touch', 'T621'],
'LENOVO-Lenovo-A298t': ['Lenovo', 'A298'],
'Coolpad 8150': ['Coolpad', '8150'],
'5860S': ['Coolpad', '5860'],
'ZTEU807': ['ZTE', 'U807'],
'SCH-I739': ['Samsung', 'SCH-I739'],
'SCH-I829': ['Samsung', 'SCH-I829'],
'HS-E830': ['Hisense', 'E830'],
'HS-E920': ['Hisense', 'E920'],
'Lenovo S720': ['Lenovo', 'S720'],
'MI 2C': ['Xiaomi', 'MI-TWO'],
'OPPO R813T': ['Oppo', 'R813'],
'SCH-I879': ['Samsung', 'Galaxy Note'],
'GT-S6102E': ['Samsung', 'Galaxy Y Duos'],
'Redmi Note 3': ['Xiaomi', 'Redmi Note 3']
}
# Maps a BlackBerry model number (4 digits, plus the odd suffixed variant
# like '8350i') to its marketing family name (Bold, Curve, Pearl, Storm,
# Style, Torch, Tour, Porsche P).  Used to turn a raw model number from a
# user-agent string into a human-readable device name.
BLACKBERRY_MODELS = {
    '9600': 'Bold',
    '9650': 'Bold',
    '9700': 'Bold',
    '9780': 'Bold',
    '9790': 'Bold',
    '9900': 'Bold',
    '9930': 'Bold',
    '8300': 'Curve',
    '8310': 'Curve',
    '8320': 'Curve',
    '8330': 'Curve',
    '8350i': 'Curve',
    '8520': 'Curve',
    '8530': 'Curve',
    '8900': 'Curve',
    '9220': 'Curve',
    '9300': 'Curve',
    '9330': 'Curve',
    '9350': 'Curve',
    '9360': 'Curve',
    '9370': 'Curve',
    '9380': 'Curve',
    '8100': 'Pearl',
    '8110': 'Pearl',
    '8120': 'Pearl',
    '8130': 'Pearl',
    '8220': 'Pearl',
    '8230': 'Pearl',
    '9100': 'Pearl',
    '9105': 'Pearl',
    '9530': 'Storm',
    '9550': 'Storm',
    '9670': 'Style',
    '9800': 'Torch',
    '9810': 'Torch',
    '9850': 'Torch',
    '9860': 'Torch',
    '9630': 'Tour',
    '9981': 'Porsche P'
}
# Fallback browser-detection table: each entry pairs a display name with a
# regexp to be matched against the user-agent string.  Where the regexp has
# a capture group, group 1 captures the version number.  Optional keys:
#   'flag'    -- re flags to apply when matching (e.g. re.I).
#   'details' -- presumably how many version components to report
#                (e.g. 2 -> "x.y", 3 -> "x.y.z"); TODO confirm against the
#                code that consumes this table (not visible here).
OTHER_BROWSERS = [
    {'name': 'AdobeAIR', 'regexp': r'AdobeAIR/([0-9.]*)'},
    {'name': 'Awesomium', 'regexp': r'Awesomium/([0-9.]*)'},
    {'name': 'Canvace', 'regexp': r'Canvace Standalone/([0-9.]*)'},
    {'name': 'Ekioh', 'regexp': r'Ekioh/([0-9.]*)'},
    {'name': 'JavaFX', 'regexp': r'JavaFX/([0-9.]*)'},
    {'name': 'GFXe', 'regexp': r'GFXe/([0-9.]*)'},
    {'name': 'LuaKit', 'regexp': r'luakit'},
    {'name': 'Titanium', 'regexp': r'Titanium/([0-9.]*)'},
    {'name': 'OpenWebKitSharp', 'regexp': r'OpenWebKitSharp'},
    {'name': 'Prism', 'regexp': r'Prism/([0-9.]*)'},
    {'name': 'Qt', 'regexp': r'Qt/([0-9.]*)'},
    # Two QtEmbedded entries: the first matches the bare token, the second
    # additionally captures the Qt version when present.
    {'name': 'QtEmbedded', 'regexp': r'QtEmbedded'},
    {'name': 'QtEmbedded', 'regexp': r'QtEmbedded.*Qt/([0-9.]*)'},
    {'name': 'RhoSimulator', 'regexp': r'RhoSimulator'},
    {'name': 'UWebKit', 'regexp': r'UWebKit/([0-9.]*)'},
    {'name': 'PhantomJS', 'regexp': r'PhantomJS/([0-9.]*)'},
    {'name': 'Google Web Preview', 'regexp': r'Google Web Preview'},
    {'name': 'Google Earth', 'regexp': r'Google Earth/([0-9.]*)'},
    {'name': 'EA Origin', 'regexp': r'Origin/([0-9.]*)'},
    {'name': 'SecondLife', 'regexp': r'SecondLife/([0-9.]*)'},
    {'name': 'Valve Steam', 'regexp': r'Valve Steam'},
    {'name': 'Songbird', 'regexp': r'Songbird/([0-9.]*)'},
    {'name': 'Thunderbird', 'regexp': r'Thunderbird/([0-9.]*)'},
    {'name': 'Abrowser', 'regexp': r'Abrowser/([0-9.]*)'},
    {'name': 'arora', 'regexp': r'[Aa]rora/([0-9.]*)'},
    {'name': 'Baidu Browser', 'regexp': r'M?BaiduBrowser/([0-9.]*)', 'flag':re.I},
    {'name': 'Camino', 'regexp': r'Camino/([0-9.]*)'},
    {'name': 'Canure', 'regexp': r'Canure/([0-9.]*)', 'details': 3},
    {'name': 'CometBird', 'regexp': r'CometBird/([0-9.]*)'},
    {'name': 'Comodo Dragon', 'regexp': r'Comodo_Dragon/([0-9.]*)', 'details': 2},
    {'name': 'Conkeror', 'regexp': r'[Cc]onkeror/([0-9.]*)'},
    {'name': 'CoolNovo', 'regexp': r'(?:CoolNovo|CoolNovoChromePlus)/([0-9.]*)', 'details': 3},
    {'name': 'ChromePlus', 'regexp': r'ChromePlus(?:/([0-9.]*))?$', 'details': 3},
    {'name': 'Daedalus', 'regexp': r'Daedalus ([0-9.]*)', 'details': 2},
    {'name': 'Demobrowser', 'regexp': r'demobrowser/([0-9.]*)'},
    {'name': 'Dooble', 'regexp': r'Dooble(?:/([0-9.]*))?'},
    {'name': 'DWB', 'regexp': r'dwb(?:-hg)?(?:/([0-9.]*))?'},
    {'name': 'Epiphany', 'regexp': r'Epiphany/([0-9.]*)'},
    {'name': 'FireWeb', 'regexp': r'FireWeb/([0-9.]*)'},
    {'name': 'Flock', 'regexp': r'Flock/([0-9.]*)', 'details': 3},
    {'name': 'Galeon', 'regexp': r'Galeon/([0-9.]*)', 'details': 3},
    {'name': 'Helium', 'regexp': r'HeliumMobileBrowser/([0-9.]*)'},
    {'name': 'iCab', 'regexp': r'iCab/([0-9.]*)'},
    {'name': 'Iceape', 'regexp': r'Iceape/([0-9.]*)'},
    {'name': 'IceCat', 'regexp': r'IceCat ([0-9.]*)'},
    {'name': 'Iceweasel', 'regexp': r'Iceweasel/([0-9.]*)'},
    {'name': 'InternetSurfboard', 'regexp': r'InternetSurfboard/([0-9.]*)'},
    {'name': 'Iron', 'regexp': r'Iron/([0-9.]*)', 'details': 2},
    {'name': 'Isis', 'regexp': r'BrowserServer'},
    {'name': 'Jumanji', 'regexp': r'jumanji'},
    {'name': 'Kazehakase', 'regexp': r'Kazehakase/([0-9.]*)'},
    {'name': 'KChrome', 'regexp': r'KChrome/([0-9.]*)', 'details': 3},
    {'name': 'K-Meleon', 'regexp': r'K-Meleon/([0-9.]*)'},
    {'name': 'Leechcraft', 'regexp': r'Leechcraft(?:/([0-9.]*))?', 'details': 2},
    {'name': 'Lightning', 'regexp': r'Lightning/([0-9.]*)'},
    {'name': 'Lunascape', 'regexp': r'Lunascape[/| ]([0-9.]*)', 'details': 3},
    {'name': 'iLunascape', 'regexp': r'iLunascape/([0-9.]*)', 'details': 3},
    {'name': 'Maxthon', 'regexp': r'Maxthon[/ ]([0-9.]*)', 'details': 3},
    {'name': 'MiniBrowser', 'regexp': r'MiniBr?owserM/([0-9.]*)'},
    {'name': 'MiniBrowser', 'regexp': r'MiniBrowserMobile/([0-9.]*)'},
    {'name': 'MixShark', 'regexp': r'MixShark/([0-9.]*)'},
    {'name': 'Motorola WebKit', 'regexp': r'MotorolaWebKit/([0-9.]*)', 'details': 3},
    {'name': 'NetFront LifeBrowser', 'regexp': r'NetFrontLifeBrowser/([0-9.]*)'},
    {'name': 'Netscape Navigator', 'regexp': r'Navigator/([0-9.]*)', 'details': 3},
    {'name': 'Odyssey', 'regexp': r'OWB/([0-9.]*)'},
    {'name': 'OmniWeb', 'regexp': r'OmniWeb'},
    {'name': 'Orca', 'regexp': r'Orca/([0-9.]*)'},
    {'name': 'Origyn', 'regexp': r'Origyn Web Browser'},
    {'name': 'Palemoon', 'regexp': r'Pale[mM]oon/([0-9.]*)'},
    {'name': 'Phantom', 'regexp': r'Phantom/V([0-9.]*)'},
    {'name': 'Polaris', 'regexp': r'Polaris/v?([0-9.]*)', 'flag':re.I, 'details': 2},
    {'name': 'QtCreator', 'regexp': r'QtCreator/([0-9.]*)'},
    {'name': 'QtQmlViewer', 'regexp': r'QtQmlViewer'},
    {'name': 'QtTestBrowser', 'regexp': r'QtTestBrowser/([0-9.]*)'},
    {'name': 'QtWeb', 'regexp': r'QtWeb Internet Browser/([0-9.]*)'},
    {'name': 'QupZilla', 'regexp': r'QupZilla/([0-9.]*)'},
    {'name': 'Roccat', 'regexp': r'Roccat/([0-9]\.[0-9.]*)'},
    {'name': 'Raven for Mac', 'regexp': r'Raven for Mac/([0-9.]*)'},
    {'name': 'rekonq', 'regexp': r'rekonq'},
    {'name': 'RockMelt', 'regexp': r'RockMelt/([0-9.]*)', 'details': 2},
    {'name': 'Sleipnir', 'regexp': r'Sleipnir/([0-9.]*)', 'details': 3},
    {'name': 'SMBrowser', 'regexp': r'SMBrowser'},
    {'name': 'Sogou Explorer', 'regexp': r'SE 2.X MetaSr'},
    {'name': 'Snowshoe', 'regexp': r'Snowshoe/([0-9.]*)', 'details': 2},
    {'name': 'Sputnik', 'regexp': r'Sputnik/([0-9.]*)', 'flag':re.I, 'details': 3},
    {'name': 'Stainless', 'regexp': r'Stainless/([0-9.]*)'},
    {'name': 'SunChrome', 'regexp': r'SunChrome/([0-9.]*)'},
    {'name': 'Surf', 'regexp': r'Surf/([0-9.]*)'},
    {'name': 'TaoBrowser', 'regexp': r'TaoBrowser/([0-9.]*)', 'details': 2},
    {'name': 'TaomeeBrowser', 'regexp': r'TaomeeBrowser/([0-9.]*)', 'details': 2},
    {'name': 'TazWeb', 'regexp': r'TazWeb'},
    {'name': 'Viera', 'regexp': r'Viera/([0-9.]*)'},
    {'name': 'Villanova', 'regexp': r'Villanova/([0-9.]*)', 'details': 3},
    {'name': 'Wavelink Velocity', 'regexp': r'Wavelink Velocity Browser/([0-9.]*)', 'details': 2},
    {'name': 'WebPositive', 'regexp': r'WebPositive'},
    {'name': 'WebRender', 'regexp': r'WebRender'},
    {'name': 'Wyzo', 'regexp': r'Wyzo/([0-9.]*)', 'details': 3},
    {'name': 'Zetakey', 'regexp': r'Zetakey Webkit/([0-9.]*)'},
    {'name': 'Zetakey', 'regexp': r'Zetakey/([0-9.]*)'}
]
|
[
"woshiluolaifeng@hotmail.com"
] |
woshiluolaifeng@hotmail.com
|
561487ce846747b6d7fb0034befaeceaa9bf589e
|
4ae6e54a01e25d370929b49bbaa91c51b003d41a
|
/wwwroot/app/cgi-bin/AutograderBackEnd.py
|
a5ec7da5cc2a9cd932b912fdd77e998cb02ccbfb
|
[] |
no_license
|
rdasxy/programming-autograder
|
8197a827236dc5384f6f3ceeaf2fbadefdd5506c
|
f885c1cd37721e1cd0b3bf3b49cc44b9adb64d92
|
refs/heads/master
| 2021-01-22T05:33:28.971055
| 2012-12-27T21:53:24
| 2012-12-27T21:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,367
|
py
|
# 4th major iteration - refactoring to deal with changed authentication procedures
# and to deal with each problem in parallel.
import os, sys
import subprocess
import random
import string
import winprocess
import win32pipe
import win32file
import pickle
import autograde_utilities
import thread
import Queue
import time
import datetime
import smtplib
import collections
import zipfile
import autograder
def ArchiveResults(JobTriple):
    ''' Record this attempt in the archive zip.

    Parameters:
        JobTriple: 3-tuple of (Job, Result, Error) where Job is the job
            namedtuple, Result is a status string and Error is a (possibly
            empty) error-message string.  The Error element is not archived.
    Side effects:
        Pickles a summary dict of the attempt to a staging file under
        c:/users/public/archive, appends it to archive.zip there, then
        deletes the staging file.
    Returns: Nothing.
    '''
    Job, Result = JobTriple[0], JobTriple[1]
    D = {
        'UserID': Job.UserID,
        'CourseNum': Job.CourseNum,
        'ProblemNum': Job.ProblemNum,
        'ProblemID': Job.ProblemID,
        'Timestamp': Job.Timestamp,
        'Files': Job.Files,
        'Result': Result,
    }
    Path = 'c:/users/public/archive'
    # Build a filesystem-safe, per-attempt-unique name from user, course,
    # problem and timestamp (spaces/colons stripped from the timestamp).
    Fname = Job.UserID + Job.CourseNum + "%04d" % Job.ProblemID + \
        str(Job.Timestamp).replace(' ', '').replace(':', '')
    Fullname = os.path.join(Path, Fname + '.pkl')
    Zipname = os.path.join(Path, 'archive.zip')
    # BUG FIX: the original left file handles open if pickle.dump or
    # Z.write raised; 'with'/'finally' guarantee closure, so a failed
    # attempt cannot leave the archive zip locked or the staging file behind.
    with open(Fullname, 'wb') as F:
        pickle.dump(D, F)
    try:
        with zipfile.ZipFile(Zipname, 'a', zipfile.ZIP_DEFLATED) as Z:
            Z.write(Fullname, os.path.basename(Fullname))
    finally:
        # The .pkl is only a staging file; remove it even if zipping failed.
        os.remove(Fullname)
def EmailResults(AJob, Result, Error):
    ''' Email the grading result for one job to the submitting student.

    Parameters:
        AJob: job namedtuple; AJob.UserID determines the recipient address.
        Result: brief status string ('Correct', 'SyntaxError', ...).
        Error: error-message string (may be empty).
    Side effects: sends one message through Gmail's SMTP server.
    Returns: Nothing.
    '''
    # includes code from: http://www.mkyong.com/python/how-do-send-email-in-python-via-smtplib/
    # Staff accounts get the @umkc.edu domain; students get @mail.umkc.edu.
    prefix = AJob.UserID
    if prefix in ('hareb', 'spatzs'):
        suffix = '@umkc.edu'
    else:
        suffix = '@mail.umkc.edu'
    Addy = prefix + suffix
    # SECURITY: credentials are hard-coded in source; move them to a
    # protected configuration file or environment variable.
    gmail_acct = 'umkcautogradef@gmail.com' if False else 'umkcautograder@gmail.com'
    gmail_pwd = 'SaulAndBrian'
    # build message
    Body = "\nThis is an automatically generated email from the autograder. Do not reply to this address. "
    Body += "Contact the course instructor if you have questions."
    Body += "\nHere are the results from your submission for problem %s, %s:\n" % (AJob.ProblemNum, AJob.CourseNum)
    Body += Result + '\n' + Error + '\n'
    header = 'To:' + Addy + '\n' + 'From: ' + gmail_acct + '\n' + 'Subject:Autograder results \n'
    msg = header + Body
    # Now deal with the smtp server
    smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
    smtpserver.ehlo()
    smtpserver.starttls()
    # BUG FIX: was 'smtpserver.ehlo' (attribute reference, never called);
    # EHLO must be re-issued after STARTTLS before LOGIN.
    smtpserver.ehlo()
    smtpserver.login(gmail_acct, gmail_pwd)
    smtpserver.sendmail(gmail_acct, Addy, msg)
    smtpserver.close()
def PostResults(ResultQueue):
    ''' Drain the results queue, delivering each result.

    Parameters:
        ResultQueue: queue of 3-tuples (Job namedtuple, Result string,
            ErrMsg string, possibly empty).
    For every queued triple: archive the attempt, record the grade status,
    and email the student.  Runs until the queue is empty, then returns.
    Returns: Nothing.
    '''
    while not ResultQueue.empty():
        Triple = ResultQueue.get()
        # Archive first so the attempt is recorded even if delivery fails later.
        ArchiveResults(Triple)
        TheJob, Outcome, ErrMsg = Triple[0], Triple[1], Triple[2]
        autograder.ReportGradeStatus(TheJob, Outcome)
        EmailResults(TheJob, Outcome, ErrMsg)
def Grade(JobList):
    ''' Process the list of pending grading jobs (entry point for the chron job).

    Parameters:
        JobList: list of Job namedtuples (UserID, CourseNum, ProblemNum,
            ProblemID, Timestamp, Files).  Consumed (popped) by this function.
    For each job: set up a sandbox, dispatch to the handler for the job's
    course (Python for cs101, MARS/MIPS for cs282).  Afterwards post all
    queued results (archive + grade report + email) and remove the sandboxes.
    Returns: Nothing.
    '''
    ResultsQueue = Queue.Queue()
    SandboxList = list()
    # Dispatch table: course number -> submission handler.
    Dispatch = {'cs101': HandleSubmission, 'cs282': HandleMARSSubmission}
    while JobList:
        Job = JobList.pop(0)
        if not Job.Files:  # Student didn't turn anything in
            ResultsQueue.put((Job, 'SubmissionError', 'No files submitted'))
            # BUG FIX: previously fell through and set up/ran the job anyway.
            continue
        Settings, ProblemDict = SetUpSubmission(Job)
        # BUG FIX: on failure SetUpSubmission returns (False, <error string>),
        # so Settings must be tested BEFORE ProblemDict['SandboxDir'] is read
        # (ProblemDict is then a str and indexing it raised TypeError).
        if not Settings:  # Can't set up the problem
            ResultsQueue.put((Job, 'SystemError', "Can't set up problem; see administrator"))
            # BUG FIX: 'return' here dropped the remaining jobs and skipped
            # PostResults, so even this error was never delivered.
            continue
        SandboxList.append(ProblemDict['SandboxDir'])
        # Otherwise paths are set up & sandbox is ready.
        try:
            ProblemDict['FileToRun'] = os.path.join(ProblemDict['SandboxDir'], ProblemDict['Run'])
        except KeyError:
            os.rmdir(ProblemDict['SandboxDir'])
            ResultsQueue.put((Job, 'SystemError', 'Misread configuration data; see administrator'))
            continue
        try:
            FuncToRun = Dispatch[Job.CourseNum]
        except KeyError:
            # BUG FIX: previously only printed a diagnostic and then called
            # the (unbound) handler anyway, raising NameError.
            ResultsQueue.put((Job, 'SystemError', 'Unknown course number; see administrator'))
            continue
        # The handler posts its (Job, Result, ErrMsg) triple to ResultsQueue.
        FuncToRun(Job, Settings, ProblemDict, ResultsQueue)
    # Deliver everything the handlers queued.
    PostResults(ResultsQueue)
    # Remove the per-job sandboxes; anything that can't be removed here is
    # cleaned up later by a utility script, so failures are ignored.
    for Dir in SandboxList:
        try:
            autograde_utilities.Cleanup(Dir)
            os.rmdir(Dir)
        except Exception:
            os.chdir('..')  # the cwd may still be inside Dir; step out and retry
            try:
                os.rmdir(Dir)
            except Exception:
                pass
def ReadSystemConfig():
    ''' Read the system-wide configuration file c:/autograder.ini.

    The file holds 'Key = Value' lines (e.g. ProblemPath, SandboxDir).
    Returns:
        dict mapping stripped setting names to stripped values, or None if
        the file cannot be opened.  Lines without an '=' or with an empty
        key are skipped.
    '''
    Stuff = dict()
    try:
        # 'with' closes the handle even if a later line fails to parse.
        with open('c:/autograder.ini') as F:
            for line in F:
                # BUG FIX: split at most once so values may contain '=';
                # previously a line with no '=' raised an uncaught
                # IndexError (the old 'except KeyError' never fired).
                Setting = line.split('=', 1)
                if len(Setting) == 2 and Setting[0].strip():
                    Stuff[Setting[0].strip()] = Setting[1].strip()
    except IOError:
        # BUG FIX: previously fell through to 'return Stuff' with Stuff
        # unbound, raising NameError; callers test the result for truthiness.
        return None
    return Stuff
def ReadProblemINI(ProblemPath):
    ''' Read and parse this problem's template.txt configuration file.

    Parameters:
        ProblemPath: directory containing template.txt.
    Returns:
        dict of settings with 'SubmissionFiles'/'ExtraFiles'/'IOPairs'
        converted to lists ('IOPairs' to a list of (input, output) name
        tuples, 'ExtraFiles' to full paths under ProblemPath/ExtraFiles)
        and 'IOPath' resolved relative to ProblemPath; False if
        template.txt cannot be opened.
    '''
    ProblemDict = dict()
    try:
        # 'with' closes the handle even if a later line fails to parse.
        with open(os.path.join(ProblemPath, 'template.txt')) as F:
            for line in F:
                if len(line) > 2:
                    # BUG FIX: split at most once so values may themselves
                    # contain ':' (e.g. Windows paths like c:/x were
                    # previously truncated at the colon) and a colon-less
                    # line no longer raises an uncaught IndexError.
                    thingy = line.split(':', 1)
                    if len(thingy) == 2 and thingy[0].strip():
                        ProblemDict[thingy[0].strip()] = thingy[1].strip()
    except IOError:
        return False
    # Note: Some things might be whitespace-separated lists. Convert them.
    try:
        ProblemDict['SubmissionFiles'] = [f.lower().strip() for f in ProblemDict['SubmissionFiles'].split()]
    except KeyError:
        pass
    try:
        ExtraList = [f.lower().strip() for f in ProblemDict['ExtraFiles'].split()]
        ExtraPath = os.path.join(ProblemPath, 'ExtraFiles')
        ProblemDict['ExtraFiles'] = [os.path.join(ExtraPath, f) for f in ExtraList]
    except KeyError:
        pass
    try:
        Names = [f.lower().strip() for f in ProblemDict['IOPairs'].split()]
        TupList = list()
        # Pair consecutive names as (input, output); a trailing unpaired
        # name is ignored.
        # BUG FIX: the original popped inside try/except IndexError and
        # swallowed the error, so an odd-length IOPairs list never shrank
        # to empty and the loop spun forever.
        while len(Names) >= 2:
            TupList.append((Names.pop(0), Names.pop(0)))
        ProblemDict['IOPairs'] = TupList
    except KeyError:
        pass
    try:
        IOPath = ProblemDict['IOPath']
    except KeyError:
        IOPath = ''
    ProblemDict['IOPath'] = os.path.join(ProblemPath, IOPath)
    return ProblemDict
def SetUpSubmission(Job):
    ''' Locate this job's problem configuration and create its sandbox.

    Parameters:
        Job: job namedtuple (UserID, CourseNum, ProblemNum, ProblemID,
            Timestamp, Files).
    Returns:
        (Settings, ProblemDict) on success -- Settings is the system
        configuration with 'ProblemPath' resolved for this problem, and
        ProblemDict is the parsed problem configuration with 'SandboxDir'
        added; (False, <error message string>) on any failure.
    '''
    Settings = ReadSystemConfig()
    if not Settings:
        return False, "Can't read system configuration"
    ProblemPath = os.path.join(Settings['ProblemPath'], Job.CourseNum, '%04d' % Job.ProblemID)
    if not os.path.isdir(ProblemPath):
        return False, "Can't find problem directory"
    Settings['ProblemPath'] = ProblemPath
    ProblemDict = ReadProblemINI(ProblemPath)
    if not ProblemDict:
        return False, "Can't read problem configuration"
    # Sandbox dir looks something like:
    #   Sandbox\abcxyz02072012-01-17120102030000
    # for problem 0207 submitted by student 'abcxyz' on 2012-01-17 at
    # 12:01:02.030000 PM.  Timestamp is a datetime object, and its string
    # form contains characters that can't be part of a directory path, so
    # strip them out.
    TempDir = Job.UserID + ('%04d' % Job.ProblemNum) + str(Job.Timestamp)
    for ch in ' :.,':
        TempDir = TempDir.replace(ch, '')
    ProblemDict['SandboxDir'] = os.path.join(Settings['SandboxDir'], TempDir)
    try:
        os.mkdir(ProblemDict['SandboxDir'])
    except OSError:
        # BUG FIX: was 'except WindowsError', which is itself a NameError on
        # non-Windows platforms; WindowsError subclasses OSError, so this
        # catches the same mkdir failures portably.
        ProblemDict['SandboxDir'] = None
        return False, "Can't configure problem."
    return Settings, ProblemDict
def HandleSubmission(Job, Settings, ProblemDict, ResultsQueue):
    ''' Run one Python submission against every (input, output) test pair.

    Parameters:
        Job: the job namedtuple (includes the student's submitted files).
        Settings: system configuration dict (kept for the uniform handler
            signature; not referenced here).
        ProblemDict: parsed problem configuration ('IOPairs', 'IOPath', ...).
        ResultsQueue: queue that one (Job, Result, ErrMsg) triple is posted to.
    Stops at the first test pair whose result is anything other than
    'Correct' and posts that result; posts (Job, 'Correct', '') only when
    every pair passed.
    Returns: Nothing.
    '''
    BasePath = ProblemDict['IOPath']
    for InName, OutName in ProblemDict['IOPairs']:
        # HandleFile expects ProblemDict['Extras'] to exist; default it.
        ProblemDict.setdefault('Extras', None)
        Outcome, Message = HandleFile(Job,
                                      os.path.join(BasePath, InName),
                                      os.path.join(BasePath, OutName),
                                      ProblemDict)
        if Outcome != 'Correct':
            # First failure ends the run early.
            ResultsQueue.put((Job, Outcome, Message))
            return
    # Every test pair passed.
    ResultsQueue.put((Job, 'Correct', ''))
    return
def HandleMARSSubmission(Job, Settings, ProblemDict, ResultsQueue):
    '''
    Process one student's MIPS submission on one set of input data using MARS.
    Parameters:
        Job: The named tuple containing, among other things, the files submitted by the student and their contents.
        Settings: system configuration dict (not referenced in this body).
        ProblemDict: problem configuration ('Run', 'Extras', 'FileToRun', ...).
        ResultsQueue: NOTE(review): never used in this body; the function
            returns its result tuple instead, and the caller (Grade) ignores
            return values from handlers -- so MARS results appear to be
            dropped.  Confirm intent; the Python handler posts to the queue.
    Returns:
        tuple of strings (Res, Err). Res is a brief description ('Correct',
        'Runtime exceeded', etc), and Err is an error message (possibly empty
        string).
    '''
    # set up some labels for later (exit codes)
    ExitMsg = {1:'Translation Error', 2:'Time Limit Exceeded', 3:'Windows Error', \
        4:'Excessive Output', 5:'Submission Error', 6:'Assembly Error',\
        7:'Runtime Error'}
    # Make sure we've got everything we're expecting; if we don't, skip all this.
    ExpectedFiles = [Filename for (Filename, contents) in Job.Files]
    try:
        ExpectedFiles += ProblemDict['Extras'] # SystemSuppliedFileList
    except (TypeError, KeyError): # if there was no list of other needed files.
        pass
    Expected = [os.path.basename(name).lower().strip() for name in ExpectedFiles]
    if os.path.basename(ProblemDict['Run']).lower().strip() not in Expected:
        Res = "File " + ProblemDict['Run'] + " was expected, but not found."
        Err = ExitMsg[5]
        # NOTE(review): returns (label, message) -- i.e. in (Err, Res) order,
        # the opposite of the docstring's naming; the caller only compares
        # the first element against 'Correct', so this still "works".
        return Err, Res
    # even if we're going ahead, we can free up some memory
    del(ExpectedFiles)
    del(Expected)
    # Create working (temporary) directory, copy files into it
    ProblemDict['WritePath'] = os.path.dirname(ProblemDict['FileToRun']) #FileNameToRun)
    try:
        # Write each submitted (filename, contents) pair into the sandbox.
        for f in Job.Files:
            Fname = f[0]
            Code = f[1]
            open(ProblemDict['WritePath']+'/'+os.path.basename(Fname),'w').write(Code)
        try:
            # Copy any system-supplied support files alongside them.
            if ProblemDict['Extras']: # SystemSuppliedFileList:
                for f in ProblemDict['Extras']:
                    Code = open(f).read()
                    open(ProblemDict['WritePath']+'/'+os.path.basename(f),'w').write(Code)
        except KeyError:
            pass
    except IOError:
        return ('SystemError', 'Contact Administrator or Instructor')
    # Setup I/O for program we're testing.
    # NOTE(review): InputFileName is not a parameter of this function and is
    # not defined anywhere in it, so this line raises NameError at runtime.
    # The body appears adapted from HandleFile (which does take that
    # parameter); the signature or this reference needs fixing -- confirm.
    Input = open(InputFileName).read()
    os.chdir(ProblemDict['WritePath'])
    open(os.path.join(ProblemDict['WritePath'], 'input.txt'),'w').write(Input)
    In = open('input.txt')
    Out = open('output.txt', 'w')
    #Err = open('error.txt', 'w')
    # Run that sucker!
    try:
        # MARS is launched through winprocess with a 5-second wall-clock cap;
        # stdin/stdout are redirected to the sandbox files above.
        ExitCode = winprocess.run('java -jar c:\\Mars.jar nc p sm ae6 se7 %s' % ProblemDict['Run'], stdin=In, \
            stdout=Out, mSec=5000, desktop='')
    except WindowsError, msg:
        if 'timeout exceeded' in str(msg):
            ExitCode = 2 # time out
        # NOTE(review): if winprocess.run raised before returning, ExitCode
        # is unbound here and this elif itself raises NameError -- confirm.
        elif ExitCode not in (0, 6, 7):
            ExitCode = 3 # some other Windows error
    # Exit code of 0 indicates no error, as usual.
    # Exit code 6 indicates assembly error
    # Exit code 7 indicates runtime error
    #Done with files.
    In.close()
    Out.close()
    #Err.close()
    # Grab output
    if os.path.getsize('output.txt') < 5.0e6:
        Out = open('output.txt').read()
    else: # more than 5 megabytes output, something's wrong
        ExitCode = 4 # so set error flag
        Out = '' # & set Out to a safe value, but don't touch file.
    # grab error message if any.
    #Err = open('error.txt').read()
    # Cleanup temporary directory
    autograde_utilities.Cleanup(ProblemDict['WritePath'])
    #os.chdir(StartPath)
    # os.rmdir(WritePath)
    # Check output for validity.
    # NOTE(review): CorrectOutputFileName is likewise not a parameter and not
    # defined here -- NameError at runtime; see the InputFileName note above.
    Correct = str(open(CorrectOutputFileName).read())
    # Normalize Windows line endings on both sides before comparing.
    Out = Out.replace('\r', '')
    Correct = Correct.replace('\r', '')
    try:
        # A recognized nonzero exit code maps directly to its label...
        Result = ExitMsg[ExitCode]
    except KeyError:
        # ...otherwise (normal exit) compare actual vs expected output.
        Result = autograde_utilities.CompareWithFormatting(Correct, Out)
    return Result, ''
def HandleFile(Job, InputFileName, CorrectOutputFileName, ProblemDict): #FileNameToRun, SystemSuppliedFileList=None):
    '''
    Process one student's submission on one set of input data.

    Parameters:
        Job: named tuple containing, among other things, the files submitted
            by the student and their contents.
        InputFileName: name (including path if needed) of the ONE file with
            sample input for this test.
        CorrectOutputFileName: name (including path if needed) of the ONE
            file with correct output for the specified input.
        ProblemDict: per-problem configuration; uses 'Run' (file to execute),
            'FileToRun' (its full path) and, optionally, 'Extras' (other
            system-supplied support files).
    Returns:
        tuple of strings (Res, Err). Res is a brief description ('Correct',
        'Time Limit Exceeded', etc), and Err is an error message (possibly
        empty string).
    '''
    # set up some labels for later (exit codes)
    ExitMsg = {1: 'Translation Error', 2: 'Time Limit Exceeded', 3: 'Windows Error',
               4: 'Excessive Output', 5: 'Submission Error'}
    # Make sure we've got everything we're expecting; if we don't, skip all this.
    ExpectedFiles = [Filename for (Filename, contents) in Job.Files]
    try:
        ExpectedFiles += ProblemDict['Extras']  # SystemSuppliedFileList
    except (TypeError, KeyError):  # if there was no list of other needed files.
        pass
    Expected = [os.path.basename(name).lower().strip() for name in ExpectedFiles]
    if os.path.basename(ProblemDict['Run']).lower().strip() not in Expected:
        Res = "File " + ProblemDict['Run'] + " was expected, but not found."
        Err = ExitMsg[5]
        return Err, Res
    # even if we're going ahead, we can free up some memory
    del(ExpectedFiles)
    del(Expected)
    # Create working (temporary) directory, copy files into it
    ProblemDict['WritePath'] = os.path.dirname(ProblemDict['FileToRun'])  # FileNameToRun
    try:
        for f in Job.Files:
            Fname = f[0]
            Code = f[1]
            open(ProblemDict['WritePath'] + '/' + os.path.basename(Fname), 'w').write(Code)
        # Fix: use .get() so a missing 'Extras' key is harmless instead of an
        # uncaught KeyError (the other HandleFile variant in this file guards
        # this same case with a try/except KeyError).
        if ProblemDict.get('Extras'):  # SystemSuppliedFileList
            for f in ProblemDict['Extras']:
                Code = open(f).read()
                open(ProblemDict['WritePath'] + '/' + os.path.basename(f), 'w').write(Code)
    except IOError:
        return ('SystemError', 'Contact Administrator or Instructor')
    # Setup I/O for program we're testing.
    Input = open(InputFileName).read()
    os.chdir(ProblemDict['WritePath'])
    open(os.path.join(ProblemDict['WritePath'], 'input.txt'), 'w').write(Input)
    In = open('input.txt')
    Out = open('output.txt', 'w')
    Err = open('error.txt', 'w')
    # Run that sucker!
    try:
        ExitCode = winprocess.run('python %s' % ProblemDict['Run'], stdin=In,
                                  stdout=Out, stderr=Err, mSec=5000, desktop='')
    except WindowsError as msg:  # 'as' form works on Python 2.6+ as well
        if 'timeout exceeded' in str(msg):
            ExitCode = 2  # time out
        else:
            ExitCode = 3  # some other Windows error
    # Exit code of 0 indicates no error, as usual.
    # Done with files.
    In.close()
    Out.close()
    Err.close()
    # Grab output
    if os.path.getsize('output.txt') < 5.0e6:
        Out = open('output.txt').read()
    else:  # more than 5 megabytes output, something's wrong
        ExitCode = 4  # so set error flag
        Out = ''  # & set Out to a safe value, but don't touch file.
    # grab error message if any.
    Err = open('error.txt').read()
    # Cleanup temporary directory
    autograde_utilities.Cleanup(ProblemDict['WritePath'])
    # Check output for validity.
    Correct = str(open(CorrectOutputFileName).read())
    Out = Out.replace('\r', '')
    Correct = Correct.replace('\r', '')
    try:
        Result = ExitMsg[ExitCode]
    except KeyError:
        # No canned message for this exit code -> compare against expected output.
        Result = autograde_utilities.CompareWithFormatting(Correct, Out)
    return Result, Err
def RunTest():
    '''Ad-hoc smoke test: build two fake submission Jobs from sample
    solutions on disk and push them through Grade().'''
    # Job layout mirrors what the real job queue supplies.
    JobType = collections.namedtuple('JobType', ['UserID', 'CourseNum', 'ProblemNum', 'ProblemID', 'Timestamp', 'Files'])
    JobList = list()
    UserID = 'hareb'
    CourseNum="CS101"
    ProblemNum='1'
    ProblemID='0102'
    Timestamp=str(time.localtime())
    f = open('c:/users/public/problems/cs101/0102/example0102.py').read()
    Files = list()
    Files.append( ('solution.py', f))
    Job = JobType(UserID, CourseNum, ProblemNum, ProblemID, Timestamp, Files)
    JobList.append(Job)
    # Second job: same user, a different problem.
    f = open('c:/users/public/problems/cs101/0103/example0103.py').read()
    Files = list()
    Files.append( ('example0103.py', f) )
    Timestamp = str(time.localtime())
    Job = JobType(UserID, CourseNum, '002', '0103', Timestamp, Files)
    JobList.append(Job)
    Grade( JobList )
    # print "Done."
if __name__ == '__main__':
    # Re-queue job 21 as 'pending' (debug convenience), then grade everything
    # currently pending in the database.
    connection = autograder.getConnection()
    Cursor = connection.cursor()
    cmd = """UPDATE Jobs SET Status = 'pending' WHERE SequenceNumber = 21"""
    Cursor.execute(cmd)
    connection.commit()
    connection.close()
    Jobs = autograder.getJobs()
    Grade(Jobs)
    #RunTest()
##
## OK, Res, Err = HandleSubmission(1, '0102', ['example0102.py'])
## print "Your result:", Res
## if Err:
## print "Error message:\n", Err
##
## if OK:
## print '\tNeed to update database if this is first success on this problem.'
## else:
## print '\tNeed to update database if this is first attempt on this problem.'
##
|
[
"rdasxy@gmail.com"
] |
rdasxy@gmail.com
|
a2ff8efb83a37d60e0d1299f437db3a37bd87b9a
|
1d943d6daf9c25a9737663091d81bb08a6de6ef6
|
/main.py
|
4ae6d7ee999c4d9cb970c7803bfe124add29fd61
|
[] |
no_license
|
Steveineiter/A-Star_visualization
|
1b29cbdc0dd3dafbc69d467a9cd2446a04d25336
|
50c24bdcbc3c85650dbe2459648bc20b73da08a9
|
refs/heads/main
| 2023-01-01T00:34:41.195400
| 2020-10-29T15:58:18
| 2020-10-29T15:58:18
| 308,378,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,684
|
py
|
try:
import pygame
import sys
import math
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
except:
pass
# Game field
WIDTH, HEIGHT = (900, 900)
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))

# Colors (RGB)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
PURPLE = (148, 62, 143)

# Utility
NUMBER_OF_ROWS = 30
NUMBER_OF_COLUMNS = 30
PIXEL_PER_BOX_WIDTH = WIDTH / NUMBER_OF_ROWS     # cell width in pixels (float)
PIXEL_PER_BOX_HEIGHT = HEIGHT / NUMBER_OF_COLUMNS
BOX_SIZE = 3  # border width used when drawing a box

# TODO: decide whether this module-level grid is still needed
grid = []  # 2D list of BoxInGrid cells, populated by start_up()

# pygame handles the visualisation; tkinter is meant for input dialogs etc.
def start_up():
    '''Build the global NUMBER_OF_ROWS x NUMBER_OF_COLUMNS grid of
    BoxInGrid cells and mark the fixed start/end cells in purple.'''
    # Creating 2D Array
    global grid
    grid = [[0 for i in range(NUMBER_OF_ROWS)] for j in range(NUMBER_OF_COLUMNS)]  # same as the next few lines
    # Creating Spots
    for i in range(NUMBER_OF_ROWS):
        for j in range(NUMBER_OF_COLUMNS):
            grid[i][j] = BoxInGrid(i, j)
    # Set start and end node
    start = grid[5][5]
    end = grid[NUMBER_OF_ROWS - 6][NUMBER_OF_COLUMNS - 6]
    start.color = end.color = PURPLE
    # The fixed endpoints must not be turned into walls by mouse clicks.
    start.is_changeable = end.is_changeable = False
def add_box_neighbor():
    # Stub: neighbour wiring for the A* search is not implemented yet.
    pass
class BoxInGrid:
    '''One grid cell: position, draw colour, and wall/editable flags.'''

    def __init__(self, x, y):
        self.x = x                 # column index in the grid
        self.y = y                 # row index in the grid
        self.color = WHITE
        self.is_blocked = False    # True once the user paints it as a wall
        self.is_changeable = True  # False for the fixed start/end cells

    def draw(self, window, box_with):
        # NOTE(review): the rect size is hard-coded to 10x10 rather than
        # PIXEL_PER_BOX_WIDTH/HEIGHT -- presumably a small marker by design;
        # confirm. `box_with` is the pygame border width.
        pygame.draw.rect(window, self.color, (self.x * PIXEL_PER_BOX_WIDTH, self.y * PIXEL_PER_BOX_HEIGHT, 10, 10), box_with)
def redraw_window():
    '''Draw every box in the global grid and flip the display.'''
    for row in grid:
        for box in row:
            box.draw(WINDOW, BOX_SIZE)
    pygame.display.update()
def handle_mouse_press(mouse_position):
    '''Turn the clicked cell into a blue wall unless it is protected
    (start/end cells) or already blocked.'''
    mouse_x, mouse_y = mouse_position
    col = int(mouse_x // PIXEL_PER_BOX_WIDTH)
    row = int(mouse_y // PIXEL_PER_BOX_HEIGHT)
    box = grid[col][row]
    if box.is_changeable and not box.is_blocked:
        box.color = BLUE
        box.is_blocked = True
if __name__ == '__main__':
    run = True
    fps = 60
    clock = pygame.time.Clock()
    start_up()
    # NOTE(review): this instance is never used again -- dead code?
    box_in_grid = BoxInGrid(100, 100)
    while(run):
        clock.tick(fps)   # cap the loop at `fps` frames per second
        redraw_window()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        # Left mouse button paints walls.
        if pygame.mouse.get_pressed()[0]:
            mouse_position = pygame.mouse.get_pos()
            handle_mouse_press(mouse_position)
        # Enter ends the loop and triggers the (still stubbed) search setup.
        if pygame.key.get_pressed()[pygame.K_RETURN]:
            print("yes your majesti")
            run = False
            add_box_neighbor()
|
[
"noreply@github.com"
] |
noreply@github.com
|
ba730b6a4b4982aa4ff13b5059b8122ad718b1b3
|
9d615b7174eecd4c8401513ca8cc21fc498fef5a
|
/api/views.py
|
457826ebef472c327e82113a59326e527f03c40c
|
[] |
no_license
|
AlexeySub/prephack
|
ef13e637da181cd6e46d0ace20bd79c9438fba21
|
fe971421438d66f59cb2ce977dc736573c2e9ea3
|
refs/heads/master
| 2020-05-05T12:31:41.571292
| 2019-04-13T08:00:28
| 2019-04-13T08:00:28
| 180,032,825
| 0
| 0
| null | 2019-04-07T22:45:46
| 2019-04-07T22:45:46
| null |
UTF-8
|
Python
| false
| false
| 2,920
|
py
|
from api.models import User, UserAuthen, Message
from rest_framework import renderers, parsers
from django.views import View
from django.http import HttpResponse
from django.contrib.auth.hashers import make_password
from django.core import exceptions
from django import db
import jwt, time
from django.shortcuts import render
class UserRegister(View):
    '''POST: create a User from {username, password, email, usertype}.'''

    def post(self, request):
        data = parsers.JSONParser().parse(request)
        # SECURITY NOTE(review): the fixed salt '123' defeats per-user
        # salting; kept as-is because UserAuth compares with the same salt.
        data['password'] = make_password(data['password'], salt='123')
        user = User(name=data['username'].lower(), password=data['password'], email=data['email'].lower(), userType=data['usertype'])
        try:
            user.save()
        except db.IntegrityError:
            # Duplicate user -> 409.
            return HttpResponse('Conflict', status=409)
        return HttpResponse('OK', status=200)
class UserAuth(View):
    '''POST: verify {login, password} and issue a JWT auth token.'''

    def post(self, request):
        data = parsers.JSONParser().parse(request)
        print(data['login'].lower())  # NOTE(review): debug print; consider logging
        try:
            user = User.objects.get(name=data['login'].lower())
        except exceptions.ObjectDoesNotExist:
            return HttpResponse('Unauthorized', status=401)
        # Same fixed salt as registration, so equal passwords hash equally.
        if user.password == make_password(data['password'], salt='123'):
            # SECURITY NOTE(review): JWT secret is hard-coded in source.
            authtoken = jwt.encode(data, 'secret', algorithm='HS256').decode('UTF-8')
            userAuth = UserAuthen(user_id=user.id, token=authtoken, is_authenticated=True)
            userAuth.save()
            return HttpResponse(renderers.JSONRenderer().render({'auth_token': authtoken}))
        else:
            return HttpResponse('Unauthorized', status=401)
class UserLogout(View):
    '''POST: mark the session for the supplied auth token as logged out.'''

    def post(self, request):
        data = parsers.JSONParser().parse(request)
        userAuth = UserAuthen.objects.get(token=data['auth_token'])
        userAuth.is_authenticated=False
        userAuth.save()
        return HttpResponse('Ok')
class Chat(View):
    '''POST: store a chat message for the token's user.
    GET: return all message texts for the user named in the payload.'''

    def post(self, request):
        data = parsers.JSONParser().parse(request)
        try:
            # A valid signature is what authenticates the caller.
            jwt.decode(data['auth_token'], 'secret', algorithm='HS256')
        except jwt.InvalidSignatureError:
            # Fix: error message was misspelled ('Unathorized'); also matches
            # the other views' wording.
            return HttpResponse('Unauthorized', status=401)
        message = Message(user_id=UserAuthen.objects.get(token=data['auth_token']).user_id, text=data['text'])
        try:
            message.save()
        except db.IntegrityError:
            return HttpResponse('Conflict', status=409)
        return HttpResponse('OK', status=200)

    def get(self, request):
        data = parsers.JSONParser().parse(request)
        try:
            jwt.decode(data['auth_token'], 'secret', algorithm='HS256')
        except jwt.InvalidSignatureError:
            return HttpResponse('Unauthorized', status=401)
        chat = Message.objects.all().filter(user_id=User.objects.get(name=data['login']).id)
        # Bug fix: `chat` is a QuerySet, which has no `.text` attribute; the
        # original raised AttributeError. Serialize the message texts instead.
        return HttpResponse(renderers.JSONRenderer().render([m.text for m in chat]))
def index(request):
    '''Serve the single-page front end.'''
    return render(request, 'index.html')
|
[
"mr.317676@gmail.com"
] |
mr.317676@gmail.com
|
4c481d87668445176f6e0368afd5521ee3954e1e
|
395828af169b8d808057d16a399db7ef0f3bd11c
|
/first_django/blog/migrations/0003_auto_20200204_2042.py
|
29a281decf055703fc2480a376ed28e6059e50b4
|
[] |
no_license
|
yeonghan/yozora
|
88103cea289dfd4cbad8a120d822db83bf52a1eb
|
58174a2a7d5dab04f5736243e0789d628d250fc7
|
refs/heads/master
| 2020-05-23T01:41:30.325966
| 2020-02-24T15:51:22
| 2020-02-24T15:51:22
| 47,525,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# Generated by Django 3.0.2 on 2020-02-04 11:42
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames Comment.Post -> Comment.post.
    # Applied migrations should not be hand-edited; create a new one instead.

    dependencies = [
        ('blog', '0002_auto_20200128_2032'),
    ]

    operations = [
        migrations.RenameField(
            model_name='comment',
            old_name='Post',
            new_name='post',
        ),
    ]
|
[
"dudgks29@naver.com"
] |
dudgks29@naver.com
|
2870cf1b425dae0d303dc3b910f2b3820bac2b3e
|
c1abf5c7dd599b25d84c2026f97eaccd03dc4e46
|
/movedown.py
|
1da89fb7214f214508b7e7d52b1b88c29c20d425
|
[
"MIT"
] |
permissive
|
oknalv/linky
|
09768abe96d95f2dcb67ff91c22663a4a69356cb
|
78fba19946e2212b10f3d1a5b27c7d9329556290
|
refs/heads/master
| 2016-09-13T01:33:29.192646
| 2016-04-29T15:41:13
| 2016-04-29T15:41:13
| 57,393,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
import webapp2
from base import BaseHandler
from link import Link, Container
from google.appengine.api import users
import time
class MoveDownHandler(BaseHandler):
    '''GET /movedown?id=<name>: move the named link one position down in the
    current user's Container, then redirect home.'''

    def __init__(self, request = None, response = None):
        self.initialize( request, response )

    def get(self):
        if not self.request.get("id"):
            self.set_flash("danger", "forbidden-access")
            self.redirect("/")
        else:
            user = users.get_current_user()
            if user:
                containers = Container.query(Container.user == user)
                cont = None
                if not containers.iter().has_next():
                    # First visit: give the user an empty container.
                    cont = Container(user = user)
                    cont.put()
                else:
                    cont = containers.iter().next()
                # Locate the link to be moved.
                actual = None
                for ind, link in enumerate(cont.links):
                    if link.name == self.request.get("id"):
                        actual = ind
                        break
                # Bug fix: the last element has nothing below it; the old
                # bound (actual < len(cont.links)) indexed one past the end
                # at cont.links[actual + 1].
                if actual is not None and actual < len(cont.links) - 1:
                    cont.links[actual], cont.links[actual + 1] = cont.links[actual + 1], cont.links[actual]
                    cont.put()
                    # Give the eventually-consistent datastore a moment
                    # before the page re-renders.
                    time.sleep(1)
                self.redirect("/")
            else:
                self.set_flash("danger", "not-logged-in")
                self.redirect("/")
# webapp2 session configuration; the secret key signs session cookies.
config = {}
config['webapp2_extras.sessions'] = {
    'secret_key': 'merely remarkable came line',
}

app = webapp2.WSGIApplication([
    ('/movedown', MoveDownHandler)
], debug = True, config = config)
|
[
"thevlanko@gmail.com"
] |
thevlanko@gmail.com
|
d481c060b21ebf733f9e03348fe8dbb008dcb1a0
|
0f8e3eb9c3405409418428148f97f93627a886a5
|
/gui.py
|
4e644ecf6068f86453d27bbbc2aea5287e181a51
|
[] |
no_license
|
shikharsrivastava/Bot-Environment
|
dfd4a41ffc614f46eea129d102ba4441de39eae2
|
0afbfb61baae49ebe3bb22b3c257a251913c04e6
|
refs/heads/master
| 2020-06-12T15:44:21.965327
| 2018-01-30T08:01:53
| 2018-01-30T08:01:53
| 75,796,633
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,020
|
py
|
import pygame,sys
from pygame.locals import *
import random
import time
from math import log
import subprocess
pygame.init()
DISPLAY = pygame.display.set_mode((800,800))
pygame.display.set_caption('Checkers')

# Colour palette (RGB).
BLACK = (0,0,0)
WHITE = (255, 255, 255)
RED= (255,0,0)
GREEN = (0, 255,0)
BLUE = (0,0, 255)
AQUA=(0, 255, 255)
FUCHSIA=(255,0, 255)
GRAY=(128, 128, 128)
OLIVE=(128, 128,0)
PURPLE=(128,0, 128)
YELLOW=(255, 255,0)
TEAL=( 0, 128, 128)

# Board geometry (pixels).
row=8
col=8
WIDTH=120
INITIAL_X=120   # centre of the top-left square
INITIAL_Y=80
SIDE=80         # side length of one square

DISPLAY.fill(WHITE)

# Square colours; colboard records the colour of each drawn square.
col1=WHITE
col2=BLACK
currentColor = col1
colboard=[]

# Global bitboards for white/black pieces and the side to move.
bbw = 0
bbb = 0
side = 'W'
def convertPos(pos):
    '''Mirror a 0..63 square index vertically (row r -> row 7-r).

    Maps between the top-left-origin board indexing and the
    bottom-left-origin bitboard indexing used elsewhere in this file.
    The mapping is its own inverse.
    '''
    # Fix: use floor division -- under Python 3 the original `pos/8`
    # produced a float and broke the arithmetic.
    row = pos // 8
    col = pos % 8
    return (7 - row) * 8 + col
def buildSquare(i,j):
    # Draw one board square of side SIDE centred at pixel (i, j) in the
    # module-global currentColor.
    pygame.draw.rect(DISPLAY,currentColor,(i-SIDE/2,j-SIDE/2,SIDE,SIDE))
def init():
    '''Draw the empty 8x8 board (border plus alternating squares) and append
    each square's colour to the global colboard list.'''
    global currentColor;
    # Board border.
    pygame.draw.rect(DISPLAY,BLACK,(INITIAL_X-SIDE/2,INITIAL_Y-SIDE/2,SIDE*8,SIDE*8),5)
    for i in range(0,8):
        # Alternate the row's starting colour so columns checker properly.
        if i%2==0:
            currentColor=col1
        else:
            currentColor=col2
        for j in range(0,8):
            x=INITIAL_X+j*SIDE
            y=INITIAL_Y+i*SIDE
            buildSquare(x,y)
            colboard.append(currentColor)
            currentColor=col2 if currentColor==col1 else col1
def first():
    '''Return the (white, black) starting bitboards.

    Black occupies the dark squares of the top three rows, white those of
    the bottom three (top-left indexing); each index is mirrored into
    bitboard order by convertPos before being OR-ed in.
    '''
    # The original recomputed (and never used) row/col locals in every loop;
    # those dead statements are removed -- the returned values are unchanged.
    black = 0
    for i in range(1, 8, 2):
        black |= (1 << convertPos(i))
    for i in range(8, 15, 2):
        black |= (1 << convertPos(i))
    for i in range(17, 24, 2):
        black |= (1 << convertPos(i))
    white = 0
    for i in range(40, 47, 2):
        white |= (1 << convertPos(i))
    for i in range(49, 56, 2):
        white |= (1 << convertPos(i))
    for i in range(56, 63, 2):
        white |= (1 << convertPos(i))
    return (white, black)
def genFen(white, black, side):
    '''Serialize (white, black) bitboards plus side-to-move into a FEN-like
    string: 8 '/'-terminated rows of empty-square counts and D/d piece
    letters, followed by the side character.'''
    board = [[0 for _ in range(8)] for _ in range(8)]
    # Unpack each bitboard, lowest set bit first, back into board coordinates.
    while white > 0:
        pos = int(log(white & -white, 2))
        pos = convertPos(pos)
        # Fix: floor division -- `pos / 8` is a float under Python 3 and
        # would crash the list indexing.
        board[pos // 8][pos % 8] = 1
        white -= white & -white
    while black > 0:
        pos = int(log(black & -black, 2))
        pos = convertPos(pos)
        board[pos // 8][pos % 8] = 2
        black -= black & -black
    fen = ''
    for i in range(8):
        row = ''
        count = 0  # run length of consecutive empty squares
        for j in range(8):
            if board[i][j] == 0:
                count += 1
            elif board[i][j] == 1:
                if count > 0:
                    row = row + str(count)
                row = row + 'D'
                count = 0
            elif board[i][j] == 2:
                if count > 0:
                    row = row + str(count)
                row = row + 'd'
                count = 0
        if count > 0:
            row = row + str(count)
        row = row + '/'
        fen = fen + row
    fen = fen + side
    return fen
def makeBoard(white, black):
    '''Redraw the board and render the bitboards as circles: green for the
    white pieces, red for the black pieces.'''
    init()
    while white > 0:
        pos = int(log(white & - white, 2))
        pos = convertPos(pos)
        # Fix: floor division keeps coordinates integral under Python 3.
        row = pos // 8
        col = pos % 8
        pygame.draw.circle(DISPLAY, GREEN, (INITIAL_X+col*SIDE, INITIAL_Y+row*SIDE), SIDE//3)
        white -= white & (-white)
    while black > 0:
        pos = int(log(black & - black, 2))
        pos = convertPos(pos)
        row = pos // 8
        col = pos % 8
        pygame.draw.circle(DISPLAY, RED, (INITIAL_X+col*SIDE, INITIAL_Y+row*SIDE), SIDE//3)
        black -= black & (-black)
def isValid(move, exe):
    '''Ask the external engine whether `move` is legal in the current
    position; on success update the global bitboards and return True.'''
    # Bug fix: bbw/bbb are assigned below, which (without `global`) made them
    # function locals and turned the genFen() call on the first line into an
    # UnboundLocalError.
    global bbw, bbb
    fen = genFen(bbw, bbb, side)
    out = subprocess.check_output([exe, 'fen', fen, 'isvalid', move])
    out = out.split()
    if out[0] == '0':
        return False
    else:
        bbw, bbb = int(out[1]), int(out[2])
        return True
def bestMove(bw, bb, side, exe):
    '''Ask the external engine for its move from position (bw, bb, side).
    Returns the engine output tokens converted to ints -- the new bitboards,
    or -1 -1 when the side to move has lost.'''
    fen = genFen(bw, bb, side)
    out = subprocess.check_output([exe, 'fen', fen, 'best'])
    out = out.split()
    # Python 2: map() returns a list here.
    return map(int, out)
def botfight():
    '''Let the external engine play both sides until one loses, redrawing
    after every move.'''
    bw, bb = first()
    makeBoard(bw, bb)
    pygame.display.update()
    side = 'W'
    while True:
        bw, bb = bestMove(bw, bb, side, './a.out')
        if bw == -1 and bb == -1:
            # Engine signals the side to move has no legal moves left.
            print "Game over, {} loses".format(side)
            break
        else:
            makeBoard(bw, bb)
            pygame.display.update()
            time.sleep(0.5)  # slow the game down enough to watch
        side = 'B' if side == 'W' else 'W'
if __name__ == "__main__":
    # Interactive play (the string-commented block below) is disabled;
    # just run the bot-vs-bot demo.
    botfight()
"""
white, black = first()
bbw = white
bbb = black
makeBoard(bbw, bbb)
print(genFen(bbw, bbb, 'W'))
prev = None
killed = 0
move = 0
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit(0)
elif event.type==MOUSEBUTTONDOWN:
mousePos=list(pygame.mouse.get_pos())
mousePos[0]-=INITIAL_X-SIDE/2;
mousePos[1]-=INITIAL_Y-SIDE/2;
col=mousePos[0]/SIDE
row=mousePos[1]/SIDE
if 0<=row<=7 and 0<=col<=7:
print row, col
if prev == None:
pos = convertPos(row*8+col)
move |= pos
prev = pos
print "prev = ", prev
else:
cur = convertPos(row*8+col)
print cur, prev
if cur - prev == 7 or cur - prev == 9:
print "hello"
move |= (cur << 6)
elif cur - prev == 14:
killed += 1
s = 0
for i in range(6, 16):
s += (1 << i)
move = move & (~s)
move |= (cur << 6)
move |= (killed << 12)
elif cur - prev == 18:
move |= (1 << (killed + 17))
killed += 1
s = 0
for i in range(6, 16):
s += (1 << i)
move = move & (~s)
move |= (cur << 6)
move |= (killed << 12)
else:
s = 0
for i in range(6, 12):
s += (1 << i)
move = move & (~s)
move |= (cur << 6)
prev = cur
if (isValid(move, './a.out')):
makeBoard(bbw, bbb)
print((move & 0x3f), ((move >> 6) & 0x3f), ((move >> 12) & 0xf))
killed = ((move >> 12) & 0xf)
for i in range(killed):
print ((move >> (17+i)) & 1)
else:
prev = None
killed = 0
move = 0
pygame.display.update()
"""
|
[
"noreply@github.com"
] |
noreply@github.com
|
4ce257895e21ccb0c844c1e6aa51c30a9ac4fe4d
|
202f3112b74e0c46f906c95a3914e24a734aa5ea
|
/polls/models.py
|
d6bc4118f5fb516f0ed4046cca01b3b02017e094
|
[] |
no_license
|
markadeev/djangoapp
|
36e19cfac9cff07bffec5f54a903b4ee2d64dded
|
ac94bfff12a47994f0d2af3a924ef76430d1bf80
|
refs/heads/master
| 2021-01-06T14:17:23.121078
| 2020-02-20T12:01:52
| 2020-02-20T12:01:52
| 241,357,222
| 0
| 0
| null | 2020-02-20T12:01:54
| 2020-02-18T12:37:04
|
Python
|
UTF-8
|
Python
| false
| false
| 643
|
py
|
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
    # A poll question with its publication timestamp.
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        return self.question_text
class Choice(models.Model):
    # One selectable answer for a Question, with a running vote tally.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        # Bug fix: Choice has no `question_text` field; the original raised
        # AttributeError whenever a Choice was rendered.
        return self.choice_text

    def was_published_recently(self):
        # NOTE(review): Choice defines no pub_date field -- this method looks
        # copied from the tutorial's Question model and will fail if called;
        # confirm and move it onto Question.
        return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
|
[
"markadeev@ukr.net"
] |
markadeev@ukr.net
|
cb1af45e6576cfaa0436075b85515ae1b2b235e3
|
bd5e4b1317e741e2c241a7285f632e2beefb8cf4
|
/bdd_example/settings.py
|
8e9ebdb9d57d1d0f441518deb6fff4b0a41d7f97
|
[] |
no_license
|
asleao/bdd-django-tutorial-2
|
c7e5c19af55c31097d7f705568f7b18b8405acd3
|
fe79cf4dded6328290ce34a8b221b8276bc733cb
|
refs/heads/master
| 2020-12-05T17:43:59.062935
| 2016-08-23T14:44:35
| 2016-08-23T14:44:35
| 66,375,593
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
"""
Django settings for bdd_example project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '_^o_m+ne4=ht(efkt$dpd40-%px!qs++w#g(x8$0%2aa_qj)2@'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'behave_django',  # BDD (behave) test-runner integration
    'login',          # project app under test
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'bdd_example.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'bdd_example.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
|
[
"andre.sp.leao@gmail.com"
] |
andre.sp.leao@gmail.com
|
bd143b97ac92cf6eef6bbe7e91edb34eafbf4540
|
60e9be8297b98075afb304ebae929f9cac30cf42
|
/leetCode/Array/Easy/K-diff Pairs in an Array.py
|
95442868e44479b644d955b4d724e4873131c538
|
[] |
no_license
|
sifact/Leet-Code-Problems
|
e4bbd0ab2d1349de32521650c9eeaa5ad3b8085f
|
eb62e8407dd0931841fbbb351aca5c415c226a07
|
refs/heads/main
| 2023-01-30T05:07:57.904604
| 2020-11-30T16:54:19
| 2020-11-30T16:54:19
| 317,285,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
from collections import Counter
def findPairs(nums, k):
    '''Count unique k-diff pairs in nums.

    For k > 0 a pair (v, v+k) counts once per distinct v; for k == 0 a
    value counts when it occurs at least twice.
    '''
    # Count the elements with Counter
    # if k > 0 for each element i, check if i + k exist
    # if k == 0 for each element i, check if count[i] > 1
    hash_map = Counter(nums)
    count = 0
    for key in hash_map:
        # Bug fix: the original tested `k > 1`, silently skipping every
        # k == 1 pair even though the comment above says k > 0.
        if k > 0 and key + k in hash_map or k == 0 and hash_map[key] > 1:
            count += 1
    return count
def findPairs2(nums, k):
    '''Count unique k-diff pairs in nums (explicit-loop variant).'''
    counts = Counter(nums)
    total = 0
    for value in counts:
        # A positive k pairs with value+k; k == 0 needs a duplicate value.
        if (k > 0 and value + k in counts) or (k == 0 and counts[value] > 1):
            total += 1
    return total
# Read the array on one line and k on the next, then print the pair count.
a = list(map(int, input().split()))
num = int(input())
print(findPairs2(a, num))
|
[
"noreply@github.com"
] |
noreply@github.com
|
1123236231c7d7542bb38bab826fbc2184d101e5
|
01b77be351755b7f2b49d40744751cf22f3953cf
|
/tools/json_schema_compiler/compiler.py
|
38235e07f9c9833705f99c341b718ad1db3fdb11
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
bwahn/Havana
|
4159876f98850fbfe873ccaaa3dc38739537e9f3
|
5e8bc991ea7e251e98efb6e54e0b8573e5503aa6
|
refs/heads/master
| 2020-05-31T21:40:08.597468
| 2013-09-03T15:40:14
| 2013-09-03T15:40:14
| 12,556,726
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,190
|
py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generator for C++ structs from api json files.
The purpose of this tool is to remove the need for hand-written code that
converts to and from base::Value types when receiving javascript api calls.
Originally written for generating code for extension apis. Reference schemas
are in chrome/common/extensions/api.
Usage example:
compiler.py --root /home/Work/src --namespace extensions windows.json
tabs.json
compiler.py --destdir gen --root /home/Work/src
--namespace extensions windows.json tabs.json
"""
import cc_generator
import cpp_type_generator
import h_generator
import idl_schema
import json_schema
import model
import schema_bundle_generator
import optparse
import os.path
import sys
def load_schema(schema):
    '''Load an API definition from a .json or .idl schema file.

    Exits the process with a message for any other file extension.
    '''
    _, extension = os.path.splitext(schema)
    if extension == '.json':
        return json_schema.Load(schema)
    if extension == '.idl':
        return idl_schema.Load(schema)
    sys.exit("Did not recognize file extension %s for schema %s" %
             (extension, schema))
def handle_single_schema(filename, dest_dir, root, root_namespace):
    '''Generate the .h/.cc pair for one schema file.

    Writes into dest_dir when given, otherwise prints the generated code
    to stdout.
    '''
    schema = os.path.normpath(filename)
    api_defs = load_schema(schema)
    api_model = model.Model()
    for target_namespace in api_defs:
        referenced_schemas = target_namespace.get('dependencies', [])
        # Load type dependencies into the model.
        # TODO(miket): do we need this in IDL?
        for referenced_schema in referenced_schemas:
            referenced_schema_path = os.path.join(
                os.path.dirname(schema), referenced_schema + '.json')
            referenced_api_defs = json_schema.Load(referenced_schema_path)
            for namespace in referenced_api_defs:
                # Fix: use the `root` parameter rather than the module-global
                # `opts`, which only exists when run as a script.
                api_model.AddNamespace(namespace,
                    os.path.relpath(referenced_schema_path, root))
        # Gets the relative path from root to the schema to correctly
        # determine the include path.
        relpath = os.path.relpath(schema, root)
        namespace = api_model.AddNamespace(target_namespace, relpath)
        if not namespace:
            continue
        # The output filename must match the input filename for gyp to deal
        # with it properly.
        out_file = namespace.name
        type_generator = cpp_type_generator.CppTypeGenerator(
            root_namespace, namespace, namespace.unix_name)
        for referenced_namespace in api_model.namespaces.values():
            if referenced_namespace == namespace:
                continue
            type_generator.AddNamespace(
                referenced_namespace,
                referenced_namespace.unix_name)
        h_code = (h_generator.HGenerator(namespace, type_generator)
                  .Generate().Render())
        cc_code = (cc_generator.CCGenerator(namespace, type_generator)
                   .Generate().Render())
        if dest_dir:
            with open(
                os.path.join(dest_dir, namespace.source_file_dir, out_file + '.cc'),
                'w') as cc_file:
                cc_file.write(cc_code)
            with open(
                os.path.join(dest_dir, namespace.source_file_dir, out_file + '.h'),
                'w') as h_file:
                h_file.write(h_code)
        else:
            # Parenthesized single-argument prints behave identically under
            # both Python 2 and Python 3.
            print('%s.h' % out_file)
            print('')
            print(h_code)
            print('')
            print('%s.cc' % out_file)
            print('')
            print(cc_code)
def handle_bundle_schema(filenames, dest_dir, root, root_namespace):
    '''Generate the bundled generated_api.h / generated_schemas.{h,cc}
    covering all the given schema files at once.'''
    # Merge the source files into a single list of schemas.
    api_defs = []
    for filename in filenames:
        schema = os.path.normpath(filename)
        api_defs.extend(load_schema(schema))
    api_model = model.Model()
    relpath = os.path.relpath(os.path.normpath(filenames[0]), root)
    for target_namespace in api_defs:
        api_model.AddNamespace(target_namespace, relpath)
    type_generator = cpp_type_generator.CppTypeGenerator(root_namespace)
    for referenced_namespace in api_model.namespaces.values():
        type_generator.AddNamespace(
            referenced_namespace,
            referenced_namespace.unix_name)
    generator = schema_bundle_generator.SchemaBundleGenerator(
        api_model, api_defs, type_generator)
    api_h_code = generator.GenerateAPIHeader().Render()
    schemas_h_code = generator.GenerateSchemasHeader().Render()
    schemas_cc_code = generator.GenerateSchemasCC().Render()
    if dest_dir:
        basedir = os.path.join(dest_dir, 'chrome/common/extensions/api')
        with open(os.path.join(basedir, 'generated_api.h'), 'w') as h_file:
            h_file.write(api_h_code)
        with open(os.path.join(basedir, 'generated_schemas.h'), 'w') as h_file:
            h_file.write(schemas_h_code)
        with open(os.path.join(basedir, 'generated_schemas.cc'), 'w') as cc_file:
            cc_file.write(schemas_cc_code)
    else:
        # Parenthesized single-argument prints behave identically under both
        # Python 2 and Python 3.
        print('generated_api.h')
        print('')
        print(api_h_code)
        print('')
        print('generated_schemas.h')
        print('')
        print(schemas_h_code)
        print('')
        print('generated_schemas.cc')
        print('')
        print(schemas_cc_code)
if __name__ == '__main__':
    parser = optparse.OptionParser(
        description='Generates a C++ model of an API from JSON schema',
        usage='usage: %prog [option]... schema')
    parser.add_option('-r', '--root', default='.',
        help='logical include root directory. Path to schema files from specified'
        'dir will be the include path.')
    parser.add_option('-d', '--destdir',
        help='root directory to output generated files.')
    parser.add_option('-n', '--namespace', default='generated_api_schemas',
        help='C++ namespace for generated files. e.g extensions::api.')
    parser.add_option('-b', '--bundle', action="store_true", help=
'''if supplied, causes compiler to generate bundle files for the given set of
source files.''')

    (opts, args) = parser.parse_args()
    if not args:
        sys.exit(0)  # This is OK as a no-op
    dest_dir = opts.destdir
    root_namespace = opts.namespace
    # Bundle mode consumes every schema at once; single mode only the first.
    if opts.bundle:
        handle_bundle_schema(args, dest_dir, opts.root, root_namespace)
    else:
        handle_single_schema(args[0], dest_dir, opts.root, root_namespace)
|
[
"BW@BW-PC.(none)"
] |
BW@BW-PC.(none)
|
373f9f9cd537df8df9fb85fee9220607f78f2be6
|
de5adea6b67660bfc45150ee56b6cf4957c8c4e7
|
/main_app/migrations/0001_initial.py
|
f522eb7c2263895a61cc3153af186e867e0d5fdf
|
[] |
no_license
|
arthuroe/treasure_gram
|
70049a25009318d947488dea28505f65816d9d84
|
5ce93ed21284fee17640b15546011848de3115ac
|
refs/heads/develop
| 2020-03-18T02:16:19.413381
| 2018-05-23T17:18:58
| 2018-05-23T17:24:16
| 134,182,468
| 0
| 0
| null | 2018-05-28T18:52:48
| 2018-05-20T20:02:49
|
Python
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-20 21:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the app: creates the Treasure table with its
    # auto primary key and five data columns.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Treasure',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('value', models.DecimalField(decimal_places=2, max_digits=10)),
                ('materials', models.CharField(max_length=100)),
                ('location', models.CharField(max_length=100)),
                ('img_url', models.CharField(max_length=100)),
            ],
        ),
    ]
|
[
"arthur.orache@gmail.com"
] |
arthur.orache@gmail.com
|
85c85d8ad12001b13683eb1cd155223c1da9f3cf
|
d69b96f8a2d4a0025b2513d49ad1726d53a9adcc
|
/sow/console.py
|
033266b0b1c668bcddc24cc81ce0afa56dd6f3d4
|
[] |
no_license
|
mekhami/Sow
|
3ed4fa82f2016899924692c979d5e2ed0ca20166
|
aa55d69fa1d18ac9a35a24f67b126e39ca69b721
|
refs/heads/master
| 2021-01-10T19:16:59.841592
| 2015-06-26T15:50:00
| 2015-06-26T15:50:00
| 30,369,588
| 1
| 1
| null | 2015-12-31T20:00:36
| 2015-02-05T17:55:29
|
Python
|
UTF-8
|
Python
| false
| false
| 932
|
py
|
#/usr/bin/env python
###################################
## A Harvest Command Line App ##
###################################
'''Harvest.
Usage:
sow [options]
sow add [(<alias> <hours> <note>)] [-d|--date <date>]
sow show (today|yesterday|week | --date <date>)
sow reauth
sow delete [-a|--all] [(-d|--date <date>)]
Options:
-h --help Show this screen.
--version Show the version.
'''
from docopt import docopt
from commands import add, show, delete
from utils import get_timesheet, get_config, reauth
def _main(args, config, timesheet):
    """Dispatch the parsed docopt arguments to the matching command.

    Each command flag is checked independently and in this fixed order,
    so several commands could fire in one invocation.
    """
    if args['add']:
        add(args, config, timesheet)
    if args['show']:
        show(args, timesheet)
    if args['reauth']:
        reauth(config)
    if args['delete']:
        delete(args, timesheet)
def main():
    """CLI entry point: parse argv against the module docstring (docopt),
    load the config and timesheet, then dispatch to :func:`_main`."""
    args = docopt(__doc__)
    config = get_config()
    timesheet = get_timesheet()
    _main(args, config, timesheet)
|
[
"Lawrence.vanderpool@gmail.com"
] |
Lawrence.vanderpool@gmail.com
|
639a8318adc71b502d3f0053794000dbc4d50a3c
|
6f8b9e95b2833de2a4f2c8413fe45133e540a5cf
|
/Sequences/tuples_examples.py
|
93edf2eb0dd93f374233d64740dad825f86671a4
|
[] |
no_license
|
riteshelias/UMC
|
a658665d8653ef1ba72d65030b38da7462783ae7
|
e30d42192290905b0a878b66f7634500868b174d
|
refs/heads/master
| 2023-01-20T19:32:51.885534
| 2020-11-27T04:10:22
| 2020-11-27T04:10:22
| 316,398,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
# Demo of nested tuples: each dish is a 4-tuple
# (name, cuisine, category, ingredients), where `ingredients` is itself a
# tuple of (position, ingredient-name) pairs.
dishes = [
    ("Kaju Katli", "Desi", "Dessert",
     (
         (1, "Cashew Nuts"),
         (2, "Mawa"),
         (3, "Sugar")
     )
     ),
    ("Machow Soup", "Chinese", "Soup",
     (
         (1, "Noodles"),
         (2, "Chicken"),
         (3, "Chopped Veggies"),
         (4, "Soya Sauce")
     )
     ),
    ("Hara bhara Kebab", "Desi", "Starters",
     (
         (1, "Spinach"),
         (2, "Corn"),
         (3, "Cheese"),
         (4, "Potatoes")
     )
     ),
    ("Tandoori Chicken", "Mughlai", "Starters",
     (
         (1, "Chicken"),
         (2, "Spices"),
         (3, "Butter")
     )
     ),
    ("Navratan Pulav", "Awadhi", "Main Course",
     (
         (1, "Mix Veggies"),
         (2, "Basmati Rice"),
         (3, "Dry Fruits")
     )
     ),
    ("Rogan Josh", "Kashmiri", "Main Course",
     (
         (1, "Mutton"),
         (2, "Spices"),
         (3, "Oil"),
         (4, "Onions")
     )
     ),
    ("Rosogolla", "Bengali", "Dessert",
     (
         (1, "Milk"),
         (2, "Sugar"),
         (3, "Water"),
         (4, "Rose essence")
     )
     ),
]

print(len(dishes))
print()

# Unpack each 4-tuple directly in the loop header.
for name, cuisine, category, ingredients in dishes:
    print(f"Name: {name}, Cuisine: {cuisine}, Category: {category}, Ingredients: {ingredients}")

# Drill down into one dish one index at a time...
dish = dishes[1]
print(dish)
print()

ingredient = dish[3]
print(ingredient)

item = ingredient[1]
print(item)
print()

spitem = item[1]
print(spitem)

# ...and reach the very same element with a single chained index expression.
spitem1 = dishes[1][3][1][1]
print(spitem1)
print(dishes[1][3][1][1])
|
[
"ritesh.elias@gmail.com"
] |
ritesh.elias@gmail.com
|
000950e05d418733d1aee53faa55ce0a11927353
|
87ef03b1ff43333361771976397908abeb56e496
|
/venv/Lib/site-packages/gunicorn/http/body.py
|
afde36854d1b6ce7e58bdb115b34e09dbed4eee6
|
[
"MIT"
] |
permissive
|
pran01/AlgoVision
|
cba938db1f56c3b52e4868bcdda5283492b2902e
|
40e85f3c55266f43ee103dfa0852a63af306a8d4
|
refs/heads/master
| 2023-04-05T21:01:39.513718
| 2021-04-30T18:56:33
| 2021-04-30T18:56:33
| 281,875,751
| 33
| 9
|
MIT
| 2021-03-20T04:56:44
| 2020-07-23T06:58:41
|
Python
|
UTF-8
|
Python
| false
| false
| 7,297
|
py
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import io
import sys
from gunicorn.http.errors import (NoMoreData, ChunkMissingTerminator,
InvalidChunkSize)
class ChunkedReader(object):
    """Reads an HTTP/1.1 chunked-transfer-encoded body from an unreader.

    ``parse_chunked`` is a generator that yields raw body bytes chunk by
    chunk; ``read`` drains it through an internal buffer.
    """

    def __init__(self, req, unreader):
        self.req = req
        # Generator over decoded body bytes; set to None once exhausted.
        self.parser = self.parse_chunked(unreader)
        self.buf = io.BytesIO()

    def read(self, size):
        """Return up to ``size`` decoded body bytes."""
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""

        if self.parser:
            # Pull from the chunk generator until we have `size` bytes
            # buffered or the body is exhausted.
            while self.buf.tell() < size:
                try:
                    self.buf.write(next(self.parser))
                except StopIteration:
                    self.parser = None
                    break

        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        # Keep the surplus for the next read.
        self.buf = io.BytesIO()
        self.buf.write(rest)
        return ret

    def parse_trailers(self, unreader, data):
        """Parse optional trailer headers that follow the terminal 0-chunk."""
        buf = io.BytesIO()
        buf.write(data)

        # Trailers end with a blank line; an immediate CRLF means "none".
        idx = buf.getvalue().find(b"\r\n\r\n")
        done = buf.getvalue()[:2] == b"\r\n"
        while idx < 0 and not done:
            self.get_data(unreader, buf)
            idx = buf.getvalue().find(b"\r\n\r\n")
            done = buf.getvalue()[:2] == b"\r\n"
        if done:
            # No trailers: push everything after the CRLF back.
            unreader.unread(buf.getvalue()[2:])
            return b""
        # Store parsed trailers on the request; unread bytes past them.
        self.req.trailers = self.req.parse_headers(buf.getvalue()[:idx])
        unreader.unread(buf.getvalue()[idx + 4:])

    def parse_chunked(self, unreader):
        """Generator yielding the raw data of each chunk in order."""
        (size, rest) = self.parse_chunk_size(unreader)
        while size > 0:
            # Yield pieces until the declared chunk size is consumed.
            while size > len(rest):
                size -= len(rest)
                yield rest
                rest = unreader.read()
                if not rest:
                    raise NoMoreData()
            yield rest[:size]
            # Remove \r\n after chunk
            rest = rest[size:]
            while len(rest) < 2:
                rest += unreader.read()
            if rest[:2] != b'\r\n':
                raise ChunkMissingTerminator(rest[:2])
            (size, rest) = self.parse_chunk_size(unreader, data=rest[2:])

    def parse_chunk_size(self, unreader, data=None):
        """Read a chunk-size line; return (size, leftover-bytes).

        A size of 0 terminates the body: trailers are parsed and
        ``(0, None)`` is returned.
        """
        buf = io.BytesIO()
        if data is not None:
            buf.write(data)

        idx = buf.getvalue().find(b"\r\n")
        while idx < 0:
            self.get_data(unreader, buf)
            idx = buf.getvalue().find(b"\r\n")

        data = buf.getvalue()
        line, rest_chunk = data[:idx], data[idx + 2:]

        # Chunk extensions after ';' are ignored; the size is hex.
        chunk_size = line.split(b";", 1)[0].strip()
        try:
            chunk_size = int(chunk_size, 16)
        except ValueError:
            raise InvalidChunkSize(chunk_size)

        if chunk_size == 0:
            try:
                self.parse_trailers(unreader, rest_chunk)
            except NoMoreData:
                pass
            return (0, None)
        return (chunk_size, rest_chunk)

    def get_data(self, unreader, buf):
        # Append one read's worth of socket data; EOF here is an error.
        data = unreader.read()
        if not data:
            raise NoMoreData()
        buf.write(data)
class LengthReader(object):
    """Reads a body with a known Content-Length from an unreader."""

    def __init__(self, unreader, length):
        self.unreader = unreader
        self.length = length  # bytes still owed to the caller

    def read(self, size):
        """Return up to ``size`` bytes, never exceeding the declared length."""
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")
        # Clamp to what is left of the body first; the sign check below
        # therefore also rejects an already-negative remaining length.
        size = min(self.length, size)
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""

        collected = io.BytesIO()
        while True:
            piece = self.unreader.read()
            if not piece:
                break
            collected.write(piece)
            if collected.tell() >= size:
                break

        buffered = collected.getvalue()
        result, surplus = buffered[:size], buffered[size:]
        # Push any over-read bytes back for the next consumer.
        self.unreader.unread(surplus)
        self.length -= size
        return result
class EOFReader(object):
    """Reads a body delimited only by EOF of the underlying stream."""

    def __init__(self, unreader):
        self.unreader = unreader
        self.buf = io.BytesIO()   # bytes read but not yet handed out
        self.finished = False     # True once the unreader returned b""

    def read(self, size):
        """Return up to ``size`` bytes; b"" once the stream is drained."""
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""

        if not self.finished:
            # Top the buffer up until we have more than `size` bytes or EOF.
            piece = self.unreader.read()
            while piece:
                self.buf.write(piece)
                if self.buf.tell() > size:
                    break
                piece = self.unreader.read()
            if not piece:
                self.finished = True

        buffered = self.buf.getvalue()
        head, tail = buffered[:size], buffered[size:]
        self.buf = io.BytesIO()
        self.buf.write(tail)
        return head
class Body(object):
    """File-like wrapper around a body reader (Chunked/Length/EOFReader)
    adding buffering, line-oriented reads and the iterator protocol."""

    def __init__(self, reader):
        self.reader = reader
        # Bytes fetched from `reader` but not yet handed to the caller.
        self.buf = io.BytesIO()

    def __iter__(self):
        return self

    def __next__(self):
        ret = self.readline()
        if not ret:
            raise StopIteration()
        return ret
    next = __next__  # Python 2 iterator-protocol alias

    def getsize(self, size):
        # Normalise a read size: None or negative means "read everything".
        if size is None:
            return sys.maxsize
        elif not isinstance(size, int):
            raise TypeError("size must be an integral type")
        elif size < 0:
            return sys.maxsize
        return size

    def read(self, size=None):
        """Read up to ``size`` bytes (all remaining when size is None/<0)."""
        size = self.getsize(size)
        if size == 0:
            return b""
        # Serve entirely from the buffer when it already holds enough.
        if size < self.buf.tell():
            data = self.buf.getvalue()
            ret, rest = data[:size], data[size:]
            self.buf = io.BytesIO()
            self.buf.write(rest)
            return ret

        # Otherwise top the buffer up in 1 KiB steps until satisfied or EOF.
        while size > self.buf.tell():
            data = self.reader.read(1024)
            if not data:
                break
            self.buf.write(data)

        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        self.buf = io.BytesIO()
        self.buf.write(rest)
        return ret

    def readline(self, size=None):
        """Read one LF-terminated line, capped at ``size`` bytes."""
        size = self.getsize(size)
        if size == 0:
            return b""

        data = self.buf.getvalue()
        self.buf = io.BytesIO()

        ret = []
        while 1:
            # idx: one past the newline when found; else `size` when we
            # already have `size` bytes buffered; else 0 (keep reading).
            idx = data.find(b"\n", 0, size)
            idx = idx + 1 if idx >= 0 else size if len(data) >= size else 0
            if idx:
                ret.append(data[:idx])
                self.buf.write(data[idx:])
                break

            ret.append(data)
            size -= len(data)
            data = self.reader.read(min(1024, size))
            if not data:
                break

        return b"".join(ret)

    def readlines(self, size=None):
        # NOTE(review): `size` is accepted for file-API compatibility but
        # is ignored — the whole remaining body is read and split.
        ret = []
        data = self.read()
        while data:
            pos = data.find(b"\n")
            if pos < 0:
                ret.append(data)
                data = b""
            else:
                line, data = data[:pos + 1], data[pos + 1:]
                ret.append(line)
        return ret
|
[
"pran.sinha1.0@gmail.com"
] |
pran.sinha1.0@gmail.com
|
6db6eac332058fd6e1c5a656fd107b838cd08767
|
2e2494148f19a2f51383a7eb8853c746a60b6db9
|
/MemoryBlock.py
|
d5ff3a06afac5bbd0652a8c34e08b6793ac6c744
|
[] |
no_license
|
GrimaldoMike/Compiladores
|
a79614d77ac9baed3837d76ccfa70f664b62b3ee
|
2d01512b537f523d608d79e91ec163ee7e2ab529
|
refs/heads/master
| 2021-01-10T17:40:55.376425
| 2016-05-06T20:23:58
| 2016-05-06T20:23:58
| 53,536,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,580
|
py
|
#NO FUE IMPLEMENTADO PARA EL PROYECTO
class MemoryBlock:
    """Bookkeeping for five typed memory regions laid out sequentially.

    Each region is tracked as ``[start_address, allocated_count]``; a
    region's exclusive upper bound is the next region's start address
    (``limit`` for the last, string, region). ``add_*`` methods reserve
    slots and return the first reserved address, or None (after printing
    a diagnostic) when the region would overflow.
    """

    def __init__(self, start_dir, ints_start_dir, floats_start_dir, chars_start_dir, strings_start_dir, limit):
        '''All blocks have to be in ascending order and non overlapping on init. We leave that to the developer that uses this class'''
        self.bools = [start_dir, 0]
        self.ints = [ints_start_dir, 0]
        self.floats = [floats_start_dir, 0]
        self.chars = [chars_start_dir, 0]
        self.strings = [strings_start_dir, 0]
        self.limit = limit

    def __str__(self):
        return "MemoryBlock ({start}-{end}): {boolno} bools, {intno} ints, {floatno} floats, {charno} chars, {stringno} strings".format( start=self.bools[0], end=self.limit, boolno=self.bools[1], intno=self.ints[1], floatno=self.floats[1], charno=self.chars[1], stringno=self.strings[1])

    def _allocate(self, block, upper_bound, type_name, num):
        '''Reserve num slots in block if they fit below upper_bound.
        Returns the first reserved address, or None (after printing a
        stack-overflow diagnostic) when the region would overflow.'''
        if (block[0] + block[1] + num) < upper_bound:
            block[1] += num
            return block[0] + block[1] - num
        print ('Stackoverflow: Se intenta exceder el limite de memoria para el %s.' % type_name)

    def add_bool(self, num=1):
        '''Adds a var to the memory block'''
        return self._allocate(self.bools, self.ints[0], 'boolean', num)

    def add_int(self, num=1):
        '''Adds a var to the memory block'''
        return self._allocate(self.ints, self.floats[0], 'int', num)

    def add_float(self, num=1):
        '''Adds a var to the memory block'''
        # Note: fixes the original's misspelled "foat" in the diagnostic.
        return self._allocate(self.floats, self.chars[0], 'float', num)

    def add_char(self, num=1):
        '''Adds a var to the memory block'''
        return self._allocate(self.chars, self.strings[0], 'char', num)

    def add_string(self, num=1):
        '''Adds a var to the memory block'''
        return self._allocate(self.strings, self.limit, 'string', num)
|
[
"grimaldo.mike@hotmail.com"
] |
grimaldo.mike@hotmail.com
|
3881ce11f6a9512b8d49fa4fb9fdd8eedf5e4ae6
|
954f9a154066c65374b475f925f2e5a138a14162
|
/bigdatamining/text_based/parser.py
|
548979216515cbe8f5a21eb422fa2f33c8bfa6f7
|
[] |
no_license
|
reloadbrain/recommEngine
|
8263673945b5af1f73d26c22625a3090e071e952
|
9d3a4227916cb8583e1faef572824a54a067e7d1
|
refs/heads/master
| 2020-03-11T01:49:48.925712
| 2015-12-03T13:31:48
| 2015-12-03T13:31:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
import nltk
import sys
import gzip
import json
######################################################
#
# Parser that take a text and reduce it to a tokenized/stemmed
# string.
#
# To debug call python parser.py
#
######################################################
def extract_terms(text):
    """Tokenize ``text``, keep only the nouns, and return them stemmed
    and joined into a single space-separated string."""
    # POS-tag the word tokens so nouns can be filtered out.
    tagged = nltk.pos_tag(nltk.word_tokenize(text))
    # Every Penn Treebank noun tag (NN, NNS, NNP, NNPS) contains 'NN'.
    nouns = [word for word, tag in tagged if 'NN' in tag]
    # Stem the selected nouns and join them into one string.
    stemmer = nltk.SnowballStemmer("english")
    return ' '.join(stemmer.stem(word) for word in nouns)
def extract_reviews(path):
    """Read a gzipped file of JSON review records (one object per line).

    Returns a list of ``{'asin': ..., 'text': ...}`` dicts in first-seen
    order, concatenating (space-separated) the texts of reviews that share
    the same ASIN. Records without both 'asin' and 'reviewText' are skipped.
    """
    source = gzip.open(path, 'r')
    reviews = []
    # Maps an ASIN to the index of its entry in `reviews`, replacing the
    # original O(n) linear rescan per duplicate with an O(1) lookup.
    index_by_asin = {}
    for line in source:
        parsedline = json.loads(line)
        try:
            if 'asin' in parsedline and 'reviewText' in parsedline:  # ASIN not exists -> skip
                asin = parsedline['asin']
                text = parsedline['reviewText']
                if asin in index_by_asin:
                    # Item already has a review: concatenate this one.
                    entry = reviews[index_by_asin[asin]]
                    entry['text'] = entry['text'] + " " + text
                else:
                    index_by_asin[asin] = len(reviews)
                    reviews.append({'asin': asin, 'text': text})
        except (RuntimeError, TypeError, NameError):
            # NOTE(review): str(RuntimeError) prints the class, not the
            # caught exception instance — kept for identical output.
            print("EXCEPTION: error " + str(RuntimeError))
    return reviews
if __name__ == '__main__':
    # Manual smoke test: read a sentence and print its extracted noun terms.
    # NOTE: raw_input and the print statement make this Python 2 only.
    test_string = raw_input("Please enter something: ")
    result = extract_terms (test_string)
    print result
|
[
"Martintoni@MacBook-Pro-di-Martintoni.local"
] |
Martintoni@MacBook-Pro-di-Martintoni.local
|
32f142de985f427b2e3ecba10fa765f0c368c943
|
9460f8e795d65ff8667a9c1b0da7a141d2a9c849
|
/blog/views.py
|
ebff93e410cfe142a882386fce08dfa8d44d8c0c
|
[] |
no_license
|
wadewow/myblog
|
3a06614872637d502a5b24c802429cfdb7b8e0a8
|
af498894b3379bc876a93142122a29d31119735c
|
refs/heads/master
| 2021-01-01T16:54:40.704270
| 2017-07-21T13:40:51
| 2017-07-21T13:40:51
| 97,951,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
# coding:utf-8
from django.shortcuts import render
# from django.http import HttpResponse
import models
# Create your views here.
def home(request):
    '''
    Render the blog home page with every article.
    The query below is equivalent to ``SELECT * FROM article``.
    '''
    articles = models.Article.objects.all()
    return render(request, 'blog/blog_home.html', {'articles': articles})
def content(request, article_id):
    # Look up one article by primary key and render its detail page.
    article = models.Article.objects.get(pk = article_id)
    return render(request, 'blog/blog_content.html', {'article_content': article})
def edit(request,article_id):
    # Render the edit form: id '0' means "create new" (empty form),
    # anything else pre-fills the form with the existing article.
    print 'id等于:', article_id
    if str(article_id) == '0':
        return render(request,'blog/blog_edit.html')
    article = models.Article.objects.get(pk = article_id)
    return render(request,'blog/blog_edit.html',{'article':article})
def form_action(request):
    # Handle the edit form POST: create a new article when article_id is
    # '0' (the default), otherwise update the existing one.
    title = request.POST.get('title') # 'title' here matches the input element's name attribute
    content = request.POST.get('content')
    article_id = request.POST.get('article_id', '0')
    if article_id == '0':
        # New article: create it and go back to the article list.
        models.Article.objects.create(title = title, content = content)
        articles = models.Article.objects.all()
        return render(request,'blog/blog_home.html',{'articles':articles})
    # Existing article: update its fields and show the detail page.
    article = models.Article.objects.get(pk = article_id)
    article.title = title
    article.content = content
    article.save()
    return render(request, 'blog/blog_content.html', {'article_content': article})
|
[
"949768106@qq.com"
] |
949768106@qq.com
|
866fcd777ed57198ecc587fa85d3a71e6974ea99
|
9d1491368c5e87760131ba27d252ee2d10620433
|
/gammapy/spectrum/powerlaw.py
|
39edaeca1329962422682f6d153c6cf79d653ff1
|
[
"BSD-3-Clause"
] |
permissive
|
cnachi/gammapy
|
f9295306a8e81d0b7f4d2111b3fa3679a78da3f7
|
3d3fc38c111d2f490d984082750f8003580fe06c
|
refs/heads/master
| 2021-01-20T23:37:59.409914
| 2016-06-09T08:36:33
| 2016-06-09T08:36:33
| 60,764,807
| 0
| 0
| null | 2016-06-09T09:55:54
| 2016-06-09T09:55:54
| null |
UTF-8
|
Python
| false
| false
| 6,540
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Power law spectrum helper functions.
Convert differential and integral fluxes with error propagation.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
# Public API of this module.
__all__ = [
    'power_law_evaluate',
    'power_law_pivot_energy',
    'df_over_f',
    'power_law_flux',
    'power_law_integral_flux',
    'g_from_f',
    'g_from_points',
    'I_from_points',
    'f_from_points',
    'f_with_err',
    'I_with_err',
    'compatibility',
]

E_INF = 1e10  # practically infinitely high energy, used as default upper bound
g_DEFAULT = 2  # default power-law spectral index
def power_law_evaluate(energy, norm, gamma, energy_ref):
    r"""Differential flux of a power law at a given energy.

    .. math:: f(E) = N (E / E_0) ^ {- \Gamma}

    Parameters
    ----------
    energy : array_like
        Energy at which to compute the differential flux
    norm : array_like
        Normalisation ``N`` at the reference energy
    gamma : array_like
        Power law spectral index
    energy_ref : array_like
        Reference energy ``E0``
    """
    scaled_energy = energy / energy_ref
    return norm * scaled_energy ** (-gamma)
def power_law_pivot_energy(energy_ref, f0, d_gamma, cov):
    """Pivot (a.k.a. decorrelation) energy: where df / f is smallest.

    Reference: http://arxiv.org/pdf/0910.4881
    """
    exponent = cov / (f0 * d_gamma ** 2)
    return energy_ref * np.exp(exponent)
def df_over_f(e, e0, f0, df0, dg, cov):
    """Relative flux error at energy ``e`` (used to draw butterflies).

    Reference: http://arxiv.org/pdf/0910.4881 Equation (1)
    """
    log_ratio = np.log(e / e0)
    quad_sum = ((df0 / f0) ** 2
                - 2 * cov / f0 * log_ratio
                + (dg * log_ratio) ** 2)
    return np.sqrt(quad_sum)
def _conversion_factor(g, e, e1, e2):
"""Conversion factor between differential and integral flux."""
# In gamma-ray astronomy only falling power-laws are used.
# Here we force this, i.e. give "correct" input even if the
# user gives a spectral index with an incorrect sign.
g = np.abs(g)
term1 = e / (-g + 1)
term2 = (e2 / e) ** (-g + 1) - (e1 / e) ** (-g + 1)
return term1 * term2
def power_law_flux(I=1, g=g_DEFAULT, e=1, e1=1, e2=E_INF):
    """Differential flux at ``e`` for a given integral flux.

    Parameters
    ----------
    I : array_like
        Integral flux in the [``e1``, ``e2``] band
    g : array_like
        Power law spectral index
    e : array_like
        Energy at which to compute the differential flux
    e1 : array_like
        Energy band minimum
    e2 : array_like
        Energy band maximum

    Returns
    -------
    flux : `numpy.array`
        Differential flux at ``e``.
    """
    factor = _conversion_factor(g, e, e1, e2)
    return I / factor
def power_law_integral_flux(f=1, g=g_DEFAULT, e=1, e1=1, e2=E_INF):
    """Integral flux in [``e1``, ``e2``] for a given differential flux.

    Parameters
    ----------
    f : array_like
        Differential flux at ``e``
    g : array_like
        Power law spectral index
    e : array_like
        Energy at which the differential flux is given
    e1 : array_like
        Energy band minimum
    e2 : array_like
        Energy band maximum

    Returns
    -------
    flux : `numpy.array`
        Integral flux in the [``e1``, ``e2``] band.
    """
    factor = _conversion_factor(g, e, e1, e2)
    return f * factor
def g_from_f(e, f, de=1):
    """Spectral index at energy ``e`` from a finite difference of ``f``."""
    e_lo, e_hi = e, e + de
    return g_from_points(e_lo, e_hi, f(e_lo), f(e_hi))
def g_from_points(e1, e2, f1, f2):
    """Spectral index implied by two differential flux points."""
    flux_ratio = f2 / f1
    energy_ratio = e2 / e1
    return -np.log(flux_ratio) / np.log(energy_ratio)
def I_from_points(e1, e2, f1, f2):
    """Integral flux in the [e1, e2] bin for a power law through both points."""
    g = g_from_points(e1, e2, f1, f2)
    slope = -g + 1
    return f1 * e1 / slope * ((e2 / e1) ** slope - 1)
def f_from_points(e1, e2, f1, f2, e):
    """Power-law interpolation (linear in log-log space) between two points."""
    e1 = np.asarray(e1, float)
    e2 = np.asarray(e2, float)
    f1 = np.asarray(f1, float)
    f2 = np.asarray(f2, float)
    e = np.asarray(e, float)

    # Slope of the straight line in (log E, log f) space.
    slope = np.log(f2 / f1) / np.log(e2 / e1)
    log_flux = np.log(f1) + np.log(e / e1) * slope
    return np.exp(log_flux)
def f_with_err(I_val=1, I_err=0, g_val=g_DEFAULT, g_err=0,
               e=1, e1=1, e2=E_INF):
    """`power_law_flux` with Gaussian error propagation.

    Wraps the inputs in ``uncertainties`` arrays so the caller does not
    have to know about that module; returns ``(nominal_values, std_devs)``.
    """
    from uncertainties import unumpy
    integral_flux = unumpy.uarray(I_val, I_err)
    index = unumpy.uarray(g_val, g_err)
    flux = power_law_flux(integral_flux, index, e, e1, e2)
    return unumpy.nominal_values(flux), unumpy.std_devs(flux)
def I_with_err(f_val=1, f_err=0, g_val=g_DEFAULT, g_err=0,
               e=1, e1=1, e2=E_INF):
    """`power_law_integral_flux` with Gaussian error propagation.

    Wraps the inputs in ``uncertainties`` arrays so the caller does not
    have to know about that module; returns ``(nominal_values, std_devs)``.
    """
    from uncertainties import unumpy
    flux = unumpy.uarray(f_val, f_err)
    index = unumpy.uarray(g_val, g_err)
    integral_flux = power_law_integral_flux(flux, index, e, e1, e2)
    return unumpy.nominal_values(integral_flux), unumpy.std_devs(integral_flux)
def compatibility(par_low, par_high):
    """Quantify spectral compatibility of two power-law measurements.

    Reference: 2008ApJ...679.1299F Equation (2)

    ``par_low`` and ``par_high`` are the measured parameters of the low
    and high energy band, each a list in the order::

        e, f, f_err, g, g_err

    where ``e`` is the pivot energy, ``f`` the flux density and ``g``
    the spectral index.

    Returns ``(g_match, sigma_low, sigma_high, sigma_comb)``.
    """
    # Unpack the per-band power-law parameters.
    e_high, f_high, f_err_high, g_high, g_err_high = par_high
    e_low, f_low, f_err_low, g_low, g_err_low = par_low

    # g_match: index of the straight line connecting both measurements in
    # the (log e, log f) plane.
    log_delta_e = np.log10(e_high) - np.log10(e_low)
    log_delta_f = np.log10(f_high) - np.log10(f_low)
    g_match = -log_delta_f / log_delta_e

    # Deviation of the matching index from each band's measured index, in
    # units of that band's standard deviation (Funk et al. 2008, eqn. 2),
    # combined in quadrature.
    sigma_low = (g_match - g_low) / g_err_low
    sigma_high = (g_match - g_high) / g_err_high
    sigma_comb = np.sqrt(sigma_low ** 2 + sigma_high ** 2)

    return g_match, sigma_low, sigma_high, sigma_comb
|
[
"Deil.Christoph@gmail.com"
] |
Deil.Christoph@gmail.com
|
6fe7640c64822df4cca889a856f9099d33231595
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02860/s554783475.py
|
ba781c1a512917a311a200fc59b2e495d4dab5c5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
# Read a length n and a string s, then decide whether s is a "doubled"
# string, i.e. s = t + t for some t.
n = int(input())
s = input()

if n % 2 == 1:
    # An odd length can never split into two identical halves.
    print("No")
else:
    half = int(n / 2)
    # Compare the halves index by index (keeps the original behaviour,
    # including an IndexError if s is shorter than n characters).
    mismatch = False
    for i in range(half):
        if s[i] != s[i + half]:
            mismatch = True
    print("No" if mismatch else "Yes")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
023f6b987e1a2d0d2183da7b4e4d3ffb07f79497
|
ac0dc4a4c9960bbbdca2db0eaf7c839f552b0546
|
/nomdivertit.py
|
e0d0b5f3828c2a5978d6db4a5ee2855b81e97965
|
[] |
no_license
|
HectorGarciaPY/primer1.py
|
b7d237b82d6e3ca2cd09ea771a6e152c34fb55ff
|
802aac5f442b4e1956cdd4f63a7767b94c30a775
|
refs/heads/master
| 2023-05-14T16:39:53.271639
| 2021-06-02T10:17:09
| 2021-06-02T10:17:09
| 297,622,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# Repeatedly ask the user for a "funny" name: a name qualifies when its
# first two characters mirror its last two. After two rejected names the
# program prints a farewell and exits.
contador=0
while True:
    print("Escribe un nombre divertido:")
    x=input()
    # Funny test: first char equals last char AND second equals second-to-last.
    if x[0]==x[(len(x)-1)] and x[1] == x[(len(x)-2)]:
        print("Es un nom divertit")
    else:
        print("Ets un avorrit, el nom no mola")
        contador=contador+1
    # Two failures end the session.
    if contador==2:
        print("No tens gens d'originalitat. No pots tenir gos, no pots sortir al carrer.\n"" Adéu!")
        break
|
[
""
] | |
f2a7ee60c707d01abd0cb97f85cf647ce9ebf4e3
|
a6df74bc7c139734bd9ce9f48d51e08fdc7d7efb
|
/article/migrations/0006_auto_20210311_1721.py
|
116d4f2900f9f0f393ad9eb58894d557a6c11b5c
|
[] |
no_license
|
Erlan1998/python_group_7_homework_68_Erlan_Kurbanaliev
|
5a7f210e51f1998e5d52cdeb42538f2786af3f9f
|
fdc92be2c5187c78fecdc713f58e0e3e9fc62cb1
|
refs/heads/master
| 2023-05-03T17:01:59.066596
| 2021-05-26T13:28:41
| 2021-05-26T13:28:41
| 368,165,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
# Generated by Django 3.1.6 on 2021-03-11 17:21
from django.db import migrations
class Migration(migrations.Migration):
    # Renames Article.tags to tags_old (a schema-level rename only).

    dependencies = [
        ('article', '0005_auto_20210311_1319'),
    ]

    operations = [
        migrations.RenameField(
            model_name='article',
            old_name='tags',
            new_name='tags_old',
        ),
    ]
|
[
"kurbanalieverlan@gmail.com"
] |
kurbanalieverlan@gmail.com
|
3b1a469d9c82b2869b62462652c2a0c924e3bb31
|
470e0a9dc07edfe13ca68f2a1b6d60d0e395e095
|
/3-2.py
|
b67172d7abbc097ec46a4caa894c73eba80c02c4
|
[] |
no_license
|
mj08021/ThisIsCodingTestforGetaJob
|
77ce8edab2bd855db9b96597982f58251d0bd31e
|
ad98b368956937065c6c396b2806351a4eaf12a2
|
refs/heads/main
| 2023-04-28T10:51:02.012344
| 2021-05-16T05:51:58
| 2021-05-16T05:51:58
| 316,853,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
# Read N, M, K separated by whitespace
n, m, k = map(int, input().split())
# Read the N numbers separated by whitespace
data = list(map(int, input().split()))

data.sort()  # sort the input numbers ascending
first = data[n - 1]  # largest number
second = data[n - 2]  # second largest number

# How many times the largest number is added: k uses of `first` per
# period of length (k + 1), plus the remainder of m over that period.
count = int(m / (k + 1)) * k
count += m % (k + 1)

result = 0
result += (count) * first  # add the largest number
result += (m - count) * second  # add the second largest number

print(result)  # print the final answer

# ex) input
# 5 8 3
# 2 4 5 4 6
|
[
"replituser@example.com"
] |
replituser@example.com
|
5e0f3c0a44b787914d3dce78b805204bdbc0bee6
|
45ff5b1fc0414693087050cc738010a39833a1c6
|
/backend/app/models/user_model.py
|
40b513eef4cb444ed6161121188fa11d2eab1dd3
|
[] |
no_license
|
hanson190505/full-stack-fastapi-vue
|
8606971d86dddc341bd98fa8310c70e4aaf54560
|
37121a3ddc50bcabea69433ac1d8318f7c9d870e
|
refs/heads/main
| 2023-03-17T20:22:13.373644
| 2021-03-01T16:23:49
| 2021-03-01T16:23:49
| 327,338,913
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
from app.db.base_class import Base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, String, JSON, Integer, ForeignKey
class DepartmentModel(Base):
    # Department in a self-referential tree: each row may point at a parent
    # department; children are eagerly joined up to 3 levels deep.
    name = Column(String(64), index=True)
    parent_department = Column(Integer, ForeignKey('departmentmodel.id'), nullable=True)
    sub_department = relationship('DepartmentModel', lazy='joined', join_depth=3)
    # users = relationship('UserModel', back_populates='department')
class UserModel(Base):
    # Application user; free-form extra data lives in the JSON `detail` column.
    name = Column(String(128), index=True)
    hashed_password = Column(String(1024))
    mail = Column(String(64), nullable=True, index=True)
    phone = Column(String(32), nullable=True, index=True)
    detail = Column(JSON, nullable=True)
    # department = relationship('DepartmentModel', back_populates='users')
class RouteModel(Base):
    # Frontend route in a self-referential tree (pid points at the parent).
    name = Column(String(64))
    path = Column(String(64))
    pid = Column(Integer, ForeignKey('routemodel.id'), nullable=True)
    title = Column(String(64), nullable=True)
    detail = Column(JSON, nullable=True)
    children = relationship('RouteModel')
|
[
"413506012@qq.com"
] |
413506012@qq.com
|
d8863ebbb7cfbc6f46a2659c40eff9f0092bdcf6
|
eb8660d8a7c7557af0fd681a4cce305e1fc73ef9
|
/grpc/stt_client.py
|
3179370f360474849612723415585af7faaf5ca0
|
[
"Apache-2.0"
] |
permissive
|
morfeusys/vosk-server
|
d4639eaaae7b2e171bd99618513100100d94e773
|
955517bfcc8a7ef3f93ed5ace50052234aa3bf74
|
refs/heads/master
| 2021-01-14T19:18:55.803416
| 2020-02-24T12:57:55
| 2020-02-24T12:57:55
| 242,727,733
| 1
| 0
|
Apache-2.0
| 2020-02-24T12:14:17
| 2020-02-24T12:14:16
| null |
UTF-8
|
Python
| false
| false
| 1,633
|
py
|
#!/usr/bin/python3
import argparse
import grpc
import stt_service_pb2
import stt_service_pb2_grpc
CHUNK_SIZE = 4000
def gen(audio_file_name):
    """Yield the gRPC request stream: first the recognition configuration,
    then the audio file contents in CHUNK_SIZE-byte pieces."""
    specification = stt_service_pb2.RecognitionSpec(
        partial_results=True,
        audio_encoding='LINEAR16_PCM',
        sample_rate_hertz=8000
    )
    streaming_config = stt_service_pb2.RecognitionConfig(specification=specification)

    # The first message of the stream carries only the configuration.
    yield stt_service_pb2.StreamingRecognitionRequest(config=streaming_config)

    with open(audio_file_name, 'rb') as f:
        data = f.read(CHUNK_SIZE)
        while data != b'':
            yield stt_service_pb2.StreamingRecognitionRequest(audio_content=data)
            data = f.read(CHUNK_SIZE)
def run(audio_file_name):
    """Stream the audio file to the STT service on localhost:5001 and print
    every recognition chunk (partial and final) that it returns."""
    channel = grpc.insecure_channel('localhost:5001')
    stub = stt_service_pb2_grpc.SttServiceStub(channel)
    it = stub.StreamingRecognize(gen(audio_file_name))
    try:
        for r in it:
            try:
                print('Start chunk: ')
                for alternative in r.chunks[0].alternatives:
                    print('alternative: ', alternative.text)
                    print('words: ', alternative.words)
                print('Is final: ', r.chunks[0].final)
                print('')
            except LookupError:
                # A response may arrive without any chunks yet.
                print('No available chunks')
    except grpc._channel._Rendezvous as err:
        # NOTE(review): relies on a private grpc class (_Rendezvous /
        # _state); may break across grpc versions.
        print('Error code %s, message: %s' % (err._state.code, err._state.details))
if __name__ == '__main__':
    # CLI: --path points at the audio file to stream for transcription.
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', required=True, help='audio file path')
    args = parser.parse_args()
    run(args.path)
|
[
"nshmyrev@gmail.com"
] |
nshmyrev@gmail.com
|
c82dcbc9cc057d4d5d64d87082af0f1e59d0a74b
|
1eddf34d87d1c8fa06a71dd934bfdc4de8fd6752
|
/binary_files_generation/stdp_table_generator.py
|
c928969be2abe27e46d842b9ea2238768d31ef9e
|
[] |
no_license
|
galluppf/spackage_conv
|
b6367f0cd93ef02891512733e83a70f984f9b2a3
|
902c6d3be1a4fb7692056814eafd4d94a75a59d6
|
refs/heads/master
| 2021-01-13T02:32:09.383591
| 2013-09-30T12:04:41
| 2013-09-30T12:04:41
| 12,670,028
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,185
|
py
|
#!/usr/bin/python
"""
Utility to generate an STDP table for SpiNNaker
__author__="francesco"
__date__ ="$22-Mar-2011 18:01:14$"
"""
BINARY_DIRECTORY = '../binaries/'
import ConfigParser, sys
from numpy import arange, zeros
from math import exp, log
from struct import pack
from pacman import *
# packs an array with a given mask for every element. maybe there's a python function doing this? like [ out += pack(mask,i) for i in array ]
def packArray(array, mask):
    # Pack every element of `array` with struct format `mask` and return
    # the concatenated binary string.
    # NOTE(review): this definition is shadowed by an identical packArray
    # defined again further down this module; only the later one is used.
    out = ""
    for i in array:
        out += pack(mask,i) # h = 4bit words
    return out
# Module debug flag, read from the shared pacman configuration.
DEBUG = pacman_configuration.getboolean('stdp_table_generator', 'debug')
# Fixed-point scale factor: weights/table values are stored as value * 256.
p1 = 256
# packs an array with a given mask for every element
def packArray(array, mask):
    """Pack each element of ``array`` with struct format ``mask`` and
    return the concatenated binary string.

    Uses a single join instead of the original quadratic ``+=`` loop;
    ``b""`` keeps the result type identical on Python 2 (str) while also
    being correct on Python 3 (bytes).
    """
    return b"".join(pack(mask, i) for i in array)
def setHeaders(w_min, w_max, ltp_time_window, ltd_time_window, resolution, words):
    """Pack the STDP table header.

    Layout: w_min and w_max scaled by p1 as little-endian int16, then the
    two time windows, the resolution, log2(resolution) and the word count,
    each as one signed byte.

    NOTE(review): callers in this module pass ltd_time_window before
    ltp_time_window, so the parameter names here look swapped relative to
    the call sites — verify before relying on the names.
    """
    header = pack("<h", w_min * p1)
    header += pack("<h", w_max * p1)
    for byte_value in (ltp_time_window,
                       ltd_time_window,
                       resolution,
                       int(log(resolution, 2)),
                       words):
        header += pack("<b", byte_value)
    return header
def calc_STDP_table(ltp_time_window, ltd_time_window, resolution, A_plus, A_minus, tau_plus, tau_minus, words, zero_value=0):
    """Compute the scaled STDP lookup table (LTD side, dt=0 value, LTP side)
    as a list of ints, zeroed outside the requested time windows."""
    # print ltd_time_window+ltp_time_window, resolution*words
    assert ltd_time_window+ltp_time_window < resolution*words*32*2, "Time window exceeds maxmimum size of %d msec. Decrease ltd/ltp time window or resolution" % (resolution*words*32*2+1)
    # Time axes: LTD counts down towards the dt=0 point, LTP counts up from it.
    ltd = arange(resolution*32*4, 0, -resolution)
    ltp = arange(resolution, resolution*4*32+resolution, resolution)
    if DEBUG: print "[ stdp_table_generator ] :" ,ltd, ltp
    out = []
    # LTD half: average of the exponential at l and l+1 (trapezoid-style).
    for l in ltd:
        out.append( (A_minus*exp(float(-l)/tau_minus) + A_minus*exp(float(-(l+1))/tau_minus))/2 )
    if zero_value != 0: print "[ stdp_table_generator ] : setting value in dt = 0 to %f" % zero_value
    out.append(zero_value)
    # LTP half, same averaging.
    for l in ltp:
        out.append( (A_plus*exp(float(-l)/tau_plus) + A_plus*exp(float(-(l+1))/tau_plus))/2 )
    # Scaling
    # Convert to fixed point by scaling with p1 (= 256) and truncating.
    out = [ int(i*p1) for i in out ]
    # words*32 is the size of the ltp and ltd window. The whole table is words*32 + 1 (value in 0) + words*32 = words*32*2+1 bytes long
    # left_bound = int(resolution*32*4-ltd_time_window/resolution)
    # right_bound = int(words*32+1+ltp_time_window/resolution)
    left_bound = 128-ltd_time_window
    right_bound = 129 + ltp_time_window
    # Truncating the time window with the one specified by ltd/ltp_time_window
    # print left_bound, right_bound
    out[:left_bound] = zeros(left_bound, 'int')
    out[right_bound:] = zeros(128-ltp_time_window, 'int')
    if DEBUG: print out
    return out
def compile_stdp_table(cfg, out_filename):
    """
    compiles an stdp table given dictionary cfg and an output file name
    (headers via setHeaders, then one signed byte per table entry).
    cfg is in the format
    cfg['ltp_time_window'],
    cfg['ltd_time_window'],
    cfg['resolution'],
    cfg['A_plus'],
    cfg['A_minus'],
    cfg['tau_plus'],
    cfg['tau_minus'],
    cfg['words'],
    cfg['zero_value']
    """
    print "[ stdp_table_generator ] : Writing file", out_filename
    f = open(out_filename, mode='w+')
    print "[ stdp_table_generator ] : Writing headers"
    # NOTE(review): setHeaders declares (..., ltp_time_window, ltd_time_window, ...)
    # but is called here with ltd before ltp -- confirm which order is intended.
    f.write(setHeaders(cfg['w_min'], cfg['w_max'], cfg['ltd_time_window'], cfg['ltp_time_window'], cfg['resolution'], cfg['words']))
    # table body: the full (truncated) STDP kernel, packed as signed bytes
    s = calc_STDP_table(cfg['ltp_time_window'],
                        cfg['ltd_time_window'],
                        cfg['resolution'],
                        cfg['A_plus'],
                        cfg['A_minus'],
                        cfg['tau_plus'],
                        cfg['tau_minus'],
                        cfg['words'],
                        cfg['zero_value'])
    f.write(packArray(s,'<b'))
    f.close()
    print "[ stdp_table_generator ] : Done!"
def compile_stdp_tts_table(cfg, out_filename):
    """
    compiles an stdp table for the TimeToSpike rule, given dictionary cfg and
    an output file name; identical to compile_stdp_table except that the
    little-endian short cfg['L_parameter'] is appended after the table body.
    cfg is in the format
    cfg['ltp_time_window'],
    cfg['ltd_time_window'],
    cfg['resolution'],
    cfg['A_plus'],
    cfg['A_minus'],
    cfg['tau_plus'],
    cfg['tau_minus'],
    cfg['words'],
    cfg['zero_value']
    """
    print "[ stdp_table_generator ] : Writing file", out_filename
    f = open(out_filename, mode='w+')
    print "Writing headers"
    # NOTE(review): as in compile_stdp_table, ltd/ltp are passed swapped
    # relative to setHeaders' parameter names -- confirm intended.
    f.write(setHeaders(cfg['w_min'], cfg['w_max'], cfg['ltd_time_window'], cfg['ltp_time_window'], cfg['resolution'], cfg['words']))
    s = calc_STDP_table(cfg['ltp_time_window'],
                        cfg['ltd_time_window'],
                        cfg['resolution'],
                        cfg['A_plus'],
                        cfg['A_minus'],
                        cfg['tau_plus'],
                        cfg['tau_minus'],
                        cfg['words'],
                        cfg['zero_value'])
    f.write(packArray(s,'<b'))
    # TimeToSpike-specific trailer
    f.write(pack("<h", cfg['L_parameter']))
    f.close()
    print "Done!"
def compile_stdp_table_from_db(db):
    # Generate one STDP table file per plasticity-parameter row in the db,
    # written as stdp_table_<x>_<y>_<p>.dat under BINARY_DIRECTORY.
    print "\n[ stdp_table_generator ] : calculating STDP tables"
    plasticity_parameters = db.get_plasticity_parameters()
    if len(plasticity_parameters) < 1:
        print "[ stdp_table_generator ] : Nothing to do...\n"
        return
    for p in plasticity_parameters:
        if DEBUG: print p
        out_file_name = BINARY_DIRECTORY + "stdp_table_" + str(p['x']) + "_" + str(p['y']) + "_" + str(p['p']) + ".dat"
        # FIXME read defaults from pacman cfg
        # SECURITY NOTE(review): eval of a database-supplied string -- fine for
        # a trusted local db, dangerous on untrusted input.
        parameters = eval (p['parameters'])
        if DEBUG: print parameters
        # fill in any option missing from the row with the configured default
        if 'ltd_time_window' not in parameters.keys(): parameters['ltd_time_window'] = pacman_configuration.getint('stdp_table_generator', 'ltd_time_window')
        if 'ltp_time_window' not in parameters.keys(): parameters['ltp_time_window'] = pacman_configuration.getint('stdp_table_generator', 'ltp_time_window')
        if 'words' not in parameters.keys(): parameters['words'] = pacman_configuration.getint('stdp_table_generator', 'words')
        if 'zero_value' not in parameters.keys(): parameters['zero_value'] = eval(pacman_configuration.get('stdp_table_generator', 'zero_value'))
        if DEBUG:
            print "[ stdp_table_generator ] : parameters: ", parameters
            print "[ stdp_table_generator ] : p: ", p
        # dispatch on the plasticity rule; FullWindow and SpikePairRule
        # share the same table format, TimeToSpike adds a trailer
        if p['method'] == 'FullWindow':
            print "[ stdp_table_generator ] : computing STDP table for FullWindow rule"
            compile_stdp_table(parameters, out_file_name)
        if p['method'] == 'SpikePairRule':
            print "[ stdp_table_generator ] : computing STDP table for SpikePair rule"
            compile_stdp_table(parameters, out_file_name)
        if p['method'] == 'TimeToSpike':
            print "[ stdp_table_generator ] : computing STDP table for TimeToSpike rule"
            compile_stdp_tts_table(parameters, out_file_name)
if __name__ == "__main__":
    # usage: python <script> <db-file>
    db = load_db(sys.argv[1]) # imports the db (it will also load the model library by default)
    compile_stdp_table_from_db(db)
|
[
"francesco@inspiron.local"
] |
francesco@inspiron.local
|
ab6ea8ec66229564a0cc2f4945f5415503dcfec8
|
1bb20fd77f973f23878c04b1784569ebe76ca645
|
/model/distrib_state.py
|
3138fe08510eea286b89c9cba72bb01bc01931c2
|
[] |
no_license
|
keyofdeath/Tp-conceprion-objet
|
af25a838231547678e24aea7bd59533946a554e7
|
45e03ef694684c364f38f2592c5d6675fde04bdd
|
refs/heads/master
| 2020-04-23T10:26:19.035227
| 2019-02-17T09:32:46
| 2019-02-17T09:32:46
| 171,104,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,344
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging.handlers
import os
# Module logger: a nightly-rotated file log (60 backups kept) plus a console
# mirror, both sharing the same format.
PYTHON_LOGGER = logging.getLogger(__name__)
if not os.path.exists("log"):
    os.mkdir("log")
HDLR = logging.handlers.TimedRotatingFileHandler("log/DistribState.log",
                                                 when="midnight", backupCount=60)
STREAM_HDLR = logging.StreamHandler()
FORMATTER = logging.Formatter("%(asctime)s %(filename)s [%(levelname)s] %(message)s")
HDLR.setFormatter(FORMATTER)
STREAM_HDLR.setFormatter(FORMATTER)
PYTHON_LOGGER.addHandler(HDLR)
PYTHON_LOGGER.addHandler(STREAM_HDLR)
PYTHON_LOGGER.setLevel(logging.DEBUG)
# Absolute path to the folder location of this python file
FOLDER_ABSOLUTE_PATH = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
class DistrbState:
    """Base state of the cash-dispenser (distributeur) state machine.

    Every transition raises by default; each concrete state subclass
    overrides only the transitions that are legal in that state.
    """
    # NOTE(review): the messages read "Can call" but likely mean "Can't
    # call" -- confirm with the author before changing them.

    def __init__(self, distrib):
        """
        :param distrib: (Distrib) the dispenser owning this state
        """
        self.distrib = distrib

    def inserer_carte(self, card_number):
        """Insert a card into the machine and remember it as the current card.

        :param card_number: (string) number of the inserted card
        :return: (bool) True if the card was found, False otherwise
        """
        raise Exception("Inserer carte: Can call this function in this state")

    def saisire_code(self, code):
        """Check whether the code typed by the user is correct.

        :param code: (string) the code entered
        :return: (bool) True if the code is correct, False otherwise
        """
        raise Exception("Saisire Code: Can call this function in this state")

    def menu(self, action):
        """Main menu where the user chooses what to do.

        :param action: (int) one of the action constants of class Distrib
        :return: (object) data associated with the chosen action
        """
        raise Exception("Menu: Can call this function in this state")

    def attente_compt_choisit(self, acount_number):
        """Fetch the data of an account.

        :param acount_number: (string) account number
        :return: (dict) account info as a dictionary [number, balance, operations]
        """
        raise Exception("Attente compt Choisit: Can call this function in this state")

    def compt_afficher(self):
        """Return to the menu once account consultation is finished."""
        raise Exception("Compt afficher: Can call this function in this state")

    def attente_information_transfer(self, acount_number, credit_to_transfer):
        """Record the transfer details entered by the user, pending confirmation.

        :param acount_number: (int) account number to credit
        :param credit_to_transfer: (float) amount to transfer
        """
        raise Exception("Attente information virement: Can call this function in this state")

    def confimer_le_virement(self, confirm_transfer):
        """The user confirmed the entered details: perform the transfer.

        :param confirm_transfer: (bool) True confirms the transfer
        :return: (bool) True if the transfer was carried out
        """
        raise Exception("Confirmer le virement: Can call this function in this state")
|
[
"swan.blanc.pro@gmail.com"
] |
swan.blanc.pro@gmail.com
|
781f1bed425ed743952b93b27a6dea0e2e1a1bad
|
a78e2aa069c38bb197a023df179d0c7e3f4c8469
|
/Button.py
|
8dd397c672b7d2b5da43f5e93177746ba2b64e14
|
[] |
no_license
|
KouhouMohamed/pythonProject
|
74f6fa1051e109538d77904cc8a54da2c0a8d8ac
|
662e10887bc4272e28a56bc43e953cf1492da5e8
|
refs/heads/master
| 2023-01-06T04:18:20.244340
| 2020-11-07T09:47:09
| 2020-11-07T09:47:09
| 310,811,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from tkinter import *
from tkinter import ttk #tkk is a class where buttons
def main():
    """Build a tiny Tk window: an entry plus a button that prints and clears it."""
    root = Tk()  # create the root window
    but1 = ttk.Button(root, text="GetText")
    Ent1 = ttk.Entry(root, width=30)
    Ent1.pack()
    but1.pack()  # add the button to the root window
    def ButtClick():
        # echo the entry's content, then clear it from start to end
        print(Ent1.get())
        Ent1.delete(0,END)
    but1.config(command=ButtClick)
    # decorate the button with an icon loaded from the working directory
    Logo = PhotoImage(file='help.png')
    Logo_r=Logo.subsample(10,10)  # shrink by keeping every 10th pixel
    # NOTE(review): zoom() returns a new image and this result is discarded,
    # so the call has no effect -- confirm whether it was meant to be assigned.
    Logo_r.zoom(15,20)
    but1.config(image=Logo_r,compound=LEFT)
    root.mainloop()  # enter the Tk event loop (blocks until the window closes)
if __name__ == '__main__':main()
|
[
"m.kouhou-etu@enset-media.ac.ma"
] |
m.kouhou-etu@enset-media.ac.ma
|
df952844481362845f3f8fd712d4e353b5c9b969
|
cbe4c2c2d163d2e5c611a77258ec1eb2e92b6479
|
/api/migrations/0006_auto__add_field_configset_delta_name__del_unique_configset_hwtype_id_c.py
|
ca28f0e726f18e3e7cf855cddfb86694ed061bf4
|
[] |
no_license
|
radhakrishnaa/DCP
|
20bcd6ce8143b5011310c42be858d139fb0cfa7a
|
c7970393811ef6686aafa4a49b96115b05ac86b6
|
refs/heads/main
| 2023-08-14T03:47:14.841160
| 2021-09-13T14:40:50
| 2021-09-13T14:40:50
| 406,011,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,862
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: make ConfigSet.delta_name part of the uniqueness key.

    Adds the nullable ``delta_name`` column to ``config_set`` and replaces
    the (hwtype, category, region, carrier) unique constraint with one that
    also includes ``delta_name``.
    """

    def forwards(self, orm):
        # Removing unique constraint on 'ConfigSet', fields ['hwtype_id', 'category_id', 'region_id', 'carrier_id']
        db.delete_unique('config_set', ['hwtype_id', 'category_id', 'region_id', 'carrier_id'])

        # Adding field 'ConfigSet.delta_name'
        db.add_column('config_set', 'delta_name',
                      self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
                      keep_default=False)

        # Adding unique constraint on 'ConfigSet', fields ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id']
        db.create_unique('config_set', ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id'])

    def backwards(self, orm):
        # Exact mirror of forwards(): drop the new constraint and column,
        # then restore the original four-column unique constraint.
        # Removing unique constraint on 'ConfigSet', fields ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id']
        db.delete_unique('config_set', ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id'])

        # Deleting field 'ConfigSet.delta_name'
        db.delete_column('config_set', 'delta_name')

        # Adding unique constraint on 'ConfigSet', fields ['hwtype_id', 'category_id', 'region_id', 'carrier_id']
        db.create_unique('config_set', ['hwtype_id', 'category_id', 'region_id', 'carrier_id'])

    # Frozen ORM snapshot South uses to materialise `orm` above.
    # Auto-generated -- do not edit by hand.
    models = {
        u'api.carrier': {
            'Meta': {'ordering': "['code']", 'object_name': 'Carrier', 'db_table': "'carrier'"},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'old_code': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
        },
        u'api.cloudenv': {
            'Meta': {'ordering': "['order', 'short_name']", 'object_name': 'CloudEnv', 'db_table': "'cloud_env'"},
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'env_type': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'network_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
        },
        u'api.configset': {
            'Meta': {'ordering': "['category_id', 'hwtype_id', 'carrier_id', 'region_id']", 'unique_together': "(('category_id', 'hwtype_id', 'carrier_id', 'region_id', 'delta_name'),)", 'object_name': 'ConfigSet', 'db_table': "'config_set'"},
            'carrier_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Carrier']", 'null': 'True', 'db_column': "'carrier_id'", 'blank': 'True'}),
            'category_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingCategory']", 'db_column': "'category_id'"}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'delta_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'fallback_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigSet']", 'null': 'True', 'db_column': "'fallback_id'", 'blank': 'True'}),
            'hwtype_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Hwtype']", 'null': 'True', 'db_column': "'hwtype_id'", 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'region_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Region']", 'null': 'True', 'db_column': "'region_id'", 'blank': 'True'})
        },
        u'api.configsetting': {
            'Meta': {'object_name': 'ConfigSetting', 'db_table': "'config_setting'"},
            'config_version_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigVersion']", 'db_column': "'config_version_id'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'setting_value_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingValue']", 'db_column': "'setting_value_id'"})
        },
        u'api.configversion': {
            'Meta': {'object_name': 'ConfigVersion', 'db_table': "'config_version'"},
            'approved': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'approver_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_approver'", 'null': 'True', 'db_column': "'approver_id'", 'to': u"orm['api.User']"}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'committed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'committer_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_committer'", 'null': 'True', 'db_column': "'committer_id'", 'to': u"orm['api.User']"}),
            'config_set_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigSet']", 'db_column': "'config_set_id'"}),
            'fallback_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigVersion']", 'null': 'True', 'db_column': "'fallback_id'", 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_editor_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_last_editor'", 'null': 'True', 'db_column': "'last_editor_id'", 'to': u"orm['api.User']"}),
            'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'publisher_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_publisher'", 'null': 'True', 'db_column': "'publisher_id'", 'to': u"orm['api.User']"}),
            'setting_value': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['api.SettingValue']", 'null': 'True', 'through': u"orm['api.ConfigSetting']", 'blank': 'True'}),
            'version_number': ('django.db.models.fields.IntegerField', [], {})
        },
        u'api.envtransform': {
            'Meta': {'ordering': "['order', 'env_pat']", 'object_name': 'EnvTransform', 'db_table': "'env_transform'"},
            'carrier_region_pat': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'env_pat': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'hwtype_pat': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {}),
            'setting_name_pat': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'value_pat': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'value_sub': ('django.db.models.fields.CharField', [], {'max_length': '8000', 'null': 'True', 'blank': 'True'})
        },
        u'api.hwtype': {
            'Meta': {'ordering': "['code']", 'object_name': 'Hwtype', 'db_table': "'hwtype'"},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'internal_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'marketing_name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'model_number': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
        },
        u'api.region': {
            'Meta': {'ordering': "['code']", 'object_name': 'Region', 'db_table': "'region'"},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
        },
        u'api.settingcategory': {
            'Meta': {'ordering': "['name']", 'object_name': 'SettingCategory', 'db_table': "'setting_category'"},
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
        },
        u'api.settingdef': {
            'Meta': {'ordering': "['group', 'order', 'display_name']", 'object_name': 'SettingDef', 'db_table': "'setting_def'"},
            'category_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingCategory']", 'db_column': "'category_id'"}),
            'datatype': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rules': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'short_help': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
        },
        u'api.settingvalue': {
            'Meta': {'object_name': 'SettingValue', 'db_table': "'setting_value'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'setting_def_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingDef']", 'db_column': "'setting_def_id'"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '8000', 'null': 'True', 'blank': 'True'})
        },
        u'api.user': {
            'Meta': {'ordering': "['username']", 'object_name': 'User', 'db_table': "'user'"},
            'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'approver': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'editor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publisher': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
        }
    }

    complete_apps = ['api']
|
[
"rtalluri@motorola.com"
] |
rtalluri@motorola.com
|
1d6007a5ebcba5fca71c8d3808860c34ac1f9ede
|
0f0f8b3b027f412930ca1890b0666538358a2807
|
/dotop/addons/base/ir/ir_filters.py
|
7e792068539ec5262791dfa23e1034b0a6500c7e
|
[] |
no_license
|
konsoar/dotop_pos_v11
|
741bd5ca944dfd52eb886cab6f4b17b6d646e131
|
576c860917edd25661a72726d0729c769977f39a
|
refs/heads/master
| 2021-09-06T13:25:34.783729
| 2018-02-07T02:11:12
| 2018-02-07T02:11:12
| 111,168,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,584
|
py
|
# -*- coding: utf-8 -*-
# Part of dotop. See LICENSE file for full copyright and licensing details.
import ast
from dotop import api, fields, models, _
from dotop.exceptions import UserError
class IrFilters(models.Model):
    # Saved search filters: a filter is private (user_id set) or public
    # (user_id empty), and may be scoped to one action or global (action_id empty).
    _name = 'ir.filters'
    _description = 'Filters'
    _order = 'model_id, name, id desc'

    name = fields.Char(string='Filter Name', translate=True, required=True)
    user_id = fields.Many2one('res.users', string='User', ondelete='cascade', default=lambda self: self._uid,
                              help="The user this filter is private to. When left empty the filter is public "
                                   "and available to all users.")
    domain = fields.Text(default='[]', required=True)
    context = fields.Text(default='{}', required=True)
    sort = fields.Text(default='[]', required=True)
    model_id = fields.Selection(selection='_list_all_models', string='Model', required=True)
    is_default = fields.Boolean(string='Default filter')
    action_id = fields.Many2one('ir.actions.actions', string='Action', ondelete='cascade',
                                help="The menu action this filter applies to. "
                                     "When left empty the filter applies to all menus "
                                     "for this model.")
    active = fields.Boolean(default=True)

    @api.model
    def _list_all_models(self):
        # (model, name) pairs for the model_id Selection field
        self._cr.execute("SELECT model, name FROM ir_model ORDER BY name")
        return self._cr.fetchall()

    @api.multi
    def copy(self, default=None):
        # suffix the name so the copy does not collide with the unique index
        self.ensure_one()
        default = dict(default or {}, name=_('%s (copy)') % self.name)
        return super(IrFilters, self).copy(default)

    @api.multi
    def _get_eval_domain(self):
        # parse the stored textual domain into a Python list
        self.ensure_one()
        return ast.literal_eval(self.domain)

    @api.model
    def _get_action_domain(self, action_id=None):
        """Return a domain component for matching filters that are visible in the
           same context (menu/view) as the given action."""
        if action_id:
            # filters specific to this menu + global ones
            return [('action_id', 'in', [action_id, False])]
        # only global ones
        return [('action_id', '=', False)]

    @api.model
    def get_filters(self, model, action_id=None):
        """Obtain the list of filters available for the user on the given model.

        :param action_id: optional ID of action to restrict filters to this action
            plus global filters. If missing only global filters are returned.
            The action does not have to correspond to the model, it may only be
            a contextual action.
        :return: list of :meth:`~osv.read`-like dicts containing the
            ``name``, ``is_default``, ``domain``, ``user_id`` (m2o tuple),
            ``action_id`` (m2o tuple) and ``context`` of the matching ``ir.filters``.
        """
        # available filters: private filters (user_id=uid) and public filters (uid=NULL),
        # and filters for the action (action_id=action_id) or global (action_id=NULL)
        action_domain = self._get_action_domain(action_id)
        filters = self.search(action_domain + [('model_id', '=', model), ('user_id', 'in', [self._uid, False])])
        user_context = self.env.user.context_get()
        return filters.with_context(user_context).read(['name', 'is_default', 'domain', 'context', 'user_id', 'sort'])

    @api.model
    def _check_global_default(self, vals, matching_filters):
        """ _check_global_default(dict, list(dict), dict) -> None

        Checks if there is a global default for the model_id requested.

        If there is, and the default is different than the record being written
        (-> we're not updating the current global default), raise an error
        to avoid users unknowingly overwriting existing global defaults (they
        have to explicitly remove the current default before setting a new one)

        This method should only be called if ``vals`` is trying to set
        ``is_default``

        :raises dotop.exceptions.UserError: if there is an existing default and
            we're not updating it
        """
        domain = self._get_action_domain(vals.get('action_id'))
        defaults = self.search(domain + [
            ('model_id', '=', vals['model_id']),
            ('user_id', '=', False),
            ('is_default', '=', True),
        ])
        if not defaults:
            return
        if matching_filters and (matching_filters[0]['id'] == defaults.id):
            return
        raise UserError(_("There is already a shared filter set as default for %(model)s, delete or change it before setting a new default") % {'model': vals.get('model_id')})

    @api.model
    @api.returns('self', lambda value: value.id)
    def create_or_replace(self, vals):
        # upsert on the (name, model, user) triple; also enforces the
        # single-default-per-scope invariant before writing
        action_id = vals.get('action_id')
        current_filters = self.get_filters(vals['model_id'], action_id)
        matching_filters = [f for f in current_filters
                            if f['name'].lower() == vals['name'].lower()
                            # next line looks for matching user_ids (specific or global), i.e.
                            # f.user_id is False and vals.user_id is False or missing,
                            # or f.user_id.id == vals.user_id
                            if (f['user_id'] and f['user_id'][0]) == vals.get('user_id')]

        if vals.get('is_default'):
            if vals.get('user_id'):
                # Setting new default: any other default that belongs to the user
                # should be turned off
                domain = self._get_action_domain(action_id)
                defaults = self.search(domain + [
                    ('model_id', '=', vals['model_id']),
                    ('user_id', '=', vals['user_id']),
                    ('is_default', '=', True),
                ])
                if defaults:
                    defaults.write({'is_default': False})
            else:
                self._check_global_default(vals, matching_filters)

        # When a filter exists for the same (name, model, user) triple, we simply
        # replace its definition (considering action_id irrelevant here)
        if matching_filters:
            matching_filter = self.browse(matching_filters[0]['id'])
            matching_filter.write(vals)
            return matching_filter

        return self.create(vals)

    _sql_constraints = [
        # Partial constraint, complemented by unique index (see below). Still
        # useful to keep because it provides a proper error message when a
        # violation occurs, as it shares the same prefix as the unique index.
        ('name_model_uid_unique', 'unique (name, model_id, user_id, action_id)', 'Filter names must be unique'),
    ]

    @api.model_cr_context
    def _auto_init(self):
        result = super(IrFilters, self)._auto_init()
        # Use unique index to implement unique constraint on the lowercase name (not possible using a constraint)
        self._cr.execute("DROP INDEX IF EXISTS ir_filters_name_model_uid_unique_index")  # drop old index w/o action
        self._cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_filters_name_model_uid_unique_action_index'")
        if not self._cr.fetchone():
            self._cr.execute("""CREATE UNIQUE INDEX "ir_filters_name_model_uid_unique_action_index" ON ir_filters
                                (lower(name), model_id, COALESCE(user_id,-1), COALESCE(action_id,-1))""")
        return result
|
[
"Administrator@20nuo003-PC"
] |
Administrator@20nuo003-PC
|
b2e9aef98ce8e65f58c90611607ae2f1481b8d51
|
3bbf917e4525d84f4c42752cda3b072d83fbd77d
|
/Labs/Persisting Data/MoviesItemOps06.py
|
88a449484d1f16322df20543e2fe93b17deb4983
|
[] |
no_license
|
renan-suetsugu/WorkshopPythonOnAWS
|
2fe891efe779802bdf497ce57c9a042886fbe3a2
|
0dee38d6cb24f5a33c5ac48409c6112fb57bab0a
|
refs/heads/main
| 2023-08-03T07:45:07.785458
| 2021-09-08T19:21:37
| 2021-09-08T19:21:37
| 400,149,268
| 0
| 0
| null | 2021-09-08T19:17:40
| 2021-08-26T11:47:11
|
Python
|
UTF-8
|
Python
| false
| false
| 993
|
py
|
import boto3
from botocore.exceptions import ClientError
import json
import decimal
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that serialises decimal.Decimal (as returned by DynamoDB).

    Fractional Decimals are emitted as floats, whole-valued ones as ints;
    any other unsupported type falls through to the base class.
    """

    def default(self, o):
        if not isinstance(o, decimal.Decimal):
            # let JSONEncoder raise its usual TypeError for unknown types
            return super(DecimalEncoder, self).default(o)
        # keep the original `o % 1 > 0` test verbatim: Decimal remainder
        # carries the sign of the dividend, so negatives behave identically
        return float(o) if o % 1 > 0 else int(o)
# Delete one item from the DynamoDB 'Movies' table by its (year, title) key.
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb.Table('Movies')

# key of the item to delete
title = "The Big New Movie"
year = 2015

print("Tentando uma exclução condicional...")

try:
    response = table.delete_item(
        Key={
            'year': year,
            'title': title
        },
    )
except ClientError as e:
    # surface only a failed conditional check; re-raise anything else.
    # NOTE(review): no ConditionExpression is passed to delete_item, so this
    # branch seems unreachable -- confirm whether a condition was intended.
    if e.response['Error']['Code'] == "ConditionalCheckFailedException":
        print(e.response['Error']['Message'])
    else:
        raise
else:
    # success path: pretty-print the response (Decimals via DecimalEncoder)
    print("Item deletado com sucesso:")
    print(json.dumps(response, indent=4, cls=DecimalEncoder))
|
[
"noreply@github.com"
] |
noreply@github.com
|
1ff773919aec1f3c3dc117cc8f3db600db5c9e89
|
ad5b4790cf04b65f93729c56961d2feb3c6194cb
|
/tools/cpplint/setup.py
|
030ea14ef2092c83e99ebe0ecf65808f32aac0ba
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
BoogeeDoo/mt19937
|
5b795e1f7221ef5a331824e745dc89610ead1f7e
|
56f0f3f80cee8ec76d08c84a413b9dfc8928b8f7
|
refs/heads/master
| 2023-07-19T21:41:39.414715
| 2022-06-01T14:45:23
| 2022-06-01T14:45:23
| 117,514,446
| 4
| 1
|
MIT
| 2022-11-20T13:19:26
| 2018-01-15T07:53:45
|
C++
|
UTF-8
|
Python
| false
| false
| 2,955
|
py
|
#! /usr/bin/env python
from setuptools import setup, Command
from subprocess import check_call
from distutils.spawn import find_executable
import cpplint as cpplint
class Cmd(Command):
    '''
    Superclass for other commands to run via setup.py, declared in setup.cfg.
    These commands will auto-install setup_requires in a temporary folder.
    '''
    # single option: which executable the command should invoke
    user_options = [
        ('executable', 'e', 'The executable to use for the command')
    ]

    def initialize_options(self):
        # resolve the class-level default (e.g. 'pylint') to an absolute path
        self.executable = find_executable(self.executable)

    def finalize_options(self):
        pass

    def execute(self, *k):
        # run the resolved executable with the given arguments;
        # check_call raises CalledProcessError on a non-zero exit status
        check_call((self.executable,) + k)
class Lint(Cmd):
    '''run with python setup.py lint'''
    description = 'Run linting of the code'
    user_options = Cmd.user_options + [
        ('jobs', 'j', 'Use multiple processes to speed up the linting')
    ]
    executable = 'pylint'

    def run(self):
        # lints cpplint.py itself with pylint.
        # NOTE(review): the 'jobs' option declared above is never forwarded
        # to the pylint invocation -- confirm whether that is intended.
        self.execute('cpplint.py')
# some pip versions bark on comments (e.g. on travis)
def read_without_comments(filename):
    """Return the non-empty, non-comment lines of *filename* as a list.

    Blank lines and lines starting with '#' are dropped.

    :param filename: path of the requirements-style text file to read
    :return: list of surviving lines, in file order
    """
    # `with` closes the handle even on error -- the original leaked it
    with open(filename) as f:
        return [line for line in f.read().splitlines()
                if line and not line.startswith('#')]
# read the test requirements once; setup() uses them for both
# tests_require and the [test]/[dev] extras below
test_required = read_without_comments('test-requirements')

setup(name='cpplint',
      version=cpplint.__VERSION__,
      py_modules=['cpplint'],
      # generate platform specific start script
      entry_points={
          'console_scripts': [
              'cpplint = cpplint:main'
          ]
      },
      install_requires=[],
      url='https://github.com/cpplint/cpplint',
      download_url='https://github.com/cpplint/cpplint',
      keywords=['lint', 'python', 'c++'],
      maintainer='cpplint Developers',
      maintainer_email='see_github@nospam.com',
      classifiers=['Programming Language :: Python',
                   'Programming Language :: Python :: 2',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.6',
                   'Programming Language :: Python :: 3.7',
                   'Programming Language :: Python :: 3.8',
                   'Programming Language :: C++',
                   'Development Status :: 5 - Production/Stable',
                   'Environment :: Console',
                   'Topic :: Software Development :: Quality Assurance',
                   'License :: Freely Distributable'],
      description='Automated checker to ensure C++ files follow Google\'s style guide',
      long_description=open('README.rst').read(),
      license='BSD-3-Clause',
      setup_requires=[
          "pytest-runner"
      ],
      tests_require=test_required,
      # extras_require allow pip install .[dev]
      extras_require={
          'test': test_required,
          'dev': read_without_comments('dev-requirements') + test_required
      },
      # make `python setup.py lint` available (see Lint class above)
      cmdclass={
          'lint': Lint
      })
|
[
"i@2333.moe"
] |
i@2333.moe
|
e4d288a30baec61e2e198b96b3163e0cf87504db
|
b62b673d9ade27f3e924f822d5b075e38ae28aa1
|
/tag-generator.py
|
0b3a2f6604bfbf6b8e487b1159ac54c2965410c0
|
[
"MIT"
] |
permissive
|
Bhupesh-V/Bhupesh-V.github.io
|
7cad5f3dac12ecab9613780713a18fd9fb466ac2
|
8efd2afe3a5e76df45caf796222a0e498e569ed6
|
refs/heads/master
| 2023-05-29T01:27:50.233594
| 2023-04-30T13:14:16
| 2023-04-30T13:14:16
| 182,211,988
| 3
| 2
|
MIT
| 2019-08-27T21:10:42
| 2019-04-19T06:13:58
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,349
|
py
|
#!/usr/bin/env python
'''
tag_generator.py
Copyright 2017 Long Qian
Contact: lqian8@jhu.edu
Source: https://github.com/qian256/qian256.github.io/blob/master/tag_generator.py
This script creates tags for your Jekyll blog hosted by Github page.
No plugins required.
'''
import glob
import os

post_dir = '_posts/'
tag_dir = 'tag/'


def collect_tags(post_dir=post_dir):
    """Scan each post's YAML front matter and return the set of all tags.

    Only the region between the opening and closing '---' markers is
    inspected; the first line starting with 'tags:' contributes its
    space-separated values for that post.
    """
    tags = set()
    for filename in glob.glob(post_dir + '*md'):
        # utf8 so posts with non-ASCII front matter don't crash the scan
        with open(filename, 'r', encoding='utf8') as f:
            in_front_matter = False
            for line in f:
                stripped = line.strip()
                if in_front_matter:
                    fields = stripped.split()
                    # Guard `fields` first: a blank line inside the front
                    # matter crashed the old `current_tags[0]` lookup.
                    if fields and fields[0] == 'tags:':
                        tags.update(fields[1:])
                        break
                if stripped == '---':
                    if in_front_matter:
                        break  # closing marker reached without a tags: line
                    in_front_matter = True
    return tags


def write_tag_pages(tags, tag_dir=tag_dir):
    """Rewrite tag_dir so it contains exactly one stub page per tag."""
    # Drop stale pages first so removed tags disappear from the site.
    for old_page in glob.glob(tag_dir + '*.md'):
        os.remove(old_page)
    if not os.path.exists(tag_dir):
        os.makedirs(tag_dir)
    for tag in tags:
        page = tag_dir + tag + '.md'
        # 'w' is safe here: any previous page was deleted above.
        with open(page, 'w') as f:
            f.write('---\nlayout: tagpage\ntitle: "Tag: ' + tag
                    + '"\ntag: ' + tag + '\nrobots: noindex\n---\n')


def main():
    tags = collect_tags()
    write_tag_pages(tags)
    print("Tags generated, count", len(tags))


if __name__ == '__main__':
    # Guarded so importing this module no longer runs the generator.
    main()
|
[
"varshneybhupesh@gmail.com"
] |
varshneybhupesh@gmail.com
|
6ae2804678615a3a1654175705d975799f861089
|
0b67530ca1ed53251c343b38332ea7f61c18c1c5
|
/cmd123.py
|
84d2c5a452f8cf1d8fddb2175c5cc96c0b61a40f
|
[] |
no_license
|
NikhilChaudhari11/nik1
|
c4b27d305956e1560d333f962e0ee8db7760fa1a
|
fc129980c6f484e0c6f797fa3d20c4c0e095ef74
|
refs/heads/master
| 2020-12-11T09:07:47.073064
| 2018-01-12T17:21:49
| 2018-01-12T17:21:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
import re
import time
import os


def cmd1(line):
    """Run each non-comment line of *line* as a shell command via os.popen.

    Blank and whitespace-only lines are skipped, as are lines whose first
    non-space character is '#'.  Each command is bracketed by short sleeps,
    preserving the original pacing.

    SECURITY NOTE: input goes straight to the shell -- never feed this
    untrusted text.
    """
    def _executable_lines(chunks):
        # Keep only stripped, non-empty, non-comment lines.  The original
        # compared type(i) against the string 'None' (always true) and
        # crashed with IndexError on whitespace-only lines; the truthiness
        # test below handles both correctly.
        kept = []
        for raw in chunks:
            text = raw.strip()
            if text and not text.startswith('#'):
                kept.append(text)
        return kept

    for command in _executable_lines(re.split("\n+", line)):
        time.sleep(0.15)
        os.popen(command)
        time.sleep(0.15)
|
[
"nikhilc11@gmail.com"
] |
nikhilc11@gmail.com
|
1a7193f2ab76638143eedaff1d1f49fa6a2291d0
|
b0504df295f3738827184f1aed86b48c0303e7ca
|
/data/pdbbind/example_conjoint_pdb/ecfp-pdb-refined2019-pocket.py
|
7e4cc1038cb051f05001a762b7f2bed7a465b735
|
[] |
no_license
|
jank3/AlogP-DL
|
3c47098cff36551518eb23629ceca809b4e366ea
|
e4029cc76dce5b196c4eebe4a66a560b7fe14e0c
|
refs/heads/master
| 2023-03-18T17:47:40.055718
| 2021-02-28T07:49:46
| 2021-02-28T07:49:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import MACCSkeys

# Load the ligand and its cleaned binding pocket from PDB files.
ligand = Chem.MolFromPDBFile('11gs_ligand.pdb')
pocketr1 = Chem.MolFromPDBFile('11gs_pocket_clean.pdb')

# 1024-bit Morgan (radius-2) fingerprints for both structures.
ligand_fp = AllChem.GetMorganFingerprintAsBitVect(ligand, 2, nBits=1024)
pocket_fp = AllChem.GetMorganFingerprintAsBitVect(pocketr1, 2, nBits=1024)

# Output row: PDB id followed by every bit of both fingerprints.
row = ['11gs']
row.extend(ligand_fp.ToBitString())
row.extend(pocket_fp.ToBitString())

with open('ecfp-pocket-refined2019.txt', 'a') as f:
    f.write(','.join(str(cell) for cell in row))
    f.write('\n')
exit()
|
[
"xlxsdu@163.com"
] |
xlxsdu@163.com
|
ddcaf6e28b533963df17ac8f9f13f4ce3c77631f
|
1581f1d66d6835b2c271295e3251c2dde239fec8
|
/payment_gateway/pg_utils.py
|
6036c701e7036016bef878326b20e168433fab8a
|
[] |
no_license
|
abinash-kumar/pythod
|
527659e3bdd161f9abcaaa9182dfe58044b3ff66
|
1469dc0cd9d6d72b2fe2e69f99542e470bea807b
|
refs/heads/master
| 2023-01-30T02:54:10.729606
| 2020-02-24T07:18:51
| 2020-02-24T07:18:51
| 242,670,715
| 0
| 0
| null | 2023-01-25T13:57:52
| 2020-02-24T07:16:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,318
|
py
|
from motor_product import prod_utils as mpu
from health_product import prod_utils as hpu
# Maps a health-product insurer's internal slug to the short code the
# payment gateway expects.  NOTE(review): only one insurer is mapped --
# presumably others are added as they are onboarded; verify with callers.
HEALTH_INSURER_SLUG = {
    'the-oriental-insurance-company-ltd': 'oriental'
}
def resolve_utils(transaction):
    """Return the prod_utils module that handles this transaction's product,
    or None when the product type is unrecognised."""
    product = transaction.product_type
    if product == 'motor':
        return mpu
    if product == 'health':
        return hpu
    return None
def process_payment_response(request, response, transaction):
    """Forward a gateway payment callback to the matching product handler.

    Motor transactions also need the vehicle-type slug and insurer slug;
    health transactions are keyed by the insurer id.  Returns whatever the
    product handler returns, or None for an unknown product type.
    """
    if transaction.product_type == 'motor':
        return mpu.process_payment_response(
            request,
            mpu.VEHICLE_TYPE_SLUG[transaction.vehicle_type],
            get_insurer_slug(transaction),
            response,
            transaction.transaction_id
        )
    elif transaction.product_type == 'health':
        # The health handler resolves everything from the insurer id and
        # the transaction itself; the request object is not passed on.
        return hpu.process_payment_response(
            transaction.slab.health_product.insurer.id,
            response,
            transaction
        )
    else:
        return None
def get_insurer_slug(transaction):
    """Return the gateway-facing insurer slug for this transaction,
    or None for unknown product types."""
    product = transaction.product_type
    if product == 'motor':
        return transaction.insurer.slug
    if product == 'health':
        # Health insurers are translated through the static mapping above.
        return HEALTH_INSURER_SLUG[transaction.slab.health_product.insurer.slug]
    return None
def get_error_url(transaction):
    """Return the product-specific failure-page URL for a transaction,
    or None for unknown product types."""
    product = transaction.product_type
    if product == 'motor':
        slug = mpu.VEHICLE_TYPE_SLUG[transaction.vehicle_type]
        return '/motor/' + slug + '/product/failure/'
    if product == 'health':
        return '/health-plan/payment/transaction/%s/failure/' % transaction.transaction_id
    return None
def todict(obj, classkey=None):
    """Recursively convert *obj* into plain dicts/lists/scalars.

    Dicts are converted value-by-value; objects exposing _ast() are
    re-dispatched on that AST; other iterables become lists; objects with
    a __dict__ become dicts of their public, non-callable attributes.
    When *classkey* is given, each converted object also records its class
    name under that key.  Anything else is returned unchanged.
    """
    if isinstance(obj, dict):
        return {k: todict(v, classkey) for k, v in obj.items()}
    elif hasattr(obj, "_ast"):
        return todict(obj._ast())
    elif isinstance(obj, (str, bytes)):
        # Strings are iterable in Python 3; without this guard a str would
        # recurse forever through the __iter__ branch below.
        return obj
    elif hasattr(obj, "__iter__"):
        return [todict(v, classkey) for v in obj]
    elif hasattr(obj, "__dict__"):
        # .items() replaces the Python-2-only .iteritems() the original used.
        data = dict([(key, todict(value, classkey))
                     for key, value in obj.__dict__.items()
                     if not callable(value) and not key.startswith('_')])
        if classkey is not None and hasattr(obj, "__class__"):
            data[classkey] = obj.__class__.__name__
        return data
    else:
        return obj
|
[
"abinashlv@AbinashSymboMac.local"
] |
abinashlv@AbinashSymboMac.local
|
8303bfe6a087932e19cea98165604264c4a08b9a
|
f1244dd9a531a40f61c57acf7a7f11f9b2c9cb1f
|
/3-2/CSE 3210 (Artificial Intelligence)/Lab 6/3. Triangle by star.py
|
2f99de4ed6a484bc04e642627c9f8dfd1a0cd4cf
|
[] |
no_license
|
SabirKhanAkash/RUET-Lab-Works
|
7e8be66e2d435108bed57b0335feb54d76ba23ef
|
3f094a5ca364d92ef42831e9f2dfb75c3baad506
|
refs/heads/master
| 2022-08-30T10:38:04.209187
| 2022-08-26T18:02:26
| 2022-08-26T18:02:26
| 240,317,974
| 2
| 3
| null | 2022-08-22T05:15:33
| 2020-02-13T17:17:38
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 226
|
py
|
def main():
n = int(input("Enter the value of n: "))
k = 2*n - 2
for i in range(0, n):
for j in range(0, k):
print(end=" ")
k = k - 1
for j in range(0, i+1):
print("* ", end="")
print("\r")
main()
|
[
"39434260+SabirKhanAkash@users.noreply.github.com"
] |
39434260+SabirKhanAkash@users.noreply.github.com
|
66f7f0ea804830c3090d2f78537d4b535a84b454
|
0bcfdf3ba3a0083a5254388bd8bd1d24bdb70e2a
|
/app/models.py
|
c44d7700e6af934c5cdb1ecf709c97b00c275fe9
|
[] |
no_license
|
Quinnan-Gill/microblog
|
1d216d6a6f49162080a13209e51d13c2c7169af7
|
c0b34a529b3434b1f29c139a90082c1b41b511ff
|
refs/heads/master
| 2022-12-10T15:29:17.055685
| 2019-09-12T03:01:58
| 2019-09-12T03:01:58
| 165,971,072
| 0
| 0
| null | 2022-12-08T01:33:25
| 2019-01-16T04:03:10
|
Python
|
UTF-8
|
Python
| false
| false
| 10,901
|
py
|
import json
import redis
import rq
import base64
import os
from time import time
from datetime import datetime, timedelta
from hashlib import md5
from time import time
from flask import current_app, url_for
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
from app import db, login
from app.search import add_to_index, remove_from_index, query_index
class SearchableMixin(object):
    """Mixin that keeps a model's rows mirrored in the full-text index."""

    @classmethod
    def search(cls, expression, page, per_page):
        # Ask the index for matching ids, then pull rows in index order.
        ids, total = query_index(cls.__tablename__, expression, page, per_page)
        if total == 0:
            return cls.query.filter_by(id=0), 0
        when = [(ident, position) for position, ident in enumerate(ids)]
        return cls.query.filter(cls.id.in_(ids)).order_by(
            db.case(when, value=cls.id)), total

    @classmethod
    def before_commit(cls, session):
        # Snapshot the pending changes; the session's lists are cleared
        # once the commit completes.
        session._changes = {
            'add': list(session.new),
            'update': list(session.dirty),
            'delete': list(session.deleted)
        }

    @classmethod
    def after_commit(cls, session):
        # Added and updated rows are (re)indexed; deleted rows are removed.
        for kind in ('add', 'update'):
            for obj in session._changes[kind]:
                if isinstance(obj, SearchableMixin):
                    add_to_index(obj.__tablename__, obj)
        for obj in session._changes['delete']:
            if isinstance(obj, SearchableMixin):
                remove_from_index(obj.__tablename__, obj)

    @classmethod
    def reindex(cls):
        # Rebuild the index from every row currently in the table.
        for obj in cls.query:
            add_to_index(cls.__tablename__, obj)
class PaginatedAPIMixin(object):
    """Mixin providing the standard paginated-collection JSON envelope."""

    @staticmethod
    def to_collection_dict(query, page, per_page, endpoint, **kwargs):
        resources = query.paginate(page, per_page, False)

        def page_url(page_number):
            # Endpoint URL for an arbitrary page of this same query.
            return url_for(endpoint, page=page_number, per_page=per_page,
                           **kwargs)

        meta = {
            'page': page,
            'per_page': per_page,
            'total_page': resources.pages,
            'total_items': resources.total
        }
        links = {
            'self': page_url(page),
            'next': page_url(page + 1) if resources.has_next else None,
            'prev': page_url(page - 1) if resources.has_prev else None
        }
        return {
            'items': [item.to_dict() for item in resources.items],
            '_meta': meta,
            '_links': links
        }
# Mirror every committed change into the search index (see SearchableMixin).
db.event.listen(db.session, 'before_commit', SearchableMixin.before_commit)
db.event.listen(db.session, 'after_commit', SearchableMixin.after_commit)
# Self-referential association table backing the follower/followed
# many-to-many relationship between users.
followers = db.Table(
    'followers',
    db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
    db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
class User(PaginatedAPIMixin, UserMixin, db.Model):
    """Application user: auth, profile, follower graph, private messaging,
    notifications, background tasks and API-token support."""

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(128))
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    about_me = db.Column(db.String(140))
    last_seen = db.Column(db.DateTime, default=datetime.utcnow)
    # Self-referential many-to-many through the `followers` table defined
    # above; the backref exposes this user's followers.
    followed = db.relationship(
        'User', secondary=followers,
        primaryjoin=(followers.c.follower_id == id),
        secondaryjoin=(followers.c.followed_id == id),
        backref=db.backref('followers', lazy='dynamic'), lazy='dynamic')
    messages_sent = db.relationship('Message', foreign_keys='Message.sender_id',
                                    backref='author', lazy='dynamic')
    messages_received = db.relationship('Message',
                                        foreign_keys='Message.recipient_id',
                                        backref='recipient', lazy='dynamic')
    last_message_read_time = db.Column(db.DateTime)
    notifications = db.relationship('Notification', backref='user',
                                    lazy='dynamic')
    tasks = db.relationship('Task', backref='user', lazy='dynamic')
    token = db.Column(db.String(32), index=True, unique=True)
    token_expiration = db.Column(db.DateTime)

    def __repr__(self):
        return '<User {}>'.format(self.username)

    def set_password(self, password):
        """Store a salted hash; the plaintext is never persisted."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password_hash, password)

    def avatar(self, size):
        """Gravatar URL for this user's email at the given pixel size."""
        digest = md5(self.email.lower().encode('utf-8')).hexdigest()
        return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
            digest, size)

    def follow(self, user):
        if not self.is_following(user):
            self.followed.append(user)

    def unfollow(self, user):
        if self.is_following(user):
            self.followed.remove(user)

    def is_following(self, user):
        return self.followed.filter(
            followers.c.followed_id == user.id).count() > 0

    def followed_posts(self):
        """Posts by followed users plus the user's own, newest first."""
        followed = Post.query.join(
            followers, (followers.c.followed_id == Post.user_id)).filter(
                followers.c.follower_id == self.id)
        own = Post.query.filter_by(user_id=self.id)
        return followed.union(own).order_by(Post.timestamp.desc())

    def get_reset_password_token(self, expires_in=600):
        """Signed, expiring JWT used in password-reset emails.

        Fixed: the original read `app.config`, but no `app` name exists in
        this module -- the config must go through the `current_app` proxy.
        NOTE(review): `.decode('utf-8')` assumes PyJWT < 2 (which returns
        bytes from jwt.encode); PyJWT >= 2 returns str -- verify version.
        """
        return jwt.encode(
            {'reset_password': self.id, 'exp': time() + expires_in},
            current_app.config['SECRET_KEY'], algorithm='HS256').decode('utf-8')

    def new_messages(self):
        """Count of messages received since the user last read them."""
        last_read_time = self.last_message_read_time or datetime(1900, 1, 1)
        return Message.query.filter_by(recipient=self).filter(
            Message.timestamp > last_read_time).count()

    def add_notification(self, name, data):
        # Replace any notification of the same name so clients only ever
        # see the latest payload.
        self.notifications.filter_by(name=name).delete()
        n = Notification(name=name, payload_json=json.dumps(data), user=self)
        db.session.add(n)
        return n

    def launch_task(self, name, description, *args, **kwargs):
        """Enqueue app.tasks.<name> on the RQ queue and record a Task row."""
        rq_job = current_app.task_queue.enqueue('app.tasks.' + name, self.id,
                                                *args, **kwargs)
        task = Task(id=rq_job.get_id(), name=name, description=description,
                    user=self)
        db.session.add(task)
        return task

    def get_tasks_in_progress(self):
        """All of this user's unfinished background tasks."""
        return Task.query.filter_by(user=self, complete=False).all()

    def get_task_in_progress(self, name):
        """The unfinished task with the given name, or None.

        Fixed: a duplicate `get_tasks_in_progress(self, name)` -- an exact
        copy of this method under the wrong name -- shadowed the list
        variant above and has been removed.
        """
        return Task.query.filter_by(name=name, user=self,
                                    complete=False).first()

    def to_dict(self, include_email=False):
        """JSON-serializable representation for the API; the email is only
        included when the requester is the user themselves."""
        data = {
            'id': self.id,
            'username': self.username,
            'last_seen': self.last_seen.isoformat() + 'Z',
            'about_me': self.about_me,
            'post_count': self.posts.count(),
            'follower_count': self.followers.count(),
            'followed_count': self.followed.count(),
            '_links': {
                'self': url_for('api.get_user', id=self.id),
                'followers': url_for('api.get_followers', id=self.id),
                'followed': url_for('api.get_followed', id=self.id),
                'avatar': self.avatar(128)
            }
        }
        if include_email:
            data['email'] = self.email
        return data

    def from_dict(self, data, new_user=False):
        """Populate the editable fields from an API payload; the password
        is only accepted when creating a new user."""
        for field in ['username', 'email', 'about_me']:
            if field in data:
                setattr(self, field, data[field])
        if new_user and 'password' in data:
            self.set_password(data['password'])

    def get_token(self, expires_in=3600):
        """Return a valid API token, reusing the current one while it still
        has at least a minute of life left."""
        now = datetime.utcnow()
        if self.token and self.token_expiration > now + timedelta(seconds=60):
            return self.token
        self.token = base64.b64encode(os.urandom(24)).decode('utf-8')
        self.token_expiration = now + timedelta(seconds=expires_in)
        db.session.add(self)
        return self.token

    def revoke_token(self):
        # Expire the token one second in the past.
        self.token_expiration = datetime.utcnow() - timedelta(seconds=1)

    @staticmethod
    def verify_reset_password_token(token):
        """Decode a reset token and return the matching User, or None.

        Fixed: `app.config` -> `current_app.config` (no `app` name exists
        here), and the bare `except:` is narrowed so SystemExit and
        KeyboardInterrupt are no longer swallowed.
        """
        try:
            id = jwt.decode(token, current_app.config['SECRET_KEY'],
                            algorithms=['HS256'])['reset_password']
        except Exception:
            return
        return User.query.get(id)

    @staticmethod
    def check_token(token):
        """Return the user owning this API token, or None if the token is
        unknown or expired."""
        user = User.query.filter_by(token=token).first()
        if user is None or user.token_expiration < datetime.utcnow():
            return None
        return user
# Flask-Login hook: load a user from the session-stored id on each request.
@login.user_loader
def load_user(id):
    return User.query.get(int(id))
class Post(SearchableMixin, db.Model):
    """A user's blog post; full-text searchable on its body."""
    # Fields mirrored into the search index by SearchableMixin.
    __searchable__ = ['body']
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.String(140))
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # Language code of the body (up to 5 chars) -- presumably set at
    # posting time by a caller elsewhere; TODO confirm.
    language = db.Column(db.String(5))

    def __repr__(self):
        return '<Post {}>'.format(self.body)
class Message(db.Model):
    """Private message between two users (sender -> recipient)."""
    id = db.Column(db.Integer, primary_key=True)
    sender_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    recipient_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    body = db.Column(db.String(150))
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)

    def __repr__(self):
        return '<Message {}>'.format(self.body)
class Notification(db.Model):
    """Per-user notification carrying an arbitrary JSON payload."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), index=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # Creation time as a Unix timestamp; `time` (the function) is the default.
    timestamp = db.Column(db.Float, index=True, default=time)
    payload_json = db.Column(db.Text)

    def get_data(self):
        # Deserialize the stored JSON payload.
        return json.loads(str(self.payload_json))
class Task(db.Model):
    """Background job handed to the RQ worker; `id` is the RQ job id."""
    id = db.Column(db.String(36), primary_key=True)
    name = db.Column(db.String(128), index=True)
    description = db.Column(db.String(128))
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    complete = db.Column(db.Boolean, default=False)

    def get_rq_job(self):
        # Returns None when the job is unknown to Redis or Redis is down.
        try:
            rq_job = rq.job.Job.fetch(self.id, connection=current_app.redis)
        except (redis.exceptions.RedisError, rq.exceptions.NoSuchJobError):
            return None
        return rq_job

    def get_process(self):
        # Progress percentage published by the worker via job.meta; a job
        # no longer in Redis is treated as 100% complete.
        # NOTE(review): name reads like "process" but returns progress.
        job = self.get_rq_job()
        return job.meta.get('progress', 0) if job is not None else 100
|
[
"quinnan.gill@gmail.com"
] |
quinnan.gill@gmail.com
|
79505b2c69220a3b4844e0e3ff6288faa3bd033b
|
006f73f4cc37dda59904a85d346186897f00834a
|
/sorteo/urls.py
|
3807308aa78b5b230eff8c9c0cdb1306d361e6ce
|
[] |
no_license
|
nathanbernal/sorteo_django
|
4ecd2cd85f5d2dcf7d0b825c5d6327ff080c3380
|
b8c4ea6b674b3cdff3cd5aed002222955c592a6e
|
refs/heads/main
| 2023-04-26T02:39:50.711438
| 2021-05-19T03:01:33
| 2021-05-19T03:01:33
| 368,727,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
from django.urls import include, path
from rest_framework import routers
from api import views
# DRF router: auto-generates list/detail routes for each registered viewset.
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
router.register(r'usuario', views.UsuarioViewSet)
router.register(r'activacion', views.ActivacionViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
    path('', include(router.urls)),
    # NOTE(review): '{email}' here is literal URL text, not a Django path
    # converter (<str:email>) -- confirm this prefix is intentional.
    path('api-auth/{email}', include('rest_framework.urls', namespace='rest_framework')),
]
|
[
"nathanbernal@gmail.com"
] |
nathanbernal@gmail.com
|
0e188befbac224d8224dc6e6649007c2d0ccc5b5
|
8b1dcac39acfcee0f573dc71d608671dea2062a2
|
/tools/hikyuu/interactive/draw/__init__.py
|
fcdb11396c845625805c5eebb3c406cd9deb7ab1
|
[
"MIT"
] |
permissive
|
eightwind/hikyuu
|
4c876170b1e298105e7eaf9675b310ad378dd9a4
|
4dab98a93e2a9847f77d615d6900067fbf90b73d
|
refs/heads/master
| 2021-08-26T05:32:39.813080
| 2017-11-21T18:59:16
| 2017-11-21T18:59:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33
|
py
|
# Placeholder version marker; per the string itself, this stub exists only
# so the pip distribution has an importable module.
__version__ = "Only for pip dist"
|
[
"fasiondog@163.com"
] |
fasiondog@163.com
|
af415894f66167bbebd63ee550eeff6774fea102
|
c9837ea5229fce8a13dc28b8efe583e6b1f80f06
|
/tests/acceptance/test_async.py
|
13e29325278b60432ce81e30cf21f808f32fc48d
|
[
"MIT"
] |
permissive
|
appetito/procrastinate
|
51de7e4e7e216514c4c417e0d496fdf968332092
|
5e47d99ede5fafc5717765ebde3e2782b131672a
|
refs/heads/master
| 2022-09-18T19:49:24.958649
| 2020-06-03T08:03:19
| 2020-06-03T08:03:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
import pytest
import procrastinate
# Run every coroutine test in this module under pytest's asyncio support.
pytestmark = pytest.mark.asyncio


@pytest.fixture
def pg_app(pg_connector):
    # Fresh procrastinate App bound to the PostgreSQL connector fixture.
    return procrastinate.App(connector=pg_connector)
async def test_defer(pg_app):
    """End-to-end: defer jobs through three different APIs, run the worker
    once, and check every job executed with the expected arguments."""
    sum_results = []
    product_results = []

    @pg_app.task(queue="default", name="sum_task")
    def sum_task(a, b):
        sum_results.append(a + b)

    @pg_app.task(queue="default", name="product_task")
    async def product_task(a, b):
        product_results.append(a * b)

    # Three equivalent ways to defer: directly, via configure(), and via
    # the app-level configure_task() lookup by name.
    await sum_task.defer_async(a=1, b=2)
    await sum_task.configure().defer_async(a=3, b=4)
    await pg_app.configure_task(name="sum_task").defer_async(a=5, b=6)
    await product_task.defer_async(a=3, b=4)

    # wait=False: drain the queue once instead of running forever.
    await pg_app.run_worker_async(queues=["default"], wait=False)

    assert sum_results == [3, 7, 11]
    assert product_results == [12]
|
[
"joachim.jablon@people-doc.com"
] |
joachim.jablon@people-doc.com
|
1572ed7e2b86b6dc9bc339d9cf970e352a1bdfa1
|
f5a2059897f30a77244c0e8426f54ad5bf0db0e3
|
/resources/store.py
|
82c62a12820e1ed584f44fccfd130f9033054a21
|
[] |
no_license
|
colemanGH319/stores-rest-api
|
c25a01199af1cfcfb3a899f7d4752c382c8870a8
|
44004f0f64a6bcae4fdc80245cb524b4d7958024
|
refs/heads/master
| 2020-04-01T15:04:55.172168
| 2018-10-17T19:14:47
| 2018-10-17T19:14:47
| 153,320,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
from flask_restful import Resource
from models.store import StoreModel
class Store(Resource):
    """REST resource for a single store, addressed by name."""

    def get(self, name):
        """Return the store as JSON, or a 404 payload when absent."""
        store = StoreModel.find_by_name(name)
        if store:
            return store.json()
        return {'message': 'Store not found.'}, 404

    def post(self, name):
        """Create a store; 400 on duplicates, 500 on storage failure."""
        if StoreModel.find_by_name(name):
            return {'message': "A store with the name '{}' already exists.".format(name)}, 400
        store = StoreModel(name)
        try:
            store.save_to_db()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; the 500 response is unchanged.
            return {'message': 'An error occurred while creating the store.'}, 500
        return store.json()

    def delete(self, name):
        """Delete the store if it exists.

        NOTE(review): the success message is returned even when no such
        store existed -- confirm callers rely on this idempotent reply.
        """
        store = StoreModel.find_by_name(name)
        if store:
            store.delete_from_db()
        return {'message': 'Store deleted'}
class StoreList(Resource):
    """REST resource listing every store."""

    def get(self):
        all_stores = StoreModel.query.all()
        return {'stores': [entry.json() for entry in all_stores]}
|
[
"coleman.matt319@gmail.com"
] |
coleman.matt319@gmail.com
|
c2a7eee1f1f4756acddc4b286a978d0c08f441ef
|
968970ca6a39c6cdc02cf8a79280630afa5ebc4f
|
/src/main/python/countTravelTime.py
|
d6476dd199a1c139cd1a6e86d4e433554f0a391c
|
[] |
no_license
|
jdcc2/mbdtraffic
|
91a5bc4347062971057eb9ec27bc40601240117e
|
ad2167cf5af63a0089f69d70f35340f68040b6ab
|
refs/heads/master
| 2021-01-20T01:04:19.137668
| 2017-01-25T13:37:06
| 2017-01-25T13:37:06
| 79,126,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
#!/usr/bin/env python
import sys
import os
import json
import pyspark
# add actual job
def doJob(rdd):
    """Filter a CSV traffic RDD down to rows with an average travel time.

    Rows are split on commas, rows flagged as data errors (or too short to
    carry the dataError column) are dropped, and only rows with a
    non-empty avgTravelTime survive.  Returns the filtered RDD.
    """
    print('traffic job')
    #Map column names to column indices
    # NOTE(review): 'ROADNAME,FIRST_NAME,SECND_NAME' is a single element
    # containing commas -- indices after it may be off relative to the
    # actual CSV header; verify against the source data.
    columns = ['measurementSiteReference','measurementSiteVersion','index','periodStart','periodEnd','numberOfIncompleteInputs','numberOfInputValuesused','minutesUsed','computationalMethod','standardDeviation','supplierCalculatedDataQuality','sCDQ_Low','sCDQ_SD','number_of_sCDQ','dataError','travelTimeType','avgVehicleFlow','avgVehicleSpeed','avgTravelTime','computationMethod','measurementEquipmentTypeUsed','measurementSiteName1','measurementSiteName2','measurementSiteNumberOfLanes', 'measurementSiteIdentification','measurementSide','accuracy','period','specificLane','specificVehicleCharacteristics','startLocatieForDisplayLat','startLocatieForDisplayLong','LocationCountryCode','LocationTableNumber','LocationTableVersion','alertCDirectionCoded','specificLocation','offsetDistance','LOC_TYPE','LOC_DES','ROADNUMBER','ROADNAME,FIRST_NAME,SECND_NAME','messageType','publicationTime','deducedNoTrafficMinutes','carriageway']
    columnToIndex = {}
    for index, column in enumerate(columns):
        columnToIndex[column] = index
    #print(columnToIndex)
    #Filter rows with data errors
    clean = rdd.map(lambda line: line.split(',')).filter(lambda row: len(row) > 18 and row[columnToIndex['dataError']] != '1')
    #total = clean.count()
    usable = clean.filter(lambda row: row[columnToIndex['avgTravelTime']] != '')
    # Triggers a full pass over the data just for the count.
    print("Row count with avgTravelTime: ", usable.count())
    return usable
def main():
    """Parse <in_dir> <out_dir> from argv, run the job, save the result."""
    # parse arguments
    in_dir, out_dir = sys.argv[1:]
    conf = pyspark.SparkConf().setAppName("%s %s %s" % (os.path.basename(__file__), in_dir, out_dir))
    sc = pyspark.SparkContext(conf=conf)
    # invoke job and put into output directory
    doJob(sc.textFile(in_dir)).saveAsTextFile(out_dir)

if __name__ == '__main__':
    main()
|
[
"jd@leetbook"
] |
jd@leetbook
|
a96020623f1f41176402c5c4583499aab4707dc0
|
7620448f67684c814121a6b772a824b792e43b5f
|
/utilities/annotate_from_genomic_features.py
|
f8ed6df46a00f868b17132d811bd70ae0311144d
|
[
"Apache-2.0"
] |
permissive
|
Sisov/AlignQC
|
2c2dd952d0d864a8d84daa86260b8ac5e8d1d9eb
|
f0677876408371ced09ba15b586489b9139828f4
|
refs/heads/master
| 2021-01-11T07:52:07.209342
| 2016-09-02T19:19:11
| 2016-09-02T19:19:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,263
|
py
|
#!/usr/bin/python
import sys, argparse, gzip, re, os, inspect
#bring in the folder to the path for our utilities
pythonfolder_loc = "../pylib"
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe() ))[0],pythonfolder_loc)))
if cmd_subfolder not in sys.path:
sys.path.insert(0,cmd_subfolder)
from Bio.Format.GPD import GPDStream
from Bio.Range import merge_ranges, GenomicRange, subtract_ranges, BedArrayStream, sort_ranges
from Bio.Stream import MultiLocusStream
def main(args):
  """Classify each aligned read as exon / intron / intergenic.

  Builds merged exon, transcript, intron and intergenic region sets from
  the reference annotation and chromosome lengths, overlaps every read
  against each set, and writes one line per read:
  name <tab> type <tab> exon_count <tab> read_length.
  """
  # ---- load chromosome lengths (plain or gzipped) ----
  inf = None
  chrlens = {}
  chrbed = []
  if re.search('\.gz$',args.chromosome_lengths):
    inf = gzip.open(args.chromosome_lengths)
  else:
    inf = open(args.chromosome_lengths)
  for line in inf:
    f = line.rstrip().split("\t")
    chrlens[f[0]] = f[1]
    chrbed.append(GenomicRange(f[0],1,f[1]))
  inf.close()
  # ---- read the annotation GPD: collect exon and transcript ranges ----
  inf = None
  exonbed = []
  txbed = []
  sys.stderr.write("Reading Exons\n")
  if re.search('\.gz$',args.annotation_gpd):
    inf = gzip.open(args.annotation_gpd)
  else:
    inf = open(args.annotation_gpd)
  gs = GPDStream(inf)
  for gpd in gs:
    exonbed += [x.get_range() for x in gpd.exons]
    txbed.append(gpd.get_range())
  inf.close()
  # ---- derive merged feature sets: intergenic = genome - transcripts,
  # introns = transcripts - exons ----
  sys.stderr.write("Merging "+str(len(txbed))+" transcripts\n")
  txbed = merge_ranges(txbed)
  sys.stderr.write(str(len(txbed))+" transcripts after merging\n")
  sys.stderr.write("Finding intergenic\n")
  intergenicbed = subtract_ranges(chrbed,txbed)
  sys.stderr.write("Found "+str(len(intergenicbed))+" intergenic regions\n")
  intergenicbp = sum([x.length() for x in intergenicbed])
  sys.stderr.write("Intergenic size: "+str(intergenicbp)+"\n")
  sys.stderr.write("Merging "+str(len(exonbed))+" exons\n")
  exonbed = merge_ranges(exonbed)
  sys.stderr.write(str(len(exonbed))+" exons after merging\n")
  sys.stderr.write("Finding introns\n")
  intronbed = subtract_ranges(txbed,exonbed)
  sys.stderr.write("Found "+str(len(intronbed))+" introns\n")
  chrbp = sum([x.length() for x in chrbed])
  sys.stderr.write("Genome size: "+str(chrbp)+"\n")
  txbp = sum([x.length() for x in txbed])
  sys.stderr.write("Tx size: "+str(txbp)+"\n")
  exonbp = sum([x.length() for x in exonbed])
  sys.stderr.write("Exon size: "+str(exonbp)+"\n")
  intronbp = sum([x.length() for x in intronbed])
  sys.stderr.write("Intron size: "+str(intronbp)+"\n")
  #sys.stderr.write(str(txbp+intergenicbp)+"\n")
  # ---- optionally dump each feature set as a BED file ----
  if args.output_beds:
    if not os.path.exists(args.output_beds): os.makedirs(args.output_beds)
    with open(args.output_beds+'/chrs.bed','w') as of1:
      for rng in chrbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
    with open(args.output_beds+'/exon.bed','w') as of1:
      for rng in exonbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
    with open(args.output_beds+'/intron.bed','w') as of1:
      for rng in intronbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
    with open(args.output_beds+'/intergenic.bed','w') as of1:
      for rng in intergenicbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
    with open(args.output_beds+'/tx.bed','w') as of1:
      for rng in txbed: of1.write("\t".join([str(x) for x in rng.get_bed_array()])+"\n")
  # ---- list all read names so unannotated reads can be reported ----
  inf = None
  if re.search('\.gz$',args.reads_gpd):
    inf = gzip.open(args.reads_gpd)
  else:
    inf = open(args.reads_gpd)
  reads = {}
  gs = GPDStream(inf)
  for gpd in gs:
    reads[gpd.get_gene_name()] = {}
  sys.stderr.write("Checking "+str(len(reads.keys()))+" Aligned Reads\n")
  #now we know all features we can annotate reads
  sys.stderr.write("Read through our reads and bed entries\n")
  sys.stderr.write("Annotate exons\n")
  exons = annotate_gpds(args,exonbed)
  exonnames = set(exons.keys())
  sys.stderr.write("Annotate intron\n")
  intron = annotate_gpds(args,intronbed)
  intronnames = set(intron.keys())
  sys.stderr.write("Annotate intergenic\n")
  intergenic = annotate_gpds(args,intergenicbed)
  intergenicnames = set(intergenic.keys())
  allnames = exonnames|intronnames|intergenicnames
  sys.stderr.write(str(len(allnames))+" reads attributed to a feature\n")
  vals = set(reads.keys())-allnames
  if len(vals) > 0:
    sys.stderr.write("WARNING unable to ascribe annotation to "+str(len(vals))+" reads\n")
  donenames = set()
  of = sys.stdout
  if args.output:
    if re.search('\.gz$',args.output):
      of = gzip.open(args.output,'w')
    else:
      of = open(args.output,'w')
  # ---- per read: compute the fraction overlapped by each feature class;
  # >=50% wins with priority exon > intron > intergenic, otherwise the
  # class with the largest fraction wins ----
  for name in allnames:
    exonfrac = 0
    intronfrac = 0
    intergenicfrac = 0
    readlen = 0
    exoncount = 0
    if name in exons:
      exonfrac = float(exons[name][1])/float(exons[name][0])
      readlen = exons[name][0]
      exoncount = exons[name][2]
    if name in intron:
      intronfrac = float(intron[name][1])/float(intron[name][0])
      readlen = intron[name][0]
      exoncount = intron[name][2]
    if name in intergenic:
      intergenicfrac = float(intergenic[name][1])/float(intergenic[name][0])
      readlen = intergenic[name][0]
      exoncount = intergenic[name][2]
    vals = {'exon':exonfrac,'intron':intronfrac,'intergenic':intergenicfrac}
    # NOTE(review): `type` shadows the builtin here.
    type = None
    if exonfrac >= 0.5:
      type = 'exon'
    elif intronfrac >= 0.5:
      type = 'intron'
    elif intergenicfrac >= 0.5:
      type = 'intergenic'
    else:
      type = sorted(vals.keys(),key=lambda x: vals[x])[-1]
    if vals[type] == 0:
      sys.stderr.write("WARNING trouble setting type\n")
    if not type: continue
    of.write(name+"\t"+type+"\t"+str(exoncount)+"\t"+str(readlen)+"\n")
  # NOTE(review): when no --output is given this closes sys.stdout.
  of.close()
def annotate_gpds(args, inputbed):
  """Overlap every read in args.reads_gpd with the *inputbed* features.

  Returns {read_name: [read_length, overlapped_bp, exon_count]} for each
  read overlapping at least one feature.
  """
  bas = BedArrayStream(sort_ranges(inputbed))
  # Support both gzipped and plain GPD input.
  inf = None
  if re.search(r'\.gz$', args.reads_gpd):
    inf = gzip.open(args.reads_gpd)
  else:
    # Fixed: the original opened `args.args.reads_gpd`, which raised
    # AttributeError whenever the input was not gzipped.
    inf = open(args.reads_gpd)
  gs = GPDStream(inf)
  mls = MultiLocusStream([gs, bas])
  results = {}
  for es in mls:
    [gpds, inbeds] = es.get_payload()
    # Skip loci where either reads or features are absent.
    if len(gpds) == 0 or len(inbeds) == 0:
      continue
    v = annotate_inner(gpds, inbeds)
    for res in v:
      results[res[0]] = res[1:]
  inf.close()
  return results
def annotate_inner(gpds, inbeds):
  """For each GPD entry overlapping any bed region, report
  [gene_name, read_length, overlapped_bp, exon_count]."""
  annotated = []
  for gpd in gpds:
    read_length = gpd.get_length()
    # Total overlap of every exon of this read with every bed region.
    overlap_bp = sum(
      bed.overlap_size(exon.get_range())
      for exon in gpd.exons
      for bed in inbeds
    )
    if overlap_bp > 0:
      annotated.append([gpd.get_gene_name(), read_length, overlap_bp,
                        gpd.get_exon_count()])
  return annotated
def do_inputs():
  """Define and parse this script's command-line arguments."""
  parser = argparse.ArgumentParser(description="Assign genomic features to reads based on where they majority of the read lies. In the event of a tie prioritize exon over intron and intron over intergenic.",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('reads_gpd',help="reads gpd")
  parser.add_argument('annotation_gpd',help="reference annotations gpd")
  parser.add_argument('chromosome_lengths',help="reference lengths table")
  # Optional directory: when set, the derived feature BEDs are saved too.
  parser.add_argument('--output_beds',help="save features")
  parser.add_argument('-o','--output',help="output results")
  args = parser.parse_args()
  return args
def external_cmd(cmd):
  """Run this module programmatically as if invoked with `cmd` on a shell.

  Temporarily swaps sys.argv so argparse sees the given command line; the
  original argv is restored even when the run raises.
  """
  cache_argv = sys.argv
  sys.argv = cmd.split()
  try:
    args = do_inputs()
    main(args)
  finally:
    # The original restored argv only on success, leaking the fake argv
    # into the process after any exception.
    sys.argv = cache_argv
# Command-line entry point.
if __name__=="__main__":
  args = do_inputs()
  main(args)
|
[
"jason.weirather@gmail.com"
] |
jason.weirather@gmail.com
|
770f1a9f35b1bc5cf04b2acf5eb206b60f5e0aa8
|
f2851c0d6125fc93f6dcd9c731180484bcf3299e
|
/Simple_baidu_baike/baike_spider/test.py
|
62c20de996f7529ee4beed2a7108f60b418a465b
|
[] |
no_license
|
FrankYang3110/Simple_baidu_baike_spider
|
da0b7a3478486d3fd36d46f04c01536d1751ecb8
|
a54161f20f002615d0454da4f6c63359b083fdee
|
refs/heads/master
| 2020-05-03T02:09:59.214644
| 2019-04-12T03:05:26
| 2019-04-12T03:05:26
| 178,360,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 814
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Scratch script: fetch one Baidu Baike page and extract its /item links
# with XPath (the commented-out regex version below does the same job).
# a = set()
# print(a is None)
# def get():
#     return
# a = get()
# print(a is None)
from fake_useragent import UserAgent
import re
import requests
from urllib.parse import urljoin
from lxml import etree
# Random User-Agent to avoid trivial bot blocking.
headers = {'User-Agent': UserAgent().random}
url = 'https://baike.baidu.com/item/Python/407313?fr=aladdin'
r = requests.get(url, headers=headers)
# Let requests guess the page encoding before reading .text.
r.encoding = r.apparent_encoding
base_url = r.url
html = r.text
tree = etree.HTML(html)
# All anchors whose href contains "/item" (relative encyclopedia links).
hrefs = tree.xpath('//a[contains(@href,"/item")]/@href')
# pattern = re.compile(r'href="(/item.*?)"')
# urls = re.findall(pattern, html)
# for url in urls:
#     new_url = urljoin(base_url, url)
#     print(new_url)
# href="/item/%E8%AE%A1%E7%AE%97%E6%9C%BA%E7%A8%8B%E5%BA%8F%E8%AE%BE%E8%AE%A1%E8%AF%AD%E8%A8%80/7073760"
|
[
"43460484+FrankYang3110@users.noreply.github.com"
] |
43460484+FrankYang3110@users.noreply.github.com
|
17fa82a9093701e46b8648bd51b5684c11c5f8c9
|
5d6365f4cc81272f8c481ee31f1111e8eca6dca5
|
/alipay/aop/api/domain/BizActionLogDTO.py
|
bdaee8dcf4791f2ea8f5f6ac64c0cb3184f154de
|
[
"Apache-2.0"
] |
permissive
|
barrybbb/alipay-sdk-python-all
|
9e99b56138e6ca9c0b236707c79899d396ac6f88
|
1b63620431d982d30d39ee0adc4b92463cbcee3c
|
refs/heads/master
| 2023-08-22T20:16:17.242701
| 2021-10-11T08:22:44
| 2021-10-11T08:22:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,378
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BizActionLogDTO(object):
    """Alipay API transfer object for one business-budget action log entry.

    Plain data holder: every field below is exposed through a trivial
    property/setter pair and serialized by to_alipay_dict().
    """

    def __init__(self):
        # All fields default to None until populated by the caller/parser.
        self._amount = None
        self._biz_budget_apply_code = None
        self._biz_budget_id = None
        self._biz_name = None
        self._biz_type = None
        self._biz_uk_id = None
        self._gmt_create = None
        self._gmt_modified = None
        self._id = None
        self._modify_type = None
    # Boilerplate accessors: each property simply mirrors its private
    # backing attribute; the setters perform no validation or conversion.
    @property
    def amount(self):
        return self._amount
    @amount.setter
    def amount(self, value):
        self._amount = value
    @property
    def biz_budget_apply_code(self):
        return self._biz_budget_apply_code
    @biz_budget_apply_code.setter
    def biz_budget_apply_code(self, value):
        self._biz_budget_apply_code = value
    @property
    def biz_budget_id(self):
        return self._biz_budget_id
    @biz_budget_id.setter
    def biz_budget_id(self, value):
        self._biz_budget_id = value
    @property
    def biz_name(self):
        return self._biz_name
    @biz_name.setter
    def biz_name(self, value):
        self._biz_name = value
    @property
    def biz_type(self):
        return self._biz_type
    @biz_type.setter
    def biz_type(self, value):
        self._biz_type = value
    @property
    def biz_uk_id(self):
        return self._biz_uk_id
    @biz_uk_id.setter
    def biz_uk_id(self, value):
        self._biz_uk_id = value
    @property
    def gmt_create(self):
        return self._gmt_create
    @gmt_create.setter
    def gmt_create(self, value):
        self._gmt_create = value
    @property
    def gmt_modified(self):
        return self._gmt_modified
    @gmt_modified.setter
    def gmt_modified(self, value):
        self._gmt_modified = value
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, value):
        self._id = value
    @property
    def modify_type(self):
        return self._modify_type
    @modify_type.setter
    def modify_type(self, value):
        self._modify_type = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.biz_budget_apply_code:
if hasattr(self.biz_budget_apply_code, 'to_alipay_dict'):
params['biz_budget_apply_code'] = self.biz_budget_apply_code.to_alipay_dict()
else:
params['biz_budget_apply_code'] = self.biz_budget_apply_code
if self.biz_budget_id:
if hasattr(self.biz_budget_id, 'to_alipay_dict'):
params['biz_budget_id'] = self.biz_budget_id.to_alipay_dict()
else:
params['biz_budget_id'] = self.biz_budget_id
if self.biz_name:
if hasattr(self.biz_name, 'to_alipay_dict'):
params['biz_name'] = self.biz_name.to_alipay_dict()
else:
params['biz_name'] = self.biz_name
if self.biz_type:
if hasattr(self.biz_type, 'to_alipay_dict'):
params['biz_type'] = self.biz_type.to_alipay_dict()
else:
params['biz_type'] = self.biz_type
if self.biz_uk_id:
if hasattr(self.biz_uk_id, 'to_alipay_dict'):
params['biz_uk_id'] = self.biz_uk_id.to_alipay_dict()
else:
params['biz_uk_id'] = self.biz_uk_id
if self.gmt_create:
if hasattr(self.gmt_create, 'to_alipay_dict'):
params['gmt_create'] = self.gmt_create.to_alipay_dict()
else:
params['gmt_create'] = self.gmt_create
if self.gmt_modified:
if hasattr(self.gmt_modified, 'to_alipay_dict'):
params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
else:
params['gmt_modified'] = self.gmt_modified
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.modify_type:
if hasattr(self.modify_type, 'to_alipay_dict'):
params['modify_type'] = self.modify_type.to_alipay_dict()
else:
params['modify_type'] = self.modify_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BizActionLogDTO()
if 'amount' in d:
o.amount = d['amount']
if 'biz_budget_apply_code' in d:
o.biz_budget_apply_code = d['biz_budget_apply_code']
if 'biz_budget_id' in d:
o.biz_budget_id = d['biz_budget_id']
if 'biz_name' in d:
o.biz_name = d['biz_name']
if 'biz_type' in d:
o.biz_type = d['biz_type']
if 'biz_uk_id' in d:
o.biz_uk_id = d['biz_uk_id']
if 'gmt_create' in d:
o.gmt_create = d['gmt_create']
if 'gmt_modified' in d:
o.gmt_modified = d['gmt_modified']
if 'id' in d:
o.id = d['id']
if 'modify_type' in d:
o.modify_type = d['modify_type']
return o
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
e5131ff29aa41698036707a61a86466d77e7d3b9
|
6c50bced6fb4474e4eb2e4f3c27a5ce38b0e6048
|
/manage.py
|
e1fbda688388d8db4449c6abeb1423356d40d79b
|
[] |
no_license
|
NMShihab/WebChatApp
|
0d5651fe38baccfee186e59e32c2c79de2bb39a4
|
2dda4e750c370e74bbfbc42dce02432268194d46
|
refs/heads/master
| 2023-02-01T22:57:53.738222
| 2020-12-15T17:09:14
| 2020-12-15T17:09:14
| 319,082,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Default the settings module; a value set in the environment wins.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ChatApi.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; "from exc" keeps the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Hand the raw process arguments to Django's CLI dispatcher.
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
|
[
"nmshihabislam@gmail.com"
] |
nmshihabislam@gmail.com
|
f0558330618b47efd52ea7dae4624354fe0c32ac
|
89b45e528f3d495f1dd6f5bcdd1a38ff96870e25
|
/pyneng/exercises/09_functions/task_9_2.py
|
e2a25f74f4ea48dd6a5f51879221d1048f8a5c94
|
[] |
no_license
|
imatyukin/python
|
2ec6e712d4d988335fc815c7f8da049968cc1161
|
58e72e43c835fa96fb2e8e800fe1a370c7328a39
|
refs/heads/master
| 2023-07-21T13:00:31.433336
| 2022-08-24T13:34:32
| 2022-08-24T13:34:32
| 98,356,174
| 2
| 0
| null | 2023-07-16T02:31:48
| 2017-07-25T22:45:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,935
|
py
|
# -*- coding: utf-8 -*-
"""
Задание 9.2
Создать функцию generate_trunk_config, которая генерирует
конфигурацию для trunk-портов.
У функции должны быть такие параметры:
- intf_vlan_mapping: ожидает как аргумент словарь с соответствием интерфейс-VLANы
такого вида:
{'FastEthernet0/1': [10, 20],
'FastEthernet0/2': [11, 30],
'FastEthernet0/4': [17]}
- trunk_template: ожидает как аргумент шаблон конфигурации trunk-портов в виде
списка команд (список trunk_mode_template)
Функция должна возвращать список команд с конфигурацией на основе указанных портов
и шаблона trunk_mode_template. В конце строк в списке не должно быть символа
перевода строки.
Проверить работу функции на примере словаря trunk_config
и списка команд trunk_mode_template.
Если предыдущая проверка прошла успешно, проверить работу функции еще раз
на словаре trunk_config_2 и убедится, что в итоговом списке правильные номера
интерфейсов и вланов.
Пример итогового списка (перевод строки после каждого элемента сделан
для удобства чтения):
[
'interface FastEthernet0/1',
'switchport mode trunk',
'switchport trunk native vlan 999',
'switchport trunk allowed vlan 10,20,30',
'interface FastEthernet0/2',
'switchport mode trunk',
'switchport trunk native vlan 999',
'switchport trunk allowed vlan 11,30',
...]
Ограничение: Все задания надо выполнять используя только пройденные темы.
"""
from pprint import pprint
# Commands applied to every trunk interface; the last entry gets the
# interface's allowed-VLAN list appended by generate_trunk_config.
trunk_mode_template = [
    "switchport mode trunk",
    "switchport trunk native vlan 999",
    "switchport trunk allowed vlan",
]
# Sample interface -> allowed-VLANs mapping used as test data.
trunk_config = {
    "FastEthernet0/1": [10, 20, 30],
    "FastEthernet0/2": [11, 30],
    "FastEthernet0/4": [17],
}
# Second mapping to double-check interface/VLAN substitution.
trunk_config_2 = {
    "FastEthernet0/11": [120, 131],
    "FastEthernet0/15": [111, 130],
    "FastEthernet0/14": [117],
}
def generate_trunk_config(intf_vlan_mapping, trunk_template):
    """Generate trunk-port configuration commands.

    Args:
        intf_vlan_mapping: dict mapping interface name -> list of VLAN ids.
        trunk_template: list of command strings; any command ending in
            'allowed vlan' gets the comma-separated VLAN ids appended.

    Returns:
        Flat list of configuration lines: one 'interface ...' header per
        interface followed by the filled-in template commands.
    """
    cfg = []
    for intf, vlans in intf_vlan_mapping.items():
        cfg.append("interface " + intf)
        for command in trunk_template:
            if command.endswith('allowed vlan'):
                # ",".join is clearer and safer than the original
                # str(list)[1:-1].replace(" ", "") slicing trick.
                command = command + ' ' + ','.join(str(v) for v in vlans)
            cfg.append(command)
    return cfg
# Smoke-test the generator on both sample mappings.
pprint(generate_trunk_config(trunk_config, trunk_mode_template))
pprint(generate_trunk_config(trunk_config_2, trunk_mode_template))
|
[
"i.matyukin@gmail.com"
] |
i.matyukin@gmail.com
|
265049dd5c7273612076608f805ee6f00e3f2430
|
82e0fb055637e3181c7b1c25b2c199213c130f1b
|
/Python/Funciones de Alto orden/Ejemplo4.py
|
eae624cd1e8e739a75e60ef4e2f55f361ea537d6
|
[] |
no_license
|
DangerousCode/DAM-2-Definitivo
|
ffd7d99a385e9d9a821887676ecd81d3e2e1ddfc
|
6fcaad2342a68a6005e062bdd8603b900dcdf147
|
refs/heads/master
| 2021-01-10T17:58:44.570045
| 2015-12-16T15:19:25
| 2015-12-16T15:19:25
| 47,215,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
__author__ = 'AlumnoT'
# Python 2 exercises on higher-order functions (filter/map/reduce + lambda).
'''Given a list of numbers and an upper-bound number, we want to return
the elements smaller than that bound'''
lista=list(range(-5,5))
'''1) Modify the syntax above so that only the negative numbers are shown'''
print filter(lambda x:x<0,lista)
'''2) Create a function that receives a list with the values 0,1,2,3,4 and
returns a list pairing the square of each value with its cube
(for every value)'''
print map(lambda x:[x*x,x*x*x],[0,1,2,3,4])
'''3) Build two lists, one with the numbers 0 to 5 and another with three
strings; print the mean of the numeric list and show the three strings of
the second list joined into a single phrase'''
lista=list(range(0,6))
listacad=["hola","que","tal"]
# Integer mean in Python 2 (both operands are ints, so / truncates).
print (reduce(lambda x,z:x+z,lista))/len(lista)
print reduce(lambda a,b:a+" "+b,listacad)
'''4) Given a list and a tuple of numbers, write a function that sums each
list element with the tuple element at the same position, all of it using
map/reduce/filter/lambda'''
lis=[1,2,3]
tup=(3,2,1)
# Two-argument lambda with map zips the sequences element-wise (Python 2).
print map(lambda x,y:x+y,lis,tup)
|
[
"asantosq1@gmail.com"
] |
asantosq1@gmail.com
|
1989906ee223d14319cc93f1ef9c3f3bb7ce946e
|
addb8ac420db7328afd209639204b526edcf9a15
|
/W-Maze/Tabular-Q/env.py
|
4d3c5a70dc6ee69c0e79b15372fd8f60f18fcf08
|
[] |
no_license
|
logic2code/DelayResolvedRL
|
ee704c8d4e9b1df2867dbe0ac77ab112ee4c0d89
|
a291875417a0e52fe09294d7f78ef9b3c9045b9c
|
refs/heads/main
| 2023-07-14T04:39:42.115756
| 2021-08-13T06:57:31
| 2021-08-13T06:57:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,162
|
py
|
import numpy as np
from collections import deque
class Environment:
    """W-maze grid world with an action-delay buffer.

    Grid cells: 'E' empty, 'X' wall, 'G' goal, 'P' player.
    Actions: 0=UP, 1=DOWN, 2=LEFT, 3=RIGHT.  Every step costs -1;
    stepping onto a goal cell yields +11, sets done and respawns the
    player via reset().
    """

    # Row/column displacement for actions UP, DOWN, LEFT, RIGHT.
    _DELTAS = ((-1, 0), (1, 0), (0, -1), (0, 1))

    def __init__(self, seed, delay):
        np.random.seed(seed)
        self.breadth = 7
        self.length = 11
        self.state_space = np.empty([self.breadth, self.length], dtype='<U1')
        # Environment configuration: top row walled, two short interior
        # walls flanking a 3-cell goal strip on the top row.
        self.state_space[:] = 'E'
        self.state_space[0] = 'X'
        self.state_space[1:4, self.length // 2 - 2] = 'X'
        self.state_space[1:4, self.length // 2 + 2] = 'X'
        self.state_space[0, self.length // 2 - 1:self.length // 2 + 2] = 'G'
        self.state_space[self.breadth - 1, 0] = 'P'
        # Discrete action set.
        self.actions = [0, 1, 2, 3]  # UP, DOWN, LEFT, RIGHT
        self.num_actions = len(self.actions)
        self.turn_limit = 300
        self.delay = delay
        self.actions_in_buffer = deque(maxlen=self.delay)
        self.fill_up_buffer()
        self.delayed_action = 0
        self.state = self.reset()

    def reset(self):
        """Respawn the player in a random row of column 0; returns [row, col].

        Also refills the delay buffer, matching the original behaviour.
        """
        x = np.random.randint(self.breadth)
        y = 0
        starting_state = [x, y]
        self.state_space[x, y] = 'P'
        self.fill_up_buffer()
        return starting_state

    def fill_up_buffer(self):
        """Fill the delay buffer with uniformly random actions."""
        for _ in range(self.delay):
            action = np.random.choice(self.num_actions)
            self.actions_in_buffer.append(action)

    def step(self, state, action):
        """Apply *action* from *state*; return (next_state, reward, done).

        Moves into walls or off the grid leave the state unchanged
        (reward -1).  Reaching 'G' returns reward 11, done=True, and the
        state produced by reset().  Bug fix: next_state is now always a
        list (the original returned tuples for DOWN/LEFT and lists for
        UP/RIGHT).
        """
        done = False
        reward = -1
        row, col = state[0], state[1]
        if action in (0, 1, 2, 3):
            d_row, d_col = self._DELTAS[action]
            new_row, new_col = row + d_row, col + d_col
            in_bounds = 0 <= new_row < self.breadth and 0 <= new_col < self.length
            if in_bounds and self.state_space[new_row, new_col] != 'X':
                # Vacate the old cell before occupying the new one.
                self.state_space[row, col] = 'E'
                if self.state_space[new_row, new_col] == 'G':
                    done = True
                    self.state = self.reset()
                    reward = 11
                else:
                    self.state = [new_row, new_col]
                    self.state_space[new_row, new_col] = 'P'
        return self.state, reward, done
|
[
"noreply@github.com"
] |
noreply@github.com
|
1109d37bf0366a1327a89aea8da48513a50ab171
|
62ccd6d2d3e10a4587c8e35a98879840656afe6a
|
/Bike.py
|
30cd82c283c91fe6fe4cddc05843116def60d616
|
[] |
no_license
|
Swills2/python_OOp
|
b37bd5ee08f6f7e710219f1e6cb55f354f65bf5e
|
8642fb765f784412f9b9d085f19d5810eef7e941
|
refs/heads/master
| 2020-04-07T10:56:39.132877
| 2018-11-20T00:22:56
| 2018-11-20T00:22:56
| 158,306,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
class Bike:
    """Simple bike with a price, a top speed and an odometer (miles)."""

    def __init__(self, price, max_speed, miles = 0):
        self.price = price
        # Bug fix: the constructor previously discarded max_speed and
        # stored an empty list instead.
        self.max_speed = max_speed
        self.miles = miles

    def displayInfo(self, price, max_speed, miles):
        """Print the given values; returns self for chaining."""
        print(price, max_speed, miles)
        return self

    def ride(self, miles):
        """Add 10 miles to the odometer; returns self for chaining.

        The *miles* parameter is kept for interface compatibility but,
        as in the original, the increment is a fixed 10.
        """
        print("Riding")
        # Bug fix: previously incremented the local parameter, which had
        # no effect on the object.
        self.miles += 10
        return self

    def reverse(self, miles):
        """Subtract 5 miles, floored at 0; returns self for chaining."""
        print("Reversing")
        # Bug fix: previously decremented the local parameter only.
        self.miles = max(self.miles - 5, 0)
        return self
|
[
"swills0055@gmail.com"
] |
swills0055@gmail.com
|
8224ec2ea7bc83f7d68a0df94cbee6f1ccdee3ae
|
6fa6288bd21694bb144798d63b77a8e2924603e5
|
/DataStructures/arrays/codility/cheap_letter_deletion.py
|
3a1e1b0a156b1406efd8ebc982701ecdfd622bd2
|
[] |
no_license
|
akshatakulkarni98/ProblemSolving
|
649ecd47cec0a29ccff60edb60f3456bf982c4a1
|
6765dbbde41cfc5ee799193bbbdfb1565eb6a5f5
|
refs/heads/master
| 2023-01-03T19:03:49.249794
| 2020-10-27T06:28:02
| 2020-10-27T06:28:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,663
|
py
|
"""
You are given a string S. Deletion of the K-th letter of S costs C[K]. After deleting a letter, the costs of deleting other letters do not change. For example, for S =
"ab" and C = [1, 3], after deleting 'a', deletion of 'b' will still cost 3.
You want to delete some letters from S to obtain a string without two identical letters next to each other. What is the minimum total cost of deletions to achieve
such a string?
Write a function:
def solution(S, C)
that, given string S and array C of integers, both of length N, returns the minimum cost of all necessary deletions.
Examples:
1. Given S = "abccbd" and C = [0, 1, 2, 3, 4, 5], the function should return 2. You can delete the rst occurrence of 'c' to achieve "abcbd".
2. Given S = "aabbcc" and C = [1, 2, 1, 2, 1, 2], the function should return 3. By deleting all letters with a cost of 1, you can achieve string "abc".
3. Given S = "aaaa" and C = [3, 4, 5, 6], the function should return 12. You need to delete all but one letter 'a', and the lowest cost of deletions is 3+4+5=12.
4. Given S = "ababa" and C = [10, 5, 10, 5, 10], the function should return 0. There is no need to delete any letter.
Write an ecient algorithm for the following assumptions:
string S and array C have length equal to N;
N is an integer within the range [1..100,000];
string S consists only of lowercase letters ('a'−'z');
each element of array C is an integer within the range [0..1,000]
"""
def solution(S, C):
    """Minimum total deletion cost so no two equal letters are adjacent.

    S is the string, C[i] the cost of deleting S[i].  Within each run of
    identical letters we keep only the most expensive one and pay for
    the rest.  Returns -1 for empty input (original behaviour).
    """
    if not S or not C:
        return -1
    total_cost = 0
    keep = 0  # index of the letter currently kept from the active run
    for idx in range(1, len(S)):
        if S[idx] != S[keep]:
            # Run ended; start tracking the new letter.
            keep = idx
        else:
            # Same letter twice in a row: delete the cheaper occurrence
            # and keep pointing at the more expensive one.
            total_cost += min(C[idx], C[keep])
            if C[keep] < C[idx]:
                keep = idx
    return total_cost
|
[
"noreply@github.com"
] |
noreply@github.com
|
f34674523abde90b1c1b6d237f4f30d76afc7788
|
ff9c5a10eea701b6b6be1ec7076b5dfab91b6a38
|
/ex08.py
|
6a748ab59f8096a77c05fccec2e922c387077bf3
|
[] |
no_license
|
CFEsau/learnpython
|
21707fc15bcb09098e83b116bd77203158a65353
|
0616bf36c1a0e3b81cb6d0f5edc70c142904ca65
|
refs/heads/master
| 2021-01-18T15:51:13.948611
| 2017-08-15T15:09:24
| 2017-08-15T15:09:24
| 100,386,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
#ex08: Printing, printing (Python 2 syntax)
# %r substitutes repr() of each value, so strings keep their quotes and
# booleans print as True/False.
formatter = "%r %r %r %r"
print formatter % (1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
# Substituting the template into itself prints the raw %r placeholders.
print formatter % (formatter, formatter, formatter, formatter)
print formatter % (
    "I had this thing.",
    "That you could type up right.",
    "But it didn't sing.",
    # The above line contains an apostrophe which affects the output:
    # repr() switches that string to double quotes.
    "So I said goodnight."
)
|
[
"c.esau@shef.ac.uk"
] |
c.esau@shef.ac.uk
|
ac9c7f15ea1547bd32a8c41e2f64470813bf0d52
|
70054615f56be28373b00c9df96544ec822be683
|
/res/scripts/client/gui/scaleform/daapi/view/meta/questswindowmeta.py
|
66a92293420cda94a63d878facfa96ffceb268d2
|
[] |
no_license
|
wanyancan/WOTDecompiled
|
c646ad700f5ec3fb81fb4e87862639ce0bdf0000
|
9ffb09007a61d723cdb28549e15db39c34c0ea1e
|
refs/heads/master
| 2020-04-17T23:13:15.649069
| 2013-11-15T16:37:10
| 2013-11-15T16:37:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
from gui.Scaleform.framework.entities.DAAPIModule import DAAPIModule


class QuestsWindowMeta(DAAPIModule):
    """Empty meta stub for the quests window.

    All behaviour comes from DAAPIModule; this appears to be
    autogenerated/decompiled client code — confirm before extending.
    """
    pass
|
[
"james.sweet88@googlemail.com"
] |
james.sweet88@googlemail.com
|
f602f55691918872b41f72f9e122627a0f538a6e
|
931841bd1de963e0dcfcf69114cec8c8c3f17323
|
/search/avltree.py
|
943eeeade92e526f2ae671c999a55416ecfaff7b
|
[] |
no_license
|
sancheng/py-algos
|
6f2e1e700c60224963f77b01c706a399b9897c1d
|
1bb73d32a1cf4f95358df5d6e0aae2828098fd07
|
refs/heads/master
| 2021-08-21T21:07:33.199863
| 2017-11-29T03:10:26
| 2017-11-29T03:10:26
| 111,628,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,048
|
py
|
class Node(object):
    """Binary-tree node holding a value plus parent/child links.

    Passing a parent to the constructor immediately attaches the new
    node on the requested side (isleft selects the left slot).
    """

    def __init__(self, val, parent, isleft):
        self.val = val
        self.parent = parent
        self.leftchild = None
        self.rightchild = None
        # Hook ourselves into the parent on the requested side.
        if parent is not None:
            attach = parent.setleftChild if isleft else parent.setrightChild
            attach(self)

    def setleftChild(self, left):
        """Attach *left* as the left child, fixing its parent link."""
        self.leftchild = left
        if left is not None:
            left.parent = self

    def setrightChild(self, right):
        """Attach *right* as the right child, fixing its parent link."""
        self.rightchild = right
        if right is not None:
            right.parent = self
class AvlTree(object):
    """Binary search tree with (incomplete) AVL-style rebalancing.

    NOTE(review): insert() references self.left_right_rotate and
    self.right_left_rotate, which are never defined on this class, so
    the two double-rotation cases raise AttributeError at runtime.  The
    right-left call also passes inode.leftchild where the right child
    looks intended — confirm before relying on rebalancing.
    """
    def __init__(self,cmp_func,rootval):
        # cmp_func(a, b): negative when a sorts before b (classic comparator).
        self.cmp = cmp_func
        self.root = Node(rootval,None,False)
    def height(self, node):
        """Height of the subtree at *node*; empty subtree counts as 0.

        Recomputed recursively on every call — O(n) per invocation.
        """
        if node is None:
            return 0
        else:
            return 1 + max(self.height(node.leftchild),self.height(node.rightchild))
    def insert(self,element):
        """Insert *element* as a leaf, then try to rebalance the lowest
        unbalanced ancestor (see class NOTE for the broken cases)."""
        n = self.root
        #insert directly
        inode = None
        while True:
            if self.cmp(element,n.val) < 0:
                if n.leftchild is None:
                    inode = Node(element,n,True)
                    break
                else:
                    n = n.leftchild
            else:
                if n.rightchild is None:
                    inode = Node(element,n,False)
                    break
                else:
                    n = n.rightchild
        #find unbalanced subtree
        # Walk back up recording the direction taken at each parent
        # (0 = node is its parent's left child, 1 = right child).
        isleft_rotate = True
        path_directs = []
        while inode is not None:
            lh = self.height(inode.leftchild)
            rh = self.height(inode.rightchild)
            hdiff = lh -rh
            if hdiff > 1:
                # Left-heavy subtree: a right rotation is needed.
                isleft_rotate = False
                break
            elif hdiff < -1:
                # Right-heavy subtree: a left rotation is needed.
                break
            if inode.parent is not None:
                if inode == inode.parent.leftchild:
                    path_directs.append(0)
                else:
                    path_directs.append(1)
            inode = inode.parent
        #rebalance
        # NOTE(review): assumes at least two recorded directions; an
        # imbalance detected near the leaf can leave path_directs too
        # short and raise IndexError — verify.
        if inode is not None:
            if path_directs[-2] == 1 and path_directs[-1] == 1:
                self.left_rotate(inode.rightchild,inode)
            elif path_directs[-2] == 0 and path_directs[-1] == 0:
                self.right_rotate(inode.leftchild,inode)
            elif path_directs[-2] == 1 and path_directs[-1] == 0:
                # NOTE(review): left_right_rotate is not defined anywhere.
                self.left_right_rotate(inode.leftchild, inode)
            elif path_directs[-2] == 0 and path_directs[-1] == 1:
                # NOTE(review): right_left_rotate is not defined anywhere.
                self.right_left_rotate(inode.leftchild,inode)
    def search(self,value):
        """Return True iff *value* is present (binary search by < / >)."""
        n = self.root
        while n is not None and n.val != value:
            if n.val > value:
                n = n.leftchild
            else:
                n = n.rightchild
        return n is not None
    def left_rotate(self,node,pnode):
        """Rotate *node* (pnode's right child) up over its parent *pnode*."""
        pp = pnode.parent
        if pp is None:
            # pnode was the root; node takes its place.
            self.root = node
            node.parent = None
        else:
            if pp.leftchild == pnode:
                pp.setleftChild(node)
            else:
                pp.setrightChild(node)
        pnode.setrightChild(node.leftchild)
        node.setleftChild(pnode)
    def right_rotate(self,node,pnode):
        """Rotate *node* (pnode's left child) up over its parent *pnode*."""
        pp = pnode.parent
        if pp is None:
            # pnode was the root; node takes its place.
            self.root = node
            node.parent = None
        else:
            if pp.leftchild == pnode:
                pp.setleftChild(node)
            else:
                pp.setrightChild(node)
        pnode.setleftChild(node.rightchild)
        node.setrightChild(pnode)
    def printTree(self):
        """Pre-order dump of the whole tree (Python 2 print statements)."""
        self.printNode(self.root)
    def printNode(self,node):
        # Pre-order: value first, then left subtree, then right subtree.
        print node.val
        if node.leftchild is not None:
            self.printNode(node.leftchild)
        if node.rightchild is not None:
            self.printNode(node.rightchild)
#test right rotation
# NOTE(review): despite the comment above, an increasing insert sequence
# builds a right-leaning chain, which exercises *left* rotations.
tree = AvlTree(lambda x,y:x-y,1)
tree.insert(5)
tree.insert(8)
tree.insert(10)
tree.insert(11)
tree.insert(12)
tree.printTree()
print tree.search(11)
print tree.search(7)
|
[
"sancheng@cisco.com"
] |
sancheng@cisco.com
|
0a7ff4211eaca98470e2742585ac72c1dbe492de
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02415/s303347384.py
|
f7612caa107b4023d41f174a9952151845dbb81a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41
|
py
|
# Read one line from stdin and echo it with the case of every letter swapped.
word = input()
print(str.swapcase(word))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
46fd7987e76562876a9df13d571ec26da2089cf7
|
bcc90e2a3ef609caf24fa427061750cb7ed807ba
|
/Decorator/ConcreteComponent.py
|
5b6989308ff4c75e35df9b51e663518ce6ef0f15
|
[] |
no_license
|
vudt93/DesignPattern
|
9140eb16544b1a02da1f889f5713b499166e9046
|
3f21df6be2b46fd4f5648b6d30b450699faabcbf
|
refs/heads/master
| 2021-03-24T01:19:56.332113
| 2020-03-17T09:36:26
| 2020-03-17T09:36:26
| 247,502,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
from Decorator.Component import Component


class ConcreteComponent(Component):
    """Concrete Component of the Decorator pattern: provides the base
    operation that decorator classes wrap."""
    def do_operation(self):
        # Base behaviour; decorators add work around this call.
        print("Operation")
|
[
"vu.do@cj.net"
] |
vu.do@cj.net
|
43730854b668cdc8e523b81d756b9615a915f5d5
|
2ff113af86a2cde69ccf114a98e3a2092f751993
|
/Aula18/B - Replacing Digits/b.py
|
12b37f10be0ef9aafdf7b37695fe1680e768444c
|
[] |
no_license
|
Math-Gomes/ProgramacaoCompetitiva
|
7874e6a3cbcfadb7d4c2366f178d69b02909daf8
|
4ce79f5cb564ba7e07fdcee0995aa476b883c7a3
|
refs/heads/master
| 2023-08-13T10:26:43.481381
| 2021-09-22T12:46:07
| 2021-09-22T12:46:07
| 380,554,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
if __name__ == '__main__':
    # "Replacing Digits": a holds the number's digits, s a pool of
    # single-use replacement digits (both read as digit strings).
    a = list(map(int, list(input())))
    s = list(map(int, list(input())))
    # Greedy: replace the smallest digit of a with the largest remaining
    # digit of s until no replacement helps.
    # Bug fix: the original looped `while True` and crashed on max(s)
    # with a ValueError once the replacement pool was exhausted.
    # NOTE(review): replacing the *minimum* digit maximises the digit
    # multiset but not necessarily the number's value; confirm against
    # the intended problem statement.
    while s:
        max_s = max(s)
        min_a = min(a)
        if max_s < min_a:
            break
        s.remove(max_s)
        a[a.index(min_a)] = max_s
    print(*a, sep = '')
|
[
"mathjvmf@gmail.com"
] |
mathjvmf@gmail.com
|
88881e340fd70a3969fd1822c2d1552ff989a8c7
|
c56fdac92304316ebe52796d619e5e72c564475b
|
/hw2/perceptron.py
|
f55a6de69ef7e861414fa307acc847777cedc155
|
[] |
no_license
|
RamisesM/Learning-From-Data
|
0cf043b0474992441d746c2aff8ef6c7134bc7e6
|
dde2677773fea2bff48899371a58cebda75449ae
|
refs/heads/master
| 2020-03-07T11:30:45.894709
| 2018-05-14T20:14:20
| 2018-05-14T20:14:20
| 127,457,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
import random
import numpy
class Point:
    """Random point in the square [-1, 1] x [-1, 1].

    c is the homogeneous coordinate (always 1) and vec the tuple
    (c, x, y) used for inner products with weight vectors.
    """

    def __init__(self):
        self.c = 1
        # Two independent uniform draws, x first then y (order matters
        # for reproducibility under a fixed random seed).
        self.x = random.uniform(-1, 1)
        self.y = random.uniform(-1, 1)
        self.vec = (self.c, self.x, self.y)
class Function:
    """Linear decision boundary f[0] + f[1]*x + f[2]*y = 0."""

    def __init__(self):
        # Coefficients [intercept, slope, -1]; all zero until randomize().
        self.f = [0, 0, 0]

    def randomize(self):
        """Re-seed the boundary as the line through two random Points."""
        first, second = Point(), Point()
        slope = (second.y - first.y) / (second.x - first.x)
        self.f[2] = -1
        self.f[1] = slope
        self.f[0] = first.y - slope * first.x

    def classify(self, point):
        """Return +1/-1 for the side of the line point.vec falls on.

        Points exactly on the line (inner product 0) map to -1.
        """
        side = numpy.sign(numpy.inner(point.vec, self.f))
        if side == 0:
            side = -1
        return side
def perceptron(target_function, hypothesis, data_set):
    """Run the Perceptron Learning Algorithm until convergence.

    Args:
        target_function: object with classify(point) giving the true label.
        hypothesis: object with classify(point) and a 3-element weight
            list .f, updated in place.
        data_set: list of points exposing a 3-tuple .vec.

    Returns:
        Number of weight updates performed.  Does not terminate if the
        data is not linearly separable (original behaviour).
    """
    def _labels(classifier):
        # Label every sample with the given classifier.
        return [classifier.classify(point) for point in data_set]

    def _misclassified(h_labels):
        # Indices where the hypothesis disagrees with the target.
        return [i for i in range(len(data_set)) if h_labels[i] != f_set[i]]

    f_set = _labels(target_function)
    h_set = _labels(hypothesis)
    misclassified_set = _misclassified(h_set)
    number_of_iterations = 0
    while misclassified_set:
        # Pick a random misclassified point (same random call as before,
        # preserving reproducibility under a fixed seed).
        test_index = misclassified_set[random.randint(0, len(misclassified_set) - 1)]
        test_point = data_set[test_index]
        # Standard PLA update: w <- w + y * x.
        hypothesis.f = [hypothesis.f[i] + f_set[test_index] * test_point.vec[i]
                        for i in range(3)]
        h_set = _labels(hypothesis)
        misclassified_set = _misclassified(h_set)
        number_of_iterations += 1
    return number_of_iterations
|
[
"ramises.martins@gmail.com"
] |
ramises.martins@gmail.com
|
2a132a7f304bf03097919aab6ebca25961224c39
|
4558f88bc7b48a692599aac4d2316201e6c95a02
|
/scud/plt/er_log/phil.py
|
0a245e28e09a42bb507e5403f76287bb172726c9
|
[] |
no_license
|
kroon-lab/scud
|
bb3f7dc05c1000c0816d1b458d1c74bd74413053
|
b55423edb4b0e33110cf96fbd3828f86166924c9
|
refs/heads/master
| 2020-03-18T06:49:26.989684
| 2019-05-01T14:56:14
| 2019-05-01T14:56:14
| 134,412,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
import libtbx.phil
from scud.general.phil_methods import init_command_line_phil
def phil_parse(args=None,log=None):
    '''
    Build the default phil parameter scope for er_log plotting, overlay
    any command-line *args* on top of it via init_command_line_phil, and
    return the extracted parameter object.
    '''
    # Default parameters (phil grammar; the string is parsed by libtbx
    # and must not be reformatted).
    master_phil = libtbx.phil.parse("""
    er_log
      {
        input
          .help = "Input files"
        {
          log = None
            .type = path
            .help = 'File name of PDB containing ensemble to be converted to supercell'
        }
        params
          .help = "Control running"
        {
          title = None
            .type = str
            .help = 'Plot Title'
          show = False
            .type = bool
            .help = 'show plot or not'
        }
        output
          .help = "output files"
        {
          plot_out = plt.eps
            .type = path
            .help = 'Name of output plot'
        }
    }
    """)
    # Merge command-line overrides into the defaults and hand back the
    # plain extracted parameter object.
    working_phil = init_command_line_phil(master_phil=master_phil,
                                          args=args,
                                          log=log)
    return working_phil.extract()
|
[
"l.m.j.kroon-batenburg@uu.nl"
] |
l.m.j.kroon-batenburg@uu.nl
|
670e94a3bedc2fa474c3d44db8f5ae1bab732516
|
f3515dd05089b6491ccb4c28ac6654b7f86e77b0
|
/hw_4/final_submittal/cross_validation_script.py
|
11b87a12e232f801086c9ba118854eadc8e211eb
|
[] |
no_license
|
jb08/AI
|
f17409832c0af8710957f4b4d6c80d90aa06f198
|
8f05d6989174b3c6b76547c5370042038a54e78d
|
refs/heads/master
| 2021-01-01T04:12:10.496120
| 2016-06-09T16:40:58
| 2016-06-09T16:40:58
| 56,189,015
| 0
| 0
| null | 2016-06-08T17:06:55
| 2016-04-13T22:01:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,263
|
py
|
# Name: Megan Sinclair, David Williams, Jason Brown
# Date: 5/23/16
# All group members were present and contributing during all work on this project
#
# Note that there is a retrain function in this script. This function mirrors the training
# that is present in our bayes.py and bayesbest.py files. Mirroring it here was simply
# done to make our cross-validation easier, but bayesbest.py is still intended to be used
# by itself.
import bayes
import bayesbest
import os, time
def ten_fold():
    """Run 10-fold cross-validation over both classifiers (Python 2).

    Trains bayes and bayesbest on each fold's training split, classifies
    the test split, accumulates confusion counts across folds, then
    prints precision/recall/F-measure for both classifiers.

    Counter naming: pos_true = positive file classified positive,
    pos_false = positive file misclassified, and likewise for neg_*.
    NOTE(review): given those definitions the precision and recall
    formulas below appear swapped (precision divides by TP+FN) —
    verify against the intended confusion-matrix mapping.
    """
    pos_true = 0
    pos_false = 0
    neg_true = 0
    neg_false = 0
    best_pos_true = 0
    best_pos_false = 0
    best_neg_true = 0
    best_neg_false = 0
    bc = bayes.Bayes_Classifier()
    bcc = bayesbest.Bayes_Classifier()
    for i in range(10):
        # Rotate the test slice through the corpus, fold by fold.
        training,testing = single_fold(i)
        retrain(bc,training, False)
        retrain(bcc,training, True)
        #print "\tDone training"
        #print len(testing)
        #print len(training)
        #time.sleep(3)
        ct = 1
        for f in testing:
            sTxt = bc.loadFile("movies_reviews/" + f)
            bc_result = bc.classify(sTxt)
            bcc_result = bcc.classify(sTxt)
            #print "\tTested: " ,ct
            ct += 1
            # Files named movies-5* are positive reviews, movies-1* negative.
            if (f.startswith("movies-5")):
                if bc_result == "positive":
                    pos_true += 1
                else:
                    pos_false += 1
                if bcc_result == "positive":
                    best_pos_true += 1
                else:
                    best_pos_false += 1
            elif (f.startswith("movies-1")):
                if bc_result == "negative":
                    neg_true += 1
                else:
                    neg_false += 1
                if bcc_result == "negative":
                    best_neg_true += 1
                else:
                    best_neg_false += 1
        # Running totals are cumulative across folds, not per-fold.
        print "fold: ", i
        print "\treg results: %d %d %d %d" % (pos_true, pos_false, neg_true, neg_false)
        print "\tbest results: %d %d %d %d" % (best_pos_true, best_pos_false, best_neg_true, best_neg_false)
    #precision
    precision_positive = pos_true / float(pos_true + pos_false)
    precision_negative = neg_true / float(neg_true + neg_false)
    best_precision_positive = best_pos_true / float(best_pos_true + best_pos_false)
    best_precision_negative = best_neg_true / float(best_neg_true + best_neg_false)
    #recall
    recall_positive = pos_true / float(pos_true + neg_false)
    recall_negative = neg_true / float(neg_true + pos_false)
    best_recall_positive = best_pos_true / float(best_pos_true + best_neg_false)
    best_recall_negative = best_neg_true / float(best_neg_true + best_pos_false)
    #f-measure
    # Harmonic mean of the two quantities computed above.
    f_measure_positive = (2 * precision_positive * recall_positive) / float(precision_positive + recall_positive)
    f_measure_negative = (2 * precision_negative * recall_negative) / float(precision_negative + recall_negative)
    best_f_measure_positive = (2 * best_precision_positive * best_recall_positive) / float(best_precision_positive + best_recall_positive)
    best_f_measure_negative = (2 * best_precision_negative * best_recall_negative) / float(best_precision_negative + best_recall_negative)
    print "naive bayes classifier:"
    print " precision_positive: %.3f" % precision_positive
    print " precision_negative: %.3f"% precision_negative
    print " recall_positive: %.3f" %recall_positive
    print " recall_negative: %.3f" %recall_negative
    print " f_measure_positive: %.3f" %f_measure_positive
    print " f_measure_negative: %.3f" %f_measure_negative
    print " "
    print "naive bayes classifier (improved):"
    print " precision_positive: %.3f" %best_precision_positive
    print " precision_negative: %.3f" %best_precision_negative
    print " recall_positive: %.3f" %best_recall_positive
    print " recall_negative: %.3f" %best_recall_negative
    print " f_measure_positive: %.3f" %best_f_measure_positive
    print " f_measure_negative: %.3f" %best_f_measure_negative
def single_fold(start_val):
    """Split the review files into ~90% training / ~10% testing.

    Every 10th file (offset by start_val) lands in the test set, so the
    ten values 0..9 of *start_val* yield the ten distinct folds.
    Returns (training_set, testing_set) as lists of file names.
    """
    count = start_val%10 #10 fold validation
    IFileList = []
    # Grab file names from the top level of movies_reviews/ only.
    for fFileObj in os.walk("movies_reviews/"):
        IFileList = fFileObj[2]
        break
    training_set = []
    testing_set = []
    for f in IFileList:
        #Training set
        if(count == 9):
            # Every time the counter wraps, this file goes to testing.
            testing_set.append(f)
            count = 0
        else:
            training_set.append(f)
            count+=1
    return training_set,testing_set
def retrain(bc, training_set, is_best):
    """Re-train classifier *bc* from scratch on *training_set*.

    Mirrors the training in bayes.py/bayesbest.py: file names starting
    with 'movies-5' are positive reviews, 'movies-1' negative.  When
    *is_best* is true, tokens are lower-cased first (the bayesbest
    variant).  Rebuilds bc.positive / bc.negative word-frequency dicts
    in place.
    """
    #For each file name, parse and determine if pos (5) or neg (1)
    bc.positive = dict()
    bc.negative = dict()
    for f in training_set:
        #Positive review, add words/frequencies to positive dictionary
        if (f.startswith("movies-5")):
            bc.dictionary = bc.positive
        #Negative review, add words/frequencies to negative dictionary
        elif (f.startswith("movies-1")):
            bc.dictionary = bc.negative
        else:
            # Skip files whose rating prefix is neither 1 nor 5.
            #print "error: file didn't start with movies-1 or movies-5"
            continue
        sTxt = bc.loadFile("movies_reviews/" + f)
        token_list = bc.tokenize(sTxt)
        #print "dictionary: ", dictionary
        for word in token_list:
            if (is_best):
                # bayesbest normalises case before counting.
                word = word.lower()
            #If word exists in dictionary already, increase frequency by 1
            if word in bc.dictionary:
                bc.dictionary[word] +=1
            #Add word to dictionary with frequency of 1 if it did not already exist
            else:
                bc.dictionary[word] = 1
|
[
"jasonkingsley.brown@gmail.com"
] |
jasonkingsley.brown@gmail.com
|
b69ca6b786925c7020c263729f5d7bd1e74e3d05
|
35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d
|
/Python_Study/第七模块学习/Day04/EdmureBlog/web/forms/base.py
|
ab198421829eb1b2c3ebc96a9c1743d571cc884e
|
[] |
no_license
|
KongChan1988/51CTO-Treasure
|
08b4ca412ad8a09d67c1ea79c7149f8573309ca4
|
edb2e4bd11d39ac24cd240f3e815a88361867621
|
refs/heads/master
| 2021-07-04T15:57:56.164446
| 2019-07-24T15:28:36
| 2019-07-24T15:28:36
| 97,453,749
| 5
| 8
| null | 2019-10-30T22:05:12
| 2017-07-17T08:34:59
|
Python
|
UTF-8
|
Python
| false
| false
| 208
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class BaseForm(object):
    """Form mixin that captures the current HTTP request.

    Designed for cooperative multiple inheritance: stores *request* and
    forwards the remaining arguments to the next __init__ in the MRO
    (presumably a Django Form class — confirm at the use sites).
    """
    def __init__(self, request, *args, **kwargs):
        # Keep the request so form logic can reach session/user data.
        self.request = request
        super(BaseForm, self).__init__(*args, **kwargs)
|
[
"wangwei_198811@163.com"
] |
wangwei_198811@163.com
|
451da3310b48ed6fd08983ee33fda5f2b27b92fd
|
24171ea136e2ec211792d1d7644cd5c945a6df35
|
/test/41.py
|
a05e622c42a995a2e87dd72731ed1c176dae0dc4
|
[] |
no_license
|
reidevries/codecoach
|
1329ab367dc8aa3f3dd76af0b7cbc975a7d67ccd
|
a6d8e3cf28a6d264b0aa6aa8a44cc315803954b2
|
refs/heads/master
| 2021-05-23T10:02:53.403344
| 2020-04-05T12:57:24
| 2020-04-05T12:57:24
| 253,233,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
#! /usr/bin/env python2.6
# Validate "IP:port" lines read from a file: the dotted-quad address is
# checked with a regex (each octet 0-255) and the port with an integer
# range test.  NOTE: Python 2 syntax (print statements) throughout.
import re
import sys
import os

argv = len(sys.argv)
if argv != 2:
    print "usage: ipfun.py <filename>"
    sys.exit(1)
argo = sys.argv[1]
# Bail out early if the input file is not readable.
if (os.access(argo, os.R_OK) == 0):
    print argo, "is not readable"
    sys.exit(2)
InFile = open(argo, "r")
# Each octet matches 0-199 (optional leading digits), 200-249, or 250-255;
# the trailing ':' anchors the expected "addr:port" form.
ipcheck = r"((([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\.(([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\.(([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\.(([0-1]?[0-9]?[0-9])|(2[0-4][0-9])|(25[0-5]))\:)"
for line in InFile:
    validip = re.match(ipcheck, line)
    # Strip the trailing newline, if any.
    line = line.split('\n')
    line = line[0]
    if validip:
        line2 = line.split(':')
        try :
            port = int(line2[1])
        except:
            print line,"- Invalid Port Number"
        else:
            # NOTE(review): `&` (bitwise) works here only because both
            # operands are bools; `and` is the intended operator.  Also the
            # 32767 cap looks like a signed-16-bit limit — valid TCP/UDP
            # ports go up to 65535.  Confirm intent before changing.
            if ((port > 0) & (port < 32767)):
                validport = 1  # NOTE(review): set but never read
                if port < 1024:
                    root = 1  # NOTE(review): set but never read
                    print line,"- Valid (root privileges required)"
                else:
                    root = 0
                    print line,"- Valid"
            else:
                print line,"- Invalid Port Number"
    else:
        print line,"- Invalid IP Address"
sys.exit(0)
|
[
"raeaw@localhost.localdomain"
] |
raeaw@localhost.localdomain
|
fa423cdd35927ebb9664b82df50cab4322eebe1f
|
9f75a1f7e1aa7c9e3bff6aeb261808d596b75fa5
|
/agent.py
|
8f2c88d0157d6805b784d73414cf0700d6839e53
|
[] |
no_license
|
butterkaffee/drlnd_project1
|
2609b97d4122683b25d2c22f452077d2ccde1f71
|
2843211006947f0598e5ba7c23f10e90e399d834
|
refs/heads/master
| 2020-05-31T06:42:36.942280
| 2019-07-08T17:25:56
| 2019-07-08T17:25:56
| 190,148,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,605
|
py
|
import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork, DuelingDQN
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 256 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
import random
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
num_atoms = 51
Vmin = -10
Vmax = 10
class Agent():
    """Interacts with and learns from the environment.

    Vanilla DQN agent: epsilon-greedy acting, uniform experience replay
    (ReplayBuffer), and a soft-updated target network.
    """

    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        # Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)

        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        """Record one transition and learn every UPDATE_EVERY calls."""
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)

        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.

        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        # eval()/no_grad() so the greedy estimate is deterministic and
        # gradient-free; train mode is restored right after.
        self.qnetwork_local.eval()
        with torch.no_grad():
            action_values = self.qnetwork_local(state)
        self.qnetwork_local.train()

        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # Get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states; (1 - dones) zeroes the
        # bootstrap term on terminal transitions.
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))

        # Get expected Q values from local model
        Q_expected = self.qnetwork_local(states).gather(1, actions)

        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
    """Fixed-size buffer that stores experience tuples and samples them
    uniformly at random as batched torch tensors."""

    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Initialize a ReplayBuffer object.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
        """
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple(
            "Experience",
            field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Add a new experience to memory."""
        self.memory.append(
            self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Uniformly sample a batch and stack it into torch tensors."""
        batch = random.sample(self.memory, k=self.batch_size)
        present = [e for e in batch if e is not None]
        states = torch.from_numpy(
            np.vstack([e.state for e in present])).float().to(device)
        actions = torch.from_numpy(
            np.vstack([e.action for e in present])).long().to(device)
        rewards = torch.from_numpy(
            np.vstack([e.reward for e in present])).float().to(device)
        next_states = torch.from_numpy(
            np.vstack([e.next_state for e in present])).float().to(device)
        dones = torch.from_numpy(
            np.vstack([e.done for e in present]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
class PrioReplayBuffer:
    """Fixed-size replay buffer with proportional prioritized sampling.

    Bug fix: the original allocated a ``priorities`` array and accepted
    ``prob_alpha`` but then sampled uniformly and never wrote a priority —
    it behaved exactly like ReplayBuffer.  This implementation records the
    current maximum priority for every new experience and samples with
    probability proportional to priority ** prob_alpha (Schaul et al.,
    "Prioritized Experience Replay").  The public interface is unchanged.
    """

    def __init__(self, action_size, buffer_size, batch_size, seed, prob_alpha=0.6):
        """Initialize a ReplayBuffer object.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum size of buffer
            batch_size (int): size of each training batch
            seed (int): random seed
            prob_alpha (float): priority exponent; 0 = uniform sampling
        """
        self.prob_alpha = prob_alpha
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)
        # priorities[i] is the priority of self.memory[i]; only the first
        # len(self.memory) entries are meaningful.
        self.priorities = np.zeros((buffer_size,), dtype=np.float32)

    def add(self, state, action, reward, next_state, done):
        """Add a new experience with the current maximum priority, so it is
        guaranteed a chance to be sampled at least once."""
        e = self.experience(state, action, reward, next_state, done)
        n = len(self.memory)
        max_prio = self.priorities[:n].max() if n > 0 else 1.0
        if n == self.memory.maxlen:
            # The deque evicts its oldest (leftmost) entry on append; shift
            # the priority array left by one so indices stay aligned.
            self.priorities[:-1] = self.priorities[1:]
            self.memory.append(e)
            self.priorities[n - 1] = max_prio
        else:
            self.memory.append(e)
            self.priorities[n] = max_prio

    def sample(self):
        """Sample a batch with probability proportional to priority**alpha
        and stack it into torch tensors (same shapes as ReplayBuffer)."""
        n = len(self.memory)
        scaled = self.priorities[:n] ** self.prob_alpha
        probs = scaled / scaled.sum()
        indices = np.random.choice(n, self.batch_size, p=probs)
        experiences = [self.memory[i] for i in indices]
        states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
        dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)
class DuelingAgent():
    """Interacts with and learns from the environment.

    Identical training loop to Agent, but backed by a DuelingDQN network
    (separate value/advantage streams) instead of a plain QNetwork.
    """

    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        # Q-Network
        self.qnetwork_local = DuelingDQN(state_size, action_size, seed).to(device)
        self.qnetwork_target = DuelingDQN(state_size, action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)

        # Replay memory
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        """Record one transition and learn every UPDATE_EVERY calls."""
        # Save experience in replay memory
        self.memory.add(state, action, reward, next_state, done)

        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # If enough samples are available in memory, get random subset and learn
            if len(self.memory) > BATCH_SIZE:
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)

    def act(self, state, eps=0.):
        """Returns actions for given state as per current policy.

        Params
        ======
            state (array_like): current state
            eps (float): epsilon, for epsilon-greedy action selection
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.qnetwork_local.eval()
        with torch.no_grad():
            # NOTE(review): Agent.act calls self.qnetwork_local(state) here,
            # while this variant calls a .act() method — confirm DuelingDQN
            # actually defines act(); otherwise this raises AttributeError.
            action_values = self.qnetwork_local.act(state)
        self.qnetwork_local.train()

        # Epsilon-greedy action selection
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples.

        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences

        # Get max predicted Q values (for next states) from target model
        Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
        # Compute Q targets for current states
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))

        # Get expected Q values from local model
        Q_expected = self.qnetwork_local(states).gather(1, actions)

        # Compute loss
        loss = F.mse_loss(Q_expected, Q_targets)
        # NOTE(review): leftover debug — prints the loss for ~1% of updates.
        if random.uniform(0,1) > 0.99:
            print(loss)
        # Minimize the loss
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # ------------------- update target network ------------------- #
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

    def soft_update(self, local_model, target_model, tau):
        """Soft update model parameters.
        θ_target = τ*θ_local + (1 - τ)*θ_target

        Params
        ======
            local_model (PyTorch model): weights will be copied from
            target_model (PyTorch model): weights will be copied to
            tau (float): interpolation parameter
        """
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
|
[
"noreply@github.com"
] |
noreply@github.com
|
014373df5e1938c8b39fd5fcaacbbd0655dfd64d
|
4a53ae5afb11850196ac08763afc637a81ce1dbd
|
/turbo-entabulator/turbo_entabulator/utilities.py
|
2e1a8a52db3511f74cc1a59121cdb99fb9f55ff6
|
[] |
no_license
|
xinyli-cumulus/TE_update
|
5983309ef66f5316f56298fc3d7b1ef4fb0719d7
|
4e5b9299d3159bbcdd715c33cc08820ad2c8e3fb
|
refs/heads/master
| 2021-05-26T10:49:14.229467
| 2020-04-08T14:10:19
| 2020-04-08T14:10:19
| 254,102,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,936
|
py
|
#!/usr/bin/env python3
"""
Turbo-Entabulator utilities.
This file contains utilities used by the Turbo-Entabulator suite that don't
fall under the 'detections' or 'discovery' categories.
"""
# Copyright(c) 2018, 2019, 2020 Cumulus Networks, Inc
# John Fraizer <jfraizer@cumulusnetworks.com>
import json
import os
import random
import re
import sys
from turbo_entabulator.m_logger import logger
def check_dependencies(funcname, required, satisfied):  # noqa
    """
    Validate that list 'requirements' is a subset of list 'satisfied'.

    :param funcname
    :param required
    :param satisfied
    :return bool
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    logger.debug("Checking dependencies: {} for function [{}]."
                 .format(required, funcname))
    required_set = set(required)
    satisfied_set = set(satisfied)
    if required_set.issubset(satisfied_set):
        logger.debug("Dependencies satisfied.")
        return True
    missing = list(required_set.difference(satisfied_set))
    logger.debug("Required dependencies {} for [{}] have not been "
                 "satisfied!".format(missing, funcname))
    return False
def expand_frr_ec(deprecated, satisfied, includes, problems,  # noqa
                  regex_matches):
    """
    Try to provide suggestions for ECs from FRR.

    Looks up every 'Uncategorized FRR Error' match in the bundled
    includes/frr/ec.json database and builds a human-readable suggestion
    list; unknown codes also add a FILE-A-BUG entry to *problems*.

    :param deprecated:
    :param satisfied:
    :param includes:
    :param problems:
    :param regex_matches:
    :return: (satisfied, problems, suggestions)
    """
    # Get function name (accesses private sys function, no better way)
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return satisfied, problems, {}
    reqs = ['detect_log_sigs']
    if not check_dependencies(name, reqs, satisfied):
        return satisfied, problems, {}
    if 'Uncategorized FRR Error' not in regex_matches:
        # NOTE(review): this .format(name) has no placeholder to fill —
        # harmless, but probably meant to include the function name.
        logger.debug("No matches to look up! Skipping".format(name))
        return satisfied, problems, {}
    # variable initialization not needed
    # db = {}
    filename = includes + "/frr/ec.json"
    if not os.path.isfile(filename):
        logger.debug("Could not open {} .".format(filename))
        problems.append('* * * TE CONFIG ERROR * * * Could not find {}! '
                        'Please verify that Turbo-Entabulator '
                        'is installed properly.'.format(filename))
        return satisfied, problems, {}
    logger.debug('Reading in {}...'.format(filename))
    with open(filename) as fh:
        db = json.load(fh)
        fh.close()
    # Dict to hold suggestions.
    suggestions = []
    count = 0
    for match in regex_matches['Uncategorized FRR Error']:
        _, ec = match.split(' ')
        count = count + 1
        if count > 1:
            # Visual separator between consecutive error-code entries.
            suggestions.append('-' * 76)
        # Does FRR contain the expanded error description?
        if ec in db:
            suggestions.append(match + ':\t' + db[ec]['title'])
            suggestions.append('Description:\t' + db[ec]['description'])
            suggestions.append('Suggestion:\t' + db[ec]['suggestion'])
        else:
            suggestions.append(match + ':\t' + 'Unknown Error Code')
            suggestions.append('Description:\t' + 'Not found in FRR error DB')
            suggestions.append('Suggestion:\t' + 'Please File bug with FRR '
                               'team to add detail for ' +
                               match)
            msg = ('FILE-A-BUG: [' + match + '] not found in FRR Error '
                   'Codes. Please file a bug with '
                   'FRR team to have error detail '
                   'added.')
            problems.append(msg)
    satisfied.append(name)
    # Then, return:
    return satisfied, problems, suggestions
def find_frr_path(deprecated, satisfied, support_path):  # noqa
    """Locate the FRR/Quagga running-config dump inside the support dir.

    Returns (satisfied, full_path) or (satisfied, None) when no candidate
    file exists or prerequisites are missing.
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return (satisfied, None)
    reqs = ['find_support_path']
    if not check_dependencies(name, reqs, satisfied):
        return (satisfied, None)
    # Candidate names, newest naming convention first.
    candidates = ('frr.show_running', 'quagga.show_running',
                  'Quagga.show_running', 'zebra.config')
    for candidate in candidates:
        path = support_path + candidate
        if os.path.isfile(path):
            logger.debug("Found {} .".format(path))
            satisfied.append(name)
            return (satisfied, path)
    logger.debug("Unable to find ?.show_running file to parse FRR data!")
    return (satisfied, None)
def find_ifquery_path(deprecated, satisfied, support_path):  # noqa
    """Locate the ifquery output file inside the support directory.

    Returns (satisfied, full_path) or (satisfied, None) when no candidate
    file exists or prerequisites are missing.
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return (satisfied, None)
    reqs = ['find_support_path']
    if not check_dependencies(name, reqs, satisfied):
        return (satisfied, None)
    for candidate in ('ifquery', 'ifquery-a'):
        path = support_path + candidate
        if os.path.isfile(path):
            logger.debug("Found {} .".format(path))
            satisfied.append(name)
            return (satisfied, path)
    logger.debug("Unable to find ifquery file to parse data!")
    return (satisfied, None)
def find_support_path(deprecated, satisfied, CL):  # noqa
    # This function verifies that we can find "support/" or "Support/"
    # in the cl_support directory that has been passed to the script. It will
    # return the full path to the support directory or False.
    # We're at the top of the food-chain here so, we have no dependencies.
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return (satisfied, None)
    reqs = []
    if not check_dependencies(name, reqs, satisfied):
        return (satisfied, None)
    # We need to verify that the cl_support directory we were passed is
    # actually a directory.
    if not os.path.isdir(CL):
        logger.debug("{} is not a directory!".format(CL))
        return (satisfied, None)
    satisfied.append('CL')
    support_paths = ['Support/', 'support/']
    for P in support_paths:
        support_path = CL + "/" + P
        if os.path.isdir(support_path):
            logger.debug("Found {} .".format(support_path))
            satisfied.append(name)
            return (satisfied, support_path)
        else:
            logger.debug("{} is not a directory!".format(support_path))
    # NOTE(review): indentation was lost in this copy; this reconstruction
    # places the failure return after the loop so both 'Support/' and
    # 'support/' are tried — confirm against the original file.
    return (satisfied, None)
def generate_report(result, print_logs, print_suggestions):  # noqa
    """
    Generate human readable report.

    Builds one big string from result['discovered'] (overview section),
    then the problems/warnings/info lists, optionally the captured logs
    (print_logs) and expanded FRR error codes (print_suggestions).
    Exits the process when *result* is empty.
    """
    if not result:
        logger.error("Results are empty! Shit's broke!")
        exit(1)
    # Common section dividers
    section_start_divider = '='*76 + '\n'
    section_end_divider = '='*76 + '\n\n'
    # Generate the report
    interested = ['Script Version', 'hostname', 'eth0_ip', 'uptime',
                  'cl_support', 'Command line', 'Reason', 'license',
                  'lsb-release', 'image-release', 'upgraded with apt-get',
                  'sysinfo', 'platform.detect', 'switch-architecture',
                  'vendor', 'model', 'cpld_version', 'onie_version', 'bios',
                  'service_tag', 'chipset', 'ports', 'capabilities', 'caveats',
                  'datasheet'
                  ]
    msg = "[Overview]".center(76, '=') + '\n'
    for item in interested:
        if 'discovered' in result and item in result['discovered']:
            # sysinfo is a nested dict: one right-aligned line per entry.
            if 'sysinfo' in item:
                for item2 in result['discovered'][item]:
                    msg = msg + ('{:>21}: {}\n'
                                 .format(item2.upper(),
                                         result['discovered'][item][item2]))
            # bios is a nested dict rendered inline as "[ k: v  k: v ]".
            elif 'bios' in item:
                msg = msg + ('{:>21}: ['.format('BIOS'))
                for item2 in result['discovered'][item]:
                    msg = msg + (' {}: {} '
                                 .format(item2,
                                         result['discovered'][item][item2]))
                msg = msg + ' ]\n'
            else:
                msg = msg + ('{:>21}: {}\n'.format(item.upper(),
                                                   result['discovered'][item]))
    msg = msg + section_end_divider
    # print problems
    if 'problems' in result.keys():
        msg = msg + "[Problems]".center(76, '=') + '\n'
        for item in result['problems']:
            msg = msg + item + '\n'
        msg = msg + section_end_divider
    # print warnings
    if 'warnings' in result.keys():
        msg = msg + "[Warnings]".center(76, '=') + '\n'
        for item in result['warnings']:
            msg = msg + item + '\n'
        msg = msg + section_end_divider
    # print info
    if 'info' in result.keys():
        msg = msg + "[Informational]".center(76, '=') + '\n'
        for item in result['info']:
            msg = msg + item + '\n'
        msg = msg + section_end_divider
    # print logs
    if print_logs and 'logs' in result.keys():
        if 'problems' in result['logs'].keys():
            msg = msg + ('Logs of interest [Problems]:\n')
            msg = msg + section_start_divider
            for item in result['logs']['problems']:
                msg = msg + item + '\n'
            msg = msg + section_end_divider
        if 'warnings' in result['logs'].keys():
            msg = msg + ('Logs of interest [Warnings]:\n')
            msg = msg + section_start_divider
            for item in result['logs']['warnings']:
                msg = msg + item + '\n'
            msg = msg + section_end_divider
        if 'info' in result['logs'].keys():
            msg = msg + ('Logs of interest [Informational]:\n')
            msg = msg + section_start_divider
            for item in result['logs']['info']:
                msg = msg + item + '\n'
            msg = msg + section_end_divider
    # print frr error codes
    if print_suggestions and 'suggestions' in result.keys():
        msg = msg + ('Expanded FRR Error Codes:\n')
        msg = msg + section_start_divider
        for item in result['suggestions']:
            msg = msg + item + '\n'
        msg = msg + section_start_divider
    return msg
def glob_to_numbers(glob):  # noqa
    """
    Given a string containing single numbers and ranges, return a sorted
    list of deduplicated integers.

    glob - A string of digits and ranges

    >>> glob_to_numbers('3-4,7,10-12,17,22,4001-4003,7777,8000-8004')
    [3, 4, 7, 10, 11, 12, 17, 22, 4001, 4002, 4003, 7777, 8000, 8001, 8002,
    8003, 8004]
    """
    assert isinstance(glob, (str)), "glob={0}".format(glob)
    # Normalize commas to whitespace before splitting; a bare split(',')
    # could yield empty strings.
    pieces = glob.replace(',', ' ').split()
    numbers = set()
    range_re = re.compile(r"""^(\d+)-(\d+)$""")  # ex. 4-6
    for piece in pieces:
        if piece.isdigit():
            numbers.add(int(piece))
            continue
        range_match = range_re.match(piece)
        if range_match is None:
            # The substring is neither a digit nor a range.
            print("Globs must consist of numbers or ranges, but {0} is "
                  "neither. We were given glob '{1}'.".format(piece, glob))
            return []
        low = int(range_match.group(1))
        high = int(range_match.group(2))
        if high < low:
            # Inverted ranges (ex. 6-4) invalidate the whole glob.
            return []
        numbers.update(range(low, high + 1))
    return sorted(numbers)  # A sorted list
def ifname_expand_glob(ifname):  # noqa
    """Expand an interface-name glob (e.g. 'swp1-3') into a list of names."""
    if not isinstance(ifname, str):
        raise TypeError("This function takes a string and returns a list of "
                        "strings. type(ifname)={0}".format(type(ifname)))
    return ifname_expand_glob_helper(ifname, [])
def ifname_expand_glob_helper(ifname, result):  # noqa
    """ This function is recursive.

    Peels one leading component off *ifname* (either a plain name or a
    base+range glob), appends the expansion to *result*, and recurses on
    the remainder of the string.
    """
    if ifname == '':
        # Base case 1
        return result
    if not ifname_is_glob(ifname):
        # Base case 2: non-globish input
        result.append(ifname)
        return result
    # Get the first glob component. This could be a single name, like "bridge"
    # or it could be a range with commas and hyphens. For example, given
    # "swp1-7,9", get the entire string.
    # Given "swp1-7,9,eth0", get "swp1-7,9,".
    glob = ''
    # Subinterface base and range?
    m = (re.match(
        r"""(?P<base>[a-zA-Z0-9-]+?\-?(?:\d+s)?\d+\.)(?P<glob>(?:0(?!\d)|[1-9]\d*)((,|-)\d+)+,?)""",  # noqa
        ifname))  # noqa
    if m is None:
        # Non-subinterface base and range?
        m = (re.match(
            r"""(?P<base>[a-zA-Z0-9-]+?\-?(?:\d+s)?)(?P<glob>(?:0(?!\d)|[1-9]\d*)((,|-)\d+)+,?)""",  # noqa
            ifname))  # noqa
    if m is None:
        m = re.match(r"""(?P<base>\S+?),""", ifname)
        if m is not None:
            # The input begins with a component that doesn't have a range.
            # Ex: lo, bridge, peer-group, Bond-T, server02, etc.
            glob = None
        else:
            raise ValueError("Couldn't parse '{0}'.".format(ifname))
    # Append the expanded substring of interfaces to the result.
    base = m.group('base')
    assert not ifname_is_glob(base), "base = {0}".format(base)
    if glob is None:
        # Append a single interface name to the result.
        result.append(base)
    else:
        # Append a multiple interface names to the result.
        glob = m.group('glob').rstrip(',')
        for number in glob_to_numbers(glob):
            result.append('{0}{1}'.format(base, number))
    # Recurse with the remaining input string.
    return ifname_expand_glob_helper(ifname[len(m.group()):], result)
def ifname_is_glob(ifname):  # noqa
    """Return True when *ifname* looks like an interface-name glob."""
    assert isinstance(ifname, str), "ifname={0}".format(ifname)
    # The empty string and strings with spaces are not globs.
    if not ifname or ' ' in ifname:
        return False
    if re.search(r"""\S,\S""", ifname) is not None:
        # Strings with comma-separated components are always a glob.
        return True
    # Strings with hyphens might be globs.
    re_range = re.search(r"""(?<!-)(\d+)-(\d+)(,|$)""", ifname)
    if re_range is None:
        return False
    start_range = re_range.group(1)
    end_range = re_range.group(2)
    if len(start_range) > 1 and start_range.startswith('0'):
        # Valid ranges do not contain lead zeros.
        return False
    if end_range.startswith('0'):
        # '0' is not valid as the end range (and no lead zeros).
        return False
    return int(end_range) > int(start_range)
def test_check_dependencies(deprecated, satisfied):  # noqa
    # This function tests the check_dependencies function.
    # It exits the whole process (exit(1)) on failure rather than raising,
    # matching the suite's other self-checks.
    # This is a test list of satisfied modules.
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return(satisfied)
    test = ['module1', 'module2']
    # This is a list of reqs that should be satisfied by test.
    should_pass = ['module1', 'module2']
    # This is a list of reqs that should not be satisfied by test.
    should_fail = ['module2', 'module3']
    if not check_dependencies('TEST: should_pass', should_pass, test):
        logger.error("ERROR! Function check_dependencies is broken! "
                     "False Negative")
        exit(1)
    if check_dependencies('TEST: should_fail', should_fail, test):
        logger.error("ERROR! Function check_dependencies is broken! "
                     "False Positive")
        exit(1)
    satisfied.append(name)
    return satisfied
def verify_path(path):
    """
    Verify the normalized directory or file path exists.

    :param path:
    :return normalized path:
    """
    normalized = os.path.abspath(os.path.expanduser(path))
    if os.path.exists(normalized):
        return normalized
    # if path location does not exist, exit.
    logger.error("Filesystem path {} invalid.".format(normalized))
    exit(1)
def wisdom(deprecated, satisfied, info):
    """TE-WISDOM is just a fun little function that adds a one-liner.

    Appends one randomly chosen quip to *info*; purely cosmetic output.
    Returns (satisfied, info).
    """
    name = sys._getframe().f_code.co_name
    logger.debug("This is {}().".format(name))
    if name in deprecated:
        logger.debug("[{}] is deprecated. Skipping".format(name))
        return(satisfied, info)
    reqs = ['find_support_path']
    if not check_dependencies(name, reqs, satisfied):
        return(satisfied, info)
    # Static table of quips; one is picked at random below.
    wisdom = [
        'This CL-SUPPORT Analysis is brought to you by Coors Light... '
        'Taste the Rockies!',
        '# rm -rf / ; reboot - Because its never too late to start again!',
        'Nothing makes a person more productive than the LAST MINUTE!',
        'I had my patience tested. I\'m negative.',
        'Interviewer: "What do you make at your current job?" '
        'ME: "Mostly mistakes!"',
        'Dear Karma, I have a list of people you missed!!!',
        'Don\'t forget to shout "JENGA" when everything falls apart...',
        'Calories: Tiny creatures that live in your closet and sew your '
        'clothes a little tighter every night.',
        'A little bit goes a long way says the Big-Endian...',
        'My backup plan is just my original plan - with more ALCOHOL!',
        'Light travels faster than sound. This is why some people appear '
        'bright until you hear them speak.',
        'Silence is golden. Duct-tape is silver.',
        'If at first, you don\'t succeed, skydiving is not for you!',
        'My imaginary friend says that you need a therapist!',
        'My neighbor\'s diary says that I have boundary issues...',
        'I clapped because it\'s finished, not because I liked it.',
        'What do you mean I\'m not in shape? Round is a shape!',
        'I\'m smiling. That alone should scare you!',
        'Common sense is a flower that doesn\'t grow in everyone\'s garden...',
        'Your trial license for Turbo-Entabulator has expired. Generating '
        'random false-positives.',
    ]
    rand = random.randrange(0, len(wisdom))
    info.append('TE-WISDOM: {}'.format(wisdom[rand]))
    return(satisfied, info)
|
[
"noreply@github.com"
] |
noreply@github.com
|
b22afa174867cbcdb44387342cabbb4d1d5cce42
|
77a7f05272e82024cffa7ec3bf79b5cb5f90ee3e
|
/job_search_webapp_project/jobsearch/scraping/dice.py
|
e308bc35ab1f12a91bf963fe88d208574d75ae8f
|
[] |
no_license
|
fergusonsa/JobSearch_Django
|
5c3ff42cf59cd9380e5b10c3a2a64582382bbb55
|
35d626555b5dad8309358e3fde3c093a7df12702
|
refs/heads/master
| 2021-09-26T04:42:42.989940
| 2020-09-27T15:07:00
| 2020-09-27T15:07:00
| 85,593,500
| 0
| 0
| null | 2021-09-22T17:41:15
| 2017-03-20T15:26:32
|
HTML
|
UTF-8
|
Python
| false
| false
| 20,929
|
py
|
# coding: utf-8
import logging
import datetime
import re
import requests
from bs4 import BeautifulSoup
import geopy.geocoders
import geopy.distance
import geopy.exc
import jobsearch.scraping
import jobsearch.models as models
NUMBER_POSTINGS_PER_REQUEST = 25
MAX_POSTINGS_RETRIEVED = 1000
logger = logging.getLogger(__name__)
def get_max_len_of_dict_vals_for_keys(this_dict, keys):
    """Return the longest len() among this_dict[key] for the given keys.

    Missing or falsy values count as length 0; an empty *keys* list
    yields 0.
    """
    longest = 0
    for key in keys:
        value = this_dict.get(key)
        current = len(value) if value else 0
        if current > longest:
            longest = current
    return longest
def get_max_len_of_dict_vals_for_key_in_list_of_dicts(dict_list, keys):
    """Return the maximum value-length for *keys* across every dict in
    *dict_list*; 0 when the list is empty."""
    longest = 0
    for entry in dict_list:
        candidate = get_max_len_of_dict_vals_for_keys(entry, keys)
        if candidate > longest:
            longest = candidate
    return longest
def convert_ago_to_date(ago_str):
    """Convert an "N minutes/hours/days ago" phrase to a date string.

    Returns today's date for 'just posted'/'today' (case-insensitive),
    'over 30 days old' for '30+ days ago', a '%Y-%m-%d %H:%M' stamp for
    minute/hour phrases, a '%Y-%m-%d' date for day phrases, and the
    original string unchanged when it cannot be parsed.
    """
    try:
        if ago_str.lower() in ('just posted', 'today'):
            return datetime.datetime.now().strftime('%Y-%m-%d')
        # Pull the leading count (possibly "30+") out of the phrase.
        value = re.sub('([0-9]+[+]?) (?:minute[s]?|hour[s]?|day[s]?) ago',
                       r"\1", ago_str)
        if 'minute' in ago_str:
            dt = (datetime.datetime.now() - datetime.timedelta(
                minutes=int(value))).strftime('%Y-%m-%d %H:%M')
        elif 'hour' in ago_str:
            dt = (datetime.datetime.now() - datetime.timedelta(
                hours=int(value))).strftime('%Y-%m-%d %H:%M')
        elif 'today' in ago_str.lower():
            # e.g. "Posted today" (the exact word 'today' is handled above).
            dt = datetime.datetime.now().strftime('%Y-%m-%d')
        elif 'day' in ago_str:
            if value == '30+':
                # Sites cap the age display at "30+"; no exact date exists.
                dt = 'over 30 days old'
            else:
                dt = (datetime.datetime.now() - datetime.timedelta(
                    days=int(value))).strftime('%Y-%m-%d')
        else:
            dt = ago_str
    except Exception as exc:
        # BUG FIX: the original passed exc as a lazy-format argument to a
        # message with no placeholder, so the failure cause was never
        # logged.  Use %-style placeholders for both values.
        logger.error('Could not convert "%s" to a date: %s', ago_str, exc)
        dt = ago_str
    return dt
def parse_html_page(page_html, source, job_site_details, aliases, geo_locator, home_location, geo_locations,
search_terms='',
verbose=False):
"""
'numberJobsFound': {
'element':'div',
'criteria':{'id':'searchCount'},
'regex': '^Jobs (?:[0-9,]+) to (?:[0-9,]+) of ([0-9,]+)$',
},
"""
logger.debug(('parse_html_page(page_html, job_site_details={}, # aliases={}, geo_locator, home_location, '
'geo_locations, search_terms={}, verbose={})').format(job_site_details, len(aliases),
search_terms, verbose))
soup = BeautifulSoup(page_html, 'html.parser')
total_number_jobs_found = -1
num_jobs_details = job_site_details['parseInfo'].get('numberJobsFound')
if num_jobs_details:
number_postings_elem = soup.find(num_jobs_details['element'],
num_jobs_details['criteria'])
if number_postings_elem:
prop = num_jobs_details.get('property')
if prop:
value = number_postings_elem[prop]
elif hasattr(number_postings_elem, 'text'):
value = number_postings_elem.text
else:
value = number_postings_elem.string
if num_jobs_details.get('regex'):
value = re.sub(num_jobs_details['regex'], r"\1", value)
stripped_val = value.replace(',', '')
if stripped_val.isdigit():
total_number_jobs_found = int(stripped_val)
else:
logger.info(
'For %s site, the numberJobsFound parsing information, "%s", appears to return a non-numeric '
'string "%s" '
% (job_site_details['netLoc'], num_jobs_details['regex'], value))
total_number_jobs_found = 1 # Just to ensure that it is known that at least 1 job found
items = soup.findAll(job_site_details['parseInfo']['parentElement'],
job_site_details['parseInfo']['parentCriteria'])
postings_list = {}
for it in items:
posting_info = {'elem': it, 'searchTerms': search_terms}
for field in job_site_details['parseInfo']['fields'].keys():
field_info = job_site_details['parseInfo']['fields'][field]
# logger.info('looking for field {}'.format(field))
try:
value = None
elem_type = field_info['element']
if elem_type == 'parent':
elem = it
else:
elem = it.find(elem_type, field_info.get('criteria'))
prop = field_info.get('property')
if prop and elem.has_attr(prop):
value = elem[prop]
elif hasattr(elem, 'text'):
value = elem.text
elif elem:
value = elem.string
if field_info.get('regex'):
value = re.sub(field_info['regex'], r"\1", value)
if value:
posting_info[field] = re.sub(r"^\s+|\s+$|\s+(?=\s)", "", value)
except Exception as exc:
logger.error(('Unable to parse posting {} information for item: '
'\n\n{} \n\nError type: {}, val: {}').format(field, it,
type(exc), exc))
if posting_info.get('id'):
if posting_info.get('postedDate'):
posting_info['postedDate'] = convert_ago_to_date(
posting_info['postedDate'])
if posting_info.get('url'):
posting_info['url'] = 'http://{}{}'.format(
job_site_details['netLoc'], posting_info['url'])
if posting_info.get('elem'):
link_elements = posting_info['elem'].findAll('a')
for linkElem in link_elements:
if not linkElem['href'].startswith('http'):
if linkElem['href'].startswith('/'):
linkElem['href'] = 'http://{}{}'.format(
job_site_details['netLoc'], linkElem['href'])
else:
linkElem['href'] = 'http://{}/{}'.format(
job_site_details['netLoc'], linkElem['href'])
if posting_info.get('locale'):
posting_info['locale'] = posting_info['locale'].replace(' , ', ', ')
if jobsearch.scraping.save_posting_to_db(posting_info, source, search_terms, aliases,
geo_locator, home_location, geo_locations):
postings_list[posting_info['id']] = posting_info
if verbose:
logger.info(('Adding item details for id "{}" to list with posted'
' Date {}').format(posting_info['id'],
posting_info.get('postedDate')))
else:
logger.info('Unknown item not being added to list')
return postings_list, len(items), total_number_jobs_found
def sort_by_sub_dict(dictionary, sub_dict_key):
return sorted(dictionary.items(), key=lambda k_v: k_v[1][sub_dict_key])
def login_to_web_site(session, job_site_detail_info):
logger.debug('login_to_web_site(session, job_site_detail_info={})'.format(job_site_detail_info))
if job_site_detail_info.get('username') and job_site_detail_info.get('password'):
login_data = {
'action': 'Login',
'__email': job_site_detail_info['username'],
'__password': job_site_detail_info['password'],
'remember': '1',
'hl': 'en',
# 'continue': '/account/view?hl=en',
}
if job_site_detail_info['nextUrl']:
login_data['next'] = job_site_detail_info['nextUrl']
# res = session.get(job_site_detail_info['loginUrl'], verify=False)
res = session.post(job_site_detail_info['loginUrl'], data=login_data,
headers={"Referer": "HOMEPAGE"})
# if logger.getLogger().getEffectiveLevel() == logger.DEBUG:
logger.debug('session.post("{}", data={}) returns {}'.format(job_site_detail_info['loginUrl'],
login_data, res))
else:
logger.debug('Username "{}" or password "{}" is not set. Not logging in to website {}. Details: {}'.format(
job_site_detail_info.get('username'),
job_site_detail_info.get('password'),
job_site_detail_info.get('loginUrl'),
job_site_detail_info))
def get_postings_from_site_for_multiple_search_terms(source,
job_site_details_info,
search_terms_list,
aliases,
geo_locator,
home_location,
geo_locations,
expected_postings_per_page=10,
max_pages=100, min_pages=4,
verbose=False):
logger.debug(('get_postings_from_site_for_multiple_search_terms(job_site_details_info: {}, search_terms_list: {}, '
'# aliases: {}, expected_postings_per_page={}, geo_locator, home_location: {}, geo_locations,'
'max_pages={}, min_pages={}, verbose={})').format(job_site_details_info,
search_terms_list,
len(aliases),
home_location,
expected_postings_per_page,
max_pages,
min_pages, verbose))
session = requests.Session()
if job_site_details_info['urlSchema'] == 'https':
login_to_web_site(session, job_site_details_info)
for searchTerm in search_terms_list:
get_job_postings_from_site(
source, job_site_details_info, searchTerm, aliases,
geo_locator, home_location, geo_locations,
expected_postings_per_page=expected_postings_per_page,
max_pages=max_pages, min_pages=min_pages, session=session,
verbose=verbose)
def check_for_more_postings(num_postings_on_page, expected_postings_per_page,
num_unique_postings_found_on_page, num_postings_site_found,
start_index, max_pages, min_pages, verbose=False):
"""
Checks criteria for whether to check for more postings on the next page.
Args:
:param num_postings_on_page: the total number of postings found on the page
:param expected_postings_per_page: the number of postings expected to be on the page
:param num_unique_postings_found_on_page: the number of new/unique postings found on the page
:param num_postings_site_found: the total number of postings found on the site
:param start_index: the starting index for the page, should be a multiple of expectedPostingsPerPage
:param max_pages: the maximum number of pages to scrape
:param min_pages: the minimum number of pages to scrape
:param verbose:
"""
logger.debug(('check_for_more_postings(num_postings_on_page={}, expected_postings_per_page={}, '
'num_all_unique_postings_found_on_page={}, num_postings_site_found={}, '
'start_index={}, max_pages={}, min_pages={}, verbose={})').format(num_postings_on_page,
expected_postings_per_page,
num_unique_postings_found_on_page,
num_postings_site_found,
start_index, max_pages,
min_pages, verbose))
if start_index + expected_postings_per_page <= num_postings_site_found:
if num_postings_on_page == expected_postings_per_page:
if (num_unique_postings_found_on_page > 0 and
start_index < expected_postings_per_page * (max_pages - 1)):
return True
elif start_index < expected_postings_per_page * (min_pages - 1):
return True
if verbose:
logger.info(
'numPostingsOnPage ({0}) != expectedPostingsPerPage ({1}) OR numAllUniquePostingsFoundOnPage ({2}) == '
'0 OR startIndex ({3}) < expectedPostingsPerPage ({4}) * (maxPages ({5}) -1) OR startIndex ({3}) < '
'expectedPostingsPerPage ({4}) * (minPages ({6}) -1) '.format(
num_postings_on_page, expected_postings_per_page, num_unique_postings_found_on_page,
start_index, expected_postings_per_page, max_pages, min_pages))
return False
else:
if verbose:
logger.debug('startIndex ({}) + expectedPostingsPerPage ({}) <= numPostingsSiteFound ({}) is False'.format(
start_index, expected_postings_per_page, num_postings_site_found))
return False
def get_job_postings_from_site(source, job_site_details_info, search_term, aliases,
geo_locator, home_location, geo_locations,
expected_postings_per_page=10, max_pages=100,
min_pages=4, session=None, verbose=False):
logger.debug(('get_job_postings_from_site(job_site_details_info={}, search_term={}, # aliases={},'
'geo_locator, home_location, geo_locations, '
'expected_postings_per_page={}, max_pages={}, '
'min_pages={}, session={}, verbose={}').format(job_site_details_info, search_term, len(aliases),
geo_locator, home_location, geo_locations,
expected_postings_per_page, max_pages,
min_pages, session, verbose))
if not session:
session = requests.Session()
if job_site_details_info['urlSchema'] == 'https':
login_to_web_site(session, job_site_details_info)
start_index = 0
url_arguments = {'q': search_term,
'l': job_site_details_info['location'],
job_site_details_info['jobTypeKey']: 'contract',
'sort': 'date',
job_site_details_info['pageIndexKey']: 0,
}
url = '{}://{}/{}'.format(job_site_details_info['urlSchema'],
job_site_details_info['netLoc'],
job_site_details_info['urlPath'])
page = session.get(url, params=url_arguments, verify=False)
# logger.info('\n\n page header content-type info: {}\n'.format(
# page.headers['content-type']))
logger.info('\n\nHere is initial URL to be "scraped": {}\n'.format(page.url))
postings_list, num_postings_on_page, init_total_num_postings = parse_html_page(
page.text, source, job_site_details_info, aliases, geo_locator,
home_location, geo_locations, search_term, verbose)
logger.info('Found {} new of {} postings of {} from url {}'.format(
len(postings_list), num_postings_on_page,
init_total_num_postings, page.url))
while check_for_more_postings(num_postings_on_page, expected_postings_per_page,
len(postings_list), init_total_num_postings,
start_index, max_pages, min_pages, verbose):
start_index += expected_postings_per_page
if job_site_details_info['pageIndexType'] == 'pageCount':
url_arguments[job_site_details_info['pageIndexKey']] += 1
else:
url_arguments[job_site_details_info['pageIndexKey']] = start_index
page = session.get(url, params=url_arguments, verify=False)
postings_list, num_postings_on_page, total_number_jobs_found = parse_html_page(
page.text, job_site_details_info, aliases, geo_locator,
home_location, geo_locations, search_term, verbose)
logger.info('Found {} new of {} postings of {} from url {}'.format(len(postings_list),
num_postings_on_page,
total_number_jobs_found,
page.url))
def scrape_new_job_postings(config=None, geo_locator=None, geo_locations=None, home_location=None):
if not config:
config = jobsearch.scraping.get_configuration()
if not geo_locator:
geo_locator = geopy.geocoders.Nominatim(user_agent="JobSearch")
if not home_location:
# Get coordinates for home
home_location_str = config.get('home_location')
home_location = jobsearch.scraping.get_geo_location(geo_locator, home_location_str)
if not geo_locations:
geo_locations = {} # Cache of geo locations, so do not have to get the same location multiple times
search_terms_list = ['java', 'devops', 'python', ]
client = None
if not client:
logger.warning('Dice posting retrieval not implemented yet!')
return 0
inserted_timestamp = datetime.datetime.now()
for search_term in search_terms_list:
start_index = 0
get_more_postings = True
while get_more_postings:
get_more_postings = False
params = {
'q': search_term,
'jt': 'contract',
'l': "ottawa,ontario,canada",
'userip': "1.2.3.4",
'useragent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2)",
'start': start_index,
'limit': NUMBER_POSTINGS_PER_REQUEST,
'co': 'ca',
'sort': 'date'
}
logger.debug("Getting postings for {} starting index {}".format(search_term, start_index))
search_response = client.search(**params)
logger.debug(search_response)
results_postings = search_response.get('results')
if results_postings:
aliases = models.CompanyAliases.objects.all()
for posting in results_postings:
if jobsearch.scraping.save_posting_to_db(posting, 'dice', search_term, aliases,
geo_locator, home_location, geo_locations):
# If we saved at least 1 posting, then we can try getting more postings from the source!
get_more_postings = True
start_index += len(results_postings)
if not results_postings:
logger.debug('No postings returned from indeed api call, so not trying to get any more!')
break
if start_index > MAX_POSTINGS_RETRIEVED:
logger.debug('Already retrieved max number, {}, of postings, so not trying to get any more!'.format(
MAX_POSTINGS_RETRIEVED))
break
num_new_postings = models.JobPostings.objects.filter(inserted_date__gte=inserted_timestamp).count()
num_saved_aliases = models.CompanyAliases.objects.filter(inserted_date__gte=inserted_timestamp).count()
num_saved_recruiters = models.RecruitingCompanies.objects.filter(date_inserted__gte=inserted_timestamp).count()
logger.debug('# new postings from Dice saved: {} # aliases: {} # recruiters: {} '.format(num_new_postings,
num_saved_aliases,
num_saved_recruiters))
return num_new_postings
|
[
"fergusonsa@yahoo.com"
] |
fergusonsa@yahoo.com
|
b3698f59655330a8fa5ab0c4d49985791d562870
|
f284021b02f6331888b6d41cfc34d555367b3797
|
/bin/easy_install
|
b9baf79171c742180e9e609b8b8fc87a1bd06354
|
[] |
no_license
|
Hubert51/Web_Django
|
aa8aa771de3085d7bff2fd2b64e8de131b9af537
|
f48ad6260291311262a95f71ceda354990518dfc
|
refs/heads/master
| 2020-01-23T21:38:44.507734
| 2016-11-29T02:17:52
| 2016-11-29T02:17:52
| 74,692,472
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
#!/Users/gengruijie/Django1.10/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"gengr@rpi.edu"
] |
gengr@rpi.edu
|
|
0106c4e95e4cb7a8b9b3ea1a99c3e6cf72e413fa
|
ee461003c4836dcc2e7c493e7b705841825cba52
|
/titanic/variable_builder.py
|
5e497b440f4638a3102f6f594e2c37429bcd89c0
|
[] |
no_license
|
kenta-s/kaggle
|
5e05b10b2455f8e5744dc4aab99def3b15681063
|
b68ddfede3480214a163d4d8a778e4eb74d4f6f9
|
refs/heads/master
| 2021-06-24T03:37:41.385822
| 2018-10-27T00:36:04
| 2018-10-27T00:36:04
| 96,259,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,148
|
py
|
import numpy as np
import pandas as pd
from IPython import embed
class VariableBuilder():
def __init__(self, file):
self.df = pd.read_csv(file)
def __call__(self):
valid_data = self.build_variable_x()
valid_data = np.array(valid_data).astype(np.float32).T
return valid_data
@staticmethod
def convert_sex_to_int(str):
if str == 'male':
return 0
elif str == 'female':
return 1
else:
return 2
@staticmethod
def convert_embarked_to_int(str):
if str == 'S':
return 0
elif str == 'C':
return 1
elif str == 'Q':
return 2
else:
return 3
def build_train_variable(self):
sex_list = list(map(VariableBuilder.convert_sex_to_int, self.df.Sex))
age_list = list(map(lambda x: 0.0 if np.isnan(x) else x, self.df.Age))
embarked_list = list(map(VariableBuilder.convert_embarked_to_int, self.df.Embarked))
valid_data = np.array([
self.df.Pclass,
sex_list,
age_list,
self.df.SibSp,
self.df.Parch,
self.df.Fare,
embarked_list,
self.df.Survived
]).astype(np.float32)
data = list(map(lambda x: (np.array(x[0:7]), np.array(x[7]).astype(np.int32)), valid_data.T))
return data
def build_test_variable(self, file):
sex_list = list(map(VariableBuilder.convert_sex_to_int, self.df.Sex))
age_list = list(map(lambda x: 0.0 if np.isnan(x) else x, self.df.Age))
embarked_list = list(map(VariableBuilder.convert_embarked_to_int, self.df.Embarked))
df2 = pd.read_csv(file)
survived = df2.Survived
valid_data = np.array([
self.df.Pclass,
sex_list,
age_list,
self.df.SibSp,
self.df.Parch,
self.df.Fare,
embarked_list,
survived
]).astype(np.float32)
data = list(map(lambda x: (np.array(x[0:7]), np.array(x[7]).astype(np.int32)), valid_data.T))
return data
|
[
"knt01222@gmail.com"
] |
knt01222@gmail.com
|
e0f9841169ca668d1ced55ab7a6441e600ad51af
|
064992da81d70b4df85fc192cddf93f2ded111a0
|
/analytic_scripts/code_reimplementation/Android/android_reimpl.py
|
cdba8d7572a2f6a951af93712bab41bee11b75b4
|
[] |
no_license
|
maxxbw54/reuse_reimpl
|
07fd42ee1708337d009a88e505b2a157e4aeaef9
|
c88bc6671f791485dfd47a35c1c5d16415b5beac
|
refs/heads/master
| 2021-09-14T11:28:28.835579
| 2018-05-12T13:41:27
| 2018-05-12T13:41:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,450
|
py
|
import sys, subprocess, os, re
from collections import OrderedDict
import pandas as pd
def shellCommand(command_str):
cmd = subprocess.Popen(command_str, shell=True, stdout=subprocess.PIPE)
cmd_out, cmd_err = cmd.communicate()
return cmd_out
def removeBracketsInQuotes(line):
line = re.sub(r'\\\"', '', line)
return re.sub(r'\".*?\"', '', line)
def addedMethods(diff_list):
added_method_list = list()
in_block = False
brackets = 0
method_name, param_cnt = None, None
for line in diff_list:
cleaned_line = removeBracketsInQuotes(line)
if cleaned_line.startswith('+') and re.search(method_pattern, cleaned_line):
method_sig = re.findall('(?:(?:public|private|protected|static|final|native|synchronized|abstract|transient)+\\s)+(?:[\\$_\\w\\<\\>\\[\\]]*)\\s+([\\$_\\w]+)\\(([^\\)]*)\\)?\\s*\\{?[^\\}]*\\}?', cleaned_line)
method_name = method_sig[0][0]
if len(method_sig[0][1].strip()) == 0:
param_cnt = 0
else:
param_cnt = method_sig[0][1].count(',') + 1
in_block = True
if '{' in cleaned_line:
brackets += 1
if '}' in cleaned_line:
brackets -= 1
if brackets == 0:
if DEBUG:
print 'ENTIRE METHOD FOUND: %s %s\n\n' %(method_name, param_cnt)
added_method_list.append([method_name, param_cnt])
in_block = False
brackets = 0
elif in_block and cleaned_line.startswith('+'):
if '{' in cleaned_line:
brackets += 1
if '}' in cleaned_line:
brackets -= 1
if brackets == 0:
if DEBUG:
print 'ENTIRE METHOD FOUND: %s %s\n\n' %(method_name, param_cnt)
added_method_list.append([method_name, param_cnt])
in_block = False
brackets = 0
elif in_block == True:
in_block = False
brackets = 0
return added_method_list
def removedInvocations(diff_list):
imported_classes = dict()
instance_dict = dict()
removed_invoc_dict = OrderedDict()
i = 1
for line in diff_list:
if not re.search(r'^(\+|\-)?\s*$', line):
if line.startswith('-'):
# collect removed library methods
matched = re.findall(r'import\s+[\w\.]+\.(\w+)\s*\;', line)
if matched:
full_import_class = line[1:-1]
class_name = matched[0]
imported_classes[class_name] = full_import_class
else:
# instance of the removed library method
instantiated = re.findall(r'(\w+)\s*\[?\s*\w*\s*\]?\s*\=\s*new\s+(\w+)\s*\<?\s*\w*\s*\>?\s*\(.*\)\s*\;', line)
if len(instantiated):
if instantiated[0][1] in imported_classes:
instance_dict[instantiated[0][0]] = imported_classes[instantiated[0][1]]
else:
# remove redundant white space
cleaned_line = re.sub(r'\s+', ' ', line)
# whether an instance of a removed library method's invocation is also removed (instance method)
for inst in instance_dict:
# IS IT ALSO POSSIBLY TO GET AN ATTRIBUTE?
invoc = re.findall(r'(\w+)\s*\[?\s*\w*\s*\]?\.\w+\s*\(.*\)\s*\;', cleaned_line)
if len(invoc):
if invoc[0] == inst:
removed_invoc_dict[i] = instance_dict[inst]
break
# whether a removed library method's invocation is also removed (class method)
for c in imported_classes:
if not ('implements %s' %c in cleaned_line or 'extends %s' %c in cleaned_line):
# IS IT ALSO POSSIBLY TO GET AN ATTRIBUTE?
invoc = re.findall(r'(\w+)\.\w+\s*\(.*\)\s*\;', cleaned_line)
if len(invoc):
if invoc[0] == c:
removed_invoc_dict[i] = imported_classes[c]
break
i += 1
if DEBUG:
print removed_invoc_dict
return removed_invoc_dict
def addNearDelPosition(last_removed, removed_cnt, i, line):
if last_removed:
position_delta = i - last_removed - removed_cnt
if DEBUG:
print 'Pos delta:', position_delta, line
if position_delta < 5 and position_delta > -5:
return True
return False
def addedInvocations(diff_list, added_method_list, removed_invoc_list):
refact_pairs = set()
last_removed = None
removed_cnt = 0
i = 1
for line in diff_list:
if not re.search(r'^(\+|\-)?\s*$', line):
if line.startswith('+'):
for m in added_method_list:
method_name = m[0]
param_cnt = m[1]
if (re.search(method_pattern, line)) == None and (method_name in line):
matched = re.search(r'\((.+)\)', line)
if matched:
if len(matched.group(1).strip()) == 0:
argument_cnt = 0
else:
argument_cnt = matched.group(1).count(',')+1
if param_cnt == argument_cnt:
addedNearby = addNearDelPosition(last_removed, removed_cnt, i, line)
if addedNearby:
if DEBUG:
print last_removed, i, line
refact_pairs.add((last_removed, i, last_library))
elif line.startswith('-'):
if i in removed_invoc_list:
last_removed = i
removed_cnt = 0
last_library = removed_invoc_list[i]
elif removed_cnt != None:
removed_cnt += 1
i += 1
if DEBUG:
print sorted(refact_pairs)
return sorted(refact_pairs)
# combine main funcitons to search refactoring from a client method implementation to an API call
def searchRefactoring(diff_str):
diff_list = diff_str.split('\n')
# Detect entire added methods
added_method_list = addedMethods(diff_list)
# check whether a library method is removed near a deleted method call
removed_invoc_list = removedInvocations(diff_list)
# Check whether an added method's invocation is also added
refact_res = addedInvocations(diff_list, added_method_list, removed_invoc_list)
return refact_res
def formatOutput(refact_res):
formatted_list = list()
for pair in refact_res:
formatted_list.append('%s^%s' %(pair[0],pair[1]))
return '-'.join(formatted_list)
if __name__ == '__main__':
DEBUG = False
method_pattern = '((public|private|protected|static|final|native|synchronized|abstract|transient)+\\s)+[\\$_\\w\\<\\>\\[\\]]*\\s+[\\$_\\w]+\\([^\\)]*\\)?\\s*\\{?[^\\}]*\\}?'
current_dir = os.getcwd()
shellCommand('mkdir -p %s/converse_candidates' %current_dir)
i = 1
app_names = os.listdir('fdroid_apps')
for an_app in app_names:
print 'Analyzing %s (%d) ...' %(an_app,i)
output_list = list()
# change to the subject system's directory
os.chdir('%s/fdroid_apps/%s' %(current_dir,an_app))
# output commit list
commit_logs = subprocess.check_output('git log --pretty=format:%h'.split())
for commit_id in commit_logs.split('\n'):
if len(commit_id):
diff_str = shellCommand('git show %s' %commit_id)
# our current computational resources cannot allow to analyze super huge patches
if sys.getsizeof(diff_str) > 1000000:
print ' %s is too big!' %commit_id
print ' ' + '-' * 50
else:
refact_res = searchRefactoring(diff_str)
if len(refact_res):
# output locations
print ' ', commit_id
output_list.append(commit_id)
for res in refact_res:
print ' ', res[0], res[1], '\t', res[2]
output_list.append(' (%s, %s)\t%s' %(res[0],res[1],res[2]))
print ' ' + '-' * 50
output_list.append('-' * 50)
# output the patch
shellCommand('mkdir -p %s/converse_patches/%s' %(current_dir,an_app))
with open('%s/converse_patches/%s/%s.txt' %(current_dir,an_app,commit_id), 'w') as pf:
pf.write(diff_str)
if len(output_list):
with open('%s/converse_candidates/%s_candidates.txt' %(current_dir,an_app), 'w') as wf:
wf.write('\n'.join(output_list))
i += 1
|
[
"le.an@polymtl.ca"
] |
le.an@polymtl.ca
|
883ff69f8f33ab9939a29caa2769bdfcffbdd30c
|
d6ce7d815af09eea09d8bc2c6f3aaa1b341270cc
|
/ros_ws/devel/lib/python3/dist-packages/cozmo_rc/srv/__init__.py
|
56810697b51cdf3f2704bf4724155ca111994cdd
|
[] |
no_license
|
danbrick92/cozmoRos
|
7e47569e6d9cdd56c84b6cffb5b1fe46453f4b48
|
f0345c70f58525d3cbd4227e109b468fa4a07e15
|
refs/heads/main
| 2023-09-03T21:30:52.839599
| 2021-11-18T01:33:15
| 2021-11-18T01:33:15
| 423,007,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
from ._light_req import *
from ._speaker_req import *
|
[
"danbrickner@hotmail.com"
] |
danbrickner@hotmail.com
|
3312b51d5e5f1f3726320f7259525ad1936b0f31
|
b7320c9d3b36973812314cb6cde6c056f3311972
|
/general_test.py
|
530964ae76bd5bac390bc7c9f6451797558492a2
|
[] |
no_license
|
dhueholt/Misc-bits
|
27b75cf85026d0253e53f99197c11bbfc44baba6
|
dc7219ea79234e661c5d8f9b113a26edacd94ec5
|
refs/heads/master
| 2022-02-14T23:16:16.831681
| 2019-07-22T20:18:40
| 2019-07-22T20:18:40
| 198,296,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
""" Test for VIIRS EDR product
Author(s): Daniel Hueholt @dhueholt GitHub
"""
from glob import glob
import matplotlib.pyplot as plt
from satpy import Scene
import cartopy.crs as ccrs
import pdb
FILENAMES = glob('/Users/dhueholt/Documents/Data/CloudMask/20190306/JRR*.nc')
SCN = Scene(reader='viirs_edr_gran', filenames=FILENAMES)
SCN.load(['cloudmaskbinary'])
MY_AREA = SCN['cloudmaskbinary'].attrs['area'].compute_optimal_bb_area({'proj': 'lcc', 'lon_0': -96.,
'lat_0': 39., 'lat_1': 25.,
'lat_2': 25.})
NEW_SCN = SCN.resample(MY_AREA)
# pdb.set_trace()
NEW_SCN.save_dataset('cloudmaskbinary','/Users/dhueholt/Images/cmb.png')
CRS = NEW_SCN['cloudmaskbinary'].attrs['area'].to_cartopy_crs()
lambert_proj = ccrs.LambertConformal()
AX = plt.axes(projection=CRS)
AX.coastlines()
AX.gridlines()
AX.set_global()
plt.imshow(NEW_SCN['cloudmaskbinary'], transform=CRS, extent=CRS.bounds, origin='upper')
# CBAR = plt.colorbar()
# CBAR.set_label('cloudmaskbinary')
# plt.clim(-4,4)
plt.savefig('/Users/dhueholt/Images/reference_1.png')
|
[
"dmhuehol@ncsu.edu"
] |
dmhuehol@ncsu.edu
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.