| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
45b6569b1aa2ab1993894fd21d96ac72bd8ae4e6
|
7060e97de0a303508004bf07daecbdf104668758
|
/blogappreview/settings.py
|
02d01dbcabfcef603854ad616386e4c677b72fd8
|
[] |
no_license
|
adamgrossman/legacy-blog-app
|
9963d0ee0e61c84d980c20e64380c2a6cbcb772d
|
3671d1d16de2899a10284e02d77d71323206e2e2
|
refs/heads/master
| 2021-01-21T05:01:27.428179
| 2014-09-23T21:49:17
| 2014-09-23T21:49:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
"""
Django settings for blogappreview project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ymu0=j(kfo_*@2@%5^6_^^+zq!c+#ib$j(o*i@azngq@9&513^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'blogappreview.urls'
WSGI_APPLICATION = 'blogappreview.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
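# Optional machine-local overrides: if a local_settings.py module exists on the
# import path, its settings replace the values above (a common Django pattern).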
try:
from local_settings import *
except ImportError:
pass
|
[
"adam.grossman08@me.com"
] |
adam.grossman08@me.com
|
4e8e2af6420a58bf95043e9fce852658ac6f319e
|
4c453d73abb05f151e3a7f2220920c42b9b8dfdd
|
/a_star.py
|
1cbff4e0b873412c11d42cea5e5d7f06936291eb
|
[
"MIT"
] |
permissive
|
MlHsieh/Pathfinding
|
ef632426106076a69e181ed60636129ce458456b
|
3f1818595b54f6805fbba528d7a72ed87995f08a
|
refs/heads/master
| 2021-01-13T22:56:27.039031
| 2020-02-23T14:16:26
| 2020-02-23T14:16:26
| 242,520,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,993
|
py
|
""
A* path finding with pygamezero
Find the shortest path from top-left corner to bottom-right corner.
Obstacles are generated randomly each time.
Author: ML Hsieh
2020-2-23
"""
# pylint: disable=missing-module-docstring, invalid-name, missing-function-docstring
import random
import math
import typing
import heapq
import pygame
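# A* ranks nodes in its open set by fcost = gcost (path cost from the start)
# + hcost (heuristic cost to the target); Node.__lt__ below implements this
# ordering for the heap queue, breaking ties in favor of the smaller hcost.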
class Node:
"""Hold information of a node. """
def __init__(
self,
walkable: bool,
        grid_position: typing.List[int],
position: typing.List[float],
size: int
):
self.walkable = walkable
        self.col = grid_position[0]
        self.row = grid_position[1]
self.position = position
self.size = size
self.gcost = 0
self.hcost = 0
self.parent = None # Previous node on the path
def draw(self, screen, color=pygame.Color(255, 255, 255)):
""" Draw itself onto screen."""
pygame.draw.rect(
screen, color,
(self.position[0], self.position[1], self.size, self.size)
)
def fcost(self):
""" total movement required from start node to target node"""
return self.gcost + self.hcost
def dist(self, other: 'Node'):
""" Calculate the distance between self and other.
        The Manhattan distance dx + dy is used as both step cost and heuristic.
"""
return abs(self.row - other.row) + abs(self.col - other.col)
def __lt__(self, other: 'Node'):
""" Return self < other
This function is called when using heap queue algorithms
"""
return (self.fcost() < other.fcost() or
(self.fcost() == other.fcost() and
self.hcost < other.hcost
)
)
class Grid:
""" Hold information of all the nodes.
Nodes are stored in a 1 dimensional list.
"""
def __init__(self, cols, rows, grid_size=55, line_width=3):
self.nodes = []
self.grid_size = grid_size
self.line_width = line_width
self.cols = cols
self.rows = rows
self.screen_w = ((self.grid_size+self.line_width) * self.cols
+ self.line_width)
self.screen_h = ((self.grid_size+self.line_width) * self.rows
+ self.line_width)
self.create_grid()
self.generate_obstacles()
def create_grid(self):
for x in range(0, self.cols):
for y in range(0, self.rows):
self.nodes.append(
Node(True, [x, y], [
self.line_width*(x+1) + self.grid_size*x,
self.line_width*(y+1) + self.grid_size*y
], self.grid_size)
)
def generate_obstacles(self):
for _ in range(math.floor(self.cols*self.rows/1.5)):
self.nodes[
random.randint(2, self.cols*self.rows-3)
].walkable = False
def find_neighbors(self, node: Node):
"""Find neighbor of node. (no diagonal) """
index = node.col*self.rows + node.row
neighbors_index = [index+self.rows, index-self.rows]
# if node.row != 0:
# neighbors_index.append(index-1)
# if node.row != self.rows-1:
# neighbors_index.append(index+1)
if node.row != 0:
neighbors_index.extend(
[index-self.rows-1, index-1, index+self.rows-1]
)
if node.row != self.rows-1:
neighbors_index.extend(
[index-self.rows+1, index+1, index+self.rows+1]
)
neighbors = []
for i in neighbors_index:
if 0 <= i < self.rows*self.cols:
neighbors.append(self.nodes[i])
return neighbors
    def draw(self, screen: pygame.Surface):
""" Draw all the nodes. """
for n in self.nodes:
if n.walkable:
n.draw(screen)
else:
n.draw(screen, (0, 0, 0))
class AStarPathFinding:
""" A* pathfinding algorithm
Start from top left corner to bottom right corner.
Obstacles are generated randomly.
"""
def __init__(self):
self.grid = Grid(100, 50, 10, 1)
self.start = self.grid.nodes[0]
self.target = self.grid.nodes[self.grid.cols*self.grid.rows-1]
self.open = [self.start] # Nodes to be evaluated
self.close = set() # Nodes already evaluated
self.path_found = False
def update(self):
if self.open and not self.path_found:
# Start from node in open with the lowest fcost
current_node = heapq.heappop(self.open)
self.close.add(current_node)
# Path found
if current_node == self.target:
self.path_found = True
return
# Update neighbors
neighbors = self.grid.find_neighbors(current_node)
for n in neighbors:
if n not in self.close and n.walkable:
new_gcost = current_node.gcost + n.dist(current_node)
if n not in self.open or new_gcost < n.gcost:
n.gcost = new_gcost
n.hcost = n.dist(self.target)
n.parent = current_node
if n not in self.open:
heapq.heappush(self.open, n)
    def draw(self, screen: pygame.Surface):
if not self.path_found:
for n in self.open:
n.draw(screen, pygame.Color(0, 200, 0))
for n in self.close:
n.draw(screen, pygame.Color(200, 0, 0))
self.start.draw(screen, pygame.Color(0, 150, 200))
self.target.draw(screen, pygame.Color(0, 150, 200))
else:
# Draw the path
n = self.target
while n.parent is not None:
n.draw(screen, pygame.Color(0, 150, 200))
n = n.parent
def main():
path = AStarPathFinding()
# Initialize
pygame.init()
screen = pygame.display.set_mode((path.grid.screen_w, path.grid.screen_h))
screen.fill(pygame.Color(80, 80, 80))
path.grid.draw(screen)
# Start the event loop
run = True
while run:
key = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
elif (event.type == pygame.KEYDOWN and
event.key == pygame.K_F4 and
(key[pygame.K_LALT] or key[pygame.K_RALT])
):
run = False
path.draw(screen)
path.update()
pygame.display.update()
pygame.quit()
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
MlHsieh.noreply@github.com
|
77702e3b0454d2ed113e6af1b53fc1a48a813af7
|
1bc287996993413650476d2ec7b096dc959b5903
|
/venv/Lib/site-packages/apiclient/model.py
|
4ad88a88803cb5381871d8b373b14e153aa984a5
|
[] |
no_license
|
jorgejmt94/TFG
|
6ea6bce7cf6ae53200f3d3e42e0f1f04c2557fae
|
2e7d8eaf512d47049181909b20743683712902c2
|
refs/heads/master
| 2022-12-28T18:53:03.810829
| 2020-10-08T16:32:26
| 2020-10-08T16:32:26
| 143,515,791
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,809
|
py
|
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import logging
import urllib.request, urllib.parse, urllib.error
from apiclient import __version__
from .errors import HttpError
from oauth2client.anyjson import simplejson
import collections
dump_request_response = False
def _abstract():
raise NotImplementedError('You need to override this function')
class Model(object):
"""Model base class.
All Model classes should implement this interface.
The Model serializes and de-serializes between a wire
format such as JSON and a Python object representation.
"""
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a serialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized in the desired wire format.
"""
_abstract()
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
_abstract()
class BaseModel(Model):
"""Base model class.
Subclasses should provide implementations for the "serialize" and
"deserialize" methods, as well as values for the following class attributes.
Attributes:
accept: The value to use for the HTTP Accept header.
content_type: The value to use for the HTTP Content-type header.
no_content_response: The value to return when deserializing a 204 "No
Content" response.
alt_param: The value to supply as the "alt" query parameter for requests.
"""
accept = None
content_type = None
no_content_response = None
alt_param = None
def _log_request(self, headers, path_params, query, body):
"""Logs debugging information about the request if requested."""
if dump_request_response:
logging.info('--request-start--')
logging.info('-headers-start-')
for h, v in headers.items():
logging.info('%s: %s', h, v)
logging.info('-headers-end-')
logging.info('-path-parameters-start-')
for h, v in path_params.items():
logging.info('%s: %s', h, v)
logging.info('-path-parameters-end-')
logging.info('body: %s', body)
logging.info('query: %s', query)
logging.info('--request-end--')
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a serialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable by simplejson.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized as JSON
"""
query = self._build_query(query_params)
headers['accept'] = self.accept
headers['accept-encoding'] = 'gzip, deflate'
if 'user-agent' in headers:
headers['user-agent'] += ' '
else:
headers['user-agent'] = ''
headers['user-agent'] += 'google-api-python-client/%s (gzip)' % __version__
if body_value is not None:
headers['content-type'] = self.content_type
body_value = self.serialize(body_value)
self._log_request(headers, path_params, query, body_value)
return (headers, path_params, query, body_value)
def _build_query(self, params):
"""Builds a query string.
Args:
params: dict, the query parameters
Returns:
The query parameters properly encoded into an HTTP URI query string.
"""
if self.alt_param is not None:
params.update({'alt': self.alt_param})
astuples = []
for key, value in params.items():
if type(value) == type([]):
for x in value:
x = x.encode('utf-8')
astuples.append((key, x))
else:
        if getattr(value, 'encode', False) and callable(value.encode):
value = value.encode('utf-8')
astuples.append((key, value))
return '?' + urllib.parse.urlencode(astuples)
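    # e.g. for JsonModel (alt_param='json'), _build_query({'q': 'cat'}) returns
    # '?q=cat&alt=json' (tuple order follows the params dict, with 'alt' last)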
def _log_response(self, resp, content):
"""Logs debugging information about the response if requested."""
if dump_request_response:
logging.info('--response-start--')
for h, v in resp.items():
logging.info('%s: %s', h, v)
if content:
logging.info(content)
logging.info('--response-end--')
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
self._log_response(resp, content)
# Error handling is TBD, for example, do we retry
# for some operation/error combinations?
if resp.status < 300:
if resp.status == 204:
# A 204: No Content response should be treated differently
# to all the other success states
return self.no_content_response
return self.deserialize(content)
else:
logging.debug('Content from bad request was: %s' % content)
raise HttpError(resp, content)
def serialize(self, body_value):
"""Perform the actual Python object serialization.
Args:
body_value: object, the request body as a Python object.
Returns:
string, the body in serialized form.
"""
_abstract()
def deserialize(self, content):
"""Perform the actual deserialization from response string to Python
object.
Args:
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
"""
_abstract()
class JsonModel(BaseModel):
"""Model class for JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request and response bodies.
"""
accept = 'application/json'
content_type = 'application/json'
alt_param = 'json'
def __init__(self, data_wrapper=False):
"""Construct a JsonModel.
Args:
data_wrapper: boolean, wrap requests and responses in a data wrapper
"""
self._data_wrapper = data_wrapper
def serialize(self, body_value):
if (isinstance(body_value, dict) and 'data' not in body_value and
self._data_wrapper):
body_value = {'data': body_value}
return simplejson.dumps(body_value)
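  # e.g. with data_wrapper=True, serialize({'x': 1}) yields '{"data": {"x": 1}}'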
def deserialize(self, content):
if type(content) is bytes:
content = content.decode('utf-8')
body = simplejson.loads(content)
if self._data_wrapper and isinstance(body, dict) and 'data' in body:
body = body['data']
return body
@property
def no_content_response(self):
return {}
class RawModel(JsonModel):
"""Model class for requests that don't return JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request, and returns the raw bytes
of the response body.
"""
accept = '*/*'
content_type = 'application/json'
alt_param = None
def deserialize(self, content):
return content
@property
def no_content_response(self):
return ''
class MediaModel(JsonModel):
"""Model class for requests that return Media.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request, and returns the raw bytes
of the response body.
"""
accept = '*/*'
content_type = 'application/json'
alt_param = 'media'
def deserialize(self, content):
return content
@property
def no_content_response(self):
return ''
class ProtocolBufferModel(BaseModel):
"""Model class for protocol buffers.
Serializes and de-serializes the binary protocol buffer sent in the HTTP
request and response bodies.
"""
accept = 'application/x-protobuf'
content_type = 'application/x-protobuf'
alt_param = 'proto'
def __init__(self, protocol_buffer):
"""Constructs a ProtocolBufferModel.
The serialzed protocol buffer returned in an HTTP response will be
de-serialized using the given protocol buffer class.
Args:
protocol_buffer: The protocol buffer class used to de-serialize a
response from the API.
"""
self._protocol_buffer = protocol_buffer
def serialize(self, body_value):
return body_value.SerializeToString()
def deserialize(self, content):
return self._protocol_buffer.FromString(content)
@property
def no_content_response(self):
return self._protocol_buffer()
def makepatch(original, modified):
"""Create a patch object.
Some methods support PATCH, an efficient way to send updates to a resource.
This method allows the easy construction of patch bodies by looking at the
differences between a resource before and after it was modified.
Args:
original: object, the original deserialized resource
modified: object, the modified deserialized resource
Returns:
An object that contains only the changes from original to modified, in a
form suitable to pass to a PATCH method.
Example usage:
item = service.activities().get(postid=postid, userid=userid).execute()
original = copy.deepcopy(item)
item['object']['content'] = 'This is updated.'
service.activities.patch(postid=postid, userid=userid,
body=makepatch(original, item)).execute()
"""
patch = {}
for key, original_value in original.items():
modified_value = modified.get(key, None)
if modified_value is None:
# Use None to signal that the element is deleted
patch[key] = None
elif original_value != modified_value:
if type(original_value) == type({}):
# Recursively descend objects
patch[key] = makepatch(original_value, modified_value)
else:
# In the case of simple types or arrays we just replace
patch[key] = modified_value
else:
# Don't add anything to patch if there's no change
pass
for key in modified:
if key not in original:
patch[key] = modified[key]
return patch
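# For example (illustrative values):
#   makepatch({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4}) == {'b': 3, 'c': 4}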
|
[
"jorge@MacBook-Pro-de-Jorge.local"
] |
jorge@MacBook-Pro-de-Jorge.local
|
4999ade213d9555e0697b7004de81a99a9d815b3
|
ec7c1e8b6fd1c397d072a05702221ba198a12f3b
|
/mysite/laser/migrations/0005_auto_20170418_1524.py
|
053ca170596cc2635b405118c995c4f72d40f5e7
|
[] |
no_license
|
profanat/laser
|
3c4d197d012c178be1ed1b2ad780b65ad68288c2
|
5b207f60caf7dd4610c2fb3dd25d399463d3a808
|
refs/heads/master
| 2021-01-19T23:44:14.995911
| 2017-04-30T10:46:10
| 2017-04-30T10:46:10
| 89,022,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-18 13:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('laser', '0004_item_image'),
]
operations = [
migrations.CreateModel(
name='SubCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, verbose_name='Название')),  # verbose_name: "Name"
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='laser.Category')),
],
options={
                'verbose_name': 'Подкатегория',  # "Subcategory"
                'verbose_name_plural': 'Подкатегории',  # "Subcategories"
'ordering': ['name'],
},
),
migrations.RemoveField(
model_name='item',
name='category',
),
migrations.AddField(
model_name='item',
name='subcategory',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='laser.SubCategory'),
preserve_default=False,
),
]
|
[
"profanat@mail.ru"
] |
profanat@mail.ru
|
07e17c5553d0087fa7eb1cd218f0ce579fa6d45b
|
89b297e3ffb43ecbb508fd01cf28b89817cd869b
|
/c103.py
|
7f447ef1daf34e463dbd3c56b79fe26cdc43df21
|
[] |
no_license
|
DhyeyPatel779/C103
|
0eeff9a104243272c75eba06d959eea8e608c22a
|
d4afa720ada23121a9fcfb1ca0c023ac9cb72590
|
refs/heads/main
| 2023-08-24T23:36:39.754111
| 2021-10-14T07:43:26
| 2021-10-14T07:43:26
| 417,035,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
import pandas as pd
import plotly.express as px
df = pd.read_csv('line_chart.csv')
fig = px.line(df,x="Year", y="Per capita income", color="Country", title="Per Capita Income")
fig.show()
|
[
"noreply@github.com"
] |
DhyeyPatel779.noreply@github.com
|
5051561594eab17e7933f5df6df726631b2282fd
|
9f9031218c411443a4bb9ada6e5af333cf4fa083
|
/azuresearch/indexes/scoring_profile.py
|
8ec8b0698fe9d0d111206c542f8cfeeb63c9c5b1
|
[] |
no_license
|
aloosley/python-azure-search
|
ba51c19b2387ea35706ea9a6e7eca8227cf3402f
|
ce605528a2bbcdc7a3f08fca1e5c39129ab726d9
|
refs/heads/master
| 2020-05-01T19:21:13.115786
| 2019-01-13T13:06:33
| 2019-01-13T13:06:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,780
|
py
|
import json
import warnings
from azuresearch.azure_search_object import AzureSearchObject
class ScoringProfile(AzureSearchObject):
'''
    A scoring profile for an index. For more information, see
    https://docs.microsoft.com/en-us/rest/api/searchservice/add-scoring-profiles-to-a-search-index
'''
def __init__(self, name, text=None, functions=None,**kwargs):
super().__init__(**kwargs)
if functions is None:
functions = []
self.name = name
self.text = text
self.functions = functions
def __repr__(self):
return "<{classname}: {name}>".format(
            classname=type(self).__name__, name=self.name
)
def to_dict(self):
return_dict = {
"name": self.name,
"text": self.text.to_dict(),
"functions": [func.to_dict() for func in self.functions]
}
# add additional user generated params
return_dict.update(self.params)
        # make all params camelCase (to be sent correctly to Azure Search)
return_dict = self.to_camel_case_dict(return_dict)
# Remove None values
return_dict = self.remove_empty_values(return_dict)
return return_dict
@classmethod
def load(cls, data):
if type(data) is str:
data = json.loads(data)
if type(data) is not dict:
raise Exception("Failed to parse input as Dict")
if 'text' in data:
data['text'] = ScoringProfileText.load(data['text'])
if 'functions' in data:
data['functions'] = [ScoringProfileFunction.load(spf) for spf in data['functions']]
data = cls.to_snake_case_dict(data)
return cls(**data)
class ScoringProfileText(AzureSearchObject):
'''
A text value for a scoring profile. Holds the weights of different fields.
See this link for more information:
https://docs.microsoft.com/en-us/rest/api/searchservice/add-scoring-profiles-to-a-search-index
    :param weights: a mapping of field name to weight value
'''
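    # e.g. weights = {"title": 5, "body": 1} weighs matches on "title" five
    # times heavier than matches on "body"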
def __init__(self, weights, **kwargs):
super().__init__(**kwargs)
self.weights = weights
def to_dict(self):
return_dict = {
"weights": self.weights
}
# add additional user generated params
return_dict.update(self.params)
        # make all params camelCase (to be sent correctly to Azure Search)
return_dict = self.to_camel_case_dict(return_dict)
# Remove None values
return_dict = self.remove_empty_values(return_dict)
return return_dict
@classmethod
def load(cls, data):
if type(data) is str:
data = json.loads(data)
if type(data) is not dict:
raise Exception("Failed to parse input as Dict")
data = cls.to_snake_case_dict(data)
return cls(**data)
class ScoringProfileFunction(AzureSearchObject):
'''
A function to perform for scoring.
See this link for more information:
https://docs.microsoft.com/en-us/rest/api/searchservice/add-scoring-profiles-to-a-search-index#bkmk_indexref '''
def __init__(self,
type,
field_name=None,
boost=None,
interpolation=None,
magnitude=None,
freshness=None,
distance=None,
tag=None,
**kwargs):
super().__init__(**kwargs)
self.type = type
self.field_name = field_name
self.boost = boost
self.interpolation = interpolation
self.magnitude = magnitude
self.freshness = freshness
self.distance = distance
self.tag = tag
self._validate_interpolation()
def to_dict(self):
return_dict = {
"type": self.type,
"boost": self.boost,
"fieldName": self.field_name,
"interpolation": self.interpolation,
"magnitude": self.magnitude,
"freshness": self.freshness,
"distance": self.distance,
"tag": self.tag,
}
# add additional user generated params
return_dict.update(self.params)
        # make all params camelCase (to be sent correctly to Azure Search)
return_dict = self.to_camel_case_dict(return_dict)
# Remove None values
return_dict = self.remove_empty_values(return_dict)
return return_dict
def _validate_interpolation(self):
if self.interpolation and self.interpolation not in interpolations:
warnings.warn("{interpolation} not in list of supported interpolations: {interpolations}".format(
interpolation=self.interpolation, interpolations=interpolations))
function_types = {
"magnitude",
"freshness",
"distance",
"tag"
}
interpolations = {
"constant",
"linear",
"quadratic",
"logarithmic"
}
# ``` https://docs.microsoft.com/en-us/rest/api/searchservice/add-scoring-profiles-to-a-search-index#bkmk_template
# "magnitude": {
# "boostingRangeStart": # ,
# "boostingRangeEnd": # ,
# "constantBoostBeyondRange": true | false(default)
# }
#
# // (- or -)
#
# "freshness": {
# "boostingDuration": "..."(value representing timespan over which boosting occurs)
# }
#
# // (- or -)
#
# "distance": {
# "referencePointParameter": "...", (parameter to be passed in queries to use as reference location)
# "boostingDistance": # (the distance in kilometers from the reference location where the boosting range ends)
# }
#
# // (- or -)
#
# "tag": {
# "tagsParameter": "..."(parameter to be passed in queries to specify a list of tags to compare against target field)
# }
|
[
"omri.mendels@microsoft.com"
] |
omri.mendels@microsoft.com
|
93103d6e41113db9ca4a00e8b344e771a27edaac
|
6fb95c1523ecd84a47cf351fd8c84d20c697d5c7
|
/hash.py
|
ab811578718de98ac6d5c60bfb5d8d2f77bf9771
|
[] |
no_license
|
Shiva454527/hash
|
5da4c3dbb9587561fe4e1cccc27d1fd1cec627d2
|
936e65946b6088eb13beca58041e41be3a711ab7
|
refs/heads/main
| 2023-03-15T00:21:17.514193
| 2021-03-08T07:48:25
| 2021-03-08T07:48:25
| 345,556,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
import hashlib
print("")
print(" ---------------Hash convert-------------v1.0")
print("")
print("")
print("<============================================>")
print(" Created by shiva")
print("<============================================>")
print("")
print("")
print("Available Options:\n1.MD5\n2.SHA512\n3.SHA256\n4.Select All\n5.Exit")
def md5(string):
    hash_object = hashlib.md5(string.encode())
    print("MD5: " + hash_object.hexdigest() + "\n")
def sha512(string):
    hash_object = hashlib.sha512(string.encode())
    print("SHA512: " + hash_object.hexdigest() + "\n")
def sha256(string):
    hash_object = hashlib.sha256(string.encode())
    print("SHA256: " + hash_object.hexdigest() + "\n")
while True:
    choice = int(input('Select An Option: '))
    if choice == 5:
        break
    string = input('Enter String To Hash: ')
    if choice == 1:
        md5(string)
    elif choice == 2:
        sha512(string)
    elif choice == 3:
        sha256(string)
    elif choice == 4:
        md5(string)
        sha512(string)
        sha256(string)
    else:
        print("Invalid Option!")
        break
print("")
print(" Thank you for downloading.")
|
[
"noreply@github.com"
] |
Shiva454527.noreply@github.com
|
72a91011ab0adb33cd3a50f2659fe2abe2c936f8
|
16d26279f2b1e995352529bf8bc54d08d4b3c7a4
|
/Shut down.py
|
08fd9657b1242cb47d23226e01f2d69c5bbf305c
|
[] |
no_license
|
surajmishra57/Shut-down
|
7e0583c8c5d993c6e475d00c6ecc374930922a04
|
a9c6214369628ee76c8293589f00b326b870d4af
|
refs/heads/master
| 2020-06-08T08:15:45.830846
| 2019-06-22T05:21:29
| 2019-06-22T05:21:29
| 193,194,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,046
|
py
|
from tkinter import *
from tkinter.ttk import Combobox
import os
from tkinter import messagebox
from tkinter.font import Font
def Cancel():
root.destroy()
def Clear():
combo.set("Select")
hour.set('H')
Hour.set('H')
mint.set('M')
Mint.set('M')
sec.set('S')
Sec.set('S')
def final():
chack=["H",'0','1','2','3','4','5','6','7','8','9']
chackm=["M",'0','1','2','3','4','5','6','7','8','9']
chacks=["S",'0','1','2','3','4','5','6','7','8','9']
Action=combo.get()
h=hour.get()
H=Hour.get()
m=mint.get()
M=Mint.get()
s=sec.get()
S=Sec.get()
print(type(H))
if Action not in ("Shut down","Restart","Logout"):
messagebox.showerror("Error","Select Options first")
return 0
if h not in chack:
messagebox.showerror("Error","Invalid hour")
return 0
if H not in chack:
messagebox.showerror("Error","Invalid hour")
return 0
if m not in chackm:
messagebox.showerror("Error","Invalid minute")
return 0
if M not in chackm:
messagebox.showerror("Error","Invalid minute")
return 0
if s not in chacks:
messagebox.showerror("Error","Invalid second")
return 0
if S not in chacks:
messagebox.showerror("Error","Invalid second")
return 0
if h=='H':
h='0'
if H=='H':
H='0'
if m=='M':
m='0'
if M=='M':
M='0'
if s=='S':
s='0'
if S=='S':
S='0'
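    # The paired combobox digits are concatenated as strings ("1" + "5" -> "15")
    # before being cast to int below.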
Ghanta=(h+H)
Minate=(m+M)
Seconde=(s+S)
HOUR=int(Ghanta)
MIN=int(Minate)
SEC=int(Seconde)
if HOUR ==0 and MIN==0 and SEC==0:
if Action=='Shut down':
result=messagebox.askquestion("SHOW INFO","System will shut down Immediate")
if result=='yes':
os.system("shutdown -s -t 00")
root.destroy()
if Action=='Restart':
result=messagebox.askquestion("SHOW INFO","System will Restart Immediate")
if result=='yes':
os.system("shutdown -r -t 00")
root.destroy()
if Action=='Logout':
result=messagebox.askquestion("SHOW INFO","System will LogOut Immediate")
if result=='yes':
os.system("shutdown -l -t 00")
root.destroy()
TIME=HOUR*60
TIME=TIME+MIN
Timesec=TIME*60
Timesec=Timesec+SEC
if Action=="Shut down":
command="shutdown -s -t "
Shut=command+str(Timesec)
result=messagebox.askquestion("TIME SET","System will down in %d Hour %d Min %d Sec"%(HOUR,MIN,SEC))
if result=='yes':
os.system(Shut)
root.destroy()
if Action=="Restart":
commande="shutdown -r -t "
SRestart=commande+str(Timesec)
result=messagebox.askquestion("TIME SET","System will Restart in %d Hour %d Min %d Sec"%(HOUR,MIN,SEC))
if result=='yes':
os.system(SRestart)
root.destroy()
if Action=="Logout":
comm="shutdown -l -t "
Lock=comm+str(Timesec)
result=messagebox.askquestion("TIME SET","System will LogOut in %d Hour %d Min %d Sec"%(HOUR,MIN,SEC))
if result=='yes':
os.system(Lock)
root.destroy()
root=Tk()
root.title("Computer Timer")
root.geometry("600x600+350+50")
fontsize=3
frm=Frame(root,width=600,height=800,bg="lightblue")
canvas=Canvas(frm,width=300,height=190,bg="black")
canv=Canvas(frm,width=300,height=15,bg="white")
can=Canvas(frm,width=300,height=15,bg="black")
computer=Label(canv,text="COMPUTER",font=fontsize,bg="white")
computer.place(x=110,y=0)
canvas.place(x=150,y=50)
canv.place(x=150,y=240)
can.place(x=150,y=270)
h=Label(frm,text="HOUR",font=fontsize,bg="lightblue")
h.place(x=210,y=390)
m=Label(frm,text="MINUTE",font=fontsize,bg="lightblue")
m.place(x=280,y=390)
s=Label(frm,text="SECOND",font=fontsize,bg="lightblue")
s.place(x=360,y=390)
V=["Shut down","Restart","Logout"]
combo=Combobox(frm,values=V,width=20)
combo.set("Select")
combo.place(x=230,y=350)
H=list(range(0,10))
hour=Combobox(frm,values=H,width=2)
hour.place(x=200,y=410)
hour.set("H")
Hour=Combobox(frm,values=H,width=2)
Hour.place(x=235,y=410)
Hour.set("H")
mint=Combobox(frm,values=H,width=2)
mint.set("M")
Mint=Combobox(frm,values=H,width=2)
Mint.set("M")
mint.place(x=280,y=410)
Mint.place(x=315,y=410)
sec=Combobox(frm,values=H,width=2)
sec.set("S")
Sec=Combobox(frm,values=H,width=2)
Sec.set("S")
sec.place(x=360,y=410)
Sec.place(x=395,y=410)
button=Button(frm,text=" Done ",bg="gray",command=final)
button.place(x=290,y=510)
clearbutton=Button(frm,text=" Clear ",bg="gray",command=Clear)
clearbutton.place(x=200,y=510)
Cancelbutton=Button(frm,text=" Cancel ",bg="gray",command=Cancel)
Cancelbutton.place(x=380,y=510)
frm.pack()
root.mainloop()
|
[
"noreply@github.com"
] |
surajmishra57.noreply@github.com
|
916739e2b47fd6b0bdc3b3d4a294774d7c172611
|
9f7631c2d987a33a9b7f2c2da38bf9369eac92bd
|
/test_segmentor.py
|
2c96cbdc374287eac5fa659ce15d39692caa3742
|
[
"MIT"
] |
permissive
|
Leo-xxx/kissing-detector
|
2a605837991bfc8c1dafcf38fb2cd758271592c2
|
df44f229c4f5160a792c4a6dc6d98d6817910df0
|
refs/heads/master
| 2020-06-02T18:56:18.249555
| 2019-06-12T12:57:00
| 2019-06-12T12:57:00
| 191,274,160
| 0
| 0
| null | 2019-06-11T01:52:17
| 2019-06-11T01:52:16
| null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
import unittest
from segmentor import Segmentor
class TestSegmentor(unittest.TestCase):
def test_segmentor(self):
tests = [
([1, 1, 0, 1, 0, 0, 1, 0, 1], [[0, 1, 2, 3]]),
([1, 1, 0, 1, 1, 0, 1, 0, 1], [[0, 1, 2, 3, 4, 5, 6]]),
([1, 1, 0, 1, 1, 0, 1, 1, 1], [list(range(0, 8 + 1))]),
([1, 1, 1, 0, 1], [[0, 1, 2, 3, 4]]),
([0, 0, 0, 0, 0], []),
([1] * 7 + [0] * 3, [list(range(7))])
]
min_frames = 4
threshold = 0.7
for ex, exp in tests:
self.assertEqual(exp, Segmentor._segmentor(ex, min_frames, threshold))
|
[
"arziai@gmail.com"
] |
arziai@gmail.com
|
6ddebb5147754b838b6cdfe50c7b3136c205898e
|
bd47e29dd98a30da16fab7541e2fa486779571c9
|
/AIAssignment/PA1_MT18099_Kshitij_Srivastava/Question 2/printGrid.py
|
fa3f0d133e2fb2ad5147cb659cbb4878fd6c1c29
|
[] |
no_license
|
kshitijsriv/Artificial-Intelligence-Fall-2018
|
35a96381c7a109164de5b861a5ef81400abb34b3
|
f14656a40e830eaf59122082162fe887eba3e8a9
|
refs/heads/master
| 2023-05-03T07:07:20.164863
| 2021-05-18T20:22:00
| 2021-05-18T20:22:00
| 368,656,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
def prnt(grid, n):
    for i in range(n):
        for j in range(n):
print(grid[(i * n) + j], end=' ')
print()
|
[
"kshitij18099@iiitd.ac.in"
] |
kshitij18099@iiitd.ac.in
|
7004e91d73dfb7ae5323ceb1a1e9816b5ac70c53
|
1ea1ffa610838c6161f5dcaab6fbfe6341aaa902
|
/BB_MesoWest/MesoWest_STNinfo.py
|
16182f7f04f86a093c3954ad74e64f3709fbe9c6
|
[] |
no_license
|
johnhorel/pyBKB_v2
|
7611e1562e130abad911f02dc9d6b34966a6eaa3
|
6e04bea50666dfc34ec4b6035729bd2d4c807b12
|
refs/heads/master
| 2020-12-30T15:09:34.573208
| 2017-05-12T18:02:01
| 2017-05-12T18:02:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,322
|
py
|
# Brian Blaylock
# Version 2.0 update
# 8 November 2016 (Trump vs. Clinton Presidential Election)
"""
Get basic metadata for a list of station IDs
"""
import json
import urllib2
from get_token import my_token # returns my personal token
import numpy as np
# Get your own key and token from here: https://mesowest.org/api/signup/
token = my_token()
def get_station_info(stationIDs):
"""
Get the metadata info for a list of stations and return it as a dictionary.
Input:
stationIDs : list of station IDs as a string ['UKBKB', 'KSLC', 'WBB']
Output:
A dictionary of the data you are looking for:
- Latitude
- Longitude
- Station Name
- Elevation
- URL
"""
# Check that the input is a list of stations.
if type(stationIDs) != list:
return "List of stations is required! s = ['KSLC', 'WBB', 'MTMET']"
else:
# Convert stationID list to a string deliminated by a comma
stations = ','.join(stationIDs)
# The API request URL
URL = 'http://api.mesowest.net/v2/stations/metadata?&token=' + token \
+ '&stid=' + stations
try:
# Open URL, read the content, and convert JSON to python readable form.
f = urllib2.urlopen(URL)
data = f.read()
data = json.loads(data)
stnid = np.array([])
name = np.array([])
lat = np.array([])
lon = np.array([])
time_zone = np.array([])
for i in data['STATION']:
name = np.append(name, str(i['NAME']))
stnid = np.append(stnid, str(i['STID']))
lat = np.append(lat, float(i['LATITUDE']))
lon = np.append(lon, float(i['LONGITUDE']))
time_zone = np.append(time_zone, str(i['TIMEZONE']))
data_dict = {'URL': URL,
'NAME': name,
'STNID': stnid,
'LAT': lat,
'LON': lon,
'TIME_ZONE': time_zone
}
return data_dict
except:
print 'errors loading:', URL
#--- Example -----------------------------------------------------------------#
if __name__ == "__main__":
STATION = ['kslc', 'ukbkb', 'klax', 'kims', 'kjfk']
A = get_station_info(STATION)
|
[
"blaylockbk@gmail.com"
] |
blaylockbk@gmail.com
|
b9aaef4ba11145f2ffbea561b6705b86c14b215a
|
13cd6c2d28c3df0daa0a90f856275197410973f4
|
/balance.py
|
33e763e2abe4aa551ef4e8f2be4400e56211b26c
|
[] |
no_license
|
ccnsd/balance
|
095501048cec3b50cdc9901ec92052b87ef024e3
|
52ae8cd33698c3db1abcf802692be6254b05987c
|
refs/heads/master
| 2020-04-05T10:37:32.988955
| 2018-12-09T08:12:01
| 2018-12-09T08:12:01
| 156,804,928
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,137
|
py
|
import numpy as np
import networkx as nx
import random
class TriangularBalance:
def __init__(self, size, initialRatio, AgeExponent, noiseType):
        # Size of the network
self.Size = size
# Caclulate the number of triangles connected to one link
self.TrianglesOnLink = self.LinkTriangles()
# Calculate the number of all triangles in the fully connected network
self.Triangles = self.TriangleCount()
# The portion of friendly link
self.InitRatio = initialRatio
# The age factor
self.AgeExponent = AgeExponent
# The type of noise in memory
self.NoiseType = noiseType
# This will initialize the network based on given parameters
self.NetworkInitiator()
# region Initial Functions
# Count all the triangles in network
def TriangleCount(self):
        numerator = self.Size * (self.Size - 1) * (self.Size - 2)
        denominator = 6
        return numerator / denominator
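    # e.g. Size = 4 gives 4*3*2/6 = 4 = C(4,3) triangles in the complete graph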
# Count the triangles connected to a link
def LinkTriangles(self):
trianglesOnLink = self.Size - 2
return(trianglesOnLink)
# This function initializes the network at the begining of the run
def NetworkInitiator(self):
# Generate a matrix with random elements between 0 and 1
tempMatrix = np.random.rand(self.Size, self.Size)
# Change elements smaller than friendship ratio to -1 (Will be changed to 1 soon)
tempMatrix[tempMatrix < self.InitRatio] = -1
# Change elements greater than friendship ratio to 1 (Will be changed to -1 soon)
tempMatrix[tempMatrix >= self.InitRatio] = 1
        # The matrix created above is not symmetric (so it does not represent a
        # graph); we symmetrize it with this trick, flipping the link signs as
        # promised above
        adjMatrix = np.tril(-tempMatrix, -1) + np.tril(-tempMatrix, -1).T
# Put this matrix to the class InitialNetwork and Network
self.InitialNetwork = adjMatrix
# To save the initial state we store the network in another variable
self.Network = self.InitialNetwork
# Birth matrix
self.BirthTime = np.zeros((self.Size, self.Size))
# Time of system
self.SystemTime = 1
# Calculate the Energy of our network
self.Energy = self.NetworkEnergy()
    # This function calculates the total energy of the network
def NetworkEnergy(self):
# Calculate the sum of product of all two pairs on each link
netLen2Path = np.matmul(self.Network, self.Network)
# Calculate the energy of all triangles on each link
# (not exactly energy it needs a multiply by -1 to be energy)
energyMat = np.multiply(self.Network, netLen2Path)
# Every link is counted 2 times
unnormalTotalEnergy = np.sum(energyMat) / 2
# Every triangle is counted 3 times
unnormalTotalEnergy = unnormalTotalEnergy / 3
# We want energy to be between -1 to +1
totalEnergy = float(-unnormalTotalEnergy) / self.Triangles
return(totalEnergy)
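        # i.e. Energy = -(1/Triangles) * sum over triangles of s_ij*s_jk*s_ki,
        # ranging from -1 (every triangle balanced) to +1 (every one unbalanced)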
# endregion
# region Dynamics
# Calculation of the attribution of one link in the total energy of the network
def LinkEnergy(self, adjTuple):
# Get the link's sign
linkSign = self.Network[adjTuple]
# Adjacent links
linkRow = self.Network[adjTuple[0]]
linkCol = self.Network[adjTuple[1]]
# The energy of triangles on the link
linkEng = float(-1.0 * np.inner(linkRow, linkCol) * linkSign) / self.Triangles
return(linkEng)
def LinkEnergyCheck(self, linkEnergy, linkSign):
# This part is for manipulating E=0 case
randomSign = random.sample((1, -1), 1)
addedEnergy = randomSign[0] / (2 * self.Triangles)
tempEnergy = linkEnergy + addedEnergy
# **************************************
# link's new sign and energy change
tempSign = - np.sign(tempEnergy) * linkSign
delta = abs(tempSign - linkSign) * linkEnergy
# *********************************
return([delta, tempSign])
def LinkBaseDynamics(self):
# choose a random link
        link = tuple(random.sample(range(self.Size), 2))
# get the sign
linkSign = self.Network[link]
# link energy
linkEnergy = self.LinkEnergy(link)
# check if it will change
engStat = self.LinkEnergyCheck(linkEnergy, linkSign)
# how much the sign should change
signChange = linkSign - engStat[1]
# change system's energy and link's sign
self.Energy -= engStat[0]
self.Network[link] -= signChange
self.Network[link[1]][link[0]] -= signChange
def BaseDynamics(self):
# dynamics time length
itterateLength = self.Size ** self.ItterateExp
TimeLine = np.zeros((itterateLength,2))
for Time in range(itterateLength):
TimeLine[Time,0] = self.Network.mean()
TimeLine[Time,1] = self.Energy
self.LinkBaseDynamics()
return(TimeLine)
def LinkAgeCheck(self, Age):
        # check if the link's age accepts the change
agePass = int(random.random() < float(Age) ** (self.AgeExponent - 1))
return agePass
def LinkAgedDynamics(self, Time):
# choose a random link
        link = tuple(random.sample(range(self.Size), 2))
# get the sign
linkSign = self.Network[link]
# link energy
linkEnergy = self.LinkEnergy(link)
# get the age of link
linkAge = Time - self.BirthTime[link]
# check if it will change due to energy
engStat = self.LinkEnergyCheck(linkEnergy, linkSign)
# check if the age permits the change
ageStat = self.LinkAgeCheck(linkAge)
        # how much the energy changes
enrgChanged = engStat[0] * ageStat
# how the sign changes
signChange = linkSign - engStat[1]
        # does the change happen?
acceptStat = signChange * ageStat
# apply the changes
self.Energy -= enrgChanged
self.Network[link] -= acceptStat
self.Network[link[1]][link[0]] -= acceptStat
self.BirthTime[link] += acceptStat * linkAge / 2
self.BirthTime[link[1]][link[0]]+= acceptStat * linkAge / 2
def AgedDynamics(self):
# print("Aged Dynamics")
# dynamics time length
itterateLength = self.Size ** self.ItterateExp
TimeLine = np.zeros((itterateLength,2))
for Time in range(itterateLength):
TimeLine[Time,0] = self.Network.mean()
TimeLine[Time,1] = self.Energy
self.LinkAgedDynamics(Time + 1)
return(TimeLine)
def TriadDynamics(self, itterateExp):
self.ItterateExp = itterateExp
# check if system has age or not
aged = 0 < self.AgeExponent < 1
self.TimeLine = self.AgedDynamics() if aged else self.BaseDynamics()
# endregion
|
[
"mhoseinhs@gmail.com"
] |
mhoseinhs@gmail.com
|
ba5cdbf4af013697934cd32d9a81f63202ca1ec1
|
9bade1e19d1fe61324e1b0d0d0b2fb650b618b70
|
/ch2/tasks_proj/tests/unit/test_task.py
|
b003f7eeda9e2f47e9d8065d06ab459a9aac6a8a
|
[] |
no_license
|
Vermee81/practicePytest
|
6d998064c7892495cb11630cd70ecdeafc7ced04
|
df85e800f3d57f98017c6ab2bf7c176d605469e7
|
refs/heads/master
| 2021-06-16T05:26:10.885007
| 2019-06-11T12:59:02
| 2019-06-11T12:59:02
| 191,372,066
| 0
| 0
| null | 2021-04-20T18:19:24
| 2019-06-11T12:58:14
|
Python
|
UTF-8
|
Python
| false
| false
| 996
|
py
|
from tasks import Task
def test_asdict():
"""_asdict() should return a dictionary."""
t_task = Task('do something', 'Okken', True, 21)
t_dict = t_task._asdict()
expected = {'summary': 'do something',
'owner': 'Okken',
'done': True,
'id': 21}
assert t_dict == expected
def test_replace():
"""_replace() should change passed in fields"""
t_before = Task('do something', 'Taro', False, 22)
    t_after = t_before._replace(summary='replaced summary', done=True)
t_expected = Task('replaced summary', 'Taro', True, 22)
assert t_after == t_expected
def test_defaults():
"""Using no parameters should invoke defaults."""
t1 = Task()
t2 = Task(None, None, False, None)
assert t1 == t2
def test_member_access():
"""Check .field functionality of named tuple."""
t = Task('buy milk', 'Hanako', False)
assert t.summary == 'buy milk'
assert t.owner == 'Hanako'
assert t.done == False
|
[
"hrksb5029@gmail.com"
] |
hrksb5029@gmail.com
|
9673ba1b310997112ef86dc58e7a09bede97e40e
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/oQ99uE4iPNbEnf9QZ_8.py
|
98e927384c07bf13e06fc0c934083fe3ce3dac4c
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
"""
Given a known number of unique items, how many ways could we arrange them in a
row?
Create a function that takes an integer `n` and returns the number of digits
of the number of possible permutations for `n` unique items. For instance, 5
unique items could be arranged in 120 unique ways. 120 has 3 digits, hence the
integer `3` is returned.
### Examples
no_perms_digits(0) ➞ 1
no_perms_digits(1) ➞ 1
no_perms_digits(5) ➞ 3
no_perms_digits(8) ➞ 5
### Notes
This challenge requires some understanding of combinatorics.
"""
import sys
sys.setrecursionlimit(10**6)
def no_perms_digits(num, i=0, total=1):
if i == num:
if total // 10 == 0:
return 1
else:
return 1 + no_perms_digits(num, i, total // 10)
else:
return no_perms_digits(num, i + 1, total * (i + 1))
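# Sanity check (a sketch, not part of the original solution): the digit count
# of n! can also be computed directly:
#   import math
#   assert no_perms_digits(5) == len(str(math.factorial(5)))  # 120 -> 3 digits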
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
0a3fc9f2dc890b6e2efaa610973cc13c5a947a43
|
b5ae0120287b7b6dcd26e65f4292ba81a29f1a1f
|
/basicwebapp/wsgi.py
|
fdafd5ef5f6f01a30de1dadab33001f70dbf31ce
|
[] |
no_license
|
rnvarma/basicwebapp
|
d96a074a3afa6d95d35b04e06c627416db8aa36c
|
1f52c7d377e1e36acd6a75846a2d77a8a8252e13
|
refs/heads/master
| 2021-01-10T05:17:01.842320
| 2015-11-09T22:51:14
| 2015-11-09T22:51:14
| 45,871,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for basicwebapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "basicwebapp.settings")
application = get_wsgi_application()
|
[
"rohan@prompthire.com"
] |
rohan@prompthire.com
|
501738ec710801e81d9ab4e1f7ef15ccbeea5e65
|
12e04c219d6911d06a048c913f8d8d6c00dad857
|
/chendian/member/forms.py
|
d494a75608722225651d47661ad908497bd17ee9
|
[
"MIT"
] |
permissive
|
mozillazg/chendian-plus
|
928e98beb77f351e08b25a5ba9671ad648dac4b5
|
893c62b4b855879006d4cb378faeb9d1c6635923
|
refs/heads/master
| 2023-09-04T09:58:58.112022
| 2017-04-04T09:44:28
| 2017-04-04T09:44:28
| 31,481,576
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django import forms
from django.contrib.auth.forms import AuthenticationForm
class LoginForm(AuthenticationForm):
error_messages = {
        'invalid_login': '用户名或密码错误!',  # "Incorrect username or password!"
        'no_cookies': '您的浏览器没有启用 Cookies!',  # "Your browser does not have cookies enabled!"
        'inactive': '此账户未激活!',  # "This account has not been activated!"
}
|
[
"opensource.mozillazg@gmail.com"
] |
opensource.mozillazg@gmail.com
|
ebea53bffcd6e69af4d785a1455f144ba9b50975
|
e9aeb4d00fe6758a51d9982c3e1af1e456a9e244
|
/funcion.py
|
119144c450d8e1880a6bc8dbbd98c49b30da90ad
|
[] |
no_license
|
PacoBocanegra/myproject
|
ae365ca3aaa37699355eed1bd5ae950a241e74d0
|
976a70987c87003ba36ecec11894e37d4725c8ac
|
refs/heads/master
| 2020-04-07T03:14:20.866838
| 2019-04-10T10:28:02
| 2019-04-10T10:28:02
| 158,008,209
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
def contador(unaLista):
    """Count the occurrences of each value in a list ("contador" = "counter")."""
    unDict = {}
for valores in unaLista:
if valores not in unDict.keys():
unDict[valores] = 1
else:
unDict[valores] += 1
return unDict
|
[
"noreply@github.com"
] |
PacoBocanegra.noreply@github.com
|
496ed58b9cc4cf8030a102eaf9e3214dbf825d58
|
b7395bf3c8e209a8b2f0ace1b7e763ed76cd0b25
|
/HSE/smith_waterman/smith_waterman.py
|
f06680fd29cdc7f0c54c197dcf0544a0042c410d
|
[] |
no_license
|
antosiv/study_progs
|
9f7ceb28f33211211bfe43f9af21cf68e745e338
|
9ec3fb4313f28b662635c8693e0f435d6b75187b
|
refs/heads/master
| 2022-12-08T18:04:56.410185
| 2021-07-27T13:31:57
| 2021-07-27T14:31:04
| 133,510,616
| 0
| 0
| null | 2022-11-23T22:27:38
| 2018-05-15T12:07:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,166
|
py
|
import numpy as np
import argparse
import os
class Node:
def __init__(self, value):
self.diag = None
self.up = None
self.left = None
self.value = value
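# Smith-Waterman local alignment: the score matrix is filled with the recurrence
#   H[i][j] = max(H[i-1][j-1] + s(a_i, b_j), H[i-1][j] - gap, H[i][j-1] - gap, 0)
# where s() is +similar_score on a match and -mismatch_penalty otherwise; the
# traceback tree below starts from the cell holding the maximum score.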
def build_alignment_tree(seq1, seq2, similar_score=1, gap_penalty=1, mismatch_penalty=1):
assert isinstance(seq1, str) and isinstance(seq2, str), 'Sequences are not strings'
score_matrix = np.zeros((len(seq1) + 1, len(seq2) + 1))
for i in range(1, len(seq1) + 1):
for j in range(1, len(seq2) + 1):
if seq1[i - 1] == seq2[j - 1]:
current_similarity = similar_score
else:
current_similarity = mismatch_penalty * -1
score_matrix[i, j] = max(
score_matrix[i - 1, j - 1] + current_similarity,
score_matrix[i - 1, j] - gap_penalty,
score_matrix[i, j - 1] - gap_penalty,
0
)
start_position = tuple(
np.unravel_index(
np.argmax(score_matrix), shape=score_matrix.shape
)
)
root = Node(start_position)
current_positions = [root]
while len(current_positions) > 0:
next_positions = list()
for node in current_positions:
if seq1[node.value[0] - 1] == seq2[node.value[1] - 1]:
current_similarity = similar_score
else:
current_similarity = mismatch_penalty * -1
for step_score, coordinates, direction in \
(current_similarity, (node.value[0] - 1, node.value[1] - 1), 'diag'), \
(-1 * gap_penalty, (node.value[0] - 1, node.value[1]), 'up'), \
(-1 * gap_penalty, (node.value[0], node.value[1] - 1), 'left'):
if coordinates[0] > 0 and \
coordinates[1] > 0 and \
score_matrix[coordinates[0], coordinates[1]] > 0 and \
score_matrix[coordinates[0], coordinates[1]] + step_score == \
score_matrix[node.value[0], node.value[1]]:
setattr(node, direction, Node(value=coordinates))
next_positions.append(getattr(node, direction))
current_positions = next_positions
return root, score_matrix
def extract_alignments_recursive(root, final_paths, prefix=None):
if prefix is None:
prefix = list()
prefix.append(root.value)
for direction in root.left, root.up, root.diag:
leaf = True
if direction is not None:
leaf = False
extract_alignments_recursive(direction, final_paths, prefix.copy())
if leaf:
final_paths.append(prefix)
def extract_alignments_from_root(root):
result = list()
extract_alignments_recursive(root, result)
return result
def alignment_path_to_seqs(alignment_path, seq1, seq2):
result = [[], []]
for i in range(len(alignment_path) - 1):
if alignment_path[i] == (alignment_path[i + 1][0] + 1, alignment_path[i + 1][1] + 1):
result[0].append(seq1[alignment_path[i][0] - 1])
result[1].append(seq2[alignment_path[i][1] - 1])
elif alignment_path[i] == (alignment_path[i + 1][0] + 1, alignment_path[i + 1][1]):
result[0].append(seq1[alignment_path[i][0] - 1])
result[1].append('-')
elif alignment_path[i] == (alignment_path[i + 1][0], alignment_path[i + 1][1] + 1):
result[0].append('-')
result[1].append(seq2[alignment_path[i][1] - 1])
else:
raise ValueError('Invalid alignment path: {p}'.format(p=alignment_path))
result[0].append(seq1[alignment_path[-1][0] - 1])
result[1].append(seq2[alignment_path[-1][1] - 1])
return result
def align(seq1, seq2, similar_score=1, gap_penalty=1, mismatch_penalty=1, print_info=True):
alignment_tree_root, score_matrix = build_alignment_tree(
seq1, seq2, similar_score=similar_score, gap_penalty=gap_penalty, mismatch_penalty=mismatch_penalty
)
alignments_coordinates = extract_alignments_from_root(alignment_tree_root)
alignments = [alignment_path_to_seqs(item, seq1, seq2) for item in alignments_coordinates]
if print_info:
print('Sequence 1:', seq1)
print('Sequence 2:', seq2)
row_names = ' ' + seq1
print(
'Score matrix:',
'\t\t' + '\t'.join(seq2),
*(row_names[i] + '\t' + '\t'.join(map(str, score_matrix[i, :])) for i in range(score_matrix.shape[0])),
sep='\n'
)
print('Best alignment score: {s}'.format(s=np.max(score_matrix)))
for alignment_path, alignment in zip(alignments_coordinates, alignments):
print('\nAlignment:', ''.join(alignment[0][::-1]), ''.join(alignment[1][::-1]), sep='\n')
path_matrix = np.full(score_matrix.shape, ' ', dtype='U')
for i in range(len(alignment_path) - 1):
if alignment_path[i] == (alignment_path[i + 1][0] + 1, alignment_path[i + 1][1] + 1):
path_matrix[alignment_path[i][0], alignment_path[i][1]] = '\\'
elif alignment_path[i] == (alignment_path[i + 1][0] + 1, alignment_path[i + 1][1]):
path_matrix[alignment_path[i][0], alignment_path[i][1]] = '|'
elif alignment_path[i] == (alignment_path[i + 1][0], alignment_path[i + 1][1] + 1):
path_matrix[alignment_path[i][0], alignment_path[i][1]] = '-'
path_matrix[alignment_path[-1][0], alignment_path[-1][1]] = '\\'
print('Alignment path:')
print(
' | | ' + ' | '.join(seq2) + ' |',
*(' | '.join((row_names[i], *path_matrix[i, :])) + ' |' for i in range(path_matrix.shape[0])),
sep='\n' + '---' + '----' * (path_matrix.shape[1]) + '\n'
)
return alignments
def main(args):
seqs = [args.seq1, args.seq2]
for i, attribute in zip(range(2), ('seq1', 'seq2')):
if os.path.exists(getattr(args, attribute)):
with open(getattr(args, attribute)) as input_d:
seqs[i] = input_d.read()
return align(
seq1=seqs[0],
seq2=seqs[1],
gap_penalty=args.gap_penalty,
mismatch_penalty=args.mismatch_penalty,
similar_score=args.similar_score
)
if __name__ == '__main__':
examples = """Examples:
python3 smith_waterman.py --seq1 AAAA --seq2 AAAA
python3 smith_waterman.py --seq1 file.txt --seq2 AAAA
"""
parser = argparse.ArgumentParser(
description='Smith-Waterman algorithm implementation',
epilog=examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--seq1', required=True, help='path to .txt file with sequence or sequence string')
parser.add_argument('--seq2', required=True, help='path to .txt file with sequence or sequence string')
    parser.add_argument('--similar_score', type=int, default=1, required=False)
    parser.add_argument('--gap_penalty', type=int, default=1, required=False)
    parser.add_argument('--mismatch_penalty', type=int, default=1, required=False)
main(parser.parse_args())
|
[
"anton6722@gmail.com"
] |
anton6722@gmail.com
|
794e7f2d349b256ab00c025b5646697ca1a79a20
|
bc2aaafbd154c3b116b393881213747979238c9f
|
/coding_challenge.py
|
6cd92649b448fadd45100c18b31383326f11e935
|
[] |
no_license
|
victor-gesit/udemy-python-bootcamp
|
787570f9f243809dddfbf490438d8422cd4c01a3
|
4c47dcdf35c6c69b3f7c647f9a50676d9a2c7401
|
refs/heads/master
| 2020-03-26T14:53:50.765381
| 2018-08-16T16:04:52
| 2018-08-16T16:04:52
| 145,010,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
import re
def adds_to_ten(a_string):
for number in range(0, len(a_string)):
letter = a_string[number]
try:
number = int(letter)
        except ValueError:
print(a_string[number:number+3:1], number, a_string[number-1], letter*3)
print(a_string[number:number+3:1] == letter*3 and a_string[number+4] != letter and a_string[number-1] != letter)
def find_3_letters_between(a_string):
for number in range(0, len(a_string) - 4):
letter = a_string[number]
matches = a_string[number:number+3:1] == letter*3 and a_string[number+4] == letter and a_string[number-1] != letter
print(matches)
find_3_letters_between('ababaaab')
|
[
"victoridongesit@gmail.com"
] |
victoridongesit@gmail.com
|
c1cbaa23a1aaaec6e5a3843f37f57011a4402b8d
|
7213ae1a7a1677d17d11cdc2827221ee8ced4f9c
|
/Baek_jun/2292.py
|
b67d534f18b44bf42ff155713c5c97013813a389
|
[] |
no_license
|
eat-toast/Algorithm
|
c532fbdc9e8faa2205dd0a7ab333a8c98f7c16e2
|
f82c6fdc763c434ca2cc3bf86ee8378503d019f1
|
refs/heads/master
| 2021-10-24T23:52:38.482506
| 2019-03-30T08:28:07
| 2019-03-30T08:28:07
| 112,037,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
N = int(input())
step = 1
old_num = 1
if N == 1:
    print(1)
else:
    while True:
        # Ring `step` of the honeycomb holds the 6*step numbers after old_num.
        max_num = 6*step + old_num
        if old_num < N <= max_num:
            # N sits in ring `step`, i.e. step+1 rooms counting the centre.
            print(step+1)
            break
        else:
            step += 1
            old_num = max_num
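# Worked example: N=13. Ring 1 covers 2..7 and ring 2 covers 8..19, so the
# loop stops at step=2 and prints 3 (the centre room plus two ring crossings).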
|
[
"noreply@github.com"
] |
eat-toast.noreply@github.com
|
d078d98ab762bb1d1a429ab405b5e969885c2172
|
8bcf7c1ed213d5a296d592a420b3d678f01ff716
|
/stack_and_queue/ANARC09A.py
|
b6aaf653865fde7ac201ca8d253c099faf4230a2
|
[] |
no_license
|
cskanani/codechef_prepare
|
237b65455c2294c4a96d72cfa4cdecb8734b48ee
|
36075328b0f52dc6237a96a4094358128327dc0b
|
refs/heads/master
| 2020-12-03T14:11:04.963970
| 2020-03-05T17:29:23
| 2020-03-05T17:29:23
| 231,349,403
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
parantheses_string = input().strip()
ans_number = 0
while '-' not in parantheses_string:
ans_number += 1
operations_required = 0
    bracket_count = 0  # to simulate a stack of unmatched '{'
for i in range(len(parantheses_string)):
if parantheses_string[i] == '{':
bracket_count += 1
elif bracket_count > 0 and parantheses_string[i] == '}':
bracket_count -= 1
elif parantheses_string[i] == '}':
operations_required += 1
bracket_count = 1
print('{}. {}'.format(ans_number, operations_required + bracket_count//2))
parantheses_string = input().strip()
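# Worked example: for '}{' the '}' arrives on an empty stack, so one flip is
# counted (operations_required=1) and it is treated as '{' (bracket_count=1);
# the trailing '{' raises bracket_count to 2, and the two leftover '{' cost
# one more flip: 1 + 2//2 = 2 operations in total.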
|
[
"kananichandresh@gmail.com"
] |
kananichandresh@gmail.com
|
6a463bc00a35e2a6151c42ca825513e6973d82ad
|
b653971414c2c8b144d2fcb63b214cd82523c4d0
|
/hdf5_getters.py
|
e92d561ca0873048680c1d52a02c820673422174
|
[
"MIT"
] |
permissive
|
shirleycohen/MillionSong
|
b37a35a51fa997a8a118581605376f9dc389c72d
|
d5a31fe6bd32999555efc47226093f6bc7e09199
|
refs/heads/master
| 2021-01-18T04:12:53.666761
| 2017-03-23T19:49:07
| 2017-03-23T19:49:07
| 85,751,728
| 0
| 0
| null | 2017-03-21T20:47:51
| 2017-03-21T20:47:51
| null |
UTF-8
|
Python
| false
| false
| 22,184
|
py
|
"""
Thierry Bertin-Mahieux (2010) Columbia University
tb2332@columbia.edu
This code contains a set of getters functions to access the fields
from an HDF5 song file (regular file with one song or
aggregate / summary file with many songs)
This is part of the Million Song Dataset project from
LabROSA (Columbia University) and The Echo Nest.
Copyright 2010, Thierry Bertin-Mahieux
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import tables
def open_h5_file_read(h5filename):
"""
Open an existing H5 in read mode.
Same function as in hdf5_utils, here so we avoid one import
"""
return tables.openFile(h5filename, mode='r')
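# Note: PyTables 3.x renamed openFile() to open_file(); on a modern install
# the equivalent call is tables.open_file(h5filename, mode='r').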
def get_num_songs(h5):
"""
Return the number of songs contained in this h5 file, i.e. the number of rows
    for all basic information like name, artist, ...
"""
return h5.root.metadata.songs.nrows
def get_artist_familiarity(h5,songidx=0):
"""
Get artist familiarity from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_familiarity[songidx]
def get_artist_hotttnesss(h5,songidx=0):
"""
Get artist hotttnesss from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_hotttnesss[songidx]
def get_artist_id(h5,songidx=0):
"""
Get artist id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_id[songidx]
def get_artist_mbid(h5,songidx=0):
"""
    Get artist musicbrainz id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_mbid[songidx]
def get_artist_playmeid(h5,songidx=0):
"""
Get artist playme id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_playmeid[songidx]
def get_artist_7digitalid(h5,songidx=0):
"""
Get artist 7digital id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_7digitalid[songidx]
def get_artist_latitude(h5,songidx=0):
"""
Get artist latitude from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_latitude[songidx]
def get_artist_longitude(h5,songidx=0):
"""
Get artist longitude from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_longitude[songidx]
def get_artist_location(h5,songidx=0):
"""
Get artist location from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_location[songidx]
def get_artist_name(h5,songidx=0):
"""
Get artist name from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.artist_name[songidx]
def get_release(h5,songidx=0):
"""
Get release from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.release[songidx]
def get_release_7digitalid(h5,songidx=0):
"""
Get release 7digital id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.release_7digitalid[songidx]
def get_song_id(h5,songidx=0):
"""
Get song id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.song_id[songidx]
def get_song_hotttnesss(h5,songidx=0):
"""
Get song hotttnesss from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.song_hotttnesss[songidx]
def get_title(h5,songidx=0):
"""
Get title from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.title[songidx]
def get_track_7digitalid(h5,songidx=0):
"""
Get track 7digital id from a HDF5 song file, by default the first song in it
"""
return h5.root.metadata.songs.cols.track_7digitalid[songidx]
def get_similar_artists(h5,songidx=0):
"""
Get similar artists array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.metadata.songs.nrows == songidx + 1:
return h5.root.metadata.similar_artists[h5.root.metadata.songs.cols.idx_similar_artists[songidx]:]
return h5.root.metadata.similar_artists[h5.root.metadata.songs.cols.idx_similar_artists[songidx]:
h5.root.metadata.songs.cols.idx_similar_artists[songidx+1]]
def get_artist_terms(h5,songidx=0):
"""
Get artist terms array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.metadata.songs.nrows == songidx + 1:
return h5.root.metadata.artist_terms[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:]
return h5.root.metadata.artist_terms[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:
h5.root.metadata.songs.cols.idx_artist_terms[songidx+1]]
def get_artist_terms_freq(h5,songidx=0):
"""
Get artist terms array frequencies. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.metadata.songs.nrows == songidx + 1:
return h5.root.metadata.artist_terms_freq[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:]
return h5.root.metadata.artist_terms_freq[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:
h5.root.metadata.songs.cols.idx_artist_terms[songidx+1]]
def get_artist_terms_weight(h5,songidx=0):
"""
    Get artist terms weight array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.metadata.songs.nrows == songidx + 1:
return h5.root.metadata.artist_terms_weight[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:]
return h5.root.metadata.artist_terms_weight[h5.root.metadata.songs.cols.idx_artist_terms[songidx]:
h5.root.metadata.songs.cols.idx_artist_terms[songidx+1]]
def get_analysis_sample_rate(h5,songidx=0):
"""
Get analysis sample rate from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.analysis_sample_rate[songidx]
def get_audio_md5(h5,songidx=0):
"""
Get audio MD5 from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.audio_md5[songidx]
def get_danceability(h5,songidx=0):
"""
Get danceability from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.danceability[songidx]
def get_duration(h5,songidx=0):
"""
Get duration from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.duration[songidx]
def get_end_of_fade_in(h5,songidx=0):
"""
Get end of fade in from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.end_of_fade_in[songidx]
def get_energy(h5,songidx=0):
"""
Get energy from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.energy[songidx]
def get_key(h5,songidx=0):
"""
Get key from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.key[songidx]
def get_key_confidence(h5,songidx=0):
"""
Get key confidence from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.key_confidence[songidx]
def get_loudness(h5,songidx=0):
"""
Get loudness from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.loudness[songidx]
def get_mode(h5,songidx=0):
"""
Get mode from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.mode[songidx]
def get_mode_confidence(h5,songidx=0):
"""
Get mode confidence from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.mode_confidence[songidx]
def get_start_of_fade_out(h5,songidx=0):
"""
Get start of fade out from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.start_of_fade_out[songidx]
def get_tempo(h5,songidx=0):
"""
Get tempo from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.tempo[songidx]
def get_time_signature(h5,songidx=0):
"""
Get signature from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.time_signature[songidx]
def get_time_signature_confidence(h5,songidx=0):
"""
Get signature confidence from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.time_signature_confidence[songidx]
def get_track_id(h5,songidx=0):
"""
Get track id from a HDF5 song file, by default the first song in it
"""
return h5.root.analysis.songs.cols.track_id[songidx]
def get_segments_start(h5,songidx=0):
"""
Get segments start array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_start[h5.root.analysis.songs.cols.idx_segments_start[songidx]:]
return h5.root.analysis.segments_start[h5.root.analysis.songs.cols.idx_segments_start[songidx]:
h5.root.analysis.songs.cols.idx_segments_start[songidx+1]]
def get_segments_confidence(h5,songidx=0):
"""
Get segments confidence array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_confidence[h5.root.analysis.songs.cols.idx_segments_confidence[songidx]:]
return h5.root.analysis.segments_confidence[h5.root.analysis.songs.cols.idx_segments_confidence[songidx]:
h5.root.analysis.songs.cols.idx_segments_confidence[songidx+1]]
def get_segments_pitches(h5,songidx=0):
"""
Get segments pitches array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_pitches[h5.root.analysis.songs.cols.idx_segments_pitches[songidx]:,:]
return h5.root.analysis.segments_pitches[h5.root.analysis.songs.cols.idx_segments_pitches[songidx]:
h5.root.analysis.songs.cols.idx_segments_pitches[songidx+1],:]
def get_segments_timbre(h5,songidx=0):
"""
Get segments timbre array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_timbre[h5.root.analysis.songs.cols.idx_segments_timbre[songidx]:,:]
return h5.root.analysis.segments_timbre[h5.root.analysis.songs.cols.idx_segments_timbre[songidx]:
h5.root.analysis.songs.cols.idx_segments_timbre[songidx+1],:]
def get_segments_loudness_max(h5,songidx=0):
"""
Get segments loudness max array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_loudness_max[h5.root.analysis.songs.cols.idx_segments_loudness_max[songidx]:]
return h5.root.analysis.segments_loudness_max[h5.root.analysis.songs.cols.idx_segments_loudness_max[songidx]:
h5.root.analysis.songs.cols.idx_segments_loudness_max[songidx+1]]
def get_segments_loudness_max_time(h5,songidx=0):
"""
Get segments loudness max time array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_loudness_max_time[h5.root.analysis.songs.cols.idx_segments_loudness_max_time[songidx]:]
return h5.root.analysis.segments_loudness_max_time[h5.root.analysis.songs.cols.idx_segments_loudness_max_time[songidx]:
h5.root.analysis.songs.cols.idx_segments_loudness_max_time[songidx+1]]
def get_segments_loudness_start(h5,songidx=0):
"""
Get segments loudness start array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.segments_loudness_start[h5.root.analysis.songs.cols.idx_segments_loudness_start[songidx]:]
return h5.root.analysis.segments_loudness_start[h5.root.analysis.songs.cols.idx_segments_loudness_start[songidx]:
h5.root.analysis.songs.cols.idx_segments_loudness_start[songidx+1]]
def get_sections_start(h5,songidx=0):
"""
Get sections start array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.sections_start[h5.root.analysis.songs.cols.idx_sections_start[songidx]:]
return h5.root.analysis.sections_start[h5.root.analysis.songs.cols.idx_sections_start[songidx]:
h5.root.analysis.songs.cols.idx_sections_start[songidx+1]]
def get_sections_confidence(h5,songidx=0):
"""
Get sections confidence array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.sections_confidence[h5.root.analysis.songs.cols.idx_sections_confidence[songidx]:]
return h5.root.analysis.sections_confidence[h5.root.analysis.songs.cols.idx_sections_confidence[songidx]:
h5.root.analysis.songs.cols.idx_sections_confidence[songidx+1]]
def get_beats_start(h5,songidx=0):
"""
Get beats start array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.beats_start[h5.root.analysis.songs.cols.idx_beats_start[songidx]:]
return h5.root.analysis.beats_start[h5.root.analysis.songs.cols.idx_beats_start[songidx]:
h5.root.analysis.songs.cols.idx_beats_start[songidx+1]]
def get_beats_confidence(h5,songidx=0):
"""
Get beats confidence array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.beats_confidence[h5.root.analysis.songs.cols.idx_beats_confidence[songidx]:]
return h5.root.analysis.beats_confidence[h5.root.analysis.songs.cols.idx_beats_confidence[songidx]:
h5.root.analysis.songs.cols.idx_beats_confidence[songidx+1]]
def get_bars_start(h5,songidx=0):
"""
Get bars start array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.bars_start[h5.root.analysis.songs.cols.idx_bars_start[songidx]:]
return h5.root.analysis.bars_start[h5.root.analysis.songs.cols.idx_bars_start[songidx]:
h5.root.analysis.songs.cols.idx_bars_start[songidx+1]]
def get_bars_confidence(h5,songidx=0):
"""
    Get bars confidence array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.bars_confidence[h5.root.analysis.songs.cols.idx_bars_confidence[songidx]:]
return h5.root.analysis.bars_confidence[h5.root.analysis.songs.cols.idx_bars_confidence[songidx]:
h5.root.analysis.songs.cols.idx_bars_confidence[songidx+1]]
def get_tatums_start(h5,songidx=0):
"""
Get tatums start array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.tatums_start[h5.root.analysis.songs.cols.idx_tatums_start[songidx]:]
return h5.root.analysis.tatums_start[h5.root.analysis.songs.cols.idx_tatums_start[songidx]:
h5.root.analysis.songs.cols.idx_tatums_start[songidx+1]]
def get_tatums_confidence(h5,songidx=0):
"""
Get tatums confidence array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.analysis.songs.nrows == songidx + 1:
return h5.root.analysis.tatums_confidence[h5.root.analysis.songs.cols.idx_tatums_confidence[songidx]:]
return h5.root.analysis.tatums_confidence[h5.root.analysis.songs.cols.idx_tatums_confidence[songidx]:
h5.root.analysis.songs.cols.idx_tatums_confidence[songidx+1]]
def get_artist_mbtags(h5,songidx=0):
"""
Get artist musicbrainz tag array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.musicbrainz.songs.nrows == songidx + 1:
return h5.root.musicbrainz.artist_mbtags[h5.root.musicbrainz.songs.cols.idx_artist_mbtags[songidx]:]
    return h5.root.musicbrainz.artist_mbtags[h5.root.musicbrainz.songs.cols.idx_artist_mbtags[songidx]:
                                             h5.root.musicbrainz.songs.cols.idx_artist_mbtags[songidx+1]]
def get_artist_mbtags_count(h5,songidx=0):
"""
Get artist musicbrainz tag count array. Takes care of the proper indexing if we are in aggregate
file. By default, return the array for the first song in the h5 file.
To get a regular numpy ndarray, cast the result to: numpy.array( )
"""
if h5.root.musicbrainz.songs.nrows == songidx + 1:
return h5.root.musicbrainz.artist_mbtags_count[h5.root.musicbrainz.songs.cols.idx_artist_mbtags[songidx]:]
    return h5.root.musicbrainz.artist_mbtags_count[h5.root.musicbrainz.songs.cols.idx_artist_mbtags[songidx]:
                                                   h5.root.musicbrainz.songs.cols.idx_artist_mbtags[songidx+1]]
def get_year(h5,songidx=0):
"""
Get release year from a HDF5 song file, by default the first song in it
"""
return h5.root.musicbrainz.songs.cols.year[songidx]
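# Minimal usage sketch (the .h5 filename below is illustrative, not part of
# this module):
if __name__ == '__main__':
    h5 = open_h5_file_read('TRAXLZU12903D05F94.h5')
    print('%s - %s' % (get_artist_name(h5), get_title(h5)))
    print('tempo: %s' % get_tempo(h5))
    h5.close()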
|
[
"jeff.kibler@infobright.com"
] |
jeff.kibler@infobright.com
|
cdf4fc4eac8ac721c4cb6dccc000e71414fb2392
|
d659810b24ebc6ae29a4d7fbb3b82294c860633a
|
/aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/GetServiceDetailRequest.py
|
0274809385003cccbff6ca1a9c5f43b64d78a821
|
[
"Apache-2.0"
] |
permissive
|
leafcoder/aliyun-openapi-python-sdk
|
3dd874e620715173b6ccf7c34646d5cb8268da45
|
26b441ab37a5cda804de475fd5284bab699443f1
|
refs/heads/master
| 2023-07-31T23:22:35.642837
| 2021-09-17T07:49:51
| 2021-09-17T07:49:51
| 407,727,896
| 0
| 0
|
NOASSERTION
| 2021-09-18T01:56:10
| 2021-09-18T01:56:09
| null |
UTF-8
|
Python
| false
| false
| 3,259
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkedas.endpoint import endpoint_data
class GetServiceDetailRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'GetServiceDetail','Edas')
self.set_uri_pattern('/pop/sp/api/mseForOam/getServiceDetail')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_registryType(self):
return self.get_query_params().get('registryType')
def set_registryType(self,registryType):
self.add_query_param('registryType',registryType)
def get_serviceType(self):
return self.get_query_params().get('serviceType')
def set_serviceType(self,serviceType):
self.add_query_param('serviceType',serviceType)
def get_origin(self):
return self.get_query_params().get('origin')
def set_origin(self,origin):
self.add_query_param('origin',origin)
def get_appId(self):
return self.get_query_params().get('appId')
def set_appId(self,appId):
self.add_query_param('appId',appId)
def get_ip(self):
return self.get_query_params().get('ip')
def set_ip(self,ip):
self.add_query_param('ip',ip)
def get_namespace(self):
return self.get_query_params().get('namespace')
def set_namespace(self,namespace):
self.add_query_param('namespace',namespace)
def get_serviceVersion(self):
return self.get_query_params().get('serviceVersion')
def set_serviceVersion(self,serviceVersion):
self.add_query_param('serviceVersion',serviceVersion)
def get_serviceName(self):
return self.get_query_params().get('serviceName')
def set_serviceName(self,serviceName):
self.add_query_param('serviceName',serviceName)
def get_source(self):
return self.get_query_params().get('source')
def set_source(self,source):
self.add_query_param('source',source)
def get_region(self):
return self.get_query_params().get('region')
def set_region(self,region):
self.add_query_param('region',region)
def get_serviceId(self):
return self.get_query_params().get('serviceId')
def set_serviceId(self,serviceId):
self.add_query_param('serviceId',serviceId)
def get_group(self):
return self.get_query_params().get('group')
def set_group(self,group):
self.add_query_param('group',group)
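# Minimal usage sketch (region, credentials and service name are placeholders):
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = GetServiceDetailRequest()
# request.set_serviceName('my-service')
# response = client.do_action_with_exception(request)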
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
d97a89429ebe411ef2de2f263cc53f3eb6aa6988
|
5dec96a4833bea52d5105a77f45022f2c3c3157a
|
/QUICK_START/NODE_DEEPLABV3SEG_CUSTER/src/script/squeezeseg/nets/utils/__init__.py
|
2aee7f84a672a2e05885f81f414f00fb090af095
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
jtpils/DINK
|
ebb3259d0fb8da0db46f670659bbb98c5b34c317
|
5f6b3eaba279126f79ae6607f965311002d7451c
|
refs/heads/master
| 2020-04-28T23:47:42.726499
| 2019-03-14T15:10:32
| 2019-03-14T15:10:32
| 175,669,731
| 1
| 0
|
BSD-3-Clause
| 2019-03-14T17:32:06
| 2019-03-14T17:32:06
| null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
# BSD 3-Clause License
#
# Copyright (c) 2019, FPAI
# Copyright (c) 2019, SeriouslyHAO
# Copyright (c) 2019, xcj2019
# Copyright (c) 2019, Leonfirst
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
[
"2281504330@qq.com"
] |
2281504330@qq.com
|
ad19fd452e4db1b70a3909df02d3983f5143594a
|
8fab832545c32c4c9bbdd9d2656b0e7999e1ba95
|
/python/inky/phat/calendar-phat.py.save
|
1d2108a89fe905198d278a00531fd1bf55bd768c
|
[] |
no_license
|
krishnamarwaha/Raspberry-PI
|
1d96c5f5a9279f8210f566509d1d352f47dc775a
|
cc429335ef3fa6fb87b6f7ab0453283887ac9a04
|
refs/heads/master
| 2021-07-09T19:24:37.589325
| 2020-12-20T19:06:18
| 2020-12-20T19:06:18
| 221,528,517
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,723
|
save
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import calendar
import argparse
from PIL import Image, ImageDraw
from inky import InkyPHAT
print("""Inky pHAT: Calendar
Draws a calendar for the current month to your Inky pHAT.
This example uses a sprite sheet of numbers and month names which are
composited over the background in a couple of different ways.
""")
# Command line arguments to set display type and colour
parser = argparse.ArgumentParser()
parser.add_argument('--colour', '-c', type=str, required=True, choices=["red", "black", "yellow"], help="ePaper display colour")
args = parser.parse_args()
colour = args.colour
# Set up the display
inky_display = InkyPHAT(colour)
inky_display.set_border(inky_display.BLACK)
# inky_display.set_rotation(180)
def create_mask(source, mask=(inky_display.WHITE, inky_display.BLACK, inky_display.RED)):
"""Create a transparency mask.
Takes a paletized source image and converts it into a mask
permitting all the colours supported by Inky pHAT (0, 1, 2)
or an optional list of allowed colours.
:param mask: Optional list of Inky pHAT colours to allow.
"""
mask_image = Image.new("1", source.size)
w, h = source.size
for x in range(w):
for y in range(h):
p = source.getpixel((x, y))
if p in mask:
mask_image.putpixel((x, y), 255)
return mask_image
def print_digit(position, digit, colour):
"""Print a single digit using the sprite sheet.
Each number is grabbed from the masked sprite sheet,
and then used as a mask to paste the desired colour
    onto Inky pHAT's image buffer.
"""
o_x, o_y = position
num_margin = 2
num_width = 6
num_height = 7
s_y = 11
s_x = num_margin + (digit * (num_width + num_margin))
sprite = text_mask.crop((s_x, s_y, s_x + num_width, s_y + num_height))
img.paste(colour, (o_x, o_y), sprite)
def print_number(position, number, colour):
"""Print a number using the sprite sheet."""
for digit in str(number):
print_digit(position, int(digit), colour)
position = (position[0] + 8, position[1])
# Load our sprite sheet and prepare a mask
text = Image.open("resources/calendar.png")
text_mask = create_mask(text, [inky_display.WHITE])
# Note: The mask determines which pixels from our sprite sheet we want
# to actually use when calling img.paste().
# See: http://pillow.readthedocs.io/en/3.1.x/reference/Image.html?highlight=paste#PIL.Image.Image.paste
# Load our backdrop image
img = Image.open("resources/empty-backdrop.png")
draw = ImageDraw.Draw(img)
# Grab the current date, and prepare our calendar
cal = calendar.Calendar()
now = datetime.datetime.now()
dates = cal.monthdatescalendar(now.year, now.month)
col_w = 20
col_h = 13
cols = 7
rows = len(dates) + 1
cal_w = 1 + ((col_w + 1) * cols)
cal_h = 1 + ((col_h + 1) * rows)
cal_x = inky_display.WIDTH - cal_w - 2
cal_y = 2
# Paint out a black rectangle onto which we'll draw our canvas
draw.rectangle((cal_x, cal_y, cal_x + cal_w - 1, cal_y + cal_h - 1), fill=inky_display.BLACK, outline=inky_display.WHITE)
# The starting position of the months in our spritesheet
months_x = 2
months_y = 20
# Number of months per row
months_cols = 3
# The width/height of each month in our spritesheet
month_w = 23
month_h = 9
# Figure out where the month is in the spritesheet
month_col = (now.month - 1) % months_cols
month_row = (now.month - 1) // months_cols
# Convert that location to usable X/Y coordinates
month_x = months_x + (month_col * month_w)
month_y = months_y + (month_row * month_h)
crop_region = (month_x, month_y, month_x + month_w, month_y + month_h)
month = text.crop(crop_region)
month_mask = text_mask.crop(crop_region)
monthyear_x = 28
# Paste in the month name we grabbed from our sprite sheet
img.paste(inky_display.WHITE, (monthyear_x, cal_y + 4), month_mask)
# Print the year right below the month
print_number((monthyear_x, cal_y + 5 + col_h), now.year, inky_display.WHITE)
# Draw the vertical lines which separate the columns
# and also draw the day names into the table header
for x in range(cols):
# Figure out the left edge of the column
o_x = (col_w + 1) * x
o_x += cal_x
crop_x = 2 + (16 * x)
# Crop the relevant day name from our text image
    crop_region = (crop_x, 0, crop_x + 16, 9)
day_mask = text_mask.crop(crop_region)
img.paste(inky_display.WHITE, (o_x + 4, cal_y + 2), day_mask)
# Offset to the right side of the column and draw the vertical line
o_x += col_w + 1
draw.line((o_x, cal_y, o_x, cal_h))
# Draw the horizontal lines which separate the rows
for y in range(rows):
o_y = (col_h + 1) * y
o_y += cal_y + col_h + 1
draw.line((cal_x, o_y, cal_w + cal_x - 1, o_y))
# Step through each week
for row, week in enumerate(dates):
y = (col_h + 1) * (row + 1)
y += cal_y + 1
# And each day in the week
for col, day in enumerate(week):
x = (col_w + 1) * col
x += cal_x + 1
# Draw in the day name.
# If it's the current day, invert the calendar background and text
if (day.day, day.month) == (now.day, now.month):
draw.rectangle((x, y, x + col_w - 1, y + col_h - 1), fill=inky_display.WHITE)
print_number((x + 3, y + 3), day.day, inky_display.BLACK)
# If it's any other day, paint in as white if it's in the current month
# and red if it's in the previous or next month
else:
print_number((x + 3, y + 3), day.day, inky_display.WHITE if day.month == now.month else inky_display.RED)
# Display the completed calendar on Inky pHAT
inky_display.set_image(img)
inky_display.show()
|
[
"krishna.marwaha@outlook.com"
] |
krishna.marwaha@outlook.com
|
f28de4409a49b4958ed94421be319b08d13fc589
|
bd14c979335112b7718b0feda18ebf0e3b40fe5c
|
/first_trial/c_travelling.py
|
7702d96dadc01c04581f9ca99310328b19e3c015
|
[] |
no_license
|
ababa831/atcoder_beginners
|
22c57b15333d110126d1b1afadc0ff5e8784fc4f
|
1a30882ce7f20f312045d5dc7bfaa5688cc8a88e
|
refs/heads/master
| 2023-03-07T15:47:19.750682
| 2020-03-04T19:53:45
| 2020-03-04T19:53:45
| 143,360,607
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
# I couldn't reach an answer at first.
# Key points:
# Look for conditions which constrain the search range!
# 1. There are no paths for t_i < x_i + y_i (e.g. t=4, x=100, y=100)
# 2. There are no paths for t_i % 2 != (x_i + y_i) % 2 (e.g. t=4, x=1, y=0)
#    because a point is reachable only in either an odd or an even number of steps (returning to the same point always costs an even number of extra steps)
import random
n = int(input())
txy_list = [list(map(int, input().split())) for _ in range(n)]
loc = [0, 0]
n_check = 0
checkpoint = -1
start = 0
# select one from 4 paths
def add_x(x, y):
x += 1
return [x, y]
def add_y(x, y):
y += 1
return [x, y]
def sub_x(x, y):
x -= 1
return [x, y]
def sub_y(x, y):
y -= 1
return [x, y]
for i in range(n):
for j in range(start, i):
path = random.randrange(1,5)
if path == 1:
loc = add_x(loc[0], loc[1])
if path == 2:
loc = add_y(loc[0], loc[1])
if path == 3:
loc = sub_x(loc[0], loc[1])
if path == 4:
loc = sub_y(loc[0], loc[1])
if loc == txy_list[i][1:]:
n_check += 1
#Gave up at this point.
# A sample answer
n = int(input())
for i in range(n):
t, x, y = map(int, input().split())
if x + y > t or (x + y) % 2 != t % 2:
print('No')
exit()
print("Yes")
|
[
"flvonlineconverter@gmail.com"
] |
flvonlineconverter@gmail.com
|
b1f306e2edac18026c53ab1ebe97885c109fcb53
|
1480955b11c30691f56865bf827b7f89168da523
|
/book/knapsack.py
|
ecd31fecf868cda6dc21e99bfb6837b1663c9477
|
[] |
no_license
|
zoo200/atcoder
|
f604bff3301dfd7fd0fba916fc71f9d11eeb3871
|
db9f15e5294ff034e25358d53cc2266bc95686e9
|
refs/heads/master
| 2020-11-25T19:31:42.312537
| 2020-07-25T15:38:31
| 2020-07-25T15:38:31
| 228,813,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,417
|
py
|
from collections import defaultdict
n,w = map(int,input().split())
items = defaultdict(dict)
c = [[0] * (w + 1) for _ in range(n + 1)]
g = [[0] * (w + 1) for _ in range(n + 1)]
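# c[i][j]: best value using only the first i items within capacity j
# g[i][j]: 1 if item i is taken to achieve c[i][j] (used for the trace-back)
# Recurrence: c[i][j] = max(c[i-1][j], v_i + c[i-1][j - w_i])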
for i in range(1,n+1):
a,b = map(int,input().split())
items[i]['v'] = a
items[i]['w'] = b
for i in range(w+1):
c[0][i] = 0
g[0][i] = 1
for i in range(1,n+1):
c[i][0] = 0
for i in range(1,n+1):
for j in range(1,w+1):
c[i][j] = c[i-1][j]
g[i][w] = 0
if(items[i]['w'] > j): continue
        # Value of the knapsack when the current item is added on top of the best packing of the previous items within the remaining capacity
now_v = items[i]['v'] + c[i-1][j- items[i]['w']]
        # Taking the item is worth more than leaving it out
if( now_v > c[i-1][j]):
c[i][j] = now_v
g[i][j] = 1
# Maximum value
max_v = c[n][w]
print(max_v)
[print(i) for i in c]
[print(i) for i in g]
# Trace back in reverse order to recover which items yield the maximum value
p = []
ww = w
for i in range(n,0,-1):
if(g[i][ww] == 1):
p.append(i)
ww -= items[i]['w']
print(p)
# p.416
# in
# 4 5
# 4 2
# 5 2
# 2 1
# 8 3
# out
# 13
# [0, 0, 0, 0, 0, 0]
# [0, 0, 4, 4, 4, 4]
# [0, 0, 5, 5, 9, 9]
# [0, 2, 5, 7, 9, 11]
# [0, 2, 5, 8, 10, 13]
# [1, 1, 1, 1, 1, 1]
# [0, 0, 1, 1, 1, 1]
# [0, 0, 1, 1, 1, 1]
# [0, 1, 0, 1, 0, 1]
# [0, 0, 0, 1, 1, 1]
# [4, 2]
|
[
"noreply@github.com"
] |
zoo200.noreply@github.com
|
aa3908605e7f5276c6faa8a19ddda181df2fe430
|
18a185cc757d9aa3d690f174ca0841e1d90157b3
|
/scripts/lib/calculate_reference_plot_llhs.py
|
931bb3231c6b987e9c3f2e128770397eb4330b2b
|
[
"MIT"
] |
permissive
|
fiedl/hole-ice-study
|
b2e213747af914fedadd76cb94e7042389183f41
|
9811a29c65fb22e314efd65e0309c83772928d7f
|
refs/heads/master
| 2023-04-13T21:05:06.897901
| 2022-09-13T12:08:14
| 2022-09-13T12:08:14
| 118,835,798
| 6
| 2
|
NOASSERTION
| 2023-03-16T23:10:07
| 2018-01-24T23:35:15
| null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
#!/usr/bin/env python
# Calculate log likelihood (LLH) for reference angular acceptance curve vs. simulation.
# https://github.com/fiedl/hole-ice-study/issues/12
# Usage: ./calculate_reference_plot_llhs.py ~/hole-ice-study/results/parameter_scan
# This will calculate the LLH for each run within the given data directory
# and append the "llh" to the "options.json" files within.
import cli
import options
import reference_curve_llh
print "Calculating reference-curve LLHs"
for data_dir in cli.data_dirs():
print " " + data_dir
opts = options.read(data_dir)
if "llh" in opts:
llh = opts["llh"]
else:
llh = reference_curve_llh.calculate(data_dir)
options.append(data_dir, {"llh": llh})
print " LLH = " + str(llh)
|
[
"github@fiedlschuster.de"
] |
github@fiedlschuster.de
|
19e07b20e6e2919522eb238a49df393a79596a9c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02936/s644824648.py
|
1cee97e5f35d7e758af2c7ed362e708613fbb89b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
import sys
sys.setrecursionlimit(10 ** 6)
def input():
return sys.stdin.readline()[:-1]
def dfs(v,prev = -1):
for u in graph[v]:
if u == prev:
continue
point[u] += point[v]
dfs(u,v)
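# Each query only adds x to point[p]; the single DFS from the root then pushes
# every node's accumulated value into its children, so a node ends up with the
# sum of the additions made on itself and on all of its ancestors.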
N, Q = list(map(int,input().split()))
graph = [[] for _ in range(N)]
point = [0]*N
for i in range(N-1):
a,b = list(map(int,input().split()))
graph[a-1].append(b-1)
graph[b-1].append(a-1)
for i in range(Q):
p,x = list(map(int,input().split()))
p -= 1
point[p] += x
dfs(0)
print(*point)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4f6e2604717222728344a10eea3c90f2e8dd5604
|
797bd52c384b9ce5fcac4add937982fec266af8f
|
/object_classification.py
|
238e75428e545141b2e559af68279e18204afbbc
|
[] |
no_license
|
anki08/staff
|
11b9e8c45c65c2de76e4a2406b4c7c9f26ba1797
|
d2ca5c8bea1f3e1e82ce3a7f408d06c9d469d200
|
refs/heads/main
| 2023-03-24T14:40:08.019544
| 2021-03-10T16:48:10
| 2021-03-10T16:48:10
| 346,416,818
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
import torch
import torchvision
from PIL import Image
import json
class_idx = json.load(open("imagenet_class_index.json"))
I = Image.open('dog.jpg')
print(I)
model = torchvision.models.resnext101_32x8d(pretrained=True, progress=False)
transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize(224),  # Scale was deprecated and renamed to Resize
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
model.eval()
p = model(transform(I)[None])[0]
print(model)
# print(p)
print( ' , '.join([class_idx[str(int(i))][1] for i in p.argsort(descending=True)[:5]]) )
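# Note: p holds raw logits; to turn them into class probabilities, apply a
# softmax first, e.g. torch.nn.functional.softmax(p, dim=0)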
|
[
"noreply@github.com"
] |
anki08.noreply@github.com
|
68faa4a34463e8c87df6fa1a967b58e7fc27082b
|
0472e717b8b2739a1eee20d49026f87031f1f72d
|
/v2/model/source.py
|
ddff5219b2f39d781469a0e5555dc673543fdab5
|
[] |
no_license
|
nightarcherbr/uplayer
|
f11dd47c1da7655005c3051df7be706e75a6feca
|
3d67275676111a0fdf0b4874129c6ba824022f4f
|
refs/heads/master
| 2021-01-12T17:08:38.407182
| 2016-10-24T10:47:36
| 2016-10-24T10:47:36
| 71,516,128
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,635
|
py
|
import model;
class BaseSource():
def load(self):
raise model.ModelException('Not implemented');
class FileSource(BaseSource):
"""
    Reads JSON from a file
"""
def __init__( self, source, parameters = None ):
self.source = source;
self.parameters = parameters;
def load(self):
import json;
try:
with open(self.source, 'r+') as fp:
try:
data = json.load(fp);
return data;
except ValueError:
raise model.InvalidResponseException( 'Invalid JSON' );
except FileNotFoundError:
raise model.FileNotFoundException('File not found');
class HTTPSource(BaseSource):
"""
    Reads JSON from a URL
"""
def __init__( self, source, parameters = None, method="POST" ):
self.source = source;
self.parameters = parameters;
self.method = method;
def load(self):
import urllib.request;
import urllib.error;
import json;
try:
req = urllib.request.Request(self.source, self.parameters);
with urllib.request.urlopen(req) as fp:
try:
info = (fp.read().decode('utf-8'))
data = json.loads(info);
return data;
except ValueError:
raise model.InvalidResponseException('Invalid JSON');
except urllib.error.HTTPError as e:
raise model.NetworkException('404 - URL not found');
except urllib.error.URLError:
raise model.NetworkException('404 - Server not found');
class ProxySource(BaseSource):
"""
    Forwards the load call to another Source
"""
def __init__(self, Source):
self.Source = Source;
def load(self):
try:
return self.Source.load();
except model.SourceException as e:
raise e;
except Exception as e:
raise model.SourceException( str(e) );
class TimeCacheSource(ProxySource):
"""
    Caches the last valid request result for `cachetime` seconds
"""
def __init__(self, Source, cachetime=60):
super().__init__(Source);
self.time = 0;
self.cache = None;
self.cachetime = cachetime;
def load(self, force = False):
import time;
tm = time.time();
if( self.cache is None or ( tm > (self.time+self.cachetime) ) or force == True ):
self.cache = super().load();
self.time = tm;
return self.cache;
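# Minimal usage sketch (the URL is illustrative):
# source = TimeCacheSource(HTTPSource('http://example.com/playlist.json'), cachetime=30)
# data = source.load()            # fetches over HTTP and caches the result
# data = source.load()            # served from the cache for the next 30 s
# data = source.load(force=True)  # bypasses the cache and refetches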
|
[
"fabio@docker"
] |
fabio@docker
|
8934f8bae88b16771f2a9c6978607699ce1d5c3d
|
b500119f19bf221b6cffdedf4d74e0ddebeff503
|
/dropdown2.py
|
a706333e18160d55ed4e47b45bb7fa2394b3b2e1
|
[] |
no_license
|
Threesies/ELO
|
07bd58f3d3e0fdea76c3799e0ed17fc42c08ba28
|
32664b503e257fca5ef68ee2e2a9239fbb8d683b
|
refs/heads/master
| 2020-03-30T06:31:39.259992
| 2018-10-20T16:30:37
| 2018-10-20T16:30:37
| 150,867,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,557
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 11 15:17:27 2018
@author: acelentano
"""
import tkinter as tk
from tkinter import *
import tkinter.messagebox
import pandas as pd
import numpy as np
import os
from time import gmtime, strftime
root = tk.Tk()
text = tk.Text(root)
title = "RTB"
root.title(title)
export = "ThreesiesLog"
os.chdir('/Users/acelentano/pong/ELO')
champs = pd.read_csv('ThreesiesLog.csv')
current_rating = pd.read_csv('Threesies_Elo_Ratings')
current_rating.columns = ['Name', 'Rating']
current_rating.set_index('Name', inplace = True)
global dictionary
dictionary = current_rating.to_dict()
championships = pd.read_csv('Championships.csv')
championships.set_index('Names', inplace= True)
# =============================================================================
# Functions
# =============================================================================
def SubmitEntry():
submit = tk.messagebox.askquestion("Submit Entry", "Submit Game?")
if submit == "yes":
player1 = e1.get()
player2 = e2.get()
gamewinner = e3.get()
if e4.get() == 'Select Champion':
global champs
currentrow = pd.DataFrame([[player1,player2,gamewinner, strftime("%m-%d-%Y %H:%M", \
gmtime())]],columns=['Left Side Player','Right Side Player',\
'Winner','Time'])
champs = pd.concat([champs,currentrow],axis=0, ignore_index=True, sort=True)
champs.to_csv('ThreesiesLog.csv', index=False)
else:
add_championship(e4.get())
championships.to_csv('Championships.csv')
currentrow = pd.DataFrame([[player1,player2,gamewinner,strftime("%m-%d-%Y %H:%M", \
gmtime())]],columns=['Left Side Player','Right Side Player',\
'Winner','Time'])
champs = pd.concat([champs,currentrow],axis=0, ignore_index=True, sort=True)
champs.to_csv('ThreesiesLog.csv', index=False)
print_new(root)
e1.set(choices[0])
e2.set(choices[0])
e3.set(choices[0])
e4.set(champions[0])
def add_championship(y):
num = championships.loc[y]
newnum = num + 1
championships.at[y] = newnum
return championships
def QuitEntry():
quittask = tk.messagebox.showinfo("End Tournament", "Submit Championship?")
if quittask == "ok":
root.destroy()
def loser(row):
if row['Right Side Player'] == row['Winner']:
return row['Left Side Player']
else:
return row['Right Side Player']
def expected_result(elo_a, elo_b):
elo_width = 400
expect_a = 1.0/(1+10**((elo_b - elo_a)/elo_width))
return expect_a
def update_elo(winner_elo, loser_elo):
k_factor = 64
expected_win = expected_result(winner_elo, loser_elo)
change_in_elo = k_factor * (1-expected_win)
winner_elo += change_in_elo
loser_elo -= change_in_elo
    return round(winner_elo, 2), round(loser_elo, 2)
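# Standard Elo update: expected score E_A = 1 / (1 + 10**((R_B - R_A) / 400));
# the winner gains k_factor * (1 - E_winner) points and the loser loses the
# same amount. Example with K=64: a 1000-rated player beating a 1200-rated
# player moves both ratings by about 48.6 points.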
def player_matchup(player1, player2):
global winner_matrix
player2wins = winner_matrix[player1][player2]
player1wins = winner_matrix[player2][player1]
total = player1wins + player2wins
player1_perc = round((player1wins/total)*100, 2)
player2_perc = round((100 - player1_perc), 2)
return player1_perc, player2_perc
#print('Percentage wins for ' + str(player1) + ': ' + str(player1_perc)+'%')
#print('Percentage wins for ' + str(player2) + ': ' + str(player2_perc)+'%')
def win_perc():
for player in championships.index:
wins = champs.Winner.value_counts()[player]
loss = champs.Loser.value_counts()[player]
percentage = (wins/(wins+loss))*100
championships.at[player, "Win Percentage"] = percentage
championships.to_csv('Championships.csv')
return championships
def update_text(root):
printout = ''
printout2=''
matchup = ''
if e1.get() != "Select Player":
printout += (e1.get() + " " + str( (Ratings['Rating'][e1.get()])))
if e2.get() != "Select Player":
printout2 += (e2.get() + " " + str( (Ratings['Rating'][e2.get()])))
matchup += str((player_matchup(e1.get(), e2.get())))
text.delete('1.0', 'end')
text.insert('1.0', printout + '\n')
text.insert('1.0', printout2 + '\n')
text.insert('1.0', matchup + '\n')
text.tag_configure("center", justify='center')
text.tag_add("center", 1.0, "end")
def print_new(root):
champs['Loser'] = champs.apply(loser,axis=1)
Winner = list(champs['Winner'])
Loser = list(champs['Loser'])
update_win =[]
update_loss = []
for i in range(len(Winner)):
Win = Winner[i]
Lose = Loser[i]
updated_score = update_elo(dictionary['Rating'][Win], dictionary['Rating'][Lose])
dictionary['Rating'][Win], dictionary['Rating'][Lose] = updated_score
update_win.append(updated_score[0])
update_loss.append(updated_score[1])
Ratings = pd.DataFrame.from_dict(dictionary)
new = ''
if e3.get() != "Select Player":
new += (e3.get() + " new rating: " + str( (Ratings['Rating'][e3.get()])))
text.insert('1.0', new + '\n')
# =============================================================================
# Rating Preprocessing
# =============================================================================
#Add Loser
champs['Loser'] = champs.apply(loser,axis=1)
#Create list of Winners and Losers
Winner = list(champs['Winner'])
Loser = list(champs['Loser'])
#Blank List to fill DataFrame later
update_win =[]
update_loss = []
# =============================================================================
# Create new ELO Rating
# =============================================================================
for i in range(len(Winner)):
Win = Winner[i]
Lose = Loser[i]
updated_score = update_elo(dictionary['Rating'][Win], dictionary['Rating'][Lose])
dictionary['Rating'][Win], dictionary['Rating'][Lose] = updated_score
update_win.append(updated_score[0])
update_loss.append(updated_score[1])
# =============================================================================
# Add to DataFrame
# =============================================================================
champs['Winner ELO Update'] = update_win
champs['Loser ELO Update'] = update_loss
# =============================================================================
# Player matchup percentages
# =============================================================================
#average number of wins on left side of table
#np.where(champs['Winner'] == champs['Left Side Player'],1,0).mean()
win_perc()
winner_matrix = pd.crosstab(champs['Winner'],champs['Loser'])
Ratings = pd.DataFrame.from_dict(dictionary)
# =============================================================================
# Create menus
# =============================================================================
e1 = tk.StringVar(root)
e2 = tk.StringVar(root)
e3 = tk.StringVar(root)
e4 = tk.StringVar(root)
choices = ['Select Player', 'Carts','Ali','MP','SunChow','PPJ', 'Fonz', 'Alex', \
'D$', 'M1', 'M2', 'Spidey', 'MH', 'Jodie', 'Chris', 'RonRon', 'Peter', 'SeaBass', 'juju']
champions = ['Select Champion', 'Carts','Ali','MP','SunChow','PPJ', 'Fonz', 'Alex', \
'D$', 'M1', 'M2', 'Spidey', 'MH', 'Jodie', 'Chris', 'RonRon', 'Peter', 'SeaBass', 'juju']
e1.set(choices[0])
entry1 = tk.OptionMenu(root, e1, *choices, command = update_text)
entry1.grid(row=1, column=0, sticky='e')
entry1.config(width=10)
e2.set(choices[0])
entry2 = tk.OptionMenu(root, e2, *choices, command = update_text)
entry2.grid(row=1, column=2, sticky='e')
entry2.config(width=10)
e3.set(choices[0])
entry3 = tk.OptionMenu(root, e3, *choices)
entry3.grid(row=3, column=1, sticky='nsew')
entry3.config(width=10)
e4.set(champions[0])
entry4 = tk.OptionMenu(root, e4, *champions)
entry4.grid(row=4, column=1, sticky='nsew')
entry4.config(width=10)
submit = tk.Button(root, fg="blue", bg="green", text = "Submit Game", width = 20, command = SubmitEntry, activebackground="yellow")
submit.config(width=20)
Quit = tk.Button(root, fg="blue", bg="green", text="End Tournament", width=20, command=QuitEntry, activebackground="yellow")
Quit.config(width=20)
# =============================================================================
# Dropdown Button formatting
# =============================================================================
firstname = tk.Label(text="Left Side Player", fg="green")
secondname = tk.Label(text="Right Side Player", fg="green")
winner = tk.Label(text="WINNER", fg="blue")
Champion = tk.Label(text="Champion!", fg="blue")
firstname.grid(row=0, column=0,sticky='e')
secondname.grid(row=0, column=2, sticky='e')
winner.grid(row=2, column=1, sticky='nsew')
submit.grid(row=5, column=1, sticky='nsew')
Quit.grid(row=7, column=1, sticky='nsew')
root.grid_rowconfigure(4, minsize=20)
root.grid_columnconfigure(1, minsize = 10)
text.tag_configure("center", justify='center')
text.tag_add("center", 1.0, "end")
text.grid(row = 8, column = 1, sticky = 'nsew')
root.mainloop()
# =============================================================================
# Delete crap
# =============================================================================
del i, update_loss, update_win, updated_score, Winner, Lose, current_rating, Win
del E, N, S, TclVersion, TkVersion, W, X, Y, export, title, wantobjects
del winner_matrix, dictionary, choices, Loser, champions
|
[
"celentano_ali@bah.com"
] |
celentano_ali@bah.com
|
9c279d65479f9af05c8270845fd5db4027c1e978
|
4057adee68fdd294896893ba904d2d7dc89eec2c
|
/setup.py
|
9a9781b69a7e8c3668b0e698abdd6635be7b0efe
|
[] |
no_license
|
kindlychung/PopupBubble
|
9819b24c1b4683fb7d0d338de4cb9d6ad6bd8908
|
45943d70f1113c2e5988380561ec33a966cfa9db
|
refs/heads/master
| 2020-05-21T11:34:53.300144
| 2014-11-12T18:31:13
| 2014-11-12T18:31:13
| 26,549,443
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
from distutils.core import setup
import platform, os
sysname = platform.system()
if sysname == "Windows":
binpath = os.getenv("SystemRoot")
else:
binpath = r"/usr/local/bin"
setup(
name = "PopupBubble",
packages = ["PopupBubble"],
version = "0.0.4",
data_files = [(binpath, ["scripts/pop_bubble.py"])],
description = "Cross-platform desktop notification using Qt",
author = "Kaiyin Zhong",
author_email = "kindlychung@gmail.com",
url = "https://github.com/kindlychung/PopupBubble",
keywords = ["notification", "cross-platform"]
)
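# Typical usage (distutils): python setup.py install
# installs the PopupBubble package and copies scripts/pop_bubble.py into binpath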
|
[
"kindlychung@gmail.com"
] |
kindlychung@gmail.com
|
9ac436c999320a0d5baf824db317806e7afeac41
|
530d8af2100c88690e09731fb950a02f33e6e4e0
|
/blog/migrations/0003_auto_20180818_2054.py
|
e4f33d23ea905284782c215ad82a717e511a6100
|
[] |
no_license
|
gkLeo/userSystem-Django
|
940acf12f264e986ad8ac5e7d3b8aaa0a5a7b1c6
|
82e3cf45d94bb4b3d65da94a32bc18cebb6b1709
|
refs/heads/master
| 2022-04-29T18:12:57.164887
| 2018-08-22T07:49:41
| 2018-08-22T07:49:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-08-18 12:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20180818_1641'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='title',
),
migrations.AlterField(
model_name='comment',
name='parent_comment',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='blog.Comment'),
),
]
|
[
"geek_leo@outlook.com"
] |
geek_leo@outlook.com
|
ec19430ba5d5df43b88d4f9b77763a84416ac3eb
|
99af81d1da4545ab7d83f3c744c8494bf917ee9d
|
/push_feedstock_changes
|
1763e16bef8000ba18ab84869efd98bb81888b8d
|
[] |
no_license
|
msarahan/conda_recipe_tools
|
ea32e921b412c1eb686c3a193021062f0333249a
|
9ff81cbd745af66d62d4555ff7e5fc2367a35603
|
refs/heads/master
| 2020-03-19T10:06:18.108036
| 2018-10-31T22:30:27
| 2018-10-31T22:30:27
| 136,343,451
| 0
| 0
| null | 2018-06-06T14:49:21
| 2018-06-06T14:49:20
| null |
UTF-8
|
Python
| false
| false
| 5,850
|
#! /usr/bin/env python
# push feedstock changes to AnacondaRecipes git organization
# requires Python 3.5+
import argparse
import logging
import os
import subprocess
import sys
from subprocess import PIPE
from conda_build.api import render
LOG_FORMAT = '%(asctime)s - %(levelname)s : %(message)s'
class GitRepo(object):
def __init__(self, path):
self._path = path
def _git(self, git_args, check=True):
args = ['git', '-C', self._path] + git_args
logging.debug('command: ' + ' '.join(args))
complete = subprocess.run(args, stdout=PIPE, stderr=PIPE)
logging.debug('returncode: ' + str(complete.returncode))
logging.debug('stdout: ' + complete.stdout.decode('utf-8'))
logging.debug('stderr: ' + complete.stderr.decode('utf-8'))
if check:
complete.check_returncode()
return complete
def checkout(self, branch='master'):
return self._git(['checkout', branch])
def fetch(self, remote='origin'):
return self._git(['fetch', remote])
def reset_hard(self, remote='origin', branch='master'):
return self._git(['reset', '--hard', '%s/%s' % (remote, branch)])
def rebase(self, remote, branch='master', check=True):
return self._git(['rebase', '%s/%s' % (remote, branch)], check)
def rebase_abort(self, check=True):
self._git(['rebase', '--abort'], check)
def push(self, remote='origin', local_branch='master',
remote_branch='master', force=False, check=True):
refspec = '{src}:{dst}'.format(src=local_branch, dst=remote_branch)
if force:
return self._git(['push', '--force', remote, refspec], check)
return self._git(['push', remote, refspec], check)
def branch(self, branch_name):
return self._git(['checkout', '-b', branch_name])
def branch_delete(self, branch_name):
return self._git(['branch', '-D', branch_name])
def ls_files_modified(self):
out = self._git(['ls-files', '-m'])
return out.stdout.decode('utf-8').split()
class FeedStock(GitRepo):
def __init__(self, path, feedstock_name=None):
super(FeedStock, self).__init__(path)
if feedstock_name is None:
feedstock_name = os.path.basename(path)
self._feedstock_name = feedstock_name
def add_remote(self, org, remote_name=None, check=True):
url = 'https://github.com/%s/%s' % (org, self._feedstock_name)
if remote_name is None:
remote_name = org
self._git(['remote', 'add', remote_name, url], check=check)
def push_feedstock(feedstock_path):
""" Push git changes to a feedstock to AnacondaRecipes
returns True on success, False when the push failed
"""
feedstock = FeedStock(feedstock_path)
complete = feedstock.push(check=False)
if complete.returncode: # push failed b/c it is not a fast-forward
logging.info('standard push failed, creating archive branch')
# create temp branch of origin/master
temp_branch = 'temp_origin_master'
feedstock.branch(temp_branch) # git checkout -b temp_origin_master
feedstock.fetch() # git fetch origin
feedstock.reset_hard() # git reset --hard origin/master
# render the recipe to find the version
recipe_dir = os.path.join(feedstock_path, 'recipe')
recipes = render(recipe_dir, finalize=False)
metadata, download, needs_reparse = recipes[0]
version = metadata.version()
# push to archive branch
archive_branch = 'archive_{version}'.format(version=version)
logging.info('push origin/master to ' + archive_branch)
# git push origin master:archive_version
feedstock.push(remote_branch=archive_branch, local_branch='HEAD')
# force push master branch
feedstock.checkout() # git checkout master
feedstock.push(force=True) # git push --force origin master:master
# git branch -D temp_origin_master
feedstock.branch_delete(temp_branch)
logging.info('force push succeeded')
return True
def main():
parser = argparse.ArgumentParser(
description='Push feedstock git changes to AnacondaRecipes.')
parser.add_argument(
'feedstock_dir', nargs='*',
help='one or more feedstock directories to push')
parser.add_argument(
'--file', '-f', type=str,
help='file with feedstock directories to push')
parser.add_argument(
'--base_dir', default='.', type=str,
help='feedstock base directory, default is current directory')
parser.add_argument(
'--log', default='info',
help='log level; debug, info, warning, error, critical')
args = parser.parse_args()
# set up logging
log_numeric_level = getattr(logging, args.log.upper(), None)
if not isinstance(log_numeric_level, int):
raise ValueError('Invalid log level: %s' % args.log)
logging.basicConfig(level=log_numeric_level, format=LOG_FORMAT)
    # determine feedstock directories to sync
if args.file is not None:
# skip comments (#) and blank lines
is_valid = lambda x: not x.startswith('#') and len(x.strip())
with open(args.file) as f:
feedstock_dirs = [l.strip() for l in f if is_valid(l)]
else:
feedstock_dirs = args.feedstock_dir
# sync recipes
for feedstock_dir in feedstock_dirs:
if feedstock_dir.endswith('/'):
feedstock_dir = feedstock_dir[:-1]
logging.info('pushing: ' + feedstock_dir)
feedstock_path = os.path.join(args.base_dir, feedstock_dir)
if not push_feedstock(feedstock_path):
logging.warning('push failed: ' + feedstock_dir)
return 0
if __name__ == "__main__":
sys.exit(main())
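# Example invocation (a sketch; 'feedstocks.txt' is a hypothetical list file):
#   python push_feedstock_changes --base_dir ~/aggregate -f feedstocks.txt --log debug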
|
[
"jjhelmus@gmail.com"
] |
jjhelmus@gmail.com
|
|
ace3c17769f70ffc745907457172f979761025b3
|
a03cdb676a7db91c09d5b051585af8c1fbdcee66
|
/venv/bin/pip
|
fdd43619020cbfe9bfbe0730b4ed7ae9dbc6ca25
|
[] |
no_license
|
d11m4/movies
|
fb1f4cbe7141cc4643a0ab2797595b432ece664f
|
4df606564b565c72930f0a3213e3955f925d4987
|
refs/heads/master
| 2023-02-13T05:54:54.029461
| 2020-04-23T20:03:55
| 2020-04-23T20:03:55
| 258,317,203
| 0
| 0
| null | 2021-01-06T01:08:32
| 2020-04-23T20:02:46
|
Python
|
UTF-8
|
Python
| false
| false
| 388
|
#!/Users/dmitry/nikmovies/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
|
[
"d11m4@yandex.ru"
] |
d11m4@yandex.ru
|
|
757c511b89810d2b035d2c74957e10869842c3dd
|
755271e401d9dd06866d134f314e7865c06f2bcc
|
/lib/kml_parser.py
|
91838350b9c4b44e9f4ea313d547519315386211
|
[] |
no_license
|
Galli1598333/LoRaWAN-Application
|
31b00d769d7d5b85e53bbf10b3709c31ddcf30f0
|
ac299da43656f79a8111586acfed1a04662547de
|
refs/heads/master
| 2023-06-12T01:06:55.774495
| 2021-06-29T16:19:26
| 2021-06-29T16:19:26
| 301,663,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
import xml.etree.ElementTree as ET
class KmlParser:
def __init__(self, merged_fpath):
self.merged_fpath = merged_fpath
def merge_files(self, count_fpath, track_fpath):
with open(count_fpath, 'r') as file:
count = file.read()
with open(track_fpath, 'r') as file:
track = file.read().replace("</gx:Track>", count + "</gx:Track>")
with open(self.merged_fpath, 'w') as file:
file.write(track)
return
def __parse(self, root):
result_map = {}
for child in root[0][1][0][-1][0][0]:
result_map[int(child.text)] = {'when': '', 'coord': []}
ids = list(result_map.keys())
i = 0
for child in root[0][1][0]:
if child.text is not None:
data = child.text.split(' ')
if 'when' in child.tag:
result_map[ids[i]]['when'] = data[0]
elif 'coord' in child.tag:
result_map[ids[i]]['coord'] = data
i = i + 1
return result_map
def parse_string(self, kml_track):
root = ET.fromstring(kml_track)
return self.__parse(root)
def parse_merged(self):
tree = ET.parse(self.merged_fpath)
root = tree.getroot()
return self.__parse(root)
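# Usage sketch (file names are hypothetical):
#   parser = KmlParser('merged.kml')
#   parser.merge_files('count.kml', 'track.kml')  # splice the count data into the track
#   data = parser.parse_merged()                  # -> {id: {'when': ..., 'coord': [...]}}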
|
[
"noreply@github.com"
] |
Galli1598333.noreply@github.com
|
244ed0d2ccb9439a9c3b907c35305cf549eccb03
|
ccb0fa72852a29c38e0d83427318ef95225697bf
|
/benchmarks/utils/training.py
|
c886bf9b8130fc948318ee2c196ebc3a9df4f043
|
[
"Apache-2.0"
] |
permissive
|
crispitagorico/GPSig
|
62d3f4f3a078e753a98641670bb8088c0df960bb
|
c93155f83c11b05b9850b24481c6584ef5206a49
|
refs/heads/master
| 2023-02-26T06:05:45.353637
| 2021-01-30T08:02:04
| 2021-01-30T08:02:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,155
|
py
|
import sys
import os
sys.path.append('..')
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import gpflow as gp
import gpsig
import keras
from keras import backend as K
def fit_nn_with_gp_layer(X_train, y_train, var_list, x_tens, y_tens, loss, sess, minibatch_size=50, max_epochs=1000,
val_scores=None, lower_is_better=True, patience=None, history=None, w_tens=None, W_train=None):
assert (w_tens is None) == (W_train is None)
opt = keras.optimizers.Adam(clipvalue=5.)
param_updates = opt.get_updates(loss, var_list)
num_train = X_train.shape[0]
num_batches_per_epoch = int(np.ceil(float(num_train) / minibatch_size))
train_phase = True
if w_tens is None:
train_batch = K.function(inputs=[K.learning_phase(), x_tens, y_tens], outputs=[loss], updates=param_updates)
else:
train_batch = K.function(inputs=[K.learning_phase(), x_tens, y_tens, w_tens], outputs=[loss], updates=param_updates)
if history is None:
history = {}
start_epoch = 0
if val_scores is not None:
best_score = np.inf if lower_is_better else -np.inf
num_epochs_since_best = 0
else:
prev_epochs = [k for k in history.keys() if isinstance(k, int)]
start_epoch = np.max(prev_epochs) + 1 if len(prev_epochs) > 0 else 0
if val_scores is not None:
params_saved = []
for var in var_list:
params_saved.append(sess.run(var))
history['best'] = {}
history['epoch'] = start_epoch
for i, scorer in enumerate(val_scores):
_score = scorer()
history['best']['val_{}'.format(i)] = _score
history['best']['params'] = params_saved
best_score = _score
num_epochs_since_best = 0
for epoch in range(start_epoch, max_epochs + start_epoch):
if patience is not None and num_epochs_since_best > patience:
break
inds = np.random.permutation(X_train.shape[0])
l_avg = 0.
for t in range(num_batches_per_epoch):
X_batch = X_train[inds[t*minibatch_size:np.minimum(num_train, (t+1)*minibatch_size)]]
y_batch = y_train[inds[t*minibatch_size:np.minimum(num_train, (t+1)*minibatch_size)], None]
if w_tens is None:
                l = train_batch([train_phase, X_batch, y_batch])[0]  # the wrapped K.function takes exactly three inputs here
else:
W_batch = W_train[inds[t*minibatch_size:np.minimum(num_train, (t+1)*minibatch_size)]]
l = train_batch([train_phase, X_batch, y_batch, W_batch])[0]
print('\rEpoch: {0:04d}/{1:04d} | Batch {2:2d}/{3:2d} | ELBO: {4:.3f}'.format(epoch+1, max_epochs + start_epoch, t+1, num_batches_per_epoch, -l), end='')
l_avg += l
l_avg /= float(num_batches_per_epoch)
print('\rEpoch: {0:04d}/{1:04d} | Batch {2:2d}/{2:2d} | ELBO: {3:.3f} '.format(epoch+1, max_epochs + start_epoch, num_batches_per_epoch, -l_avg), end='')
history[epoch] = {}
history[epoch]['elbo'] = -l_avg
if val_scores is not None:
for i, scorer in enumerate(val_scores):
_score = scorer()
history[epoch]['val_{}'.format(i)] = _score
print('| Val.{}.: {:.3f} '.format(i, _score), end='')
if lower_is_better and _score <= best_score or not lower_is_better and _score >= best_score:
best_score = _score
num_epochs_since_best = 0
params_saved = []
for var in var_list:
params_saved.append(sess.run(var))
history['best'] = {}
history['epoch'] = epoch
for key, val in history[epoch].items():
history['best'][key] = val
history['best']['params'] = params_saved
print('| New best...', end='')
else:
num_epochs_since_best += 1
print()
return history
|
[
""
] | |
9e72e6d0888bf2204d471dd9e5d05a180ce11fa0
|
795340baeeed8aab921ec02f7226248b35256edf
|
/query_view/views.py
|
ff792aea286aebf5c3d1a585faf07358f5a4ddd6
|
[
"MIT"
] |
permissive
|
enricobarzetti/django-query-view
|
3646970571cf3c4bad97fb2fb7104cbe63855a45
|
79c6247c076d037c3c401d05773f64e5e237f3a2
|
refs/heads/master
| 2023-02-16T01:00:24.562879
| 2021-01-17T04:03:07
| 2021-01-17T04:03:07
| 324,231,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
from django_filters.views import FilterView
from taggit.models import Tag
class QueryView(FilterView):
url_name = None
template_name = 'query_view/query.html'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['tags'] = Tag.objects.all()
context['url_name'] = self.url_name
return context
|
[
"enricobarzetti@gmail.com"
] |
enricobarzetti@gmail.com
|
bbde3cd10bf1a8966da6f099488bf23ab78d6311
|
15c1975e8862f6765e44feb756fdab06f6714525
|
/PDU/devel/test_argparse.py
|
8b1f6f0555d1aaa7280fa18e962eaeee084df320
|
[] |
no_license
|
TianlaiProject/tldev
|
2876571c77b75cac1a2a69e9922a25398f989788
|
d62416ae4ceed07b7c55b6d5b99b3cbdbd75fb71
|
refs/heads/master
| 2020-04-03T19:23:05.834689
| 2019-08-22T09:03:53
| 2019-08-22T09:03:53
| 155,521,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aa')
parser.add_argument('-b', '--bb', nargs = '?', const = 10, default = 123)
parser.add_argument('-c', '--cc', )
op = parser.parse_args()
try:
print 'aa = ', op.aa
except:
print 'aa error'
try:
print 'bb =', op.bb
except:
print 'bb error'
try:
print 'cc =', op.cc
except:
print 'cc error'
|
[
"astrofanlee@gmail.com"
] |
astrofanlee@gmail.com
|
ff37fca6d36c96f787bcda7ec42d605301f532d9
|
413940b4b461e9bb549c08a0ca21f3893583723f
|
/Tarea de Investigación/Lista.py
|
ae30f2124612ead01dfcd5277317d9c3d950910f
|
[] |
no_license
|
MARILEONV/Tarea-de-Investigaci-n
|
d6da1a3ec6020b67f3007881bf7bf4747103a33f
|
232f5757e28af74bd168fc99d785c129332d07bf
|
refs/heads/main
| 2023-06-28T14:20:29.892082
| 2021-08-05T01:06:55
| 2021-08-05T01:06:55
| 391,653,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,264
|
py
|
from Numero import *
class Lista(Intermedio):
    def __init__(self, lista):
        self.lista = lista
    def presentarLista(self):
        n = int(input("How many elements do you want to add to the list: "))
        for i in range(n):
            lis = input("Enter value {}: ".format(i))
            self.lista.append(lis)
        return self.lista
    def buscarLista(self, valor):
        p = self.lista.index(valor)
        return "The value [{}] is at position {}".format(valor, p)
    def listaFactorial(self):
        lis = []
        for i in self.lista:
            fact = super().factorial(i)
            lis.append(fact)
        return "The list of factorials is: {}".format(lis)
    def listaPrimo(self):
        lis = []
        for i in self.lista:
            aux = super().primo(i)
            if aux: lis.append(i)
        return "The list of primes is: {}".format(lis)
    def listaNotas(self, listaNotasDiccionario):
        lst = []
        datos = int(input("How many students will you enter: "))
        for alu in range(datos):
            aux = []
            alumno = input("Enter the name of student {}: ".format(alu))
            notas = int(input("How many grades will you enter for student {}?: ".format(alumno)))
            for nota in range(notas):
                no = round(float(input("Grade {}: ".format(nota))), 2)
                aux.append(no)
            listaNotasDiccionario = {"Nombre": alumno, "Notas": aux}
            lst.append(listaNotasDiccionario)
        return "The list of students is: {}".format(lst)
    def insertarLista(self, posicion, valor):
        self.lista.insert(posicion, valor)
        return "The new list is: {}".format(self.lista)
    def eliminarLista(self, valor):
        try:
            while True:
                self.lista.remove(valor)
        except ValueError:
            pass
        return "The list is now: {}".format(self.lista)
    def retornaValorLista(self, posicion):
        if posicion < len(self.lista):
            c = self.lista.pop(posicion)
            print("The removed value is: {}".format(c))
        else:
            print("Position {} is not in the list".format(posicion))
        return "The new list is: {}".format(self.lista)
    def copiarTuplaLista(self, tupla):
        n = int(input("How many elements do you want to add: "))
        for i in range(n):
            lis = input("Enter value: ")
            self.lista.append(lis)
        tupla = tuple(self.lista)
        lista = list(tupla)
        print(tupla)
        return "Copied the tuple into a list: {}".format(lista)
    def vueltoLista(self, listaClientesDiccionario):
        lst = []
        datos = int(input("How many customers will you enter: "))
        for cup in range(datos):
            cliente = input("Enter the customer's name: ")
            cupo = float(input("What is the change for customer {}?: ".format(cliente)))
            listaClientesDiccionario = {"Nombre": cliente, "Vuelto": cupo}
            lst.append(listaClientesDiccionario)
        return "The list of customers is: {}".format(lst)
|
[
"noreply@github.com"
] |
MARILEONV.noreply@github.com
|
2d845424b61f091dfef56a43fb628330980dd4d7
|
fbe6090cfd86f07acefae39e885d0a7c3574c942
|
/post/views.py
|
fc036b6d4bc821d88284c85f01f2781eefd28205
|
[] |
no_license
|
andralandrizzy/todos
|
77a4677eea379985487c93c11618966962e590b4
|
f44ad05d6b775414798ca7c1c895aff5eacfd10c
|
refs/heads/main
| 2023-01-03T04:51:25.363851
| 2020-10-22T04:40:59
| 2020-10-22T04:40:59
| 306,193,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import *
from .forms import *
# Create your views here.
def index(request):
if request.method == 'POST':
title = request.POST['title']
description = request.POST['description']
task = Task(title=title, description=description)
task.save()
tasks = Task.objects.order_by('-created')
form = TaskForm()
context = {
'tasks':tasks,
}
return render(request, 'post/tasks.html', context)
|
[
"drip_dev_drizzy@pop-os.localdomain"
] |
drip_dev_drizzy@pop-os.localdomain
|
fe4089d3a3e8b1f7b727787135b1b49cbd154b9f
|
1767232aba61ea15703df6589355f9bae64e22bb
|
/train.py
|
51aaf2f61392dd89c59c86a0758b0d90ce676e1c
|
[
"MIT"
] |
permissive
|
ChinaYi/asrf_with_asformer
|
73fc1a4019fc4db5b7452767be17e5f8d5611726
|
16a5dba6eeb7220c9dd30b64613972883dcb83ca
|
refs/heads/main
| 2023-08-23T05:58:33.541399
| 2021-10-28T13:04:45
| 2021-10-28T13:04:45
| 416,367,698
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,260
|
py
|
import argparse
import os
import random
import pandas as pd
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import Compose
from libs import models
from libs.checkpoint import resume, save_checkpoint
from libs.class_id_map import get_n_classes
from libs.class_weight import get_class_weight, get_pos_weight
from libs.config import get_config
from libs.dataset import ActionSegmentationDataset, collate_fn
from libs.helper import train, validate
from libs.loss_fn import ActionSegmentationLoss, BoundaryRegressionLoss
from libs.optimizer import get_optimizer
from libs.transformer import TempDownSamp, ToTensor
def get_arguments() -> argparse.Namespace:
"""
    parse all the arguments from the command line interface
return a list of parsed arguments
"""
parser = argparse.ArgumentParser(
description="train a network for action recognition"
)
parser.add_argument("config", type=str, help="path of a config file")
parser.add_argument(
"--seed",
type=int,
default=0,
help="a number used to initialize a pseudorandom number generator.",
)
parser.add_argument(
"--resume",
action="store_true",
help="Add --resume option if you start training from checkpoint.",
)
return parser.parse_args()
def main() -> None:
# argparser
args = get_arguments()
# configuration
config = get_config(args.config)
result_path = os.path.dirname(args.config)
seed = args.seed
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
# cpu or cuda
device = "cuda" if torch.cuda.is_available() else "cpu"
if device == "cuda":
torch.backends.cudnn.benchmark = True
# Dataloader
# Temporal downsampling is applied to only videos in 50Salads
downsamp_rate = 2 if config.dataset == "50salads" else 1
train_data = ActionSegmentationDataset(
config.dataset,
transform=Compose([ToTensor(), TempDownSamp(downsamp_rate)]),
mode="trainval" if not config.param_search else "training",
split=config.split,
dataset_dir=config.dataset_dir,
csv_dir=config.csv_dir,
)
train_loader = DataLoader(
train_data,
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers,
drop_last=True if config.batch_size > 1 else False,
collate_fn=collate_fn,
)
# if you do validation to determine hyperparams
if config.param_search:
val_data = ActionSegmentationDataset(
config.dataset,
transform=Compose([ToTensor(), TempDownSamp(downsamp_rate)]),
mode="validation",
split=config.split,
dataset_dir=config.dataset_dir,
csv_dir=config.csv_dir,
)
val_loader = DataLoader(
val_data,
batch_size=1,
shuffle=False,
num_workers=config.num_workers,
collate_fn=collate_fn,
)
# load model
print("---------- Loading Model ----------")
n_classes = get_n_classes(config.dataset, dataset_dir=config.dataset_dir)
model = models.ActionSegmentRefinementFramework(
in_channel=config.in_channel,
n_features=config.n_features,
n_classes=n_classes,
n_stages=config.n_stages,
n_layers=config.n_layers,
n_stages_asb=config.n_stages_asb,
n_stages_brb=config.n_stages_brb,
)
# send the model to cuda/cpu
model.to(device)
optimizer = get_optimizer(
config.optimizer,
model,
config.learning_rate,
momentum=config.momentum,
dampening=config.dampening,
weight_decay=config.weight_decay,
nesterov=config.nesterov,
)
# resume if you want
columns = ["epoch", "lr", "train_loss"]
# if you do validation to determine hyperparams
if config.param_search:
columns += ["val_loss", "cls_acc", "edit"]
columns += [
"segment f1s@{}".format(config.iou_thresholds[i])
for i in range(len(config.iou_thresholds))
]
columns += ["bound_acc", "precision", "recall", "bound_f1s"]
begin_epoch = 0
best_loss = float("inf")
log = pd.DataFrame(columns=columns)
best_edit = -999.
if args.resume:
if os.path.exists(os.path.join(result_path, "checkpoint.pth")):
checkpoint = resume(result_path, model, optimizer)
begin_epoch, model, optimizer, best_loss = checkpoint
log = pd.read_csv(os.path.join(result_path, "log.csv"))
print("training will start from {} epoch".format(begin_epoch))
else:
print("there is no checkpoint at the result folder")
# criterion for loss
if config.class_weight:
class_weight = get_class_weight(
config.dataset,
split=config.split,
dataset_dir=config.dataset_dir,
csv_dir=config.csv_dir,
mode="training" if config.param_search else "trainval",
)
class_weight = class_weight.to(device)
else:
class_weight = None
criterion_cls = ActionSegmentationLoss(
ce=config.ce,
focal=config.focal,
tmse=config.tmse,
gstmse=config.gstmse,
weight=class_weight,
ignore_index=255,
ce_weight=config.ce_weight,
focal_weight=config.focal_weight,
tmse_weight=config.tmse_weight,
gstmse_weight=config.gstmse,
)
pos_weight = get_pos_weight(
dataset=config.dataset,
split=config.split,
csv_dir=config.csv_dir,
mode="training" if config.param_search else "trainval",
).to(device)
criterion_bound = BoundaryRegressionLoss(pos_weight=pos_weight)
# train and validate model
print("---------- Start training ----------")
for epoch in range(begin_epoch, config.max_epoch):
# training
train_loss = train(
train_loader,
model,
criterion_cls,
criterion_bound,
config.lambda_b,
optimizer,
epoch,
device,
)
# if you do validation to determine hyperparams
if config.param_search:
(
val_loss,
cls_acc,
edit_score,
segment_f1s,
bound_acc,
precision,
recall,
bound_f1s,
) = validate(
val_loader,
model,
criterion_cls,
criterion_bound,
config.lambda_b,
device,
config.dataset,
config.dataset_dir,
config.iou_thresholds,
config.boundary_th,
config.tolerance,
)
# save a model if top1 acc is higher than ever
if best_loss > val_loss:
best_loss = val_loss
torch.save(
model.state_dict(),
os.path.join(result_path, "best_loss_model.prm"),
)
# save checkpoint every epoch
save_checkpoint(result_path, epoch, model, optimizer, best_loss)
# write logs to dataframe and csv file
tmp = [epoch, optimizer.param_groups[0]["lr"], train_loss]
# if you do validation to determine hyperparams
if config.param_search:
tmp += [
val_loss,
cls_acc,
edit_score,
]
tmp += segment_f1s
tmp += [
bound_acc,
precision,
recall,
bound_f1s,
]
tmp_df = pd.Series(tmp, index=log.columns)
log = log.append(tmp_df, ignore_index=True)
log.to_csv(os.path.join(result_path, "log.csv"), index=False)
if config.param_search:
# if you do validation to determine hyperparams
print(
"epoch: {}\tlr: {:.4f}\ttrain loss: {:.4f}\tval loss: {:.4f}\tval_acc: {:.4f}\tedit: {:.4f}".format(
epoch,
optimizer.param_groups[0]["lr"],
train_loss,
val_loss,
cls_acc,
edit_score,
)
)
if edit_score > best_edit:
best_edit = edit_score
torch.save(model.state_dict(), os.path.join(result_path, 'best_val_edit.prm'))
else:
print(
"epoch: {}\tlr: {:.4f}\ttrain loss: {:.4f}".format(
epoch, optimizer.param_groups[0]["lr"], train_loss
)
)
if (epoch + 1) % 10 == 0:
torch.save(model.state_dict(), os.path.join(result_path, "model_{}.prm".format(epoch+1)))
# delete checkpoint
os.remove(os.path.join(result_path, "checkpoint.pth"))
# save models
torch.save(model.state_dict(), os.path.join(result_path, "final_model.prm"))
print("Done!")
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
ChinaYi.noreply@github.com
|
227083a6426424a55be3c286049966f07d5526b7
|
904a5d68bd9ce20f46031b1b30690daa6f101f77
|
/scorelib-import.py
|
55a87e7c9e1e1fd8e7caa1a8084f4d1591522e5c
|
[] |
no_license
|
lubcik/PV248
|
21d42796e29410391b0b06d149c39cbce81268ae
|
2cb11e5315b2ea1c8b4b4413116f88bc9c888dd4
|
refs/heads/master
| 2021-05-05T01:05:46.029546
| 2018-01-29T20:54:23
| 2018-01-29T20:54:23
| 119,612,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
import re # regular expressions
import sqlite3
# This is a base class for objects that represent database items. It implements
# the store() method in terms of fetch_id and do_store, which need to be
# implemented in every derived class (see Person below for an example).
class DBItem:
def __init__( self, conn ):
self.id = None
self.cursor = conn.cursor()
def store( self ):
self.fetch_id()
if ( self.id is None ):
self.do_store()
self.cursor.execute( "select last_insert_rowid()" )
self.id = self.cursor.fetchone()[ 0 ]
# Example of a class which represents a single row of a single database table.
# This is a very simple example, since it does not contain any references to
# other objects.
class Person( DBItem ):
def __init__( self, conn, string ):
super().__init__( conn )
self.born = self.died = None
        self.name = re.sub( r'\([0-9/+-]+\)', '', string ).strip()
m = re.search( "([0-9]+)--([0-9]+)", string )
if not m is None:
self.born = int( m.group( 1 ) )
self.died = int( m.group( 2 ) )
def fetch_id( self ):
self.cursor.execute( "select id from person where name = ?", (self.name,) )
res = self.cursor.fetchone()
if not res is None: # TODO born/died update should be done inside this if
self.id = res[ 0 ]
self.cursor.execute(
"update person set born = ?, died = ? where id = ?", (
self.born, self.died, self.id))
def do_store( self ):
print ("storing '%s'" % self.name)
# NB. Part of the exercise was adding the born/died columns to the below query.
self.cursor.execute( "insert into person (name, born, died) values (?, ?, ?)",
( self.name, self.born, self.died ) )
# Process a single line of input.
def process( k, v ):
if k == 'Composer' or k == 'Editor':
for c in v.split(';'):
p = Person( conn, c.strip() )
p.store()
# Database initialisation: sqlite3 scorelib.dat ".read scorelib.sql"
conn = sqlite3.connect( 'scorelib.dat' )
rx = re.compile( r"(.*): (.*)" )
for line in open( 'scorelib.txt', 'r', encoding='utf-8' ):
m = rx.match( line )
if m is None: continue
process( m.group( 1 ), m.group( 2 ) )
conn.commit()
|
[
"lubica.kramarekova@kiwi.com"
] |
lubica.kramarekova@kiwi.com
|
e743b9ad9f620a3fdcf330cd029aede1dbd35801
|
64c7445ed33d43c4d56f96c1916d55bf63a80ce6
|
/allelic_dist.py
|
4c573b7fb0ebda38fcafcf14307193229df65068
|
[] |
no_license
|
dorbarker/mist-tools
|
d28dba2d2705ae1cf6854717ca6b982672ea9a8e
|
b18929b489f204b3b4411ad9d9d1d88ed48db1ce
|
refs/heads/master
| 2020-06-12T18:06:00.718335
| 2015-01-09T18:16:17
| 2015-01-09T18:16:17
| 28,106,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,717
|
py
|
import argparse
import csv
import mistutils
def arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-j', '--jsons', nargs='+', required=True, help='Path(s) to JSONs')
    parser.add_argument('-t', '--tests', nargs='+', required=True, help='MIST test name(s)')
parser.add_argument('-o', '--out', required=True, help='Matrix outpath')
return parser.parse_args()
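# Example invocation (paths and test name are hypothetical):
#   python allelic_dist.py -j results/*.json -t MLST -o dist_matrix.csv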
def hamming_distance(strain1, strain2):
""" Finds the Hamming Distance
between two strains based on allele differences.
"""
h_dist = 0
order = strain1.keys()
for item in order:
if strain1[item] != strain2[item]:
h_dist += 1
return h_dist
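# e.g. hamming_distance({'g1': 1, 'g2': 2}, {'g1': 1, 'g2': 3}) == 1
# (assumes both strains carry the same gene keys, as parse_json produces)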
def prepare_dist_matrix_csv(mat):
"""Perpares distance matrix dict as a 2D array."""
order = mat.keys()
prepped_mat = []
row = ['genomes']
row.extend(order)
prepped_mat.append(row)
for strain1 in order:
row = [strain1]
for strain2 in order:
row.append(mat[strain1][strain2])
prepped_mat.append(row)
return prepped_mat
def write_dist_matrix_csv(outpath, mat):
"""Writes distance matrix to CSV."""
prepped_mat = prepare_dist_matrix_csv(mat)
with open(outpath, 'w') as f:
out = csv.writer(f)
for row in prepped_mat:
out.writerow(row)
def build_matrix(strains_calls):
"""Constructs Hamming Distance matrix."""
dist_mat = {}
for strain1 in strains_calls:
dist_mat[strain1] = {}
for strain2 in strains_calls:
h_dist = hamming_distance(strains_calls[strain1], strains_calls[strain2])
dist_mat[strain1][strain2] = h_dist
return dist_mat
def parse_json(genes, test):
"""Returns dict of allele matches."""
d = {}
for gene in genes:
if genes[gene]["BlastResults"] is None or genes[gene]["IsContigTruncation"]:
d[gene] = "NA"
print("Beware! {} is missing {}. This is treated as a valid 'allele' in the matrix.".format(genes[gene]["StrainName"], gene))
else:
d[gene] = genes[gene]["AlleleMatch"]
return d
def process(args):
strains_calls = {}
jsons = mistutils.get_jsons(args.jsons)
for j in jsons:
data = mistutils.load_json(j)
for test in args.tests:
for strain, genes in mistutils.loop_json_genomes(data, test):
strains_calls[strain] = parse_json(genes, test)
mat = build_matrix(strains_calls)
write_dist_matrix_csv(args.out, mat)
def main():
args = arguments()
process(args)
if __name__ == '__main__':
main()
|
[
"dor.barker@gmail.com"
] |
dor.barker@gmail.com
|
295e21ae589f0e72622d30ceb5f1fdd2443680b9
|
e6c1ce30389e87986e9db8fad7a3351e8fdb1259
|
/budget/migrations/0002_remove_customer_profile_pic.py
|
810105431dfc7cbe49f3161ed2f13c64f1641a68
|
[] |
no_license
|
Prabu-N/monthly-expenses-app
|
ed6652c8955d0654d2778f9ef3377ba274109080
|
2de193d2dcb0978a92aa03cd7e13c89846facd6a
|
refs/heads/master
| 2022-12-27T09:02:50.254939
| 2020-10-06T05:59:49
| 2020-10-06T05:59:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# Generated by Django 3.0.7 on 2020-09-12 09:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('budget', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='profile_pic',
),
]
|
[
"prabu6964@gmail.com"
] |
prabu6964@gmail.com
|
61d41b77446a8bf350c21e4782b2e2bce3f5ad6a
|
4fcd166f2d6a9a31daed609feff8c7e37b3e359b
|
/librfid/FIFO.py
|
585810db1e73dd0ac5d82fc47148fadf64949d32
|
[] |
no_license
|
fchorney/rfid_daemon
|
6bba2106d38489037b0666a1f07802d8c2ebe312
|
739119e48c6ed69820486668c33a0bd835b1a631
|
refs/heads/master
| 2021-06-14T14:34:25.211859
| 2017-02-03T18:34:18
| 2017-02-03T18:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
# -*- coding: UTF-8 -*-
from os import mkfifo, stat
from os.path import exists
from stat import S_ISFIFO
def isfifo(path):
if exists(path):
return S_ISFIFO(stat(path).st_mode)
return False
def openfifo(path, mode):
if not isfifo(path):
mkfifo(path)
return open(path, mode)
class FIFO():
def __init__(self, path='fifo'):
self.path = path
def write(self, text):
fifo = openfifo(self.path, 'w')
fifo.write(text)
fifo.close()
def read(self):
        fifo = openfifo(self.path, 'r')
text = fifo.read()
fifo.close()
return text
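# Usage sketch: opening a FIFO blocks until both ends are attached, so reader and
# writer normally run in separate processes (the path below is hypothetical):
#   FIFO('/tmp/rfid.fifo').write('tag:1234')    # in the daemon
#   print(FIFO('/tmp/rfid.fifo').read())        # in the consumer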
|
[
"github@djsbx.com"
] |
github@djsbx.com
|
b05c63535ada6a48faeb8b3038fc4d314df9e2a0
|
f891bda5635da422a2c76edbcb1af0061aa12574
|
/rest/migrations/0002_auto_20210314_1159.py
|
ba6e398bf6c9d3cb86518665d0bc887f01a8dc4f
|
[] |
no_license
|
darkydash/olimpiada_ya_prof_task2_pi
|
60edc4c51297303a80d0b9386ec68c196054c06d
|
287f3f38b12dcc0beed39468f1a5feffb248a1ed
|
refs/heads/master
| 2023-03-18T23:02:35.523806
| 2021-03-14T09:19:42
| 2021-03-14T09:19:42
| 347,589,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
# Generated by Django 2.2.13 on 2021-03-14 08:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rest', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notes',
name='title',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"darkydashie@gmail.com"
] |
darkydashie@gmail.com
|
16c4b2fcf10ed1d8ed1bbe25f5cb06e02759fa77
|
1107d86d52dd5cdb1cb6dd1625ac5676ba66b540
|
/server/models.py
|
bd1d9054a4e66dd13c99164f19ee03fc905fbb0d
|
[] |
no_license
|
anirudh1200/flask_react_jwt_auth
|
98238df3c43d16f356c9ab5c03b6ef9269ef8d7e
|
0430570f5a3abbd9d9e0addd4d27e5586e55d2f6
|
refs/heads/master
| 2020-06-21T07:16:26.191991
| 2019-06-11T15:23:23
| 2019-06-11T15:23:23
| 197,379,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
firstName = db.Column(db.String(50))
lastName = db.Column(db.String(50))
username = db.Column(db.String(50))
password = db.Column(db.String(100))
|
[
"anirudh.b.123@gmail.com"
] |
anirudh.b.123@gmail.com
|
74db1e67a767be1ccd50a8e60cbe29e1b58c56e9
|
bf30258c306c27ae7851fe5a701e311df7ba7293
|
/chatproject/routing.py
|
8da9fc78d866f9a68200f131a1d17dcc0ee49346
|
[] |
no_license
|
soomin-jeong/chat-app
|
499076c57c3f4bc6fbd1b7db2a0353d89aababd4
|
6cdb3958ebdd48c7635f373c4e0e5f6dd02fb232
|
refs/heads/master
| 2020-06-30T08:18:57.635161
| 2019-08-06T08:47:42
| 2019-08-06T08:47:42
| 200,775,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from channels import include
application = [
# To set up the websocket routing
include("chat_app.routing.routing_websocket", path=r"^/socket/"),
# For chat join, leave, and send
include("chat_app.routing.routing_chat"),
]
|
[
"smjeong@ogqcorp.com"
] |
smjeong@ogqcorp.com
|
f8245119fd7270f578f5d81fd88be98f949dea5a
|
83ac04c3ef23c06b497cb6d8913cee34645b818d
|
/UNIT 1/W1_Lecture 3.py
|
54783520f6ed219c61521fbf9627a9aa95877c40
|
[] |
no_license
|
markjluo/MITx_6.00.2x
|
adc29297148144426de053e2e8f49c98fa8af113
|
b5c8ba805cfaaee8e2847d68751fe9a95b6dc0e2
|
refs/heads/master
| 2020-05-27T00:29:32.746081
| 2019-05-28T21:55:11
| 2019-05-28T21:55:11
| 188,425,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,558
|
py
|
class Node(object):
def __init__(self, name):
"""Assume name is a string"""
self.name = name
def getName(self):
return self.name
def __str__(self):
return self.name
class Edge(object):
def __init__(self, src, dest):
"""Assumes src(Source) and dest(Destination) are nodes"""
self.src = src
self.dest = dest
def getSource(self):
return self.src
def getDest(self):
return self.dest
def __str__(self):
return self.src.getName() + '->'+ self.dest.getName()
class Digraph(object):
"""edges is a dict mapping each node to a list of its children"""
def __init__(self):
self.edges = {}
def addNode(self, node):
if node in self.edges:
raise ValueError('Duplicate node')
else:
self.edges[node] = []
def addEdge(self, edge):
src = edge.getSource()
dest = edge.getDest()
if not (src in self.edges and dest in self.edges):
raise ValueError('Node not in graph')
self.edges[src].append(dest)
    def childrenOf(self, node):
return self.edges[node]
def hasNode(self, node):
return node in self.edges
def getNode(self, name):
for n in self.edges:
if n.getName() == name:
return n
raise NameError(name)
def __str__(self):
result = ''
for src in self.edges:
for dest in self.edges[src]:
result = result + src.getName() + '->' + dest.getName() + '\n'
return result[:-1] # Omit final newline
class Graph(Digraph):
def addEdge(self, edge):
Digraph.addEdge(self, edge)
rev = Edge(edge.getDest(), edge.getSource())
Digraph.addEdge(self, rev)
def buildCityGraph(graphtype):
g = graphtype()
for name in ('Boston', 'Providence', 'New York', 'Chicago', 'Denver', 'Phoenix', 'Los Angeles'): #Create 7 nodes
g.addNode(Node(name))
g.addEdge(Edge(g.getNode('Boston'), g.getNode('Providence')))
g.addEdge(Edge(g.getNode('Boston'), g.getNode('New York')))
g.addEdge(Edge(g.getNode('Providence'), g.getNode('Boston')))
g.addEdge(Edge(g.getNode('Providence'), g.getNode('New York')))
g.addEdge(Edge(g.getNode('New York'), g.getNode('Chicago')))
g.addEdge(Edge(g.getNode('Chicago'), g.getNode('Denver')))
g.addEdge(Edge(g.getNode('Denver'), g.getNode('Phoenix')))
g.addEdge(Edge(g.getNode('Denver'), g.getNode('New York')))
g.addEdge(Edge(g.getNode('Chicago'), g.getNode('Phoenix')))
g.addEdge(Edge(g.getNode('Los Angeles'), g.getNode('Boston')))
return g
def DFS(graph, start, end, path, shortest, toPrint = False):
path = path + [start]
if start == end:
return path
for node in graph.childrenOf(start):
if node not in path: # avoid cycles
if shortest == None or len(path) < len(shortest):
newPath = DFS(graph, node, end, path, shortest, toPrint)
if newPath != None:
shortest = newPath
return shortest
def shortestPath(graph, start, end):
    return DFS(graph, start, end, [], None, False)
def printPath(path):
    """Assumes path is a list of nodes"""
    return '->'.join(str(node) for node in path)
def testSP(source, destination):
    g = buildCityGraph(Digraph)
    sp = shortestPath(g, g.getNode(source), g.getNode(destination))
    if sp != None:
        print('Shortest path from', source, 'to', destination, 'is', printPath(sp))
    else:
        print('There is no path from', source, 'to', destination)
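# Usage sketch (expected output follows from the directed edges added above):
#   testSP('Boston', 'Phoenix')  # shortest path is Boston->New York->Chicago->Phoenix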
buildCityGraph(Digraph)
a = Node('abc')
print(str(a))
|
[
"mark.j.luo@gmail.com"
] |
mark.j.luo@gmail.com
|
e5356c009de0a60d2476a1ab574e460d8f281c61
|
40426dffc4024327a28511bb1178f5d07a9eacdb
|
/natas/natas5.py
|
0c1c58f1055d1ec5496332b8b6eca7d7e353eccc
|
[] |
no_license
|
dr01dz/overthewire
|
52e3eac3d437bf30848738496b844ac5b6b645e0
|
e822a9a327dba0734ea6ec202c2cb22cfceb02cf
|
refs/heads/master
| 2021-05-18T07:16:59.847040
| 2020-03-30T01:55:29
| 2020-03-30T01:55:29
| 251,175,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
import requests
import re
username = "natas5"
password = "iX6IOfmpN7AYOQGPwtn3fXpbaJVJcHfq"
url = f"http://{username}.natas.labs.overthewire.org/"
session = requests.Session()
response = session.get(url, auth=(username, password), cookies={'loggedin': '1'})
content = response.text
flag = re.findall(r"The password for natas6 is (.*)<", content)[0]
print(flag)
|
[
"dr01dz@pm.me"
] |
dr01dz@pm.me
|
5458a59d2c1a1fc92e3d4f82b705bf473f5f329c
|
f3335c28c0eec1587f81c492289e83ca96fb066a
|
/integrationtests/TestYouTubeUserScraper.py
|
d264e51c26815fd41b79fa280c49e4d134caad3b
|
[] |
no_license
|
souzaonofre/youtube-xbmc-plugin
|
1e373f05d7df5262cca0fa55f445a5c5aa56b901
|
80330ea03eadd72027dc5c1184f8df9c4f6d7a06
|
refs/heads/master
| 2021-01-18T10:37:58.637182
| 2015-04-11T17:42:36
| 2015-04-11T17:42:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
import BaseTestCase
import nose
import sys
class TestYouTubeUserScraper(BaseTestCase.BaseTestCase):
def test_plugin_should_scrape_liked_videos_list_correctly(self):
sys.modules["__main__"].settings.load_strings("./resources/basic-login-settings-logged-in.xml")
sys.modules["__main__"].settings.setSetting("cookies_saved", "false")
self.navigation.listMenu({"scraper": "liked_videos", 'login': 'true', "path": "/root/liked_videos"})
self.assert_directory_count_greater_than_or_equals(10)
self.assert_directory_count_less_than_or_equals(51)
self.assert_directory_is_a_video_list()
self.assert_directory_contains_almost_only_unique_video_items()
self.assert_directory_items_should_have_external_thumbnails()
if __name__ == "__main__":
nose.runmodule()
|
[
"commander.john.crichton@gmail.com"
] |
commander.john.crichton@gmail.com
|
e224aba3ce164996f1d8fa2ecae19ad56194e37c
|
e50e73e3d18bd297eda0561421e5f5b44e47fde8
|
/ops.py
|
f58171f9239858581a8f6a789289992744f156e2
|
[] |
no_license
|
JabariHolder/squad-tensorflow
|
57d1251eeb6dd2ee5922e5e27fec7ace748201f7
|
14ef5bb3b1119aa071f2324930e7f9e488ef212d
|
refs/heads/master
| 2020-03-24T19:37:50.991101
| 2018-06-13T05:58:16
| 2018-06-13T05:58:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,710
|
py
|
import tensorflow as tf
import os
from tensorflow.contrib.rnn.python.ops.rnn_cell import AttentionCellWrapper
from tensorflow.contrib.tensorboard.plugins import projector
def dropout(x, keep_prob):
return tf.nn.dropout(x, keep_prob)
def lstm_cell(cell_dim, layer_num, keep_prob):
with tf.variable_scope('LSTM_Cell') as scope:
def get_cell(cd, kp):
cell = tf.contrib.rnn.BasicLSTMCell(
cd, forget_bias=1.0, activation=tf.tanh, state_is_tuple=True)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=kp)
return cell
stacked_rnn = tf.contrib.rnn.MultiRNNCell([get_cell(cell_dim, keep_prob)
for _ in range(layer_num)])
return stacked_rnn
def gru_cell(cell_dim, layer_num, keep_prob):
with tf.variable_scope('GRU_Cell') as scope:
cell = tf.contrib.rnn.GRUCell(cell_dim)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
return tf.contrib.rnn.MultiRNNCell([cell] * layer_num)
def rnn_reshape(inputs, input_dim, max_time_step):
with tf.variable_scope('Reshape') as scope:
inputs_tr = tf.transpose(inputs, [1, 0, 2])
inputs_tr_reshape = tf.reshape(inputs_tr, [-1, input_dim])
inputs_tr_reshape_split = tf.split(axis=0, num_or_size_splits=max_time_step, value=inputs_tr_reshape)
return inputs_tr_reshape_split
def rnn_model(inputs, input_len, max_time_step, cell, params, gather_last=False):
dim_rnn_cell = params['dim_rnn_cell']
with tf.variable_scope('RNN') as scope:
outputs, state = tf.contrib.rnn.static_rnn(cell, inputs, sequence_length=input_len, dtype=tf.float32, scope=scope)
outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
if gather_last:
indices = tf.concat(axis=1, values=[tf.expand_dims(tf.range(0, tf.shape(input_len)[0]), 1), tf.expand_dims(input_len-1, 1)])
gathered_outputs = tf.gather_nd(outputs, indices)
else:
gathered_outputs = outputs
return gathered_outputs
def bi_rnn_model(inputs, input_len, fw_cell, bw_cell,
fw_init_state=None, bw_init_state=None):
with tf.variable_scope('Bi-RNN') as scope:
outputs, state = tf.nn.bidirectional_dynamic_rnn(
fw_cell, bw_cell, inputs,
sequence_length=input_len,
initial_state_fw=fw_init_state,
initial_state_bw=bw_init_state,
dtype=tf.float32, scope=scope)
outputs = tf.concat(axis=2, values=[outputs[0], outputs[1]])
return outputs, state
def embedding_lookup(inputs, voca_size, embedding_dim, initializer=None, trainable=True,
draw=False, visual_dir=None, config=None,
reuse=False, scope='Embedding'):
with tf.variable_scope(scope, reuse=reuse) as scope:
if initializer is not None:
embedding_table = tf.get_variable("embed",
initializer=initializer, trainable=trainable, dtype=tf.float32)
else:
embedding_table = tf.get_variable("embed", [voca_size, embedding_dim],
dtype=tf.float32, trainable=trainable)
inputs_embed = tf.nn.embedding_lookup(embedding_table, inputs)
# print(inputs_embed)
if draw:
embedding = config.embeddings.add()
embedding.tensor_name = embedding_table.name
embedding.metadata_path = os.path.join(visual_dir, '%s_metadata.tsv'%scope.name)
return inputs_embed, projector
else:
return inputs_embed
def mask_by_index(batch_size, input_len, max_time_step):
with tf.variable_scope('Masking') as scope:
input_index = tf.range(0, batch_size) * max_time_step + (input_len - 1)
lengths_transposed = tf.expand_dims(input_index, 1)
lengths_tiled = tf.tile(lengths_transposed, [1, max_time_step])
mask_range = tf.range(0, max_time_step)
range_row = tf.expand_dims(mask_range, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])
mask = tf.less_equal(range_tiled, lengths_tiled)
        # tf.select was removed in TF 1.0; tf.where is the drop-in replacement
        weight = tf.where(mask, tf.ones([batch_size, max_time_step]),
                          tf.zeros([batch_size, max_time_step]))
weight = tf.reshape(weight, [-1])
return weight
def linear(inputs, output_dim, dropout_rate=1.0, regularize_rate=0, activation=None, scope='Linear'):
with tf.variable_scope(scope) as scope:
input_dim = inputs.get_shape().as_list()[-1]
inputs = tf.reshape(inputs, [-1, input_dim])
weights = tf.get_variable('Weights', [input_dim, output_dim],
initializer=tf.random_normal_initializer())
variable_summaries(weights, scope.name + '/Weights')
biases = tf.get_variable('Biases', [output_dim],
initializer=tf.constant_initializer(0.0))
variable_summaries(biases, scope.name + '/Biases')
if activation is None:
return dropout((tf.matmul(inputs, weights) + biases), dropout_rate)
else:
return dropout(activation(tf.matmul(inputs, weights) + biases), dropout_rate)
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev/' + name, stddev)
tf.summary.scalar('max/' + name, tf.reduce_max(var))
tf.summary.scalar('min/' + name, tf.reduce_min(var))
tf.summary.histogram(name, var)
|
[
"jinhyuk_lee@korea.ac.kr"
] |
jinhyuk_lee@korea.ac.kr
|
368cd162c2321582941fb96c9ef02d7aa65f8a41
|
3e7b9373afd1dc4ec30f0afd12f38b607a823747
|
/simple-sklearn-demo/test14/balanceTest.py
|
d52f7283569994b2c5f2d1ad9490cc3d0c8becb5
|
[
"MIT"
] |
permissive
|
GlintW/Intern.MT
|
767512a42ba4f450d8b2b24fecf8595294d6fe7c
|
29538b083f29720bfdda7565eaca89292f7e3723
|
refs/heads/master
| 2020-06-09T10:40:43.720827
| 2019-07-05T03:36:21
| 2019-07-05T03:36:21
| 193,424,380
| 2
| 1
|
MIT
| 2019-07-05T07:04:31
| 2019-06-24T03:01:31
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,389
|
py
|
"""
以癌症判断为例, 假设原本的样本有100个, 真实的正确标记是99个癌症, 1个正常, 将癌症视作正例的话,
假设不进行任何科学预测, 而是直接把100个都胡乱猜测为正例, 则:
精确率为 99%
召回率为 100%
F1-score为 2 * 0.99 * 1 / (0.99 + 1) = 1.98 / 1.99 = 0.99497487437185929648241206030151
在这种情况下, 因为样本不够均衡, 精确率/召回率/F1-score都无法用来判断真实的模型能力, 会认为胡乱猜测的结果预测很准确.
TPR True Positive Rate 真正例比率
TP / (TP + FN)
所有真实为1的样本中, 预测也为1的比率(其实就是召回率)
FPR False Positive Rate 伪正例比率
FP / (FP + TN)
所有真实为0的样本中, 预测为1的比率
以FPR为横坐标, TPR为纵坐标, 构造平面直角坐标系. 绘制每一个点的FPR和TPR, 得到的曲线被称作ROC(Receiver Operating Characteristic)曲线
ROC曲线和横纵坐标围成的面积, 被称作AUC(Area Under the Curve)指标, AUC也就是预测正样本大于预测负样本的比率
如果在某一点上,
若TPR == FPR, 该点的斜率为1, 则斜线正好是与横纵轴夹角都为45°的斜线,
该情况可视作是无论真实类别为0或1, 预测成1的概率都相等, 也就是random guess
该情况下, ROC曲线也就是random guess线与横纵坐标围成的面积是直角三角形, 面积为0.5
若TPR > FPR, 则斜线陡峭,
该情况下, 若所有真实为1的样本预测为1, 且所有真实为0的样本中, 都不预测为1, 则斜线变成Y轴, AUC值达到最大, 即面积为正方形, 面积为1
若TPR < FPR, 则斜线平缓
该情况下, AUC小于0.5, 可以反向使用模型.
最终可以认为, AUC的范围是在0.5~1之间, 越接近1越好.
以上面的极端情况为例(假设原本的样本有100个, 真实的正确标记是99个癌症, 1个正常),
此时的TPR为召回率 100%,
此时的FPR为(真实为0的样本数1中, 预测为1的样本数1) 1 / 1 = 100%
此时的ROC曲线为random guess, AUC为0.5, 可以看出其实是胡乱猜测的.
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
def balance_test():
df = pd.read_csv(r"./breast-cancer-wisconsin_data.csv",
names=["Sample code number",
"Clump Thickness",
"Uniformity of Cell Size",
"Uniformity of Cell Shape",
"Marginal Adhesion",
"Single Epithelial Cell Size",
"Bare Nuclei",
"Bland Chromatin",
"Normal Nucleoli",
"Mitoses",
"Class"])
df = df.replace(to_replace="?", value=np.nan)
    # drop rows with missing values
df.dropna(inplace=True)
    # check whether any missing values remain
df.isnull().any()
    # split out data and target
data = df.iloc[:, :-1]
target = df["Class"]
    # train/test split
data_train, data_test, target_train, target_test = train_test_split(data, target)
    # standardization
sd_scaler = StandardScaler()
data_train = sd_scaler.fit_transform(data_train)
data_test = sd_scaler.transform(data_test)
    # logistic regression
lr = LogisticRegression()
lr.fit(data_train, target_train)
    # weight coefficients
print(lr.coef_)
    # bias (intercept)
print(lr.intercept_)
    # model evaluation
target_predict = lr.predict(data_test)
print(target_predict == target_test)
print(lr.score(data_test, target_test))
    # precision, recall, F1-score
    # labels lists the possible target values; target_names gives their display names
    report = classification_report(target_test, target_predict, labels=[2, 4], target_names=["benign", "malignant"])
    # "support" in the report is the number of samples per class
print(report)
    # roc_auc_score expects y_true binarized into 0/1 negatives/positives
    # here having cancer (Class == 4) is treated as the positive class 1
    target_test_positive = np.where(target_test == 4, 1, 0)
    target_predict_positive = np.where(target_predict == 4, 1, 0)
    auc = roc_auc_score(target_test_positive, target_predict_positive)
    print("The AUC score is")
print(auc)
return None
if __name__ == '__main__':
balance_test()
|
[
"crackedcd@qq.com"
] |
crackedcd@qq.com
|
7234b46068e6c69c07d561b4565db25d1df52a21
|
5c38f7b1c479345ae02b9805458335b5f07acda5
|
/2. Useful methods/2_lesson2_step8.py
|
20533e59e843ca5f4c37ef8fa24578f28ece3926
|
[] |
no_license
|
BenjaminPetrik/stepik-auto-tests-course
|
b3a667b34593220f4b73681d51466c812dc48919
|
ae40d40a1833e728afc660896d7ef62289c7a37e
|
refs/heads/master
| 2022-11-07T06:28:00.135119
| 2020-06-23T13:58:22
| 2020-06-23T13:58:22
| 272,705,087
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
import os, time
from selenium import webdriver
from selenium.webdriver.common.by import By
try:
browser = webdriver.Chrome()
browser.get("http://suninjuly.github.io/file_input.html")
elements = browser.find_elements(By.CSS_SELECTOR, "[type='text']")
    for a, element in enumerate(elements, start=1):
        element.send_keys('test' + str(a))
file = browser.find_element(By.ID, "file")
    current_dir = os.path.abspath(os.path.dirname(__file__))  # get the directory of the currently running script
    file_path = os.path.join(current_dir, '2_lesson2_step8.txt')  # join the file name onto that path
file.send_keys(file_path)
button = browser.find_element(By.CSS_SELECTOR, ".btn")
button.click()
finally:
time.sleep(10)
browser.quit()
|
[
"veniaminproductengine@lindenlab.com"
] |
veniaminproductengine@lindenlab.com
|
a1c051123e8faa5112afbc7549b0e7b355131a1a
|
19ba3e5edfd76fb1801ff8e290ed87e6e4a3500e
|
/select_huge_glyphs.py
|
0a8157138e07a05fb459d9fec68d1564cacab9a7
|
[] |
no_license
|
thundernixon/robofont-scaling-scripts
|
b0785d0dd93687e77a02e02716841be6b39c87ef
|
4a510c2e0bad5ef144d12b5e95c4321181f60cca
|
refs/heads/master
| 2021-08-23T21:23:37.288458
| 2017-12-06T16:18:01
| 2017-12-06T16:18:01
| 113,058,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
f = CurrentFont()
sel = []
for g in f:
for contour in g:
for seg in contour:
for point in seg:
if point.y > f.info.ascender + 200:
if g.name not in sel:
sel.append(g.name)
print(sel)
f.selection = sel
|
[
"swr410@gmail.com"
] |
swr410@gmail.com
|
38781e7b0e37c1a1936673f0ab5b178c0fbbc6f6
|
3b074226ba22a9438cfcb960b9aade03d2293394
|
/.c9/metadata/environment/addWebAccounts/cart/context.py
|
842496ca315773dfb440d42bf691cbee3491aaa6
|
[] |
no_license
|
MACmidiDEV/e-comm
|
6e5b376c15acf3dd8b757cc441b3da8b91986b57
|
923963d9fae8d22a877844649bad72b784c0b5cc
|
refs/heads/master
| 2022-11-25T14:49:05.131637
| 2020-07-03T01:56:12
| 2020-07-03T01:56:12
| 224,777,076
| 0
| 1
| null | 2022-03-12T00:06:16
| 2019-11-29T04:31:54
|
Python
|
UTF-8
|
Python
| false
| false
| 8,650
|
py
|
{"changed":true,"filter":false,"title":"context.py","tooltip":"/addWebAccounts/cart/context.py","value":"","undoManager":{"mark":0,"position":23,"stack":[[{"start":{"row":0,"column":0},"end":{"row":21,"column":85},"action":"insert","lines":["from django.shortcuts import get_object_or_404","from products.models import Product","","","def cart_contents(request):"," \"\"\""," Ensures that the cart contents are available when rendering"," every page"," \"\"\""," cart = request.session.get('cart', {})",""," cart_items = []"," total = 0"," product_count = 0"," "," for id, quantity in cart.items():"," product = get_object_or_404(Product, pk=id)"," total += quantity * product.price"," product_count += quantity"," cart_items.append({'id': id, 'quantity': quantity, 'product': product})"," "," return {'cart_items': cart_items, 'total': total, 'product_count': product_count}"],"id":1}],[{"start":{"row":3,"column":0},"end":{"row":3,"column":1},"action":"insert","lines":["#"],"id":2}],[{"start":{"row":3,"column":1},"end":{"row":3,"column":2},"action":"insert","lines":[" "],"id":3},{"start":{"row":3,"column":2},"end":{"row":3,"column":3},"action":"insert","lines":["t"]},{"start":{"row":3,"column":3},"end":{"row":3,"column":4},"action":"insert","lines":["e"]},{"start":{"row":3,"column":4},"end":{"row":3,"column":5},"action":"insert","lines":["m"]}],[{"start":{"row":3,"column":5},"end":{"row":3,"column":6},"action":"insert","lines":["p"],"id":4},{"start":{"row":3,"column":6},"end":{"row":3,"column":7},"action":"insert","lines":["e"]},{"start":{"row":3,"column":7},"end":{"row":3,"column":8},"action":"insert","lines":["r"]},{"start":{"row":3,"column":8},"end":{"row":3,"column":9},"action":"insert","lines":["y"]}],[{"start":{"row":3,"column":9},"end":{"row":3,"column":10},"action":"insert","lines":[" "],"id":5},{"start":{"row":3,"column":10},"end":{"row":3,"column":11},"action":"insert","lines":["s"]},{"start":{"row":3,"column":11},"end":{"row":3,"column":12},"action":"insert","lines":["t"]},{"start":{"row":3,"column":12},"end":{"row":3,"column":13},"action":"insert","lines":["o"]}],[{"start":{"row":3,"column":13},"end":{"row":3,"column":14},"action":"insert","lines":["r"],"id":6},{"start":{"row":3,"column":14},"end":{"row":3,"column":15},"action":"insert","lines":["a"]},{"start":{"row":3,"column":15},"end":{"row":3,"column":16},"action":"insert","lines":["g"]},{"start":{"row":3,"column":16},"end":{"row":3,"column":17},"action":"insert","lines":["e"]}],[{"start":{"row":3,"column":17},"end":{"row":3,"column":18},"action":"insert","lines":[" "],"id":7}],[{"start":{"row":3,"column":17},"end":{"row":3,"column":18},"action":"remove","lines":[" "],"id":8},{"start":{"row":3,"column":16},"end":{"row":3,"column":17},"action":"remove","lines":["e"]},{"start":{"row":3,"column":15},"end":{"row":3,"column":16},"action":"remove","lines":["g"]},{"start":{"row":3,"column":14},"end":{"row":3,"column":15},"action":"remove","lines":["a"]},{"start":{"row":3,"column":13},"end":{"row":3,"column":14},"action":"remove","lines":["r"]},{"start":{"row":3,"column":12},"end":{"row":3,"column":13},"action":"remove","lines":["o"]},{"start":{"row":3,"column":11},"end":{"row":3,"column":12},"action":"remove","lines":["t"]},{"start":{"row":3,"column":10},"end":{"row":3,"column":11},"action":"remove","lines":["s"]},{"start":{"row":3,"column":9},"end":{"row":3,"column":10},"action":"remove","lines":[" 
"]},{"start":{"row":3,"column":8},"end":{"row":3,"column":9},"action":"remove","lines":["y"]},{"start":{"row":3,"column":7},"end":{"row":3,"column":8},"action":"remove","lines":["r"]},{"start":{"row":3,"column":6},"end":{"row":3,"column":7},"action":"remove","lines":["e"]}],[{"start":{"row":3,"column":6},"end":{"row":3,"column":7},"action":"insert","lines":["a"],"id":9},{"start":{"row":3,"column":7},"end":{"row":3,"column":8},"action":"insert","lines":["r"]},{"start":{"row":3,"column":8},"end":{"row":3,"column":9},"action":"insert","lines":["y"]}],[{"start":{"row":3,"column":9},"end":{"row":3,"column":10},"action":"insert","lines":[" "],"id":10},{"start":{"row":3,"column":10},"end":{"row":3,"column":11},"action":"insert","lines":["s"]},{"start":{"row":3,"column":11},"end":{"row":3,"column":12},"action":"insert","lines":["t"]},{"start":{"row":3,"column":12},"end":{"row":3,"column":13},"action":"insert","lines":["o"]},{"start":{"row":3,"column":13},"end":{"row":3,"column":14},"action":"insert","lines":["r"]},{"start":{"row":3,"column":14},"end":{"row":3,"column":15},"action":"insert","lines":["a"]},{"start":{"row":3,"column":15},"end":{"row":3,"column":16},"action":"insert","lines":["g"]}],[{"start":{"row":3,"column":16},"end":{"row":3,"column":17},"action":"insert","lines":["e"],"id":11}],[{"start":{"row":3,"column":17},"end":{"row":3,"column":18},"action":"insert","lines":[" "],"id":12}],[{"start":{"row":3,"column":2},"end":{"row":3,"column":3},"action":"insert","lines":["s"],"id":13},{"start":{"row":3,"column":3},"end":{"row":3,"column":4},"action":"insert","lines":["t"]},{"start":{"row":3,"column":4},"end":{"row":3,"column":5},"action":"insert","lines":["o"]},{"start":{"row":3,"column":5},"end":{"row":3,"column":6},"action":"insert","lines":["r"]},{"start":{"row":3,"column":6},"end":{"row":3,"column":7},"action":"insert","lines":["e"]}],[{"start":{"row":3,"column":7},"end":{"row":3,"column":8},"action":"insert","lines":["s"],"id":14}],[{"start":{"row":3,"column":8},"end":{"row":3,"column":9},"action":"insert","lines":[" "],"id":15}],[{"start":{"row":3,"column":9},"end":{"row":3,"column":10},"action":"insert","lines":["i"],"id":16},{"start":{"row":3,"column":10},"end":{"row":3,"column":11},"action":"insert","lines":["n"]}],[{"start":{"row":3,"column":11},"end":{"row":3,"column":12},"action":"insert","lines":[" "],"id":17}],[{"start":{"row":3,"column":28},"end":{"row":3,"column":29},"action":"insert","lines":["a"],"id":18},{"start":{"row":3,"column":29},"end":{"row":3,"column":30},"action":"insert","lines":["l"]},{"start":{"row":3,"column":30},"end":{"row":3,"column":31},"action":"insert","lines":["l"]}],[{"start":{"row":3,"column":31},"end":{"row":3,"column":32},"action":"insert","lines":[" 
"],"id":19},{"start":{"row":3,"column":32},"end":{"row":3,"column":33},"action":"insert","lines":["c"]},{"start":{"row":3,"column":33},"end":{"row":3,"column":34},"action":"insert","lines":["a"]},{"start":{"row":3,"column":34},"end":{"row":3,"column":35},"action":"insert","lines":["r"]},{"start":{"row":3,"column":35},"end":{"row":3,"column":36},"action":"insert","lines":["t"]},{"start":{"row":3,"column":36},"end":{"row":3,"column":37},"action":"insert","lines":["s"]}],[{"start":{"row":3,"column":36},"end":{"row":3,"column":37},"action":"remove","lines":["s"],"id":20},{"start":{"row":3,"column":35},"end":{"row":3,"column":36},"action":"remove","lines":["t"]},{"start":{"row":3,"column":34},"end":{"row":3,"column":35},"action":"remove","lines":["r"]},{"start":{"row":3,"column":33},"end":{"row":3,"column":34},"action":"remove","lines":["a"]},{"start":{"row":3,"column":32},"end":{"row":3,"column":33},"action":"remove","lines":["c"]}],[{"start":{"row":3,"column":32},"end":{"row":3,"column":33},"action":"insert","lines":["i"],"id":21},{"start":{"row":3,"column":33},"end":{"row":3,"column":34},"action":"insert","lines":["t"]},{"start":{"row":3,"column":34},"end":{"row":3,"column":35},"action":"insert","lines":["e"]},{"start":{"row":3,"column":35},"end":{"row":3,"column":36},"action":"insert","lines":["m"]},{"start":{"row":3,"column":36},"end":{"row":3,"column":37},"action":"insert","lines":["s"]}],[{"start":{"row":3,"column":37},"end":{"row":3,"column":38},"action":"insert","lines":[" "],"id":22},{"start":{"row":3,"column":38},"end":{"row":3,"column":39},"action":"insert","lines":["i"]},{"start":{"row":3,"column":39},"end":{"row":3,"column":40},"action":"insert","lines":["n"]}],[{"start":{"row":3,"column":40},"end":{"row":3,"column":41},"action":"insert","lines":[" "],"id":23},{"start":{"row":3,"column":41},"end":{"row":3,"column":42},"action":"insert","lines":["c"]},{"start":{"row":3,"column":42},"end":{"row":3,"column":43},"action":"insert","lines":["a"]},{"start":{"row":3,"column":43},"end":{"row":3,"column":44},"action":"insert","lines":["r"]}],[{"start":{"row":3,"column":44},"end":{"row":3,"column":45},"action":"insert","lines":["t"],"id":24}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":3,"column":45},"end":{"row":3,"column":45},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1574977459388}
|
[
"mcamacho1990@icloud.com"
] |
mcamacho1990@icloud.com
|
1c5b3e260feb0abf34d105b4ab31197434afe5f3
|
3b6930a9196fe14312065238dfa65e45c3782210
|
/Python Web Django/Python Django/project1/board/urls.py
|
f41f592be312240ad03af23567c9ea05c2ec435e
|
[] |
no_license
|
KIMJINMINININN/Python
|
22d73487290d29f1c3f4c95b52e4c45917ca17ac
|
25db3f492203bccd61b57a59e59359e2ab4ae0e6
|
refs/heads/master
| 2020-08-10T11:10:18.698661
| 2020-01-16T07:48:49
| 2020-01-16T07:48:49
| 214,330,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
from django.urls import path
from . import views # import the views module from the current package
urlpatterns = [
path('index', views.index, name='index'),
path('list', views.list, name='list'),
path('write', views.write, name='write'),
path('edit', views.edit, name='edit'),
path('content', views.content, name='content'),
path('delete', views.delete, name='delete'),
path('home', views.home, name='home')
]
|
[
"kjm9596@gmail.com"
] |
kjm9596@gmail.com
|
650b1cddafdcf3bf377bbce779edd5211ecc1465
|
425b40227fc001db71b6e1ca1605f89fe397be7b
|
/reviews/models.py
|
19afd8e395b2e74984af10649d99d4736d3ed9e1
|
[] |
no_license
|
WilsonKinyua/django-jwt-authentication
|
46c4fc33822e268d960d8209ca8ae41e29a2fdb1
|
d9cde71e19fd395d6b3a63a54b71ed1c8a90e587
|
refs/heads/main
| 2023-08-30T14:26:37.467549
| 2021-11-11T18:18:15
| 2021-11-11T18:18:15
| 427,054,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
from django.db import models
from django.contrib.auth.models import User
class Company(models.Model):
name = models.CharField(max_length=255)
url = models.TextField()
def __str__(self):
return self.name
class ProductSize(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Product(models.Model):
name = models.CharField(max_length=255)
content = models.TextField()
category = models.ManyToManyField(Category, related_name='products')
created = models.DateField(auto_now_add=True)
updated = models.DateField(auto_now=True)
class Meta:
ordering = ['-created']
def __str__(self):
return self.name
class ProductSite(models.Model):
name = models.CharField(max_length=255)
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='sites', related_query_name='site')
company = models.ForeignKey(Company, on_delete=models.CASCADE, related_name='sites', related_query_name='site')
productsize = models.ForeignKey(ProductSize, on_delete=models.CASCADE, related_name='sites', related_query_name='site')
price = models.DecimalField(max_digits=9, decimal_places=2)
url = models.TextField()
created = models.DateField(auto_now_add=True)
updated = models.DateField(auto_now=True)
def __str__(self):
return self.name
class Comment(models.Model):
title = models.CharField(max_length=255)
content = models.TextField()
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='comments', related_query_name='comment')
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='comments', related_query_name='comment')
created = models.DateField(auto_now_add=True)
updated = models.DateField(auto_now=True)
def __str__(self):
return self.title
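# Related-manager usage sketch (illustrative only, not part of the original file;
# assumes a Product instance `product` already exists):
#   product.sites.all()      # ProductSite rows, via related_name='sites'
#   product.comments.all()   # Comment rows, via related_name='comments'
#   Product.objects.filter(site__price__lt=10)  # uses related_query_name='site'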
|
[
"wilsonkinyuam@gmail.com"
] |
wilsonkinyuam@gmail.com
|
33b61bf2932f4411bf9ef228e7bde07fef9b852b
|
9face6b1854d76ed54cc8655de4b4af3a644b08a
|
/tt/lm/lm_interface.py
|
eb3cf88c5c07747109daecd037c0d53f422c2ab8
|
[] |
no_license
|
oshindow/Transformer-Transducer
|
3b20597de3ed5e503130167df8b846fc62e0124d
|
aad37a26c4513fb0f2759e1c29c8cae77355bf6d
|
refs/heads/master
| 2023-03-15T18:22:29.659790
| 2021-03-11T11:14:02
| 2021-03-11T11:14:02
| 267,882,594
| 16
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,571
|
py
|
"""Language model interface."""
import argparse
from tt.interface.scorer_interface import ScorerInterface
from tt.dynamic_import import dynamic_import
from tt.fill_missing_args import fill_missing_args
class LMInterface(ScorerInterface):
"""LM Interface for ESPnet model implementation."""
@staticmethod
def add_arguments(parser):
"""Add arguments to command line argument parser."""
return parser
@classmethod
def build(cls, n_vocab: int, **kwargs):
"""Initialize this class with python-level args.
Args:
idim (int): The number of vocabulary.
Returns:
LMinterface: A new instance of LMInterface.
"""
# local import to avoid cyclic import in lm_train
from espnet.bin.lm_train import get_parser
def wrap(parser):
return get_parser(parser, required=False)
args = argparse.Namespace(**kwargs)
args = fill_missing_args(args, wrap)
args = fill_missing_args(args, cls.add_arguments)
return cls(n_vocab, args)
def forward(self, x, t):
"""Compute LM loss value from buffer sequences.
Args:
x (torch.Tensor): Input ids. (batch, len)
t (torch.Tensor): Target ids. (batch, len)
Returns:
tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Tuple of
loss to backward (scalar),
negative log-likelihood of t: -log p(t) (scalar) and
the number of elements in x (scalar)
Notes:
The last two return values are used
in perplexity: p(t)^{-n} = exp(-log p(t) / n)
"""
raise NotImplementedError("forward method is not implemented")
predefined_lms = {
"pytorch": {
"default": "espnet.nets.pytorch_backend.lm.default:DefaultRNNLM",
"seq_rnn": "espnet.nets.pytorch_backend.lm.seq_rnn:SequentialRNNLM",
"transformer": "espnet.nets.pytorch_backend.lm.transformer:TransformerLM",
},
"chainer": {"default": "espnet.lm.chainer_backend.lm:DefaultRNNLM"},
}
def dynamic_import_lm(module, backend):
"""Import LM class dynamically.
Args:
module (str): module_name:class_name or alias in `predefined_lms`
backend (str): NN backend. e.g., pytorch, chainer
Returns:
type: LM class
"""
model_class = dynamic_import(module, predefined_lms.get(backend, dict()))
assert issubclass(
model_class, LMInterface
), f"{module} does not implement LMInterface"
return model_class
|
[
"walston874848612@163.com"
] |
walston874848612@163.com
|
4d93c3ab89f7bf0f9692c97824d0989cfbb687fc
|
6f54ce52f08806075f0445e7dd206baae96ebdca
|
/PoweredBy/languages/pt-pt.py
|
80a971191b36d62890865c8f3723871283037ee6
|
[
"BSD-3-Clause"
] |
permissive
|
ykanggit/web2py-appliances
|
a93d318a214aa5b3e5cd6b47b642f2c12addba46
|
5ca7a04d5403f04aad9e90e99e10dbc05a08a50a
|
refs/heads/master
| 2022-05-06T08:55:11.089350
| 2022-04-14T19:25:02
| 2022-04-14T19:25:02
| 49,680,074
| 0
| 0
| null | 2016-01-14T22:41:45
| 2016-01-14T22:41:45
| null |
UTF-8
|
Python
| false
| false
| 4,186
|
py
|
# coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s linhas eliminadas',
'%s rows updated': '%s linhas actualizadas',
'About': 'About',
'Available databases and tables': 'Available databases and tables',
'Cannot be empty': 'Cannot be empty',
'Check to delete': 'Check to delete',
'Client IP': 'Client IP',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Created By': 'Created By',
'Created On': 'Created On',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'DB Model': 'DB Model',
'Database': 'Database',
'Delete:': 'Delete:',
'Description': 'Description',
'Edit': 'Edit',
'Edit This App': 'Edit This App',
'Edit current record': 'Edit current record',
'Email': 'Email',
'Featured': 'Featured',
'First Name': 'First Name',
'Group ID': 'Group ID',
'Hello World': 'Olá Mundo',
'Import/Export': 'Import/Export',
'Index': 'Index',
'Internal State': 'Internal State',
'Invalid Query': 'Consulta Inválida',
'Invalid email': 'Invalid email',
'Last Name': 'Last Name',
'Layout': 'Layout',
'Logged in': 'Logged in',
'Login': 'Login',
'Main Menu': 'Main Menu',
'Menu Model': 'Menu Model',
'Modified By': 'Modified By',
'Modified On': 'Modified On',
'Name': 'Name',
'New Record': 'New Record',
'No databases in this application': 'No databases in this application',
'Object or table name': 'Object or table name',
'Origin': 'Origin',
'Password': 'Password',
'Powered by': 'Powered by',
'Query:': 'Query:',
'Record ID': 'Record ID',
'Role': 'Role',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'Screenshot': 'Screenshot',
'Site Create': 'Site Create',
'Stylesheet': 'Stylesheet',
'Sure you want to delete this object?': 'Tem a certeza que deseja eliminar este objecto?',
'Table name': 'Table name',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'Timestamp': 'Timestamp',
'Update:': 'Update:',
'Url': 'Url',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User ID': 'User ID',
'Username': 'Username',
'View': 'View',
'Welcome': 'Welcome',
'Welcome %s': 'Welcome %s',
'Welcome to Gluonization': 'Bem vindo ao Web2py',
'Welcome to web2py': 'Welcome to web2py',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'cache': 'cache',
'change password': 'change password',
'click here for online examples': 'Clique aqui para exemplos online',
'click here for the administrative interface': 'Clique aqui para o painel administrativo',
'customize me!': 'customize me!',
'data uploaded': 'informação enviada',
'database': 'database',
'database %s select': 'database %s select',
'db': 'bd',
'design': 'design',
'done!': 'concluído!',
'edit profile': 'edit profile',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'export as csv file': 'export as csv file',
'forgot username?': 'forgot username?',
'insert new': 'insert new',
'insert new %s': 'insert new %s',
'invalid request': 'Pedido Inválido',
'login': 'login',
'logout': 'logout',
'new record inserted': 'novo registo inserido',
'next 100 rows': 'next 100 rows',
'or import from csv file': 'or import from csv file',
'previous 100 rows': 'previous 100 rows',
'profile': 'profile',
'record': 'record',
'record does not exist': 'registo inexistente',
'record id': 'record id',
'register': 'register',
'selected': 'selected',
'state': 'estado',
'table': 'table',
'unable to parse csv file': 'não foi possível carregar ficheiro csv',
}
|
[
"massimodipierro@Massimos-MacBook-Air.local"
] |
massimodipierro@Massimos-MacBook-Air.local
|
ad258023d7e6c5bee3653c2eab16b6bb7b1e268b
|
0dc2d21387fc245d9ef6cc59ca89b5e45e2f242a
|
/TranscodeVideos.py
|
b66e98502ca47041583636a272af641f36dc948c
|
[] |
no_license
|
lambdan/video_bla
|
b49b0580919927e2e77cb60a9f51d59b290c2d45
|
985a688b087270b2ae61e7cefab0bbea4e168293
|
refs/heads/master
| 2020-05-21T23:52:09.313204
| 2018-01-06T17:19:35
| 2018-01-06T17:19:35
| 58,374,192
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,600
|
py
|
# TranscodeVideos.py - transcode a bunch of videos and move or remove the originals
# (I used it for converting all my various .AVI files to x264 & AAC .MP4 files)
# https://github.com/lambdan/video_bla/blob/master/TranscodeVideos.py
import os
import subprocess
import sys
import shutil
import random
import time
# Configuration ##################
pathstxt = 'winpaths.txt' # path to txt file with all absolute paths to the avi files (or mpg etc).
# you can use `ls -d -1 $PWD/**/*.avi` to get a list of fullpaths to avi files
donetxt = 'done.txt' # txt file with files we have done
mode = "move" # 'move' or 'remove' original files
movepath = './delete/' # originals will be moved here, end with a / !!!
# you should also change the transcode command in the transcode function below to suit your needs
# ################################
def transcode(inputfile, outputfile):
print "\nTranscoding " + inputfile
try:
# transcode command
subprocess.call(['ruby', os.path.abspath("C:/Ruby22-x64/bin/transcode-video"), '--audio-width', 'all=surround', '--no-log', '--small', '--mp4', inputfile, '--output', outputfile])
except:
error("Transcode failed",inputfile)
def verify(inputfile, outputfile):
# verifies transcodes by comparing duration of original video and new video
print "\nVerifying... ",
try:
originalLength = int(subprocess.check_output(['mediainfo', '--Inform=General;%Duration%', inputfile]))
except: # original file probably contained an illegal character
error("Illegal char or not good path",inputfile)
addToDone(inputfile)
return False
# if the new output is corrupt, mediainfo wont return a length
try:
newLength = int(subprocess.check_output(['mediainfo', '--Inform=General;%Duration%', outputfile]))
except: # corrupt
newLength = 0
diffLength = abs(originalLength - newLength)
if diffLength > 5000: # 5 seconds in difference
print "bad"
return False
else:
print "ok!"
return True
def moveOriginal(inputfile):
# moves or removes original files
if mode == "move":
filename = os.path.basename(inputfile)
dest = movepath + filename
if os.path.isfile(dest): # maybe file with identical name already exists
i = 0
        while os.path.isfile(dest): # increase i until the new filename doesn't exist
name, ext = os.path.splitext(filename)
dest = movepath + name + "_" + str(i) + ext #suffix with _i
i += 1
print "\nMoving original file...",
shutil.move(inputfile, os.path.abspath(dest))
print " ok!"
elif mode == "remove" or "delete":
print "\nRemoving original file...",
os.remove(inputfile)
print " ok!"
else:
print "Unsupported mode: " + mode
sys.exit(1)
def addToDone(path):
# adds the original files path to a txt so we can keep track of which we have done
with open(donetxt, "a") as myfile:
myfile.write(path)
print "\nAdded to " + donetxt
def error(reason, path):
# writes out error messages to a txt
if not os.path.isfile('errors.txt'): # create file if not exist
open('errors.txt', 'w').close()
with open('errors.txt', "a") as myfile:
myfile.write("These files have been skipped. You need to do these manually.\n\n")
with open('errors.txt', "a") as myfile:
myfile.write(reason + ": " + path + "\n")
print "\nERROR: " + reason + ": " + path
# make sure we have all files we need to run
if not os.path.isfile(pathstxt):
print 'Textfile "' + pathstxt + '" with paths to files to be processed not found. Please create one.'
raw_input("Press the <ENTER> key to continue...")
sys.exit(1)
if not os.path.isfile(donetxt):
print 'Creating ' + donetxt
open(donetxt, 'w').close()
if not os.path.isdir(movepath):
print 'Creating ' + movepath + ' folder'
os.makedirs(movepath)
x = 0 # this will be how many we have done
num_lines = sum(1 for line in open(pathstxt)) # count how many lines (files) we have to do
with open(pathstxt) as f:
for filepath in f:
x += 1
# check if we have already done this file
donefiles = open(donetxt, 'r')
donelist = donefiles.readlines()
donefiles.close()
found = False
for line in donelist:
if filepath in line:
found = True
#print "Already done: " + filepath[:-1]
continue
# we have not transcoded this video
if found == False:
if os.name == "nt":
# set title of cmd window in windows to show how many files we've done
os.system("title (" + str(x) + "/" + str(num_lines) + ") Converting: " + filepath[:-1])
#TODO: do the same in unix
# set up input and output filepaths
inputfile = r"" + os.path.abspath(filepath[:-1]) + ""
outputfile = r"" + os.path.abspath(filepath[:-4] + 'mp4') + ""
transcode(inputfile, outputfile)
if verify(inputfile, outputfile): # good transcode on the first try, all good
moveOriginal(inputfile)
addToDone(filepath)
else: # length differs by more than 5 seconds, or output is corrupt
try:
os.remove(outputfile)
        except: # file doesn't exist or original file had illegal character
continue
print "\nLength differs by more than 5 seconds, trying to make a new transcode"
transcode(inputfile, outputfile)
if verify(inputfile, outputfile): # succeeded now
moveOriginal(inputfile)
addToDone(filepath)
else:
os.remove(outputfile)
error("Length kept on differing", filepath)
addToDone(filepath)
# give user some time to stop me
print "\nSleeping 3 seconds"
print "Hit CTRL C now to stop script"
time.sleep(3)
|
[
"noreply@github.com"
] |
lambdan.noreply@github.com
|
344d8580155be95612f6e0637afcc43da9de33d3
|
a85b6f4b704cde9b6a7fbee55c171e49970b8eb8
|
/Module2/m2l2s6.py
|
079778fdc9001f083c4129f84afb675fa949f26f
|
[] |
no_license
|
mamboosjka/stepik-auto-tests-course
|
690fb5aee3d3a4c8afa9ef0e5e72dba3125f4b1a
|
dfc8c31428ba33169f1494cab97f467f3ad0492a
|
refs/heads/main
| 2023-04-08T03:49:42.124582
| 2021-04-08T11:05:53
| 2021-04-08T11:05:53
| 355,862,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
import time
import math
from selenium import webdriver
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
link = "http://suninjuly.github.io/execute_script.html"
browser = webdriver.Chrome()
try:
browser.get(link)
x = int(browser.find_element_by_id("input_value").text)
res = calc(x)
answer = browser.find_element_by_id("answer")
answer.send_keys(str(res))
cb = browser.find_element_by_css_selector('input[type="checkbox"]')
browser.execute_script('return arguments[0].scrollIntoView(true);', cb)
cb.click()
rb = browser.find_element_by_css_selector('input[type="radio"][name="ruler"][value="robots"]')
browser.execute_script('return arguments[0].scrollIntoView(true);', rb)
rb.click()
submit_btn = browser.find_element_by_css_selector('button.btn[type="submit"]')
browser.execute_script('return arguments[0].scrollIntoView(true);', submit_btn)
submit_btn.click()
finally:
time.sleep(10)
browser.quit()
|
[
"mamboosjka@gmail.com"
] |
mamboosjka@gmail.com
|
dd6541d9d8ff9ab4794e02ad2543b073688d076c
|
68c9a6e674a19b3e09909dcd92edea2ee0fdbfbe
|
/ichwiwiko/testboard/sorts/quicksort.py
|
1d7da340da69278f470e6cac8110594f2bfa1bad
|
[] |
no_license
|
gomsang/AlgorithmTraining
|
b5446beb34ceeb8baa0c3448ec2165faf48a5a90
|
d4307e809953aa4a71796a179bb8e8867fc5327b
|
refs/heads/master
| 2023-03-16T11:16:35.204193
| 2022-10-20T11:35:15
| 2022-10-20T11:35:15
| 235,547,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]
def quick_sort(array, start, end):
if start >= end:
return
pivot = start
left = start + 1
right = end
while left <= right:
while left <= end and array[left] <= array[pivot]:
left += 1
while right > start and array[right] >= array[pivot]:
right -= 1
if left > right:
array[right], array[pivot] = array[pivot], array[right]
else:
array[left], array[right] = array[right], array[left]
quick_sort(array, start, right - 1)
quick_sort(array, right + 1, end)
quick_sort(array, 0, len(array) - 1)
print(array)
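# Sanity check: the sample array above sorts to
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]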
|
[
"gyeongrok.kim@gomsang.com"
] |
gyeongrok.kim@gomsang.com
|
09309d271545275a88c3de88371ac5663368f43a
|
61d7f7dec12990c93f9fef622004adba9365cd6f
|
/mac/shop/views.py
|
e876fa9df8a2844ac61b6c6c8fdbbfe03567e157
|
[] |
no_license
|
dheeraj-iitk/E-commerce
|
647cedff7281cd57eb4507593c07c6544ccb0bab
|
7981671934e6514e7ecad24ba17527085394c3ed
|
refs/heads/master
| 2022-12-01T20:55:46.273180
| 2020-08-09T07:50:51
| 2020-08-09T07:50:51
| 286,191,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,490
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Product,Contact,Order,OrderUpdate
from math import ceil
import json
# Create your views here.
def index(request):
# products = Product.objects.all()
# print(products)
# n = len(products)
# nSlides = n//4 + ceil((n/4)-(n//4))
allProds = []
catprods = Product.objects.values('category', 'id')
cats = {item['category'] for item in catprods}
for cat in cats:
prod = Product.objects.filter(category=cat)
n = len(prod)
        nSlides = n // 4 + ceil((n / 4) - (n // 4))  # == ceil(n / 4): carousel slides of up to 4 products each
allProds.append([prod, range(1, nSlides), nSlides])
# params = {'no_of_slides':nSlides, 'range': range(1,nSlides),'product': products}
# allProds = [[products, range(1, nSlides), nSlides],
# [products, range(1, nSlides), nSlides]]
params = {'allProds':allProds}
return render(request, 'index.html', params)
def about(request):
return render(request,'about.html')
def searchmatch(query,item):
if query in item.category.lower() or query in item.product_name.lower() or query in item.desc.lower():
return True
else:
return False
def search(request):
query=request.GET.get('search')
allProds = []
catprods = Product.objects.values('category', 'id')
cats = {item['category'] for item in catprods}
for cat in cats:
prodtemp = Product.objects.filter(category=cat)
prod=[item for item in prodtemp if searchmatch(query,item) ]
print(prod)
n = len(prod)
nSlides = n // 4 + ceil((n / 4) - (n // 4))
if len(prod) != 0:
allProds.append([prod, range(1, nSlides), nSlides])
params = {'allProds': allProds, "msg": ""}
if len(allProds) == 0 or len(query)==0:
params = {'msg': "Please make sure to enter relevant search query"}
return render(request,'search.html',params)
def prodview(request,myid): #this is django default id
product=Product.objects.filter(id=myid)
print(product)
params={'product':product}
return render(request,'prodview.html',params)
def checkout(request):
if request.method=="POST":
items_json = request.POST.get('itemsJson', '')
amount = request.POST.get('amount', '')
name = request.POST.get('name', '')
email = request.POST.get('email', '')
address = request.POST.get('address1', '') + " " + request.POST.get('address2', '')
city = request.POST.get('city', '')
state = request.POST.get('state', '')
zip_code = request.POST.get('zip_code', '')
phone = request.POST.get('phone', '')
order = Order(items_json=items_json, name=name, email=email, address=address, city=city,
state=state, zip_code=zip_code, phone=phone,amount=amount)
order.save()
update=OrderUpdate(order_id=order.order_id,update_desc="Your item is placed")
update.save()
thank = True
id = order.order_id
return render(request, 'checkout.html', {'thank':thank, 'id': id})
return render(request,'checkout.html')
def tracker(request):
if request.method=="POST":
orderId = request.POST.get('orderId', '')
email = request.POST.get('email', '')
try:
order = Order.objects.filter(order_id=orderId, email=email)
print(order)
if len(order)>0:
update = OrderUpdate.objects.filter(order_id=orderId)
updates = []
for item in update:
updates.append({'text': item.update_desc, 'time': item.timestamp})
response = json.dumps({"status":"success", "updates": updates, "itemsJson": order[0].items_json}, default=str)
return HttpResponse(response)
else:
return HttpResponse('{"status":"noitem"}')
except Exception as e:
return HttpResponse('{"status":"error"}')
return render(request, 'tracker.html')
def contact(request):
thank=False
if(request.method=='POST'):
name2=request.POST.get('name','')
email2=request.POST.get('email','')
phone2=request.POST.get('phone','')
desc2=request.POST.get('desc','')
print(name2,email2,phone2,desc2)
contact=Contact(name=name2,email=email2,phone=phone2,desc=desc2)
contact.save()
thank=True
return render(request,'contact.html',{'Thank':thank})
|
[
"adheeraj"
] |
adheeraj
|
aa4e25862d208c3ca7a5087ea05eb8aa0943adab
|
fd928e7226d629fbc75c51c88a7523ecc9b0ec6e
|
/src/test/mylist.py
|
42cdb538aeff19f4f153e958f2e891733b6cd726
|
[] |
no_license
|
z-mac/MyFirstWork
|
eb20a0d1cf3184f23ab5d913eaf02c615d97253d
|
62ff219e7a572c4f63cee0fadddaf2020e586dac
|
refs/heads/master
| 2021-04-27T07:21:40.261998
| 2018-02-25T02:11:42
| 2018-02-25T02:11:42
| 122,630,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
'''
Created on 2018-02-23
@author: Administrator
'''
from builtins import repr
class MyList(object):
def __init__(self, start = []):
self.data = []
for x in start: self.data.append(x)
def __add__(self, other):
return MyList(self.data + other)
def __mul__(self, time):
return MyList(self.data * time)
def __getitem__(self, offset):
return self.data[offset]
def __len__(self):
return len(self.data)
def __getslice__(self, low, high):
        return MyList(self.data[low:high])  # slice (the original tuple index was a bug)
def append(self, node):
self.data.append(node)
def __getattr__(self, name):
return getattr(self.data, name)
def __repr__(self):
return repr(self.data)
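# Minimal usage sketch (illustrative, not part of the original file):
#   ml = MyList([1, 2, 3])
#   print(ml + [4])        # -> [1, 2, 3, 4]
#   print(ml[0], len(ml))  # -> 1 3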
|
[
"zhudg06@163.com"
] |
zhudg06@163.com
|
8558affeb8a8ca35f2353bd99f82f3e24fbb62b4
|
13bf5804662ade86f3ba2801d436374134638ada
|
/filt_func_lam_git.py
|
907f796776f2ed43e946650f94d4ba3cf1116ca5
|
[] |
no_license
|
Timothy-L-Baron/accum_filt_func_lamb
|
20cb50e99c30093e2f40709cc5c8353a6484dc25
|
f36fd9904b7211e88124495348f567cee24b0ad8
|
refs/heads/master
| 2020-12-10T03:58:05.474125
| 2020-01-13T02:36:25
| 2020-01-13T02:36:25
| 233,495,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
#Your choice of function or lambda expression
l1 = ['left', 'up', 'front']
l2 = ['right', 'down', 'back']
"""def length_tester(x1, x2):
if x1 > 3 and x2 > 3:
return True
else:
return False
filter(lambda x1: x1 > 3, l1)
filter(lambda x2: x2 > 3, l2)
opposites = [lambda x1, x2: length_tester, zip(l1,l2)]
print(opposites)"""
opposites = list(filter(lambda x: (len(x[0]) > 3 and len(x[1]) > 3), zip(l1, l2)))
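# Only pairs where both words are longer than 3 characters survive the filter:
# opposites == [('left', 'right'), ('front', 'back')]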
|
[
"noreply@github.com"
] |
Timothy-L-Baron.noreply@github.com
|
15c2b5e4fd096ba9e34456682e877ad061103472
|
ccb29eb93e59bebbeacc76cd5de1b83d2963019e
|
/cache/.mako.tmp/comments_helper.tmpl.py
|
266dfe7096456f039860c344e67139308da3be9d
|
[] |
no_license
|
Henrilin28/blog_nikola
|
0bedc654f134d06e9caabc745a43cf366a6ae193
|
14e2cc08c4f17543d41e79c1cc3ca961ed81dc82
|
refs/heads/master
| 2021-01-01T04:50:25.487948
| 2016-05-22T02:53:45
| 2016-05-22T02:53:45
| 59,391,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,164
|
py
|
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1463885624.645596
_enable_loop = True
_template_filename = '/Users/Henrilin28/.pyenv/versions/Nikola/lib/python3.5/site-packages/nikola/data/themes/base/templates/comments_helper.tmpl'
_template_uri = 'comments_helper.tmpl'
_source_encoding = 'utf-8'
_exports = ['comment_link_script', 'comment_form', 'comment_link']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
ns = runtime.TemplateNamespace('muut', context._clean_inheritance_tokens(), templateuri='comments_helper_muut.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, 'muut')] = ns
ns = runtime.TemplateNamespace('livefyre', context._clean_inheritance_tokens(), templateuri='comments_helper_livefyre.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, 'livefyre')] = ns
ns = runtime.TemplateNamespace('intensedebate', context._clean_inheritance_tokens(), templateuri='comments_helper_intensedebate.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, 'intensedebate')] = ns
ns = runtime.TemplateNamespace('isso', context._clean_inheritance_tokens(), templateuri='comments_helper_isso.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, 'isso')] = ns
ns = runtime.TemplateNamespace('facebook', context._clean_inheritance_tokens(), templateuri='comments_helper_facebook.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, 'facebook')] = ns
ns = runtime.TemplateNamespace('googleplus', context._clean_inheritance_tokens(), templateuri='comments_helper_googleplus.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, 'googleplus')] = ns
ns = runtime.TemplateNamespace('disqus', context._clean_inheritance_tokens(), templateuri='comments_helper_disqus.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, 'disqus')] = ns
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
__M_writer('\n')
__M_writer('\n')
__M_writer('\n')
__M_writer('\n')
__M_writer('\n')
__M_writer('\n')
__M_writer('\n')
__M_writer('\n\n')
__M_writer('\n\n')
__M_writer('\n\n')
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link_script(context):
__M_caller = context.caller_stack._push_frame()
try:
comment_system = context.get('comment_system', UNDEFINED)
intensedebate = _mako_get_namespace(context, 'intensedebate')
googleplus = _mako_get_namespace(context, 'googleplus')
disqus = _mako_get_namespace(context, 'disqus')
muut = _mako_get_namespace(context, 'muut')
livefyre = _mako_get_namespace(context, 'livefyre')
isso = _mako_get_namespace(context, 'isso')
facebook = _mako_get_namespace(context, 'facebook')
__M_writer = context.writer()
__M_writer('\n')
if comment_system == 'disqus':
__M_writer(' ')
__M_writer(str(disqus.comment_link_script()))
__M_writer('\n')
elif comment_system == 'livefyre':
__M_writer(' ')
__M_writer(str(livefyre.comment_link_script()))
__M_writer('\n')
elif comment_system == 'intensedebate':
__M_writer(' ')
__M_writer(str(intensedebate.comment_link_script()))
__M_writer('\n')
elif comment_system == 'muut':
__M_writer(' ')
__M_writer(str(muut.comment_link_script()))
__M_writer('\n')
elif comment_system == 'googleplus':
__M_writer(' ')
__M_writer(str(googleplus.comment_link_script()))
__M_writer('\n')
elif comment_system == 'facebook':
__M_writer(' ')
__M_writer(str(facebook.comment_link_script()))
__M_writer('\n')
elif comment_system == 'isso':
__M_writer(' ')
__M_writer(str(isso.comment_link_script()))
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_form(context,url,title,identifier):
__M_caller = context.caller_stack._push_frame()
try:
comment_system = context.get('comment_system', UNDEFINED)
intensedebate = _mako_get_namespace(context, 'intensedebate')
googleplus = _mako_get_namespace(context, 'googleplus')
disqus = _mako_get_namespace(context, 'disqus')
muut = _mako_get_namespace(context, 'muut')
livefyre = _mako_get_namespace(context, 'livefyre')
isso = _mako_get_namespace(context, 'isso')
facebook = _mako_get_namespace(context, 'facebook')
__M_writer = context.writer()
__M_writer('\n')
if comment_system == 'disqus':
__M_writer(' ')
__M_writer(str(disqus.comment_form(url, title, identifier)))
__M_writer('\n')
elif comment_system == 'livefyre':
__M_writer(' ')
__M_writer(str(livefyre.comment_form(url, title, identifier)))
__M_writer('\n')
elif comment_system == 'intensedebate':
__M_writer(' ')
__M_writer(str(intensedebate.comment_form(url, title, identifier)))
__M_writer('\n')
elif comment_system == 'muut':
__M_writer(' ')
__M_writer(str(muut.comment_form(url, title, identifier)))
__M_writer('\n')
elif comment_system == 'googleplus':
__M_writer(' ')
__M_writer(str(googleplus.comment_form(url, title, identifier)))
__M_writer('\n')
elif comment_system == 'facebook':
__M_writer(' ')
__M_writer(str(facebook.comment_form(url, title, identifier)))
__M_writer('\n')
elif comment_system == 'isso':
__M_writer(' ')
__M_writer(str(isso.comment_form(url, title, identifier)))
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link(context,link,identifier):
__M_caller = context.caller_stack._push_frame()
try:
comment_system = context.get('comment_system', UNDEFINED)
intensedebate = _mako_get_namespace(context, 'intensedebate')
googleplus = _mako_get_namespace(context, 'googleplus')
disqus = _mako_get_namespace(context, 'disqus')
muut = _mako_get_namespace(context, 'muut')
livefyre = _mako_get_namespace(context, 'livefyre')
isso = _mako_get_namespace(context, 'isso')
facebook = _mako_get_namespace(context, 'facebook')
__M_writer = context.writer()
__M_writer('\n')
if comment_system == 'disqus':
__M_writer(' ')
__M_writer(str(disqus.comment_link(link, identifier)))
__M_writer('\n')
elif comment_system == 'livefyre':
__M_writer(' ')
__M_writer(str(livefyre.comment_link(link, identifier)))
__M_writer('\n')
elif comment_system == 'intensedebate':
__M_writer(' ')
__M_writer(str(intensedebate.comment_link(link, identifier)))
__M_writer('\n')
elif comment_system == 'muut':
__M_writer(' ')
__M_writer(str(muut.comment_link(link, identifier)))
__M_writer('\n')
elif comment_system == 'googleplus':
__M_writer(' ')
__M_writer(str(googleplus.comment_link(link, identifier)))
__M_writer('\n')
elif comment_system == 'facebook':
__M_writer(' ')
__M_writer(str(facebook.comment_link(link, identifier)))
__M_writer('\n')
elif comment_system == 'isso':
__M_writer(' ')
__M_writer(str(isso.comment_link(link, identifier)))
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"filename": "/Users/Henrilin28/.pyenv/versions/Nikola/lib/python3.5/site-packages/nikola/data/themes/base/templates/comments_helper.tmpl", "line_map": {"23": 6, "26": 4, "29": 5, "32": 9, "35": 8, "38": 7, "41": 3, "44": 0, "49": 2, "50": 3, "51": 4, "52": 5, "53": 6, "54": 7, "55": 8, "56": 9, "57": 27, "58": 45, "59": 63, "65": 47, "77": 47, "78": 48, "79": 49, "80": 49, "81": 49, "82": 50, "83": 51, "84": 51, "85": 51, "86": 52, "87": 53, "88": 53, "89": 53, "90": 54, "91": 55, "92": 55, "93": 55, "94": 56, "95": 57, "96": 57, "97": 57, "98": 58, "99": 59, "100": 59, "101": 59, "102": 60, "103": 61, "104": 61, "105": 61, "111": 11, "123": 11, "124": 12, "125": 13, "126": 13, "127": 13, "128": 14, "129": 15, "130": 15, "131": 15, "132": 16, "133": 17, "134": 17, "135": 17, "136": 18, "137": 19, "138": 19, "139": 19, "140": 20, "141": 21, "142": 21, "143": 21, "144": 22, "145": 23, "146": 23, "147": 23, "148": 24, "149": 25, "150": 25, "151": 25, "157": 29, "169": 29, "170": 30, "171": 31, "172": 31, "173": 31, "174": 32, "175": 33, "176": 33, "177": 33, "178": 34, "179": 35, "180": 35, "181": 35, "182": 36, "183": 37, "184": 37, "185": 37, "186": 38, "187": 39, "188": 39, "189": 39, "190": 40, "191": 41, "192": 41, "193": 41, "194": 42, "195": 43, "196": 43, "197": 43, "203": 197}, "uri": "comments_helper.tmpl", "source_encoding": "utf-8"}
__M_END_METADATA
"""
|
[
"imhenry@me.com"
] |
imhenry@me.com
|
7000a4849970f918ef076d96b36b84c8793c749d
|
0e0a51274af5ebee7972bf3aaaf913ca3ef9c339
|
/lab5prog.py
|
98e80636fa143477d0b293dbe67b8d3b074f17aa
|
[] |
no_license
|
abhirungta15/abhishek
|
f2eee351fdf33951866cd2849a619e411954e9c9
|
aaf39a7835656d11fe5d31425465d93cacbb8d48
|
refs/heads/master
| 2020-07-01T14:12:02.841423
| 2019-11-21T07:17:49
| 2019-11-21T07:17:49
| 201,193,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
class callDetails:
def __init__(self,c,r,d,t):
self.cal = c
self.rec = r
self.dur = d
self.typ = t
def disp(self):
print("\nCaller: ",self.cal)
print("Reciever: ",self.rec)
print("Duration: ",self.dur)
print("Type: ",self.typ)
class Util:
def __init__(self):
self.list_of_call_objects = []
self.count = 0
self.count1 = 0
self.count2 = 0
def parse_customer(self,list_of_call_string):
for i in list_of_call_string:
x = i.split(",")
for j in x:
if j == "STD":
self.count+= 1
elif j == "ISD":
self.count1+= 1
elif j == "Local":
self.count2+= 1
o = callDetails(*x)
self.list_of_call_objects.append(o)
def disp(self):
for i in self.list_of_call_objects:
i.disp()
print("\nSTD: ",self.count)
print("ISD: ",self.count1)
print("Local: ",self.count2)
call = '9123848912,12385612934,23,STD'
call2 = '2395713534,29435812359,12,Local'
call3 = '123854295,105949324,18,ISD'
call4 = '134845,34953460,19,ISD'
list_of_call_string = [call, call2, call3, call4]
util = Util()
util.parse_customer(list_of_call_string)
util.disp()
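# Expected tail of the output for the four sample call strings above:
#   STD: 1, ISD: 2, Local: 1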
|
[
"noreply@github.com"
] |
abhirungta15.noreply@github.com
|
09b35917c3d630de765d4c01397568171fea5663
|
a14ec6e367e6a471bfc74c066fb958ef585bc269
|
/2021/25/a.py
|
4071853842bf6b9a62759256250857218792d9f3
|
[] |
no_license
|
jimhendy/AoC
|
90641814ed431f46a8500ff0f022c6c957567563
|
a1727f88bc2e6f739d65902dce188377966b3fb4
|
refs/heads/master
| 2023-09-02T14:48:39.860352
| 2023-08-28T08:09:19
| 2023-08-28T08:09:19
| 225,152,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,423
|
py
|
import os
def step(east, south, width, height):
next_east = set()
next_south = set()
for current in east:
dest = (
(current[0] + 1, current[1]) if current[0] < width - 1 else (0, current[1])
)
if dest not in east and dest not in south:
next_east.add(dest)
else:
next_east.add(current)
for current in south:
dest = (
(current[0], current[1] + 1) if current[1] < height - 1 else (current[0], 0)
)
if dest not in next_east and dest not in south:
next_south.add(dest)
else:
next_south.add(current)
return next_east, next_south
def run(inputs):
east = set()
south = set()
for i, line in enumerate(inputs.split(os.linesep)):
for j, char in enumerate(line):
if char == ">":
east.add((j, i))
elif char == "v":
south.add((j, i))
elif char != ".":
msg = f"Unexpected map character: {char}"
raise RuntimeError(msg)
width = j + 1
height = i + 1
n_steps = 0
while True:
next_east, next_south = step(east, south, width, height)
n_steps += 1
        if len(next_east | east) == len(east) and len(next_south | south) == len(south):  # nothing moved in either herd
return n_steps
else:
east, south = next_east, next_south
|
[
"jimhendy88@gmail.com"
] |
jimhendy88@gmail.com
|
6f0364bb09295c95f320d0a1705d65ce785226a2
|
d568222a97c64b6a98365188b45abda2a3a1a21a
|
/app/core/tests/test_admin.py
|
5caf249936c3414f6e54e62b76e79e63ef994f44
|
[
"MIT"
] |
permissive
|
nguyenlinh171/recipe-app-api
|
454b5f6889371bfb5997fcf43e5cb442ba718c98
|
339feb3df6aed6d424a19ca0748a49cd6ed173be
|
refs/heads/master
| 2022-12-18T18:21:11.310735
| 2020-09-12T22:37:53
| 2020-09-12T22:37:53
| 279,976,158
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
from django.test import TestCase, Client
"""Additionally import the test client which allows us to make test
requests to our application in our unit tests"""
from django.contrib.auth import get_user_model
from django.urls import reverse
"""reverse is a helper function which allows us to create url for our
admin page"""
class AdminSiteTests(TestCase):
def setUp(self):
""""Setup function is run before every test is run, sometimes there are
setups that need to be run before every test in our test case class
Setup consists of creating test client. We're gonna add a new user
that we can use to test, make sure the user is logged into our client,
create a regular user that is not authenticated or that we can use to
list in our admin page"""
self.client = Client()
"""Set to self a Client variable, accessed to other tests"""
self.admin_user = get_user_model().objects.create_superuser(
email='nguyenlinh171@gmail.com',
password='password123'
)
self.client.force_login(self.admin_user)
"""Log the admin user to the Client. Use the Client helper function
that allows you to log a user in with the Django authentication"""
self.user = get_user_model().objects.create_user(
email='lihn.n@yahoo.com',
password='test123',
name='Test user full name'
)
"""Make changes to the admin.py file to make sure it supports our custom
user model using email instead of username"""
def test_users_listed(self):
"""Test 1: Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
"""Generate a URL for our list user page. This url is defined in the
Django admin documentation listed in the resources. Reverse function
helps to update all changes at once"""
res = self.client.get(url)
"""response = use our test client to perform a http test on the url"""
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
"""The contains assertion checks our response contains certain items
it also check that the http response is http200"""
def test_user_change_page(self):
"""Test 2: Test that the user edit page works,
w/ status code = http200"""
url = reverse('admin:core_user_change', args=[self.user.id])
"""the reverse function will create an url like this
/admin/core/user/user.id, args = arguments, anything passing
to args will be assigned to the url"""
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test 3: Test that the create user page works"""
url = reverse('admin:core_user_add')
"""admin:core_user_add is the standard url, no need args
/admin/core/user/add"""
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
|
[
"54919030+nguyenlinh171@users.noreply.github.com"
] |
54919030+nguyenlinh171@users.noreply.github.com
|
8431d0865f52e94db4a1aa801d6483375ecccee0
|
ff05d487fa7a79e3045c139421ce9031b2aa7360
|
/effdet/evaluator.py
|
8a7222c1ea527e4546e0f47650c00a22faf68442
|
[
"Apache-2.0"
] |
permissive
|
phager90/efficientdet-pytorch
|
eb92e6d5d4eb6c367d23081ce6abd0b2d1fa0cf2
|
bbd84c0e7ec2a23c6ae7447c437789524ba141dd
|
refs/heads/master
| 2023-07-17T13:33:25.835335
| 2021-08-30T18:23:37
| 2021-08-30T18:23:37
| 291,003,268
| 0
| 0
|
Apache-2.0
| 2020-08-28T09:14:34
| 2020-08-28T09:14:34
| null |
UTF-8
|
Python
| false
| false
| 3,130
|
py
|
import torch
import torch.distributed as dist
import abc
import json
from .distributed import synchronize, is_main_process, all_gather_container
from pycocotools.cocoeval import COCOeval
class Evaluator:
def __init__(self):
pass
@abc.abstractmethod
def add_predictions(self, output, target):
pass
@abc.abstractmethod
def evaluate(self):
pass
class COCOEvaluator(Evaluator):
def __init__(self, coco_api, distributed=False):
super().__init__()
self.coco_api = coco_api
self.distributed = distributed
self.distributed_device = None
self.img_ids = []
self.predictions = []
def reset(self):
self.img_ids = []
self.predictions = []
def add_predictions(self, detections, target):
if self.distributed:
if self.distributed_device is None:
# cache for use later to broadcast end metric
self.distributed_device = detections.device
synchronize()
detections = all_gather_container(detections)
#target = all_gather_container(target)
sample_ids = all_gather_container(target['img_id'])
if not is_main_process():
return
else:
sample_ids = target['img_id']
detections = detections.cpu()
sample_ids = sample_ids.cpu()
for index, sample in enumerate(detections):
image_id = int(sample_ids[index])
for det in sample:
score = float(det[4])
if score < .001: # stop when below this threshold, scores in descending order
break
coco_det = dict(
image_id=image_id,
bbox=det[0:4].tolist(),
score=score,
category_id=int(det[5]))
self.img_ids.append(image_id)
self.predictions.append(coco_det)
def evaluate(self):
if not self.distributed or dist.get_rank() == 0:
assert len(self.predictions)
json.dump(self.predictions, open('./temp.json', 'w'), indent=4)
results = self.coco_api.loadRes('./temp.json')
coco_eval = COCOeval(self.coco_api, results, 'bbox')
coco_eval.params.imgIds = self.img_ids # score only ids we've used
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
metric = coco_eval.stats[0] # mAP 0.5-0.95
if self.distributed:
dist.broadcast(torch.tensor(metric, device=self.distributed_device), 0)
else:
metric = torch.tensor(0, device=self.distributed_device)
dist.broadcast(metric, 0)
metric = metric.item()
self.reset()
return metric
class FastMapEvalluator(Evaluator):
def __init__(self, distributed=False):
super().__init__()
self.distributed = distributed
self.predictions = []
def add_predictions(self, output, target):
pass
def evaluate(self):
pass
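# Typical single-process usage sketch (illustrative; `coco_api` is a
# pycocotools COCO object and `loader` yields (detections, target) pairs):
#   evaluator = COCOEvaluator(coco_api, distributed=False)
#   for detections, target in loader:
#       evaluator.add_predictions(detections, target)
#   mAP = evaluator.evaluate()  # coco_eval.stats[0], i.e. mAP @ IoU 0.5:0.95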
|
[
"rwightman@gmail.com"
] |
rwightman@gmail.com
|
062ed619a2a9785ee7c7bb6df7787b25c9ac2d5d
|
7c955fedadad8428f2dfe377e419784e89dc34cf
|
/lab2del1/Lab-2-del-1-grupp-27 1/incremental.py
|
58a8f9901c9478e0a6e14358bfabc7c271488edc
|
[] |
no_license
|
magdulator/d0012e
|
b5551708a27739375128bc7940bd5e1bf3a3f8f7
|
a6b8c27ed0a100f936a5a3f7121a3e9a383fd943
|
refs/heads/main
| 2023-01-31T22:47:46.779795
| 2020-12-15T11:59:55
| 2020-12-15T11:59:55
| 311,751,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
import random
import math
def generateList(maxNum, length):
randomList = []
for i in range(0, length):
n = random.randint(0,maxNum)
randomList.append(n)
return randomList
def incrementalAlgo(randomList):
print(randomList)
minList = [math.inf]*3
if len(randomList) < 3:
return "List too small"
for i in range(len(randomList)):
current = randomList[i]
if(current < minList[0]):
minList = [current, minList[0], minList[1]]
elif(current < minList[1]):
minList = [minList[0], current, minList[1]]
elif(current < minList[2]):
minList[2] = current
print (minList)
def main():
random = generateList(20, 10)
incrementalAlgo(random)
main()
# W(3) = 3
# W(n) = 3(n-3) = 3n - 9, n>3
# 3n -9 = O(n)
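# Example: for randomList = [5, 7, 9, 0, 3, 1], the three smallest values
# kept in minList are [0, 1, 3].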
|
[
"marcus99.ma@gmail.com"
] |
marcus99.ma@gmail.com
|
c2c159837beaa43638ecdef24b0aa89e5d32595c
|
b4c405599e7ece55cd65647b2d08ec434890c0cc
|
/ValidAnagram0242.py
|
8423a294f21d9d23424e34cd46db0fca5269c3cf
|
[] |
no_license
|
Naveen1789/leetcode
|
0f5742c26ea74af3bfaaf95c2302471816fb3e7b
|
59facf3c3806db0405de957850b1266a3fd39f7c
|
refs/heads/master
| 2020-04-26T20:59:12.843796
| 2019-07-08T07:30:50
| 2019-07-08T07:30:50
| 173,828,509
| 0
| 0
| null | 2019-07-08T07:30:51
| 2019-03-04T22:02:47
|
Python
|
UTF-8
|
Python
| false
| false
| 198
|
py
|
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
return ''.join(sorted(s)) == ''.join(sorted(t))
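# Quick sanity checks (illustrative, not part of the original file):
#   Solution().isAnagram("anagram", "nagaram")  # -> True
#   Solution().isAnagram("rat", "car")          # -> False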
|
[
"nnarayan@groupon.com"
] |
nnarayan@groupon.com
|
d68cade990aa93e0622fa323fa783729d6f2b902
|
3134101271dda48daab261d8719303a00212bb89
|
/nlu/sklearn_Classification/train.py
|
ab8086246ff589d165ad0e5ac93060e11b2ed07e
|
[] |
no_license
|
ykklin/KBQA-for-Diagnosis
|
e944f71264b7bd90c16776d6ec0a7f230fe381f9
|
8847ef445beb1000b1905b8fea57114ef8d56597
|
refs/heads/main
| 2023-05-28T07:20:20.836969
| 2021-06-07T05:39:58
| 2021-06-07T05:39:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,214
|
py
|
# -*- coding:utf-8 -*-
import os
import pickle
import random
import numpy as np
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
seed = 222
random.seed(seed)
np.random.seed(seed)
def load_data(data_path):
X,y = [],[]
with open(data_path,'r',encoding='utf8') as f:
for line in f.readlines():
text,label = line.strip().split(',')
text = ' '.join(list(text.lower()))
X.append(text)
y.append(label)
index = np.arange(len(X))
np.random.shuffle(index)
X = [X[i] for i in index]
y = [y[i] for i in index]
return X,y
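# Input format assumed by load_data (hypothetical sample line -- the real
# intent_recog_data.txt is not shown): each line is "<query text>,<intent label>",
# e.g. "how to treat a headache,ask_treatment"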
def run(data_path,model_save_path):
X,y = load_data(data_path)
label_set = sorted(list(set(y)))
label2id = {label:idx for idx,label in enumerate(label_set)}
id2label = {idx:label for label,idx in label2id.items()}
y = [label2id[i] for i in y]
label_names = sorted(label2id.items(), key = lambda kv:kv[1], reverse=False)
target_names = [i[0] for i in label_names]
labels = [i[1] for i in label_names]
    train_X, text_X, train_y, text_y = train_test_split(X, y, test_size=0.15, random_state=42)  # 'text_*' here is the held-out test split
vec = TfidfVectorizer(ngram_range=(1,3),min_df=0, max_df=0.9,analyzer='char',use_idf=1,smooth_idf=1, sublinear_tf=1)
train_X = vec.fit_transform(train_X)
text_X = vec.transform(text_X)
# svc_clf = svm.LinearSVC(tol=0.00001, C=6.0, multi_class='ovr',class_weight='balanced',random_state=122, max_iter=1500)
# -------------LR--------------
LR = LogisticRegression(C=8, dual=False,n_jobs=4,max_iter=400,multi_class='ovr',random_state=122)
LR.fit(train_X, train_y)
pred = LR.predict(text_X)
print(classification_report(text_y, pred,target_names=target_names))
print(confusion_matrix(text_y, pred,labels=labels))
# -------------gbdt--------------
gbdt = GradientBoostingClassifier(n_estimators=450, learning_rate=0.01,max_depth=8, random_state=24)
gbdt.fit(train_X, train_y)
pred = gbdt.predict(text_X)
print(classification_report(text_y, pred,target_names=target_names))
print(confusion_matrix(text_y, pred,labels=labels))
    # ------------- ensemble: average the two models' probabilities --------------
pred_prob1 = LR.predict_proba(text_X)
pred_prob2 = gbdt.predict_proba(text_X)
pred = np.argmax((pred_prob1+pred_prob2)/2, axis=1)
print(classification_report(text_y, pred,target_names=target_names))
print(confusion_matrix(text_y, pred,labels=labels))
pickle.dump(id2label,open(os.path.join(model_save_path,'id2label.pkl'),'wb'))
pickle.dump(vec,open(os.path.join(model_save_path,'vec.pkl'),'wb'))
pickle.dump(LR,open(os.path.join(model_save_path,'LR.pkl'),'wb'))
pickle.dump(gbdt,open(os.path.join(model_save_path,'gbdt.pkl'),'wb'))
if __name__ == '__main__':
run("./data/intent_recog_data.txt", "./model_file/")
|
[
"350625640@qq.com"
] |
350625640@qq.com
|
1a0fb8aa0fa8b64a69ac5a8040510b607facc284
|
f9128baa55e0b638567a8c1af17149a0278bdd68
|
/video/video.py
|
8a4391538a34f5efc626f0d3003a377eaebd9911
|
[] |
no_license
|
super1peng/spider
|
5d3be575f2d8faf3d7d09d5a2ee2e250c0a7d9d9
|
d437f880927a5ec5172904698252f818190c227b
|
refs/heads/master
| 2021-01-01T19:37:40.269731
| 2019-03-24T05:06:07
| 2019-03-24T05:06:07
| 98,631,916
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
#coding:utf-8
'''
Scrape the latest movie ranking list
url: http://dianying.2345.com/top/
Uses the requests + bs4 stack
Python version: 2.7
OS: macOS
'''
import requests
import bs4
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def get_html(url):
try:
r = requests.get(url, timeout = 30)
r.raise_for_status()
r.encoding = 'gbk'
return r.text
except:
return "Something Wrong !"
def get_content(url):
html = get_html(url)
soup = bs4.BeautifulSoup(html, 'lxml')
    # locate the <ul> list that holds the movie ranking
movies_list = soup.find('ul', class_='picList clearfix')
movies = movies_list.find_all('li')
for top in movies:
        # poster image link
        img_url = top.find('img')['src']
        name = top.find('span', class_='sTit').a.text
        # guard against entries that have no release date
        try:
            time = top.find('span', class_='sIntro').text
        except:
            time = "暂无上映时间"  # "no release date yet"
        # walk the descendants of the 'pActor' node with bs4 to collect each actor's name (handles name splitting)
actors = top.find('p', class_='pActor')
actor = ''
for act in actors.contents:
actor = actor + act.string + ' '
        # film synopsis
        intro = top.find('p', class_='pTxt pIntroShow').text
        print("片名:{}\t{}\n{}\n{} \n \n ".format(name,time,actor,intro) )  # 片名 = "Title"
        # download the poster image:
with open('/Users/lxp/spider/video/img/' + name +'.png','wb+') as f:
f.write(requests.get(img_url).content)
def main():
url = 'http://dianying.2345.com/top/'
get_content(url)
if __name__=="__main__":
main()
|
[
"lxp@lxpdeMacBook-Pro.local"
] |
lxp@lxpdeMacBook-Pro.local
|
cc4d76ca0b6ccea81ce0a0572fb374e11948d47b
|
220a35d689724475130a1e746d126cdf83561a82
|
/__init__.py
|
dc356837fd09d394c539e2b10e533164935217d2
|
[] |
no_license
|
argeweb/plugin-shop-point
|
5525e2fbb7c76c2a0be500956023fae555192634
|
21aa0cc62e2eb5c98000a3181221107a5b937b17
|
refs/heads/master
| 2021-01-24T18:26:18.297526
| 2017-12-31T00:02:39
| 2017-12-31T00:02:39
| 84,439,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,906
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created with YooLiang Technology (侑良科技).
# Author: Qi-Liang Wen (温啓良)
# Web: http://www.yooliang.com/
# Date: 2017/2/24.
import time
from argeweb import ViewDatastore
from argeweb.core.events import on
from .models.user_shop_point_model import UserShopPointModel
from .models.user_shop_point_order_model import UserShopPointOrderModel
from .models.user_shop_point_history_model import UserShopPointHistoryModel
from .models.user_shop_point_product_model import UserShopPointProductModel
@on('after_user_delete')
def after_user_delete(controller, key, *args, **kwargs):
data_list = UserShopPointModel.query(UserShopPointModel.user==key).fetch()
for data in data_list:
data_list_2 = UserShopPointHistoryModel.query(UserShopPointHistoryModel.shop_point_target==data.key).fetch()
for data_2 in data_list_2:
data_2.delete()
data.delete()
@on('buy_user_shop_point')
def buy_user_shop_point(controller, user, point_product_name, payment_type, callback_uri='', *args, **kwargs):
if payment_type.name == 'user_shop_point':
return None
product = UserShopPointProductModel.get_by_name(point_product_name)
if product is None or user is None:
return None
order = UserShopPointOrderModel.gen_order(product=product, payment_type=payment_type)
controller.fire(
event_name='create_payment',
title=u'購買 %s' % order.product_title,
detail=u'支付訂單 %s 使用 %s ' % (order.order_no, payment_type.title),
amount=order.need_pay_amount,
source=order,
source_params={'order_no': order.order_no, 'callback_uri': callback_uri, 'point': product.point},
source_callback_uri='user_shop_point:user_shop_point:after_pay_buy_point',
payment_type=payment_type,
user=user,
status='pending_payment',
)
return controller.payment_record
@on('after_order_checkout')
def after_order_checkout(controller, order_list, user, *args, **kwargs):
session = controller.session
shopping_cash = 0.0
if 'shop_point_use' in session:
shopping_cash = session['shop_point_use']
controller.logging.info(shopping_cash)
total_amount_for_all_order = 0.0
for order in order_list:
total_amount_for_all_order = total_amount_for_all_order + order.total_amount
user_point_item = UserShopPointModel.get_or_create(order.user.get())
ds = shopping_cash
n = 0
from models.config_model import ConfigModel
config = ConfigModel.get_config()
for order in order_list:
n += 1
p = order.total_amount / total_amount_for_all_order
s = shopping_cash * p // 1.0
if s > 0 and ds - s >= 0:
ds = ds - s
if len(order_list) == n and ds >= 0:
s = s + ds
controller.logging.info(s)
order.add_discount(u'購物金折抵', s)
order.total_discount_amount = s
order.currency_total_discount_amount = s
user_point_item.decrease_point(
order.total_discount_amount, u'由訂單 %s 扣除' % order.order_no,
order.order_no, order.total_amount)
user_point_item.put()
order.need_pay_amount = float(order.total_amount) - float(order.total_discount_amount)
if config.give_time == u'after_order_checkout':
user_point_item.increase_point(
order.total_amount * config.available_point / 100.0,
u'由訂單 %s 增加' % order.order_no,
order.order_no, order.total_amount
)
user_point_item.put()
session['shop_point_use'] = 0.0
return
@on('after_order_close')
def after_order_close(controller, *args, **kwargs):
    # after an order is closed
order = None
if 'order' in kwargs:
order = kwargs['order']
if order is None:
return
user_point_item = UserShopPointModel.get_or_create(order.user.get())
from models.config_model import ConfigModel
config = ConfigModel.get_config()
if config.give_time == u'after_order_close':
user_point_item.increase_point(
order.total_amount * config.available_point / 100.0,
u'由訂單(完成) %s 增加' % order.order_no,
order.order_no, order.total_amount
)
user_point_item.put()
ViewDatastore.register('shop_point', UserShopPointModel.get_or_create)
ViewDatastore.register('shop_point_history', UserShopPointHistoryModel.all_enable)
ViewDatastore.register('shop_point_product', UserShopPointProductModel.all_enable)
plugins_helper = {
'title': u'購物金',
'desc': u'擴展網站的購物金功能,用於購買後的贈送點數',
'controllers': {
'user_shop_point': {
'group': u'購物金',
'actions': [
{'action': 'list', 'name': u'購物金管理'},
{'action': 'edit', 'name': u'編輯購物金'},
{'action': 'view', 'name': u'檢視購物金'},
{'action': 'delete', 'name': u'刪除購物金'},
{'action': 'plugins_check', 'name': u'啟用停用模組'},
]
},
'user_shop_point_product': {
'group': u'購物金產品',
'actions': [
{'action': 'add', 'name': u'新增購物金產品'},
{'action': 'list', 'name': u'購物金產品管理'},
{'action': 'edit', 'name': u'編輯購物金產品'},
{'action': 'view', 'name': u'檢視購物金產品'},
{'action': 'delete', 'name': u'刪除購物金產品'},
]
},
'config': {
'group': u'購物金設定',
'actions': [
{'action': 'config', 'name': u'購物金設定'},
]
}
}
}
|
[
"cwen0708@gmail.com"
] |
cwen0708@gmail.com
|
253a1628bdd94f6cf6ccc6232414e9be938ae8f8
|
afd7207ec79198ed8b515c66a4ff951692fc5756
|
/Backend/classbase/apps.py
|
fa9aeea553b29740f8fad006f1087cfc90cc50a0
|
[] |
no_license
|
mdarifulislamroni21/Backend-project
|
469e58ee1c8395a56f45434efc238eccd2adea77
|
4a999c7cb520c811fb0a051015822944f5d8479d
|
refs/heads/master
| 2023-06-24T19:55:57.562157
| 2021-07-23T08:39:37
| 2021-07-23T08:39:37
| 388,731,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django.apps import AppConfig
class ClassbaseConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'classbase'
|
[
"mdarifulislamroni21@gmail.com"
] |
mdarifulislamroni21@gmail.com
|
c64849e80bea851fe7f4e51272613346a3637d16
|
8e924c9309c6a6e18df9c20b7fba7bad3158365c
|
/ad_os_homework1/server.py
|
4c56bfa6d1821cc30eb494e3d8b961379e1c9034
|
[] |
no_license
|
LumingSun/Homeworks
|
e6fb96654caa21120e81f211b5268dbdf10e12b5
|
767a51ad91940ad80dd99debad70240bc3ef303a
|
refs/heads/master
| 2020-08-30T16:03:54.142283
| 2019-10-30T02:38:16
| 2019-10-30T02:38:16
| 218,428,308
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,958
|
py
|
from socket import *


def judge(player_1, player_2):
    # Moves (likely initials of the Chinese terms): 'J' = scissors (jiandao),
    # 'B' = cloth (bu), 'C' = hammer (chuizi).
    # Returns 0 for a draw, 1 if player 1 wins, 2 if player 2 wins.
    if player_1 == player_2:
        return 0
    elif player_1 == 'J' and player_2 == 'B':
        return 1
    elif player_1 == 'C' and player_2 == 'J':
        return 1
    elif player_1 == 'B' and player_2 == 'C':
        return 1
    else:
        return 2


def server():
    HOST = ''
    PORT = 10521
    ADDR = (HOST, PORT)
    # AF_INET/SOCK_STREAM: an IPv4 TCP socket (AF_UNIX would be for
    # same-machine inter-process communication).
    server_socket = socket(AF_INET, SOCK_STREAM)
    # Allow quick rebinds when the outer loop restarts the server.
    server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    server_socket.bind(ADDR)   # bind the socket to the given address
    server_socket.listen(2)    # allow up to two queued connections
    print('Waiting for connecting ......')
    # accept() blocks until a client connects and returns a new socket
    # dedicated to that client.
    tcpclientsocket, addr = server_socket.accept()
    print('Connected by play1', addr)
    info = '1'
    tcpclientsocket.send(info.encode())
    tcpclientsocket_2, addr_2 = server_socket.accept()
    print('Connected by play2', addr_2)
    info = '2'
    # Notify both players that the game is ready.
    tcpclientsocket.send(info.encode())
    tcpclientsocket_2.send(info.encode())
    play_1 = tcpclientsocket.recv(1024).decode()
    play_2 = tcpclientsocket_2.recv(1024).decode()
    print(play_1, play_2)
    judgement = judge(play_1, play_2)
    if judgement == 0:
        info = 'Draw'
        tcpclientsocket.send(info.encode())
        tcpclientsocket_2.send(info.encode())
    elif judgement == 1:
        tcpclientsocket.send("Win!".encode())
        tcpclientsocket_2.send("Lose out".encode())
    else:
        tcpclientsocket.send("Lose out".encode())
        tcpclientsocket_2.send("Win!".encode())
    server_socket.close()


while True:
    server()
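# Hedged client sketch (comments only; run two of these, one per player).
# Moves are single bytes matching judge() above ('J', 'B' or 'C'), and the
# handshake bytes ('1'/'2') may arrive coalesced on a TCP stream.
#   from socket import socket, AF_INET, SOCK_STREAM
#   c = socket(AF_INET, SOCK_STREAM)
#   c.connect(('127.0.0.1', 10521))
#   print(c.recv(1024).decode())   # player number / ready signal
#   c.send('J'.encode())           # play scissors
#   print(c.recv(1024).decode())   # result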
|
[
"sunluming@ruc.edu.cn"
] |
sunluming@ruc.edu.cn
|
582c3f6652991b01a81065cd669ecf1d36a67de6
|
a8cea6f25cadd904a13d83115d1500806f1d9e0d
|
/utils.py
|
418ecff098adfb3b5bf6ed06d8aa683edae5b383
|
[] |
no_license
|
VladRim/learnbot
|
c93fa10e58a902454a374d033288f9cf050a01e0
|
30181a42e50662d793d442acd9348bf6ce969af4
|
refs/heads/master
| 2023-02-16T11:49:44.534805
| 2022-12-08T14:47:54
| 2022-12-08T14:47:54
| 240,859,359
| 0
| 0
| null | 2023-02-07T23:14:50
| 2020-02-16T08:49:17
|
Python
|
UTF-8
|
Python
| false
| false
| 872
|
py
|
from random import choice
from telegram import ReplyKeyboardMarkup, KeyboardButton
from emoji import emojize
import settings
def get_user_emo(user_data):
    # Return the emoji cached for this user, picking and caching a random one
    # from settings.USER_EMOJI on first use.
    if 'emo' in user_data:
        return user_data['emo']
    user_data['emo'] = emojize(choice(settings.USER_EMOJI), use_aliases=True)
    return user_data['emo']


def get_keyboard():
    # Button labels are Russian for "Send a cat", "Change avatar",
    # "Send contacts" and "Send location".
    contact_button = KeyboardButton('Прислать контакты', request_contact=True)
    location_button = KeyboardButton('Прислать координаты', request_location=True)
    my_keyboard = ReplyKeyboardMarkup([['Прислать котика', 'Сменить аватарку'],
                                       [contact_button, location_button]],
                                      resize_keyboard=True)
    return my_keyboard
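# Hedged usage sketch (``bot``, ``chat_id`` and ``user_data`` are placeholders
# from a python-telegram-bot handler, not defined in this module):
#   bot.send_message(chat_id=chat_id,
#                    text='Hello %s' % get_user_emo(user_data),
#                    reply_markup=get_keyboard())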
|
[
"vlad_rym@mail.ru"
] |
vlad_rym@mail.ru
|
2bc469b0e94182a6576d4291fb741e8d0a1aab08
|
19d313ce232eb91c625cb55ba38849440e2c8d94
|
/web/test_demo.py
|
4afe0a1dcf865097b10c3b37872ec30c7833a8d2
|
[] |
no_license
|
xyqiang/pydemo
|
57d9d6ee0a6ad181818c1abfa39fc003e093fab5
|
74776ca4bbf4a7e621b27874797d392708c68273
|
refs/heads/master
| 2022-12-04T23:41:27.932696
| 2020-08-27T15:51:17
| 2020-08-27T15:51:17
| 288,662,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
class TestBaidu():
    def setup(self):
        self.driver = webdriver.Chrome()
        self.driver.maximize_window()     # maximize the browser window
        # self.driver.implicitly_wait(3)  # implicit wait (unused here)

    def teardown(self):
        self.driver.quit()

    def test_demo(self):
        self.driver.get("https://www.baidu.com")
        # A fixed sleep(1) would be a crude alternative to the explicit wait below.
        self.driver.find_element_by_id("kw").click()
        self.driver.find_element_by_id("kw").send_keys("python")
        # Explicit wait: block until the search button is clickable. Note that
        # element_to_be_clickable takes a single locator tuple, and the
        # original wrapped this call in a nested wait() that was never invoked.
        WebDriverWait(self.driver, 10).until(
            expected_conditions.element_to_be_clickable((By.ID, "su")))
        self.driver.find_element_by_id("su").click()
|
[
"xuyanqiangak@gmail.com"
] |
xuyanqiangak@gmail.com
|
a1ccebc348e9f5fcadcd168ada5d25f7b5eac74c
|
f1b1f58e3186d75641c4368ce85fd296eabf34d1
|
/checkall.py
|
b119a4b3546f6cd3a9b569a3723c9b21cc9fb8e2
|
[] |
no_license
|
NCMohit/FingerPrintDB
|
fe6940cfc8b32948e59575cdce2d670302e326f4
|
0893a8038c54c9fb502ebdacc9ff19e7ba8720f1
|
refs/heads/master
| 2020-05-31T23:06:18.717820
| 2019-06-12T15:08:56
| 2019-06-12T15:08:56
| 190,532,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
#script to check all buyers and users
import requests
response = requests.post('http://127.0.0.1:5000/checkall', json="Cowbunga") #Change server IP
if response.ok:
print(response.json())
|
[
"noreply@github.com"
] |
NCMohit.noreply@github.com
|
ef15b221bdbd47dcbd54d248ca66b5df938b3512
|
37784d6cc1236061c5a20acff860ce23880d72a0
|
/RemoveCharacters.py
|
411399e833a9d453ac92dddac258344b04578341
|
[] |
no_license
|
ayyelle/CodeEval-Solutions
|
847f1aaa60d3529c07222a24de9e7caca21041dc
|
9ed426b4168af289629b65983bde8ff7490eb045
|
refs/heads/master
| 2021-05-04T10:59:22.820731
| 2017-02-05T09:17:36
| 2017-02-05T09:17:36
| 47,165,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
# I solved the Remove Characters challenge on @codeeval. http://www.codeeval.com/browse/13
import sys
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
    word, takeaway = (part.strip() for part in test.split(","))
    # Remove every character listed in the second field from the first word.
    for letter in takeaway:
        word = word.replace(letter, "")
    print(word)
test_cases.close()
|
[
"awyleong@interchange.ubc.ca"
] |
awyleong@interchange.ubc.ca
|
ed58c2f7d2768861e44c0d6759b99b24c66bbbcb
|
14d2e54652c884bbb7ce8ad00abe17af8305eb89
|
/basespider/htmldownloader.py
|
a297b5dc4eb168e72d8939f4ec76c30b9eacdd5e
|
[] |
no_license
|
oujx28/Spider_study
|
1a4d49283867cfd1756d21bfbded863e954375c4
|
68495ffcc7d90a1833c11307104d1b368ed3b8b6
|
refs/heads/master
| 2021-09-06T06:34:38.475200
| 2018-02-03T08:21:06
| 2018-02-03T08:21:06
| 118,708,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
# coding:utf-8
import requests
class HtmlDownLoader(object):
def download(self, url):
if url is None:
return None
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0'
headers = {'User-Agent': user_agent}
r = requests.get(url=url, headers=headers)
if r.status_code == 200:
r.encoding = 'utf-8'
return r.text
return None
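# Hedged usage sketch (the URL is illustrative, not from the original module):
if __name__ == '__main__':
    downloader = HtmlDownLoader()
    html = downloader.download('https://example.com')
    print(html is not None)  # True on HTTP 200; download() returns None otherwise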
|
[
"oujx28@163.com"
] |
oujx28@163.com
|
03aad048fd326465dacdba68767b5185be0599e9
|
0a89c26b9ca935f184b4d204c49131878337b27e
|
/popular-movies.py
|
d391031d6a2379497d054c77466424a0d88c1f94
|
[] |
no_license
|
ajcse1/Movie-Recommendation-System
|
4d464cbaf405faa9dfdb80dc6347b1fcbd3927a5
|
05d4658ed4d570ac9945412b8fa6ae1f8cd42190
|
refs/heads/master
| 2021-01-11T15:19:22.616987
| 2017-01-29T06:17:09
| 2017-01-29T06:17:09
| 80,331,667
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
from pyspark import SparkConf, SparkContext

conf = SparkConf().setMaster("local").setAppName("PopularMovies")
sc = SparkContext(conf=conf)

lines = sc.textFile("file:///SparkCourse/ml-100k/u.data")
# Each u.data line is "user movie rating timestamp"; count ratings per movie.
movies = lines.map(lambda x: (int(x.split()[1]), 1))
movieCounts = movies.reduceByKey(lambda x, y: x + y)
# Flip to (count, movieID) so sortByKey() orders by popularity.
# (Python 3 dropped tuple parameter unpacking in lambdas, so index instead.)
flipped = movieCounts.map(lambda pair: (pair[1], pair[0]))
sortedMovies = flipped.sortByKey()
results = sortedMovies.collect()
for result in results:
    print(result)
|
[
"ajcse1@gmail.com"
] |
ajcse1@gmail.com
|
8f2f936f6beaf453053dadd039d75bb75430f225
|
c0792645c156cb9e20a1aa2b28c565150358bc6e
|
/apps/inmueble/serializers.py
|
d9166f0f6f0260446d36b4092b9aaf89ad6ba48b
|
[] |
no_license
|
clioo/Praver
|
b22fd92886e0399845adb4366663cae6a7d7853b
|
523f0d78e0a2039a5bae3e539c93e2c2415a0840
|
refs/heads/master
| 2020-03-11T12:38:54.272392
| 2018-06-28T18:24:21
| 2018-06-28T18:24:21
| 130,003,043
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,115
|
py
|
from rest_framework import serializers
from apps.inmueble.models import Inmueble,ImagenesInmbueble,Localidades
from django.contrib.auth.models import User
from apps.usuarios.models import Profile
class InmuebleSerializer(serializers.ModelSerializer):
imagenes = serializers.SerializerMethodField('obtenerImagenes')
descripciones = serializers.SerializerMethodField('obtenerDescripciones')
datosVendedor = serializers.SerializerMethodField('obtenerDatosVendedor')
class Meta:
model = Inmueble
fields = '__all__'
def obtenerImagenes(self,inmueble):
imagenes = ImagenesInmbueble.objects.filter(inmueble=inmueble)
serializer = ImagenesInmbuebleSerializer(imagenes,many=True,context=self.context)
if serializer.data:
return serializer.data
return None
def obtenerDescripciones(self, inmueble):
localidad = Localidades.objects.filter(id=inmueble.colonia)
serializer = DescripcionLocalidades(localidad,many=True,context=self.context)
return serializer.data
def obtenerDatosVendedor(self,inmueble):
perfil = Profile.objects.filter(user=inmueble.user.id)
serializer = ProfileSerializer(perfil,many=True,context=self.context)
return serializer.data
class ImagenesInmbuebleSerializer(serializers.ModelSerializer):
class Meta:
model = ImagenesInmbueble
fields = '__all__'
class LocalidadesSerializer(serializers.ModelSerializer):
class Meta:
model = Localidades
fields = '__all__'
class DescripcionLocalidades(serializers.Serializer):
d_ciudad = serializers.CharField(max_length=30)
d_asenta = serializers.CharField(max_length=30)
D_mnpio = serializers.CharField(max_length=30)
d_estado = serializers.CharField(max_length=30)
class ProfileSerializer(serializers.ModelSerializer):
email = serializers.SerializerMethodField('obtenerEmail')
class Meta:
model = Profile
fields= '__all__'
    def obtenerEmail(self, profile):
        # SerializerMethodField passes the serialized instance; the first
        # parameter is self (the original mislabeled it "request").
        return User.objects.get(id=profile.user.id).email
|
[
"jesus_acosta1996@hotmail.com"
] |
jesus_acosta1996@hotmail.com
|
4fb36500fa15632ac15b7957bf643f29310c6c60
|
f6dbab4737f462d1126420fe28581c4584f8777b
|
/onsetDetectionFunctions.py
|
eca272ed03261f2141e385e563aa77c42318ba38
|
[] |
no_license
|
bradgowland/BirdEmporium
|
77570bad94f128143f3bf1d20132781fd713cc76
|
830df0e9dc20b25f652f67f7d6df3373397c84ae
|
refs/heads/master
| 2021-05-08T11:31:33.204777
| 2018-01-22T19:57:22
| 2018-01-22T19:57:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,321
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 11 23:36:03 2017
@author: DavidVanDusen
"""
import numpy as np
from basicFunctions import bufferSig,getSpectrogram
from scipy.stats.mstats import gmean
from scipy.signal import medfilt, butter, filtfilt
def plotSpectrogram(array,win_size,hop_size,fs):
#Read in file
lenInTime = len(array)/fs
#Length of spectrogram window
specLen = int(1+(win_size/2))
#Sample overlap
overlap = win_size - hop_size
#Break the signal into frames
buf=bufferSig(array, win_size,overlap)
#Take the fft of every frame
buf = np.fft.fft(buf,win_size,0)
#Cut them down to size
buf = np.abs(buf[0:specLen,:])
buf = 20*np.log10(buf, where=True)
F = np.linspace(0,fs/2,specLen)
T = np.linspace(0,lenInTime,buf.shape[1])
return buf,F,T
def localEnergy(array,win_size,hop_size,fs):
overlap = win_size - hop_size
buf = bufferSig(array,win_size,overlap)
localEnergies = np.zeros(buf.shape[1])
for i in range(0,buf.shape[1]):
localEnergies[i] = np.sum(np.square(buf[:,i]))
for i in range(0,len(localEnergies)):
if localEnergies[i] > 0:
localEnergies[i] = np.log10(localEnergies[i])
# localEnergies = np.log10(localEnergies, where=trueSpot)
localEnergies = np.diff(localEnergies)
localEnergies = np.append(np.mean(localEnergies),localEnergies)
localEnergies = localEnergies-np.min(localEnergies)
localEnergies = localEnergies/np.max(localEnergies)
le_fs = fs/hop_size
return localEnergies, le_fs
def spectralFlux(array,win_size,hop_size,fs):
spec,F,T = getSpectrogram(array,win_size,hop_size,fs)
specFlux = np.diff(spec)
specFlux = 0.5*(specFlux+np.abs(specFlux))
specFluxVals = np.sum(specFlux,axis=0)
specFluxVals = specFluxVals/specFluxVals.shape[0]
sf_fs = fs/hop_size
return specFluxVals,sf_fs
def findPeaks(signal):
    d = np.diff(signal)
    # Prepend a zero so indices line up with the original samples
    # (the original called np.append without keeping its return value).
    d = np.append([0], d)
    # Sign changes of the derivative mark zero crossings; keep only the
    # downward crossings, i.e. upper peaks (same trick as threshPeaks below).
    crossings = np.diff(np.sign(d))
    crossings = -1 * crossings
    crossings = (crossings + np.abs(crossings)) / 2
    return np.where(crossings)[0] + 1
def noveltyLPF(nov, fs, w_c):
w_c = 2*w_c/fs
[b, a] = butter(3,w_c,btype='low')
filtered_le = filtfilt(b,a,nov)
return filtered_le
def createThreshold(array,filtLen):
threshold = medfilt(array, filtLen)
return threshold
def spectralFlatness(spec):
numerator = gmean(spec,axis=0)
denom = np.mean(spec,axis=0)
output = numerator/denom
return output
def threshPeaks(le,thresh):
le_diff = np.diff(le)
#Returns indices of zero cross in diff
le_diff = np.append(0,le_diff)
#signal = np.sign(signal)
diffZC = np.diff(np.sign(le_diff))
#Only find upper peaks
diffZC = -1 * diffZC
diffZC = (diffZC + np.abs(diffZC))/2
output = np.where(diffZC)
output = np.asarray(output) + 1
# output = np.asarray(output)
values = le[output[:]]
threshAtPeaks = thresh[output[:]]
peak_diff = values-threshAtPeaks
threshAtPeaks = peak_diff + np.abs(peak_diff)
properPeaks = np.where(threshAtPeaks)
peaks = values[properPeaks[:]]
times = output[properPeaks[:]]
return peaks, times
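# Hedged end-to-end sketch (synthetic input; relies on getSpectrogram from
# basicFunctions as imported above — the parameter values are illustrative):
if __name__ == '__main__':
    sig = np.random.randn(44100).astype(np.float32)
    flux, sf_fs = spectralFlux(sig, win_size=1024, hop_size=512, fs=44100)
    smoothed = noveltyLPF(flux, sf_fs, w_c=5.0)
    thresh = createThreshold(smoothed, filtLen=11)
    peaks, times = threshPeaks(smoothed, thresh)
    print(len(peaks), 'onset candidates')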
|
[
"vandusen5000@gmail.com"
] |
vandusen5000@gmail.com
|
80216700b3417c217422132d2a882e988c1c97f7
|
db849509c78603e2b4820b6e27f0ffaa648b8e5a
|
/catkin_mapping_ws/build/turtlebot/turtlebot_bringup/catkin_generated/pkg.develspace.context.pc.py
|
8c35d2d54212acd7fbb209eab051e21a6e12bd84
|
[] |
no_license
|
AnkushKansal/GridBasedFASTSLAM
|
684550f9cea1e9c885fa1cceaea7721941ee8d47
|
94c472c4d8145d154dd2a69c6ee6188197037d9c
|
refs/heads/main
| 2023-04-16T17:44:40.219283
| 2021-04-13T18:18:33
| 2021-04-13T18:18:33
| 357,618,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_bringup"
PROJECT_SPACE_DIR = "/home/workspace/catkin_mapping_ws/devel"
PROJECT_VERSION = "2.4.2"
|
[
"ankush.kansal19@gmail.com"
] |
ankush.kansal19@gmail.com
|
0d6d4e41a52a8b721e2cf46ea08845715b6135d5
|
2a394194f77dfd600913a811868df9e644a999fa
|
/meiduo_mall/meiduo_mall/apps/carts/utils.py
|
8d62d6c1b479bb67b00d3f6a76a60e3e5fc616e4
|
[] |
no_license
|
18882028307/meiduo_project
|
01a66053e6c74457377e6f34ff9c17ed251b32a3
|
2814979eb1f5280ad5d37a5f7188506894d7c8c8
|
refs/heads/master
| 2020-08-16T00:35:04.745494
| 2019-11-01T10:27:33
| 2019-11-01T10:27:33
| 215,431,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,698
|
py
|
import base64
import pickle
from django_redis import get_redis_connection
def merge_cart_cookie_to_redis(request, user, response):
    """Merge the cart stored in the cookie into the Redis cart on login."""
    # Read the cart data from the cookie.
    cart_str = request.COOKIES.get('cart')
    if not cart_str:
        return response
    cookie_cart = pickle.loads(base64.b64decode(cart_str.encode()))
    # Cookie cart shape:
    # {
    #     sku_id: {
    #         "count": xxx,        # quantity
    #         "selected": True     # whether the item is checked
    #     },
    #     ...
    # }
    # Read the cart data from Redis.
    redis_conn = get_redis_connection('cart')
    cart_redis = redis_conn.hgetall('cart_%s' % user.id)
    # Convert the hash's byte keys/values to ints: {sku_id: count, ...}
    cart = {}
    for sku_id, count in cart_redis.items():
        cart[int(sku_id)] = int(count)
    selected_sku_id_list = []
    for sku_id, selected_count_dict in cookie_cart.items():
        # If the sku already exists in the Redis cart its quantity is
        # overwritten; otherwise a new entry is added.
        cart[sku_id] = selected_count_dict['count']
        # Track the checked state.
        if selected_count_dict['selected']:
            selected_sku_id_list.append(sku_id)
    # Write the merged cookie cart into Redis.
    pl = redis_conn.pipeline()
    pl.hmset('cart_%s' % user.id, cart)
    pl.sadd('cart_selected_%s' % user.id, *selected_sku_id_list)
    pl.execute()
    # Clear the cart cookie.
    response.delete_cookie('cart')
    return response
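# Minimal round-trip sketch of the cookie format decoded above (shape assumed
# from the comments: {sku_id: {'count': n, 'selected': bool}}):
if __name__ == '__main__':
    demo_cart = {1: {'count': 2, 'selected': True}}
    demo_str = base64.b64encode(pickle.dumps(demo_cart)).decode()
    assert pickle.loads(base64.b64decode(demo_str.encode())) == demo_cart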
|
[
"710363437@qq.com"
] |
710363437@qq.com
|
2d16a9054e22480fa489ec2ed89ab54a86f8c85c
|
60aeff792a1cd3a476a34ca333b8bab2715d787d
|
/simple_audio_recognition/generate_streaming_test_wav.py
|
a2ed95edc054a3922f2883ebb287aafb7396e697
|
[] |
no_license
|
ziippy/tensorflow-speech-recognition-challenge
|
9e377b17657cbce9f3d5326e860d12bf0b326e07
|
3304b44c5ae1c7af5f161af6fbd3ba64699974c3
|
refs/heads/master
| 2023-01-24T08:46:36.466238
| 2020-12-04T03:31:43
| 2020-12-04T03:31:43
| 112,940,339
| 1
| 0
| null | 2017-12-04T14:34:15
| 2017-12-03T15:39:37
|
Python
|
UTF-8
|
Python
| false
| false
| 10,932
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Saves out a .wav file with synthesized conversational data and labels.
The best way to estimate the real-world performance of an audio recognition
model is by running it against a continuous stream of data, the way that it
would be used in an application. Training evaluations are only run against
discrete individual samples, so the results aren't as realistic.
To make it easy to run evaluations against audio streams, this script uses
samples from the testing partition of the data set, mixes them in at random
positions together with background noise, and saves out the result as one long
audio file.
Here's an example of generating a test file:
bazel run tensorflow/examples/speech_commands:generate_streaming_test_wav -- \
--data_dir=/tmp/my_wavs --background_dir=/tmp/my_backgrounds \
--background_volume=0.1 --test_duration_seconds=600 \
--output_audio_file=/tmp/streaming_test.wav \
--output_labels_file=/tmp/streaming_test_labels.txt
Once you've created a streaming audio file, you can then use the
test_streaming_accuracy tool to calculate accuracy metrics for a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import math
import sys
import numpy as np
import tensorflow as tf
import input_data
import models
FLAGS = None
def mix_in_audio_sample(track_data, track_offset, sample_data, sample_offset,
clip_duration, sample_volume, ramp_in, ramp_out):
"""Mixes the sample data into the main track at the specified offset.
Args:
track_data: Numpy array holding main audio data. Modified in-place.
track_offset: Where to mix the sample into the main track.
sample_data: Numpy array of audio data to mix into the main track.
sample_offset: Where to start in the audio sample.
clip_duration: How long the sample segment is.
sample_volume: Loudness to mix the sample in at.
ramp_in: Length in samples of volume increase stage.
ramp_out: Length in samples of volume decrease stage.
"""
ramp_out_index = clip_duration - ramp_out
track_end = min(track_offset + clip_duration, track_data.shape[0])
track_end = min(track_end,
track_offset + (sample_data.shape[0] - sample_offset))
sample_range = track_end - track_offset
for i in range(sample_range):
if i < ramp_in:
envelope_scale = i / ramp_in
elif i > ramp_out_index:
envelope_scale = (clip_duration - i) / ramp_out
else:
envelope_scale = 1
sample_input = sample_data[sample_offset + i]
track_data[track_offset
+ i] += sample_input * envelope_scale * sample_volume
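# Minimal usage sketch of the mixer above (synthetic, illustrative values):
#   track = np.zeros(16, dtype=np.float32)
#   sample = np.ones(8, dtype=np.float32)
#   mix_in_audio_sample(track, track_offset=4, sample_data=sample,
#                       sample_offset=0, clip_duration=8, sample_volume=0.5,
#                       ramp_in=2, ramp_out=2)
# Afterwards track[4:12] holds the sample scaled by 0.5, with 2-sample
# fade-in and fade-out ramps applied at the edges.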
def main(_):
words_list = input_data.prepare_words_list(FLAGS.wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), FLAGS.sample_rate, FLAGS.clip_duration_ms,
FLAGS.window_size_ms, FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
audio_processor = input_data.AudioProcessor(
'', FLAGS.data_dir, FLAGS.silence_percentage, 10,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
FLAGS.testing_percentage, model_settings)
output_audio_sample_count = FLAGS.sample_rate * FLAGS.test_duration_seconds
output_audio = np.zeros((output_audio_sample_count,), dtype=np.float32)
# Set up background audio.
background_crossover_ms = 500
background_segment_duration_ms = (
FLAGS.clip_duration_ms + background_crossover_ms)
background_segment_duration_samples = int(
(background_segment_duration_ms * FLAGS.sample_rate) / 1000)
background_segment_stride_samples = int(
(FLAGS.clip_duration_ms * FLAGS.sample_rate) / 1000)
background_ramp_samples = int(
((background_crossover_ms / 2) * FLAGS.sample_rate) / 1000)
# Mix the background audio into the main track.
how_many_backgrounds = int(
math.ceil(output_audio_sample_count / background_segment_stride_samples))
for i in range(how_many_backgrounds):
output_offset = int(i * background_segment_stride_samples)
background_index = np.random.randint(len(audio_processor.background_data))
background_samples = audio_processor.background_data[background_index]
background_offset = np.random.randint(
0, len(background_samples) - model_settings['desired_samples'])
background_volume = np.random.uniform(0, FLAGS.background_volume)
mix_in_audio_sample(output_audio, output_offset, background_samples,
background_offset, background_segment_duration_samples,
background_volume, background_ramp_samples,
background_ramp_samples)
# Mix the words into the main track, noting their labels and positions.
output_labels = []
word_stride_ms = FLAGS.clip_duration_ms + FLAGS.word_gap_ms
word_stride_samples = int((word_stride_ms * FLAGS.sample_rate) / 1000)
clip_duration_samples = int(
(FLAGS.clip_duration_ms * FLAGS.sample_rate) / 1000)
word_gap_samples = int((FLAGS.word_gap_ms * FLAGS.sample_rate) / 1000)
how_many_words = int(
math.floor(output_audio_sample_count / word_stride_samples))
all_test_data, all_test_labels = audio_processor.get_unprocessed_data(
-1, model_settings, 'testing')
for i in range(how_many_words):
output_offset = (
int(i * word_stride_samples) + np.random.randint(word_gap_samples))
output_offset_ms = (output_offset * 1000) / FLAGS.sample_rate
is_unknown = np.random.randint(100) < FLAGS.unknown_percentage
if is_unknown:
wanted_label = input_data.UNKNOWN_WORD_LABEL
else:
wanted_label = words_list[2 + np.random.randint(len(words_list) - 2)]
test_data_start = np.random.randint(len(all_test_data))
found_sample_data = None
index_lookup = np.arange(len(all_test_data), dtype=np.int32)
np.random.shuffle(index_lookup)
for test_data_offset in range(len(all_test_data)):
test_data_index = index_lookup[(
test_data_start + test_data_offset) % len(all_test_data)]
current_label = all_test_labels[test_data_index]
if current_label == wanted_label:
found_sample_data = all_test_data[test_data_index]
break
mix_in_audio_sample(output_audio, output_offset, found_sample_data, 0,
clip_duration_samples, 1.0, 500, 500)
output_labels.append({'label': wanted_label, 'time': output_offset_ms})
input_data.save_wav_file(FLAGS.output_audio_file, output_audio,
FLAGS.sample_rate)
tf.logging.info('Saved streaming test wav to %s', FLAGS.output_audio_file)
with open(FLAGS.output_labels_file, 'w') as f:
for output_label in output_labels:
f.write('%s, %f\n' % (output_label['label'], output_label['time']))
tf.logging.info('Saved streaming test labels to %s', FLAGS.output_labels_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_url',
type=str,
# pylint: disable=line-too-long
default='http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz',
# pylint: enable=line-too-long
help='Location of speech training data')
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/speech_dataset',
help="""\
Where to download the speech training data to.
""")
parser.add_argument(
'--background_dir',
type=str,
default='',
help="""\
Path to a directory of .wav files to mix in as background noise during training.
""")
parser.add_argument(
'--background_volume',
type=float,
default=0.1,
help="""\
How loud the background noise should be, between 0 and 1.
""")
parser.add_argument(
'--background_frequency',
type=float,
default=0.8,
help="""\
How many of the training samples have background noise mixed in.
""")
parser.add_argument(
'--silence_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be silence.
""")
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a test set.')
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a validation set.')
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs.',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs.',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How long the stride is between spectrogram timeslices',)
parser.add_argument(
'--dct_coefficient_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',)
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--output_audio_file',
type=str,
default='/tmp/speech_commands_train/streaming_test.wav',
help='File to save the generated test audio to.')
parser.add_argument(
'--output_labels_file',
type=str,
default='/tmp/speech_commands_train/streaming_test_labels.txt',
help='File to save the generated test labels to.')
parser.add_argument(
'--test_duration_seconds',
type=int,
default=600,
help='How long the generated test audio file should be.',)
parser.add_argument(
'--word_gap_ms',
type=int,
default=2000,
help='How long the average gap should be between words.',)
parser.add_argument(
'--unknown_percentage',
type=int,
default=30,
help='What percentage of words should be unknown.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
[
"ziippy@naver.com"
] |
ziippy@naver.com
|
123cd95d903ac9a0e5514b4f4aea512078990c6b
|
b697e6ba57ae5b70c81762ee9c7b76158d998d4b
|
/functional_tests/test_my_lists.py
|
658a09345200c9e1274a913ce2ff5c3946ff0404
|
[] |
no_license
|
jhagege/book_example
|
419b64acbf7d1ef57bbff7a05290fbadedbb39f5
|
4eb88bf822715b031538a72cc08ea6bb54f7c695
|
refs/heads/master
| 2023-05-31T11:26:18.542916
| 2019-08-12T21:15:32
| 2019-08-12T21:15:32
| 198,066,799
| 0
| 0
| null | 2021-06-10T21:44:46
| 2019-07-21T14:30:11
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,816
|
py
|
from django.conf import settings
from django.contrib.auth import BACKEND_SESSION_KEY, SESSION_KEY, get_user_model
from django.contrib.sessions.backends.db import SessionStore
from functional_tests.management.commands.create_session import create_pre_authenticated_session
from functional_tests.server_tools import create_session_on_server
from .base import FunctionalTest
User = get_user_model()
class MyListsTest(FunctionalTest):
def create_pre_authenticated_session(self, email):
if self.staging_server:
session_key = create_session_on_server(self.staging_server, email)
else:
session_key = create_pre_authenticated_session(email)
## to set a cookie we need to first visit the domain.
## 404 pages load the quickest!
self.browser.get(self.live_server_url + "/404_no_such_url/")
self.browser.add_cookie(dict(
name=settings.SESSION_COOKIE_NAME,
value=session_key,
path='/',
))
def test_logged_in_users_lists_are_saved_as_my_lists(self):
# Edith is a logged-in user
self.create_pre_authenticated_session('edith@example.com')
# She goes to the home page and starts a list
self.browser.get(self.live_server_url)
self.add_list_item('Reticulate splines')
self.add_list_item('Immanentize eschaton')
first_list_url = self.browser.current_url
# She notices a "My lists" link, for the first time.
self.browser.find_element_by_link_text('My lists').click()
# She sees that her list is in there, named according to its
# first list item
self.wait_for(
lambda: self.browser.find_element_by_link_text('Reticulate splines')
)
self.browser.find_element_by_link_text('Reticulate splines').click()
self.wait_for(
lambda: self.assertEqual(self.browser.current_url, first_list_url)
)
# She decides to start another list, just to see
self.browser.get(self.live_server_url)
self.add_list_item('Click cows')
second_list_url = self.browser.current_url
# Under "my lists", her new list appears
self.browser.find_element_by_link_text('My lists').click()
self.wait_for(
lambda: self.browser.find_element_by_link_text('Click cows')
)
self.browser.find_element_by_link_text('Click cows').click()
self.wait_for(
lambda: self.assertEqual(self.browser.current_url, second_list_url)
)
# She logs out. The "My lists" option disappears
self.browser.find_element_by_link_text('Log out').click()
self.wait_for(lambda: self.assertEqual(
self.browser.find_elements_by_link_text('My lists'),
[]
))
|
[
"joachim.hagege@alibaba-inc.com"
] |
joachim.hagege@alibaba-inc.com
|
a57730a7979dc3c57c74c59a29356e7e916dbdc5
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_07_01/models/ipsec_policy_py3.py
|
ada123aa21f28ec5aeaf0746d0048305d73420cc
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,388
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IpsecPolicy(Model):
"""An IPSec Policy configuration for a virtual network gateway connection.
All required parameters must be populated in order to send to Azure.
:param sa_life_time_seconds: Required. The IPSec Security Association
(also called Quick Mode or Phase 2 SA) lifetime in seconds for a site to
site VPN tunnel.
:type sa_life_time_seconds: int
:param sa_data_size_kilobytes: Required. The IPSec Security Association
(also called Quick Mode or Phase 2 SA) payload size in KB for a site to
site VPN tunnel.
:type sa_data_size_kilobytes: int
:param ipsec_encryption: Required. The IPSec encryption algorithm (IKE
phase 1). Possible values include: 'None', 'DES', 'DES3', 'AES128',
'AES192', 'AES256', 'GCMAES128', 'GCMAES192', 'GCMAES256'
:type ipsec_encryption: str or
~azure.mgmt.network.v2018_07_01.models.IpsecEncryption
:param ipsec_integrity: Required. The IPSec integrity algorithm (IKE phase
1). Possible values include: 'MD5', 'SHA1', 'SHA256', 'GCMAES128',
'GCMAES192', 'GCMAES256'
:type ipsec_integrity: str or
~azure.mgmt.network.v2018_07_01.models.IpsecIntegrity
:param ike_encryption: Required. The IKE encryption algorithm (IKE phase
2). Possible values include: 'DES', 'DES3', 'AES128', 'AES192', 'AES256',
'GCMAES256', 'GCMAES128'
:type ike_encryption: str or
~azure.mgmt.network.v2018_07_01.models.IkeEncryption
:param ike_integrity: Required. The IKE integrity algorithm (IKE phase 2).
Possible values include: 'MD5', 'SHA1', 'SHA256', 'SHA384', 'GCMAES256',
'GCMAES128'
:type ike_integrity: str or
~azure.mgmt.network.v2018_07_01.models.IkeIntegrity
:param dh_group: Required. The DH Groups used in IKE Phase 1 for initial
SA. Possible values include: 'None', 'DHGroup1', 'DHGroup2', 'DHGroup14',
'DHGroup2048', 'ECP256', 'ECP384', 'DHGroup24'
:type dh_group: str or ~azure.mgmt.network.v2018_07_01.models.DhGroup
:param pfs_group: Required. The Pfs Groups used in IKE Phase 2 for new
child SA. Possible values include: 'None', 'PFS1', 'PFS2', 'PFS2048',
'ECP256', 'ECP384', 'PFS24', 'PFS14', 'PFSMM'
:type pfs_group: str or ~azure.mgmt.network.v2018_07_01.models.PfsGroup
"""
_validation = {
'sa_life_time_seconds': {'required': True},
'sa_data_size_kilobytes': {'required': True},
'ipsec_encryption': {'required': True},
'ipsec_integrity': {'required': True},
'ike_encryption': {'required': True},
'ike_integrity': {'required': True},
'dh_group': {'required': True},
'pfs_group': {'required': True},
}
_attribute_map = {
'sa_life_time_seconds': {'key': 'saLifeTimeSeconds', 'type': 'int'},
'sa_data_size_kilobytes': {'key': 'saDataSizeKilobytes', 'type': 'int'},
'ipsec_encryption': {'key': 'ipsecEncryption', 'type': 'str'},
'ipsec_integrity': {'key': 'ipsecIntegrity', 'type': 'str'},
'ike_encryption': {'key': 'ikeEncryption', 'type': 'str'},
'ike_integrity': {'key': 'ikeIntegrity', 'type': 'str'},
'dh_group': {'key': 'dhGroup', 'type': 'str'},
'pfs_group': {'key': 'pfsGroup', 'type': 'str'},
}
def __init__(self, *, sa_life_time_seconds: int, sa_data_size_kilobytes: int, ipsec_encryption, ipsec_integrity, ike_encryption, ike_integrity, dh_group, pfs_group, **kwargs) -> None:
super(IpsecPolicy, self).__init__(**kwargs)
self.sa_life_time_seconds = sa_life_time_seconds
self.sa_data_size_kilobytes = sa_data_size_kilobytes
self.ipsec_encryption = ipsec_encryption
self.ipsec_integrity = ipsec_integrity
self.ike_encryption = ike_encryption
self.ike_integrity = ike_integrity
self.dh_group = dh_group
self.pfs_group = pfs_group
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
701c49eedae3939ba4f75ef2a8241253462a7a1d
|
f816ab1b508ebcc6f858e3fd15377bf7bdb97e5e
|
/kolstatapp/views/ajax.py
|
d9b7442ad746e492f379272640e8b80867cc5672
|
[] |
no_license
|
xneby/kolstat
|
4219da46cc1a5784b97ed50fb69572d292686a8e
|
877ffcf5a0c25f0e7c87ad16617a66c7cec2a19b
|
refs/heads/master
| 2020-05-02T12:35:13.096115
| 2013-05-27T18:29:32
| 2013-05-27T18:29:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
import json
from kolstatapp.models import Station, UserFavouriteStation as UFS
from kolstatapp.decorators import ajax
from kolstatapp.exceptions import BadRequest
@ajax
def ajax_station(request):
    for field in ('query', 'type'):
        if field not in request.GET:
            raise BadRequest()
    name = request.GET['query']
    type = request.GET['type']
    if len(name) < 3 and not (name.isdigit() and type == 'kurs90'):
        return []
    if type == 'name':
        return [st.name for st in Station.objects.filter(name__startswith=name)]
    if type == 'kurs90':
        try:
            return [Station.objects.get(kurs90ID=int(name)).name]
        except Station.DoesNotExist:
            return []
    return []
@ajax
def ajax_favourites(request):
if not request.user.is_authenticated():
return dict()
return { pic.name: st.get_pretty_name() for pic, st in request.user.get_profile().get_favourites() }
|
[
"karol.farbis@gmail.com"
] |
karol.farbis@gmail.com
|
205855857c122e67c0d6e0db7df6b9f1e94ec102
|
fdbfb2d7302fee1388a8facc2938d1e4259f1615
|
/tensorLayerTest/tensorlayer/rl/test1.py
|
98ec6a789b17cfc895fd55d210d6b6686f6d63ef
|
[] |
no_license
|
kflyddn/tusharetest
|
e261c498f06897e6914c20681941b60f05cc0107
|
a05919a5b41288f97a0b73bb5cec97400daa959b
|
refs/heads/master
| 2020-05-07T14:53:05.957456
| 2018-06-26T15:58:20
| 2018-06-26T15:58:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,567
|
py
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""Example of Synced sequence input and output.
This is a reimpmentation of the TensorFlow official PTB example in :
tensorflow/models/rnn/ptb
The batch_size can be seem as how many concurrent computations.\n
As the following example shows, the first batch learn the sequence information by using 0 to 9.\n
The second batch learn the sequence information by using 10 to 19.\n
So it ignores the information from 9 to 10 !\n
If only if we set the batch_size = 1, it will consider all information from 0 to 20.\n
The meaning of batch_size here is not the same with the MNIST example. In MNIST example,
batch_size reflects how many examples we consider in each iteration, while in
PTB example, batch_size is how many concurrent processes (segments)
for speed up computation.
Some Information will be ignored if batch_size > 1, however, if your dataset
is "long" enough (a text corpus usually has billions words), the ignored
information would not effect the final result.
In PTB tutorial, we setted batch_size = 20, so we cut the dataset into 20 segments.
At the begining of each epoch, we initialize (reset) the 20 RNN states for 20
segments, then go through 20 segments separately.
The training data will be generated as follow:\n
>>> train_data = [i for i in range(20)]
>>> for batch in tl.iterate.ptb_iterator(train_data, batch_size=2, num_steps=3):
>>> x, y = batch
>>> print(x, '\n',y)
... [[ 0 1 2] <---x 1st subset/ iteration
... [10 11 12]]
... [[ 1 2 3] <---y
... [11 12 13]]
...
... [[ 3 4 5] <--- 1st batch input 2nd subset/ iteration
... [13 14 15]] <--- 2nd batch input
... [[ 4 5 6] <--- 1st batch target
... [14 15 16]] <--- 2nd batch target
...
... [[ 6 7 8] 3rd subset/ iteration
... [16 17 18]]
... [[ 7 8 9]
... [17 18 19]]
Hao Dong: This example can also be considered as pre-training of the word
embedding matrix.
About RNN
----------
$ Karpathy Blog : http://karpathy.github.io/2015/05/21/rnn-effectiveness/
More TensorFlow official RNN examples can be found here
---------------------------------------------------------
$ RNN for PTB : https://www.tensorflow.org/versions/master/tutorials/recurrent/index.html#recurrent-neural-networks
$ Seq2seq : https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html#sequence-to-sequence-models
$ translation : tensorflow/models/rnn/translate
tensorflow (0.9.0)
Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et. al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
A) use the zero_state function on the cell object
B) for an rnn, all time steps share weights. We use one matrix to keep all
gate weights. Split by column into 4 parts to get the 4 gate weight matrices.
"""
import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
flags = tf.flags
flags.DEFINE_string("model", "small", "A type of model. Possible options are: small, medium, large.")
FLAGS = flags.FLAGS
def main(_):
"""
The core of the model consists of an LSTM cell that processes one word at
a time and computes probabilities of the possible continuations of the
sentence. The memory state of the network is initialized with a vector
of zeros and gets updated after reading each word. Also, for computational
reasons, we will process data in mini-batches of size batch_size.
"""
if FLAGS.model == "small":
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
elif FLAGS.model == "medium":
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
# num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000
elif FLAGS.model == "large":
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
# num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000
else:
        raise ValueError("Invalid model: %s" % FLAGS.model)
# Load PTB dataset
train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()
# train_data = train_data[0:int(100000/5)] # for fast testing
print('len(train_data) {}'.format(len(train_data))) # 929589 a list of int
print('len(valid_data) {}'.format(len(valid_data))) # 73760 a list of int
print('len(test_data) {}'.format(len(test_data))) # 82430 a list of int
print('vocab_size {}'.format(vocab_size)) # 10000
sess = tf.InteractiveSession()
# One int represents one word, the meaning of batch_size here is not the
# same with MNIST example, it is the number of concurrent processes for
# computational reasons.
# Training and Validation
input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
targets = tf.placeholder(tf.int32, [batch_size, num_steps])
# Testing (Evaluation)
input_data_test = tf.placeholder(tf.int32, [1, 1])
targets_test = tf.placeholder(tf.int32, [1, 1])
def inference(x, is_training, num_steps, reuse=None):
"""If reuse is True, the inferences use the existing parameters,
then different inferences share the same parameters.
Note :
- For DynamicRNNLayer, you can set dropout and the number of RNN layer internally.
"""
print("\nnum_steps : %d, is_training : %s, reuse : %s" % (num_steps, is_training, reuse))
initializer = tf.random_uniform_initializer(-init_scale, init_scale)
with tf.variable_scope("model", reuse=reuse):
network = tl.layers.EmbeddingInputlayer(inputs=x, vocabulary_size=vocab_size, embedding_size=hidden_size, E_init=initializer, name='embedding')
network = tl.layers.DropoutLayer(network, keep=keep_prob, is_fix=True, is_train=is_training, name='drop1')
network = tl.layers.RNNLayer(
network,
cell_fn=tf.contrib.rnn.BasicLSTMCell, #tf.nn.rnn_cell.BasicLSTMCell,
cell_init_args={
'forget_bias': 0.0,
'state_is_tuple': True
},
n_hidden=hidden_size,
initializer=initializer,
n_steps=num_steps,
return_last=False,
name='basic_lstm1')
lstm1 = network
network = tl.layers.DropoutLayer(network, keep=keep_prob, is_fix=True, is_train=is_training, name='drop2')
network = tl.layers.RNNLayer(
network,
cell_fn=tf.contrib.rnn.BasicLSTMCell, #tf.nn.rnn_cell.BasicLSTMCell,
cell_init_args={
'forget_bias': 0.0,
'state_is_tuple': True
},
n_hidden=hidden_size,
initializer=initializer,
n_steps=num_steps,
return_last=False,
return_seq_2d=True,
name='basic_lstm2')
lstm2 = network
# Alternatively, if return_seq_2d=False, in the above RNN layer,
# you can reshape the outputs as follow:
# network = tl.layers.ReshapeLayer(network,
# shape=[-1, int(network.outputs._shape[-1])], name='reshape')
network = tl.layers.DropoutLayer(network, keep=keep_prob, is_fix=True, is_train=is_training, name='drop3')
network = tl.layers.DenseLayer(network, n_units=vocab_size, W_init=initializer, b_init=initializer, act=tf.identity, name='output')
return network, lstm1, lstm2
# Inference for Training
network, lstm1, lstm2 = inference(input_data, is_training=True, num_steps=num_steps, reuse=None)
# Inference for Validating
network_val, lstm1_val, lstm2_val = inference(input_data, is_training=False, num_steps=num_steps, reuse=True)
# Inference for Testing (Evaluation)
network_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=1, reuse=True)
# sess.run(tf.initialize_all_variables())
tl.layers.initialize_global_variables(sess)
def loss_fn(outputs, targets, batch_size):
# See tl.cost.cross_entropy_seq()
# Returns the cost function of Cross-entropy of two sequences, implement
# softmax internally.
# outputs : 2D tensor [batch_size*num_steps, n_units of output layer]
# targets : 2D tensor [batch_size, num_steps], need to be reshaped.
# batch_size : RNN batch_size, number of concurrent processes.
# n_examples = batch_size * num_steps
# so
# cost is the averaged cost of each mini-batch (concurrent process).
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example( # loss = tf.nn.seq2seq.sequence_loss_by_example( # TF0.12
[outputs], [tf.reshape(targets, [-1])], [tf.ones_like(tf.reshape(targets, [-1]), dtype=tf.float32)])
# [tf.ones([batch_size * num_steps])])
cost = tf.reduce_sum(loss) / batch_size
return cost
# Cost for Training
cost = loss_fn(network.outputs, targets, batch_size)
# Cost for Validating
cost_val = loss_fn(network_val.outputs, targets, batch_size)
# Cost for Testing (Evaluation)
cost_test = loss_fn(network_test.outputs, targets_test, 1)
# Truncated Backpropagation for training
with tf.variable_scope('learning_rate'):
lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(lr)
train_op = optimizer.apply_gradients(zip(grads, tvars))
# sess.run(tf.initialize_all_variables())
tl.layers.initialize_global_variables(sess)
network.print_params()
network.print_layers()
tl.layers.print_all_variables()
print("\nStart learning a language model by using PTB dataset")
for i in range(max_max_epoch):
        # Decrease the initial learning rate after ``max_epoch`` epochs,
        # by multiplying it by ``lr_decay``.
new_lr_decay = lr_decay**max(i - max_epoch, 0.0)
sess.run(tf.assign(lr, learning_rate * new_lr_decay))
# Training
print("Epoch: %d/%d Learning rate: %.3f" % (i + 1, max_max_epoch, sess.run(lr)))
epoch_size = ((len(train_data) // batch_size) - 1) // num_steps
start_time = time.time()
costs = 0.0
iters = 0
        # reset all states at the beginning of every epoch
state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)
state2 = tl.layers.initialize_rnn_state(lstm2.initial_state)
for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, num_steps)):
feed_dict = {
input_data: x,
targets: y,
lstm1.initial_state.c: state1[0],
lstm1.initial_state.h: state1[1],
lstm2.initial_state.c: state2[0],
lstm2.initial_state.h: state2[1],
}
# For training, enable dropout
feed_dict.update(network.all_drop)
_cost, state1_c, state1_h, state2_c, state2_h, _ = \
sess.run([cost,
lstm1.final_state.c,
lstm1.final_state.h,
lstm2.final_state.c,
lstm2.final_state.h,
train_op],
feed_dict=feed_dict
)
state1 = (state1_c, state1_h)
state2 = (state2_c, state2_h)
costs += _cost
iters += num_steps
if step % (epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" % (step * 1.0 / epoch_size, np.exp(costs / iters),
iters * batch_size / (time.time() - start_time)))
train_perplexity = np.exp(costs / iters)
print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity))
# Validation
start_time = time.time()
costs = 0.0
iters = 0
        # reset all states at the beginning of every epoch
state1 = tl.layers.initialize_rnn_state(lstm1_val.initial_state)
state2 = tl.layers.initialize_rnn_state(lstm2_val.initial_state)
for step, (x, y) in enumerate(tl.iterate.ptb_iterator(valid_data, batch_size, num_steps)):
feed_dict = {
input_data: x,
targets: y,
lstm1_val.initial_state.c: state1[0],
lstm1_val.initial_state.h: state1[1],
lstm2_val.initial_state.c: state2[0],
lstm2_val.initial_state.h: state2[1],
}
_cost, state1_c, state1_h, state2_c, state2_h, _ = \
sess.run([cost_val,
lstm1_val.final_state.c,
lstm1_val.final_state.h,
lstm2_val.final_state.c,
lstm2_val.final_state.h,
tf.no_op()],
feed_dict=feed_dict
)
state1 = (state1_c, state1_h)
state2 = (state2_c, state2_h)
costs += _cost
iters += num_steps
valid_perplexity = np.exp(costs / iters)
print("Epoch: %d/%d Valid Perplexity: %.3f" % (i + 1, max_max_epoch, valid_perplexity))
print("Evaluation")
# Testing
# go through the test set step by step, it will take a while.
start_time = time.time()
costs = 0.0
iters = 0
    # reset all states at the beginning
state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)
state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)
for step, (x, y) in enumerate(tl.iterate.ptb_iterator(test_data, batch_size=1, num_steps=1)):
feed_dict = {
input_data_test: x,
targets_test: y,
lstm1_test.initial_state.c: state1[0],
lstm1_test.initial_state.h: state1[1],
lstm2_test.initial_state.c: state2[0],
lstm2_test.initial_state.h: state2[1],
}
_cost, state1_c, state1_h, state2_c, state2_h = \
sess.run([cost_test,
lstm1_test.final_state.c,
lstm1_test.final_state.h,
lstm2_test.final_state.c,
lstm2_test.final_state.h,
],
feed_dict=feed_dict
)
state1 = (state1_c, state1_h)
state2 = (state2_c, state2_h)
costs += _cost
iters += 1
test_perplexity = np.exp(costs / iters)
print("Test Perplexity: %.3f took %.2fs" % (test_perplexity, time.time() - start_time))
print(
"More example: Text generation using Trump's speech data: https://github.com/zsdonghao/tensorlayer/blob/master/example/tutorial_generate_text.py -- def main_lstm_generate_text():"
)
if __name__ == "__main__":
tf.app.run()
## log of SmallConfig
# Start learning a language model by using PTB dataset
# Epoch: 1 Learning rate: 1.000
# 0.004 perplexity: 5512.735 speed: 4555 wps
# 0.104 perplexity: 841.289 speed: 8823 wps
# 0.204 perplexity: 626.273 speed: 9292 wps
# 0.304 perplexity: 505.628 speed: 9472 wps
# 0.404 perplexity: 435.580 speed: 9551 wps
# 0.504 perplexity: 390.108 speed: 9555 wps
# 0.604 perplexity: 351.379 speed: 9546 wps
# 0.703 perplexity: 324.846 speed: 9579 wps
# 0.803 perplexity: 303.824 speed: 9574 wps
# 0.903 perplexity: 284.468 speed: 9551 wps
# Epoch: 1 Train Perplexity: 269.981
# Epoch: 1 Valid Perplexity: 178.561
# Epoch: 2 Learning rate: 1.000
# 0.004 perplexity: 211.632 speed: 7697 wps
# 0.104 perplexity: 151.509 speed: 9488 wps
# 0.204 perplexity: 158.947 speed: 9674 wps
# 0.304 perplexity: 153.963 speed: 9806 wps
# 0.404 perplexity: 150.938 speed: 9817 wps
# 0.504 perplexity: 148.413 speed: 9824 wps
# 0.604 perplexity: 143.763 speed: 9765 wps
# 0.703 perplexity: 141.616 speed: 9731 wps
# 0.803 perplexity: 139.618 speed: 9781 wps
# 0.903 perplexity: 135.880 speed: 9735 wps
# Epoch: 2 Train Perplexity: 133.771
# Epoch: 2 Valid Perplexity: 142.595
# Epoch: 3 Learning rate: 1.000
# 0.004 perplexity: 146.902 speed: 8345 wps
# 0.104 perplexity: 105.647 speed: 9572 wps
# 0.204 perplexity: 114.261 speed: 9585 wps
# 0.304 perplexity: 111.237 speed: 9586 wps
# 0.404 perplexity: 110.181 speed: 9605 wps
# 0.504 perplexity: 109.383 speed: 9601 wps
# 0.604 perplexity: 106.722 speed: 9635 wps
# 0.703 perplexity: 106.075 speed: 9597 wps
# 0.803 perplexity: 105.481 speed: 9624 wps
# 0.903 perplexity: 103.262 speed: 9618 wps
# Epoch: 3 Train Perplexity: 102.272
# Epoch: 3 Valid Perplexity: 131.884
# Epoch: 4 Learning rate: 1.000
# 0.004 perplexity: 118.127 speed: 7867 wps
# 0.104 perplexity: 85.530 speed: 9330 wps
# 0.204 perplexity: 93.559 speed: 9399 wps
# 0.304 perplexity: 91.141 speed: 9386 wps
# 0.404 perplexity: 90.668 speed: 9462 wps
# 0.504 perplexity: 90.366 speed: 9516 wps
# 0.604 perplexity: 88.479 speed: 9477 wps
# 0.703 perplexity: 88.275 speed: 9533 wps
# 0.803 perplexity: 88.091 speed: 9560 wps
# 0.903 perplexity: 86.430 speed: 9516 wps
# Epoch: 4 Train Perplexity: 85.839
# Epoch: 4 Valid Perplexity: 128.408
# Epoch: 5 Learning rate: 1.000
# 0.004 perplexity: 100.077 speed: 7682 wps
# 0.104 perplexity: 73.856 speed: 9197 wps
# 0.204 perplexity: 81.242 speed: 9266 wps
# 0.304 perplexity: 79.315 speed: 9375 wps
# 0.404 perplexity: 79.009 speed: 9439 wps
# 0.504 perplexity: 78.874 speed: 9377 wps
# 0.604 perplexity: 77.430 speed: 9436 wps
# 0.703 perplexity: 77.415 speed: 9417 wps
# 0.803 perplexity: 77.424 speed: 9407 wps
# 0.903 perplexity: 76.083 speed: 9407 wps
# Epoch: 5 Train Perplexity: 75.719
# Epoch: 5 Valid Perplexity: 127.057
# Epoch: 6 Learning rate: 0.500
# 0.004 perplexity: 87.561 speed: 7130 wps
# 0.104 perplexity: 64.202 speed: 9753 wps
# 0.204 perplexity: 69.518 speed: 9537 wps
# 0.304 perplexity: 66.868 speed: 9647 wps
# 0.404 perplexity: 65.766 speed: 9538 wps
# 0.504 perplexity: 64.967 speed: 9537 wps
# 0.604 perplexity: 63.090 speed: 9565 wps
# 0.703 perplexity: 62.415 speed: 9544 wps
# 0.803 perplexity: 61.751 speed: 9504 wps
# 0.903 perplexity: 60.027 speed: 9482 wps
# Epoch: 6 Train Perplexity: 59.127
# Epoch: 6 Valid Perplexity: 120.339
# Epoch: 7 Learning rate: 0.250
# 0.004 perplexity: 72.069 speed: 7683 wps
# 0.104 perplexity: 53.331 speed: 9526 wps
# 0.204 perplexity: 57.897 speed: 9572 wps
# 0.304 perplexity: 55.557 speed: 9491 wps
# 0.404 perplexity: 54.597 speed: 9483 wps
# 0.504 perplexity: 53.817 speed: 9471 wps
# 0.604 perplexity: 52.147 speed: 9511 wps
# 0.703 perplexity: 51.473 speed: 9497 wps
# 0.803 perplexity: 50.788 speed: 9521 wps
# 0.903 perplexity: 49.203 speed: 9515 wps
# Epoch: 7 Train Perplexity: 48.303
# Epoch: 7 Valid Perplexity: 120.782
# Epoch: 8 Learning rate: 0.125
# 0.004 perplexity: 63.503 speed: 8425 wps
# 0.104 perplexity: 47.324 speed: 9433 wps
# 0.204 perplexity: 51.525 speed: 9653 wps
# 0.304 perplexity: 49.405 speed: 9520 wps
# 0.404 perplexity: 48.532 speed: 9487 wps
# 0.504 perplexity: 47.800 speed: 9610 wps
# 0.604 perplexity: 46.282 speed: 9554 wps
# 0.703 perplexity: 45.637 speed: 9536 wps
# 0.803 perplexity: 44.972 speed: 9493 wps
# 0.903 perplexity: 43.506 speed: 9496 wps
# Epoch: 8 Train Perplexity: 42.653
# Epoch: 8 Valid Perplexity: 122.119
# Epoch: 9 Learning rate: 0.062
# 0.004 perplexity: 59.375 speed: 7158 wps
# 0.104 perplexity: 44.223 speed: 9275 wps
# 0.204 perplexity: 48.269 speed: 9459 wps
# 0.304 perplexity: 46.273 speed: 9564 wps
# 0.404 perplexity: 45.450 speed: 9604 wps
# 0.504 perplexity: 44.749 speed: 9604 wps
# 0.604 perplexity: 43.308 speed: 9619 wps
# 0.703 perplexity: 42.685 speed: 9647 wps
# 0.803 perplexity: 42.022 speed: 9673 wps
# 0.903 perplexity: 40.616 speed: 9678 wps
# Epoch: 9 Train Perplexity: 39.792
# Epoch: 9 Valid Perplexity: 123.170
# Epoch: 10 Learning rate: 0.031
# 0.004 perplexity: 57.333 speed: 7183 wps
# 0.104 perplexity: 42.631 speed: 9592 wps
# 0.204 perplexity: 46.580 speed: 9518 wps
# 0.304 perplexity: 44.625 speed: 9569 wps
# 0.404 perplexity: 43.832 speed: 9576 wps
# 0.504 perplexity: 43.153 speed: 9571 wps
# 0.604 perplexity: 41.761 speed: 9557 wps
# 0.703 perplexity: 41.159 speed: 9524 wps
# 0.803 perplexity: 40.494 speed: 9527 wps
# 0.903 perplexity: 39.111 speed: 9558 wps
# Epoch: 10 Train Perplexity: 38.298
# Epoch: 10 Valid Perplexity: 123.658
# Epoch: 11 Learning rate: 0.016
# 0.004 perplexity: 56.238 speed: 7190 wps
# 0.104 perplexity: 41.771 speed: 9171 wps
# 0.204 perplexity: 45.656 speed: 9415 wps
# 0.304 perplexity: 43.719 speed: 9472 wps
# 0.404 perplexity: 42.941 speed: 9483 wps
# 0.504 perplexity: 42.269 speed: 9494 wps
# 0.604 perplexity: 40.903 speed: 9530 wps
# 0.703 perplexity: 40.314 speed: 9545 wps
# 0.803 perplexity: 39.654 speed: 9580 wps
# 0.903 perplexity: 38.287 speed: 9597 wps
# Epoch: 11 Train Perplexity: 37.477
# Epoch: 11 Valid Perplexity: 123.523
# Epoch: 12 Learning rate: 0.008
# 0.004 perplexity: 55.552 speed: 7317 wps
# 0.104 perplexity: 41.267 speed: 9234 wps
# 0.204 perplexity: 45.119 speed: 9461 wps
# 0.304 perplexity: 43.204 speed: 9519 wps
# 0.404 perplexity: 42.441 speed: 9453 wps
# 0.504 perplexity: 41.773 speed: 9536 wps
# 0.604 perplexity: 40.423 speed: 9555 wps
# 0.703 perplexity: 39.836 speed: 9576 wps
# 0.803 perplexity: 39.181 speed: 9579 wps
# 0.903 perplexity: 37.827 speed: 9554 wps
# Epoch: 12 Train Perplexity: 37.020
# Epoch: 12 Valid Perplexity: 123.192
# Epoch: 13 Learning rate: 0.004
# 0.004 perplexity: 55.124 speed: 8234 wps
# 0.104 perplexity: 40.970 speed: 9391 wps
# 0.204 perplexity: 44.804 speed: 9525 wps
# 0.304 perplexity: 42.912 speed: 9512 wps
# 0.404 perplexity: 42.162 speed: 9536 wps
# 0.504 perplexity: 41.500 speed: 9630 wps
# 0.604 perplexity: 40.159 speed: 9591 wps
# 0.703 perplexity: 39.574 speed: 9575 wps
# 0.803 perplexity: 38.921 speed: 9613 wps
# 0.903 perplexity: 37.575 speed: 9629 wps
# Epoch: 13 Train Perplexity: 36.771
# Epoch: 13 Valid Perplexity: 122.917
# Evaluation
# Test Perplexity: 116.723 took 124.06s
## MediumConfig
# Epoch: 1 Learning rate: 1.000
# 0.008 perplexity: 5173.547 speed: 6469 wps
# 0.107 perplexity: 1219.527 speed: 6453 wps
# 0.206 perplexity: 866.163 speed: 6441 wps
# 0.306 perplexity: 695.163 speed: 6428 wps
# 0.405 perplexity: 598.464 speed: 6420 wps
# 0.505 perplexity: 531.875 speed: 6422 wps
# 0.604 perplexity: 477.079 speed: 6425 wps
# 0.704 perplexity: 438.297 speed: 6428 wps
# 0.803 perplexity: 407.928 speed: 6425 wps
# 0.903 perplexity: 381.264 speed: 6429 wps
# Epoch: 1 Train Perplexity: 360.795
# Epoch: 1 Valid Perplexity: 208.854
# ...
# Epoch: 39 Learning rate: 0.001
# 0.008 perplexity: 56.618 speed: 6357 wps
# 0.107 perplexity: 43.375 speed: 6341 wps
# 0.206 perplexity: 47.873 speed: 6336 wps
# 0.306 perplexity: 46.408 speed: 6337 wps
# 0.405 perplexity: 46.327 speed: 6337 wps
# 0.505 perplexity: 46.115 speed: 6335 wps
# 0.604 perplexity: 45.323 speed: 6336 wps
# 0.704 perplexity: 45.286 speed: 6337 wps
# 0.803 perplexity: 45.174 speed: 6336 wps
# 0.903 perplexity: 44.334 speed: 6336 wps
# Epoch: 39 Train Perplexity: 44.021
# Epoch: 39 Valid Perplexity: 87.516
# Evaluation
# Test Perplexity: 83.858 took 167.58s
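# Aside (a sketch, not part of the original log): perplexity is just
# exp(mean per-word cross-entropy), which is how the figures above are
# derived from the running training loss.
import math

def perplexity(per_word_losses):
    # per_word_losses: list of per-word cross-entropy values in nats
    return math.exp(sum(per_word_losses) / len(per_word_losses))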
|
[
"78732833@qq.com"
] |
78732833@qq.com
|
f270f1f9f205464eecc6286116d5f393e29b632c
|
846dc93416153ef6707e3d8b22fa9ade3e9712d8
|
/Eclipse/GroundIDB.py
|
74e814c9e23451feeef49f989b8a941e19400a26
|
[] |
no_license
|
ehmoni/Datalog-Weighted-Repairs
|
b0103753f6ba09fffd74c48e32297d85143ce03b
|
1bfecd62946574f6704966d2260fc40db3b7754e
|
refs/heads/master
| 2022-02-25T19:54:20.270689
| 2019-11-10T01:54:41
| 2019-11-10T01:54:41
| 205,717,284
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
RuleRecord = {}
RuleRecord["G_01"] = "(admDrugT(Date, PrescribedBy, Drug, Patient, Age) & personSpec(PrescribedBy, Specialist) & drugType(Drug, DrType)"

def GroundIDB(di):
    # Locate the rule key (e.g. "G_01") wherever it appears in the literal,
    # instead of assuming it occupies a fixed slice at the end of the string
    # (di[-6:-2] only works when the key is the last argument).
    start = di.find("G_")
    ditk = di[start:start + 4]
    print(ditk)
    body = RuleRecord[ditk]
    print(body)
    return body

s1 = """+bills("14-Keb-18", "Pediatrician", "Restricted", "G_23", 50)"""
s2 = """-bills("28-Mar-18", "Cardiologist", "GeneralSale", val, "G_01")"""
s = [s1, s2]
GroundIDB(s2)
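# Expected behaviour (a note, not in the original file): GroundIDB(s2)
# prints "G_01" and the stored rule body; GroundIDB(s1) would raise
# KeyError, since only "G_01" is registered in RuleRecord.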
|
[
"noreply@github.com"
] |
ehmoni.noreply@github.com
|
33759813fc74e64798832ff55f2d4bd596a0903c
|
1aa404dc254e64962a67833fe3060045bcd17fd0
|
/Study/project_study/readExcel.py
|
fa8e28f5dff0ecc6ba9e54d70433440a77e45f63
|
[] |
no_license
|
guohuahua2012/samples
|
f1c9d0f988b0309902c3ba94bd1d09d8ba1de5fe
|
18c1744c4077f2a5bb1567aec66839aff3b7f48d
|
refs/heads/master
| 2020-05-19T12:21:57.961943
| 2020-04-11T13:04:14
| 2020-04-11T13:04:14
| 185,012,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 904
|
py
|
# -*- coding: utf-8 -*-
import xlrd
class ReadExecl():
def readExcel(self, filename, sheetname):
cls = []
try:
data = xlrd.open_workbook(filename)
table = data.sheet_by_name(sheetname)
keys = table.row_values(0)
nrows = table.nrows
ncols = table.ncols
for i in range(nrows):
datas = {}
if i == 0:
continue
values = table.row_values(i)
for j in range(ncols):
datas[keys[j]] = values[j]
cls.append(datas)
return cls
except FileNotFoundError:
print("文件不存在", filename)
if __name__ == '__main__':
xls_file = r'E:/samples/Flask/testFile/case/test01.xlsx'
xls_sheet = 'Sheet1'
res = ReadExecl().readExcel(xls_file, xls_sheet)
print(res)
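    # Expected shape of res (a sketch; test01.xlsx is not included here):
    # with a header row like "name, age", readExcel returns
    # [{'name': 'alice', 'age': 30.0}, ...]; xlrd reads numeric cells as floats.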
|
[
"guohuahua2012"
] |
guohuahua2012
|
8e79a3aab8b3e2af24d86803a70b2057cade5424
|
3be13eaad65b202ffe4ac333ed48dfd5432db547
|
/single qubit/QL/environment.py
|
bf57f591ed749eda99574ee567629ccdef2fc356
|
[
"MIT"
] |
permissive
|
tuliplan/RL_state_preparation
|
5b68310d0957972c1e86df9ef3cfcf78edecf8d2
|
e2904edb43a55061d77915098633a5c6211d91a3
|
refs/heads/master
| 2022-02-18T12:03:07.139584
| 2019-09-06T03:15:57
| 2019-09-06T03:15:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,868
|
py
|
import math
import cmath
import numpy as np
from scipy.linalg import expm
sx = 1/2 * np.mat([[0, 1],[ 1, 0]], dtype=complex)
sy = 1/2 * np.mat([[0, -1j],[1j, 0]], dtype=complex)
sz = 1/2 * np.mat([[1, 0],[0, -1]], dtype=complex)
def hamiltonian(j):
J = 4
H = (j) * J * sz + sx
return H
psi_target = np.mat([[1],[0]], dtype=complex)
psi_0 = np.mat([[0],[1]], dtype=complex)
dt = np.pi/20
Dtheta = np.pi/30
Dphi = np.pi/30
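# The Bloch sphere is discretized into a (theta, phi) grid with spacing
# Dtheta x Dphi; state_to_lattice_point below maps a state to the nearest
# grid indices.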
def phase2(z):
'''
return phase angle in [0, 2pi]
'''
phase = cmath.phase(z)
if phase < 0:
phase += 2*math.pi
return phase
def state_to_lattice_point(state):
'''
Note: phi = 0 or 2pi are the same
return the list [theta_i, phi_i]
'''
if state[0,0] == 0:
## Special case 1: [0, 1]
theta, phi = math.pi, 0
else:
conj = state[0,0].conj()
state_reg = state * (conj/abs(conj))
# print(state_reg[0,0].real)
if (state_reg[0,0].real)>= 1:
# Unitary should preserve norm
theta, phi = 0, 0
else:
# print(state_reg[0,0].imag) # this should be 0
theta = 2 * math.acos(state_reg[0,0].real)
# state_reg[1,0]/sin(theta/2) = cos(pi) + i sin(pi)
if theta == 0:
## Special case 2: [1, 0]
phi = 0
else:
phi = phase2(state_reg[1,0]/math.sin(theta/2)) #force the phase of the first elements to be 0.
theta_i = round(theta/Dtheta)
phi_i = round(phi/Dphi)
if phi_i == round(2*math.pi/Dphi):
phi_i = 0
return [theta_i, phi_i]
# class Maze(object): # for Python 2
class Maze:
# qubit in the Bloch Maze
def __init__(self):
self.action_space = ['0', '1']
self.n_actions = len(self.action_space)
self._build_maze()
def _build_maze(self):
self.state = psi_0
def reset(self):
self.state = psi_0
self.counter = 0
# print(dt)
return state_to_lattice_point(self.state)
def step(self, action):
if action == 0:
U = expm(-(1j) * hamiltonian(0) * dt)
elif action == 1:
U = expm(-(1j) * hamiltonian(1) * dt)
self.state = U.dot(self.state)
self.counter += 1
s_ = self.state
fidelity = (abs(s_.conj().T.dot(psi_target)[0,0]))**2
error = 1-fidelity
        if error < 10e-3:  # note: 10e-3 == 0.01, i.e. success when infidelity < 1%
reward = 5000
done = True
s_lattice = 'terminal'
else:
#reward = -1*(error>=0.5) + 10*(error<0.5) + 100*(error<0.1)
reward = 10*(error<0.5) + 100*(error<0.1)
done = (self.counter >= np.pi/dt)
s_lattice = state_to_lattice_point(s_)
return s_lattice, reward, done, fidelity
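# Minimal usage sketch (not in the original file): run one episode with a
# random policy.
if __name__ == "__main__":
    env = Maze()
    s = env.reset()
    done = False
    while not done:
        action = np.random.randint(env.n_actions)  # pulse choice: 0 or 1
        s, reward, done, fidelity = env.step(action)
    print("final fidelity:", fidelity)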
|
[
"noreply@github.com"
] |
tuliplan.noreply@github.com
|
06f9786c2bd88a921be9c2d98ed62b4f8689be8d
|
aeebc330434929fdec92d0fc3108406faacdbd05
|
/Python/progate/python_study_3/page9/script.py
|
2cba02a4424fde1a3ba5a0a2ac1734ba74fd0650
|
[] |
no_license
|
T-o-s-s-h-y/Learning
|
740ed0ef70792c62e553794f8b4c01be8f8490c7
|
da5c224fb9c1aba726749a2adfa89867dd7e8721
|
refs/heads/master
| 2023-03-19T20:42:40.284533
| 2022-12-26T05:43:41
| 2022-12-26T05:43:41
| 189,822,954
| 0
| 0
| null | 2023-03-17T06:08:27
| 2019-06-02T08:33:55
|
Ruby
|
UTF-8
|
Python
| false
| false
| 741
|
py
|
def validate(hand):
    if hand < 0 or hand > 2:
        return False
    # Exercise note (translated): remove the else and fix the indentation
    return True

def print_hand(hand, name='Guest'):
    hands = ['Rock', 'Scissors', 'Paper']
    print(name + ' played ' + hands[hand])

print('Starting rock-paper-scissors')
player_name = input('Enter your name: ')
print('What will you play? (0: Rock, 1: Scissors, 2: Paper)')
player_hand = int(input('Enter a number: '))
if validate(player_hand):
    if player_name == '':
        print_hand(player_hand)
    else:
        print_hand(player_hand, player_name)
else:
    print('Please enter a valid number')
|
[
"51285386+T-o-s-s-h-y@users.noreply.github.com"
] |
51285386+T-o-s-s-h-y@users.noreply.github.com
|
05d05de778ec97436a6174507188d1383adac08e
|
7f1325de95175e24eb989081110c169fbcabb777
|
/roomai/texas/TexasHoldemEnv.py
|
66c5fb5ca1d0c13a1530d6e2bd7bc1e2cf196d5d
|
[
"MIT"
] |
permissive
|
jichao6758/RoomAI
|
3bd268f5cc8734f7c4bb09d5014f61ee8d68954e
|
18a3efa0d608c80f2cf8bf9fe743b5911cce9f6b
|
refs/heads/master
| 2020-12-02T09:58:40.171871
| 2018-04-22T07:39:45
| 2018-04-22T07:39:45
| 96,668,863
| 0
| 0
| null | 2017-07-09T07:40:38
| 2017-07-09T07:40:38
| null |
UTF-8
|
Python
| false
| false
| 31,226
|
py
|
#!/bin/python
#coding:utf-8
import random
import copy
import roomai.common
import roomai
import logging
from roomai.common import Info
from roomai.texas.TexasHoldemUtil import *
from roomai.texas.TexasHoldemAction import *
from roomai.texas.TexasHoldemInfo import *
from functools import cmp_to_key
class TexasHoldemEnv(roomai.common.AbstractEnv):
'''
The TexasHoldem game environment
'''
@classmethod
def __check_initialization_configuration__(cls, env):
if len(env.__params__["chips"]) != env.__params__["num_players"]:
raise ValueError("len(env.chips)%d != env.num_players%d" % (len(env.chips), env.num_players))
if env.__params__["num_players"] > 6:
raise ValueError("The maximum of the number of players is 6. Now, the number of players = %d" % (env.num_players))
return True
#@override
def init(self, params = dict()):
'''
Initialize the TexasHoldem game environment with the initialization params.\n
        The initialization param is a dict with the following options\n
        1) allcards: the order in which all poker cards appear\n
        2) record_history: whether to record all history states. If you need to call the backward function, set it to True. default False\n
3) num_players: how many players are in the game, default 3\n
4) dealer_id: the player id of the dealer, default random\n
5) chips: the initialization chips, default [1000,1000,...]\n
6) big_blind_bet: the number of chips for the big blind bet, default 10\n
An example of the initialization param is {"num_players":2,"record_history":True}
:param params: the initialization params
:return: infos, public_state, person_states, private_state
'''
self.logger = roomai.get_logger()
if "num_players" in params:
self.__params__["num_players"] = params["num_players"]
else:
self.__params__["num_players"] = 3
if "dealer_id" in params:
self.__params__["dealer_id"] = params["dealer_id"]
else:
self.__params__["dealer_id"] = int(random.random() * self.__params__["num_players"])
if "chips" in params:
self.__params__["chips"] = params["chips"]
else:
self.__params__["chips"] = [1000 for i in range(self.__params__["num_players"])]
if "big_blind_bet" in params:
self.__params__["big_blind_bet"] = params["big_blind_bet"]
else:
self.__params__["big_blind_bet"] = 10
if "allcards" in params:
self.__params__["allcards"] = [c.__deepcopy__() for c in params["allcards"]]
else:
self.__params__["allcards"] = list(roomai.common.AllPokerCards_Without_King.values())
random.shuffle(self.__params__["allcards"])
if "record_history" in params:
self.__params__["record_history"] = params["record_history"]
else:
self.__params__["record_history"] = False
self.__check_initialization_configuration__(self)
## public info
small = (self.__params__["dealer_id"] + 1) % self.__params__["num_players"]
big = (self.__params__["dealer_id"] + 2) % self.__params__["num_players"]
self.public_state = TexasHoldemPublicState()
pu = self.public_state
pu.__num_players__ = self.__params__["num_players"]
pu.__dealer_id__ = self.__params__["dealer_id"]
pu.__big_blind_bet__ = self.__params__["big_blind_bet"]
pu.__raise_account__ = self.__params__["big_blind_bet"]
pu.__is_fold__ = [False for i in range(self.__params__["num_players"])]
pu.__num_fold__ = 0
pu.__is_allin__ = [False for i in range(self.__params__["num_players"])]
pu.__num_allin__ = 0
pu.__is_needed_to_action__ = [True for i in range(self.__params__["num_players"])]
pu.__num_needed_to_action__ = pu.num_players
pu.__bets__ = [0 for i in range(self.__params__["num_players"])]
pu.__chips__ = self.__params__["chips"]
pu.__stage__ = StageSpace.firstStage
pu.__turn__ = (big+1)%pu.num_players
pu.__public_cards__ = []
pu.__previous_id__ = None
pu.__previous_action__ = None
if pu.chips[big] > self.__params__["big_blind_bet"]:
pu.__chips__[big] -= self.__params__["big_blind_bet"]
pu.__bets__[big] += self.__params__["big_blind_bet"]
else:
pu.__bets__[big] = pu.chips[big]
pu.__chips__[big] = 0
pu.__is_allin__[big] = True
pu.__num_allin__ += 1
pu.__max_bet_sofar__ = pu.bets[big]
pu.__raise_account__ = self.__params__["big_blind_bet"]
if pu.chips[small] > self.__params__["big_blind_bet"] / 2:
pu.__chips__[small] -= self.__params__["big_blind_bet"] /2
pu.__bets__[small] += self.__params__["big_blind_bet"] /2
else:
pu.__bets__[small] = pu.chips[small]
pu.__chips__[small] = 0
pu.__is_allin__[small] = True
pu.__num_allin__ += 1
pu.__is_terminal__ = False
pu.__scores__ = [0 for i in range(self.__params__["num_players"])]
# private info
self.private_state = TexasHoldemPrivateState()
pr = self.private_state
pr.__keep_cards__ = self.__params__["allcards"][self.__params__["num_players"]*2:self.__params__["num_players"]*2+5]
## person info
self.person_states = [TexasHoldemPersonState() for i in range(self.__params__["num_players"])]
pes = self.person_states
for i in range(self.__params__["num_players"]):
pes[i].__id__ = i
pes[i].__hand_cards__ = self.__params__["allcards"][i*2:(i+1)*2]
pes[pu.turn].__available_actions__ = self.available_actions(pu, pes[pu.turn])
self.__gen_history__()
infos = self.__gen_infos__()
if self.logger.level <= logging.DEBUG:
self.logger.debug("TexasHoldemEnv.init: num_players = %d, dealer_id = %d, chip = %d, big_blind_bet = %d"%(\
pu.num_players,\
pu.dealer_id,\
pu.chips[0],\
pu.big_blind_bet
))
return infos, pu, pes, pr
    ## we need to ensure the action is valid
    #@override
def forward(self, action):
'''
        The TexasHoldem game environment steps forward with the action taken by the current player
:param action: The action taken by the current player
:return: infos, public_state, person_states, private_state
'''
pu = self.public_state
pe = self.person_states
pr = self.private_state
if not self.is_action_valid(action, pu, pe[pu.turn]):
self.logger.critical("action=%s is invalid" % (action.key))
raise ValueError("action=%s is invalid" % (action.key))
if action.option == TexasHoldemAction.Fold:
self.__action_fold__(action)
elif action.option == TexasHoldemAction.Check:
self.__action_check__(action)
elif action.option == TexasHoldemAction.Call:
self.__action_call__(action)
elif action.option == TexasHoldemAction.Raise:
self.__action_raise__(action)
elif action.option == TexasHoldemAction.AllIn:
self.__action_allin__(action)
else:
raise Exception("action.option(%s) not in [Fold, Check, Call, Raise, AllIn]"%(action.option))
pu.__previous_id__ = pu.turn
pu.__previous_action__ = action
pu.__is_terminal__ = False
pu.__scores__ = [0 for i in range(self.__params__["num_players"])]
# computing_score
if TexasHoldemEnv.__is_compute_scores__(self.public_state):
## need showdown
pu.__public_cards__ = pr.keep_cards[0:5]
pu.__is_terminal__ = True
pu.__scores__ = self.__compute_scores__()
pe[pu.previous_id].__available_actions__ = dict()
# enter into the next stage
elif TexasHoldemEnv.__is_nextround__(self.public_state):
add_cards = []
if pu.stage == StageSpace.firstStage: add_cards = pr.keep_cards[0:3]
if pu.stage == StageSpace.secondStage: add_cards = [pr.keep_cards[3]]
if pu.stage == StageSpace.thirdStage: add_cards = [pr.keep_cards[4]]
pu.__public_cards__.extend(add_cards)
pu.__stage__ = pu.stage + 1
pu.__num_needed_to_action__ = 0
pu.__is_needed_to_action__ = [False for i in range(pu.num_players)]
for i in range(pu.num_players):
if pu.__is_fold__[i] != True and pu.__is_allin__[i] != True:
pu.__is_needed_to_action__[i] = True
pu.__num_needed_to_action__ += 1
pu.__turn__ = pu.dealer_id
pu.__turn__ = self.__next_player__(pu)
pe[self.public_state.previous_id].__available_actions__ = dict()
pe[self.public_state.turn].__available_actions__ = self.available_actions(self.public_state, self.person_states[self.public_state.turn])
##normal
else:
pu.__turn__ = self.__next_player__(pu)
self.person_states[self.public_state.previous_id].__available_actions__ = dict()
self.person_states[self.public_state.turn].__available_actions__ = self.available_actions(self.public_state, self.person_states[self.public_state.turn])
if self.logger.level <= logging.DEBUG:
self.logger.debug("TexasHoldemEnv.forward: num_fold+num_allin = %d+%d = %d, action = %s, stage = %d"%(\
self.public_state.num_fold,\
self.public_state.num_allin,\
self.public_state.num_fold + self.public_state.num_allin,\
action.key,\
self.public_state.stage\
))
self.__gen_history__()
infos = self.__gen_infos__()
return infos, self.public_state, self.person_states, self.private_state
#override
@classmethod
def compete(cls, env, players):
'''
        Use the game environment to hold a competition among the players
:param env: The game environment
:param players: The players
:return: scores for the players
'''
total_scores = [0 for i in range(len(players))]
total_count = 1000
for count in range(total_count):
chips = [(1000 + int(random.random() * 200)) for i in range(len(players))]
num_players = len(players)
dealer_id = int(random.random() * len(players))
big_blind_bet = 50
infos, public, persons, private = env.init({"chips":chips,
"num_players":num_players,
"dealer_id":dealer_id,
"big_blind_bet":big_blind_bet})
for i in range(len(players)):
players[i].receive_info(infos[i])
while public.is_terminal == False:
turn = public.turn
action = players[turn].take_action()
#print len(infos[turn].person_state.available_actions),action.key(),turn
infos, public, persons, private = env.forward(action)
for i in range(len(players)):
players[i].receive_info(infos[i])
for i in range(len(players)):
players[i].receive_info(infos[i])
total_scores[i] += public.scores[i]
if (count + 1)%500 == 0:
tmp_scores = [0 for i in range(len(total_scores))]
for i in range(len(total_scores)):
tmp_scores[i] = total_scores[i] / (count+1)
roomai.get_logger().info("TexasHoldem completes %d competitions, scores=%s"%(count+1, ",".join([str(i) for i in tmp_scores])))
for i in range(len(total_scores)):
total_scores[i] /= 1.0 * total_count
return total_scores
def __compute_scores__(self):
pu = self.public_state
pes = self.person_states
pr = self.private_state
## compute score before showdown, the winner takes all
if pu.num_players == pu.num_fold + 1:
scores = [0 for i in range(pu.num_players)]
for i in range(pu.num_players):
if pu.is_fold[i] == False:
scores[i] = sum(pu.bets)
break
## compute score after showdown
else:
scores = [0 for i in range(pu.num_players)]
playerid_pattern_bets = [] #for not_quit players
for i in range(pu.num_players):
if pu.is_fold[i] == True: continue
hand_pattern_cards = self.__cards2pattern_cards__(pes[i].hand_cards, pr.keep_cards)
playerid_pattern_bets.append((i,hand_pattern_cards,pu.bets[i]))
for playerid_pattern_bet in playerid_pattern_bets:
if len(playerid_pattern_bet[1][1]) < 5:
i = 0
playerid_pattern_bets.sort(key=lambda x:self.compute_rank_pattern_cards(x[1]))
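            # Walk the sorted list from strongest to weakest hand, grouping
            # tied hands; each group splits its side pot (bounded by
            # pot_line) evenly, with any remainder chips going to the dealer.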
pot_line = 0
previous = None
tmp_playerid_pattern_bets = []
for i in range(len(playerid_pattern_bets)-1,-1,-1):
if previous == None:
tmp_playerid_pattern_bets.append(playerid_pattern_bets[i])
previous = playerid_pattern_bets[i]
elif self.__compare_patterns_cards__(playerid_pattern_bets[i][1], previous[1]) == 0:
tmp_playerid_pattern_bets.append(playerid_pattern_bets[i])
previous = playerid_pattern_bets[i]
else:
tmp_playerid_pattern_bets.sort(key = lambda x:x[2])
for k in range(len(tmp_playerid_pattern_bets)):
num1 = len(tmp_playerid_pattern_bets) - k
sum1 = 0
max_win_score = pu.bets[tmp_playerid_pattern_bets[k][0]]
for p in range(pu.num_players):
sum1 += min(max(0, pu.bets[p] - pot_line), max_win_score)
for p in range(k, len(tmp_playerid_pattern_bets)):
scores[tmp_playerid_pattern_bets[p][0]] += sum1 / num1
scores[pu.dealer_id] += sum1 % num1
if pot_line <= max_win_score:
pot_line = max_win_score
tmp_playerid_pattern_bets = []
tmp_playerid_pattern_bets.append(playerid_pattern_bets[i])
previous = playerid_pattern_bets[i]
if len(tmp_playerid_pattern_bets) > 0:
tmp_playerid_pattern_bets.sort(key = lambda x:x[2])
for i in range(len(tmp_playerid_pattern_bets)):
num1 = len(tmp_playerid_pattern_bets) - i
sum1 = 0
max_win_score = pu.bets[tmp_playerid_pattern_bets[i][0]]
for p in range(pu.num_players):
sum1 += min(max(0, pu.bets[p] - pot_line), max_win_score)
for p in range(i, len(tmp_playerid_pattern_bets)):
scores[tmp_playerid_pattern_bets[p][0]] += sum1 / num1
scores[pu.dealer_id] += sum1 % num1
if pot_line <= max_win_score: pot_line = max_win_score
for p in range(pu.num_players):
pu.__chips__[p] += scores[p]
scores[p] -= pu.bets[p]
for p in range(pu.num_players):
scores[p] /= pu.big_blind_bet * 1.0
return scores
def __action_fold__(self, action):
pu = self.public_state
pu.__is_fold__[pu.turn] = True
pu.__num_fold__ += 1
pu.__is_needed_to_action__[pu.turn] = False
pu.__num_needed_to_action__ -= 1
def __action_check__(self, action):
pu = self.public_state
pu.__is_needed_to_action__[pu.turn] = False
pu.__num_needed_to_action__ -= 1
def __action_call__(self, action):
pu = self.public_state
pu.__chips__[pu.turn] -= action.price
pu.__bets__[pu.turn] += action.price
pu.__is_needed_to_action__[pu.turn] = False
pu.__num_needed_to_action__ -= 1
def __action_raise__(self, action):
pu = self.public_state
pu.__raise_account__ = action.price + pu.bets[pu.turn] - pu.max_bet_sofar
pu.__chips__[pu.turn] -= action.price
pu.__bets__[pu.turn] += action.price
pu.__max_bet_sofar__ = pu.bets[pu.turn]
pu.__is_needed_to_action__[pu.turn] = False
pu.__num_needed_to_action__ -= 1
p = (pu.turn + 1)%pu.num_players
while p != pu.turn:
if pu.is_allin[p] == False and pu.is_fold[p] == False and pu.is_needed_to_action[p] == False:
pu.__num_needed_to_action__ += 1
pu.__is_needed_to_action__[p] = True
p = (p + 1) % pu.num_players
def __action_allin__(self, action):
pu = self.public_state
pu.__is_allin__[pu.turn] = True
pu.__num_allin__ += 1
pu.__bets__[pu.turn] += action.price
pu.__chips__[pu.turn] = 0
pu.__is_needed_to_action__[pu.turn] = False
pu.__num_needed_to_action__ -= 1
if pu.bets[pu.turn] > pu.max_bet_sofar:
pu.__max_bet_sofar__ = pu.bets[pu.turn]
p = (pu.turn + 1) % pu.num_players
while p != pu.turn:
if pu.is_allin[p] == False and pu.is_fold[p] == False and pu.is_needed_to_action[p] == False:
pu.__num_needed_to_action__ += 1
pu.__is_needed_to_action__[p] = True
p = (p + 1) % pu.num_players
pu.__max_bet_sofar__ = pu.bets[pu.turn]
#####################################Utils Function ##############################
@classmethod
def __next_player__(self, pu):
i = pu.turn
if pu.num_needed_to_action == 0:
return -1
p = (i+1)%pu.num_players
while pu.is_needed_to_action[p] == False:
p = (p+1)%pu.num_players
return p
@classmethod
def __is_compute_scores__(self, pu):
'''
        :return: A boolean indicating whether it is time to compute scores
'''
if pu.num_players == pu.num_fold + 1:
return True
# below need showdown
if pu.num_players <= pu.num_fold + pu.num_allin +1 and pu.num_needed_to_action == 0:
return True
if pu.stage == StageSpace.fourthStage and self.__is_nextround__(pu):
return True
return False
@classmethod
def __is_nextround__(self, public_state):
'''
        :return: A boolean indicating whether it is time to enter the next stage
'''
return public_state.num_needed_to_action == 0
@classmethod
def __cards2pattern_cards__(cls, hand_cards, remaining_cards):
key = cmp_to_key(roomai.common.PokerCard.compare)
pointrank2cards = dict()
for c in hand_cards + remaining_cards:
if c.point_rank in pointrank2cards:
pointrank2cards[c.point_rank].append(c)
else:
pointrank2cards[c.point_rank] = [c]
for p in pointrank2cards:
pointrank2cards[p].sort(key = key)
suitrank2cards = dict()
for c in hand_cards + remaining_cards:
if c.suit_rank in suitrank2cards:
suitrank2cards[c.suit_rank].append(c)
else:
suitrank2cards[c.suit_rank] = [c]
for s in suitrank2cards:
suitrank2cards[s].sort(key=key)
num2point = [[], [], [], [], []]
for p in pointrank2cards:
num = len(pointrank2cards[p])
num2point[num].append(p)
        for i in range(5):
            num2point[i].sort()
sorted_point = []
for p in pointrank2cards:
sorted_point.append(p)
sorted_point.sort()
##straight_samesuit
for s in suitrank2cards:
if len(suitrank2cards[s]) >= 5:
numStraight = 1
for i in range(len(suitrank2cards[s]) - 2, -1, -1):
if suitrank2cards[s][i].point_rank == suitrank2cards[s][i + 1].point_rank - 1:
numStraight += 1
else:
numStraight = 1
if numStraight == 5:
pattern = AllCardsPattern["Straight_SameSuit"]
return (pattern,suitrank2cards[s][i:i + 5])
##4_1
if len(num2point[4]) > 0:
p4 = num2point[4][0]
p1 = -1
for i in range(len(sorted_point) - 1, -1, -1):
if sorted_point[i] != p4:
p1 = sorted_point[i]
break
pattern = AllCardsPattern["4_1"]
cards = pointrank2cards[p4][0:4]
cards.append(pointrank2cards[p1][0])
return (pattern,cards)
##3_2
if len(num2point[3]) >= 1:
pattern = AllCardsPattern["3_2"]
if len(num2point[3]) == 2:
p3 = num2point[3][1]
cards = pointrank2cards[p3][0:3]
p2 = num2point[3][0]
cards.append(pointrank2cards[p2][0])
cards.append(pointrank2cards[p2][1])
return (pattern,cards)
if len(num2point[2]) >= 1:
p3 = num2point[3][0]
cards = pointrank2cards[p3][0:3]
p2 = num2point[2][len(num2point[2]) - 1]
cards.append(pointrank2cards[p2][0])
cards.append(pointrank2cards[p2][1])
return (pattern,cards)
##SameSuit
for s in suitrank2cards:
if len(suitrank2cards[s]) >= 5:
pattern = AllCardsPattern["SameSuit"]
len1 = len(suitrank2cards[s])
cards = suitrank2cards[s][len1 - 5:len1]
return (pattern,cards)
##Straight_DiffSuit
numStraight = 1
for idx in range(len(sorted_point) - 2, -1, -1):
            if sorted_point[idx] + 1 == sorted_point[idx + 1]:
numStraight += 1
else:
numStraight = 1
if numStraight == 5:
pattern = AllCardsPattern["Straight_DiffSuit"]
cards = []
for p in range(idx, idx + 5):
point = sorted_point[p]
cards.append(pointrank2cards[point][0])
return (pattern,cards)
##3_1_1
if len(num2point[3]) == 1:
pattern = AllCardsPattern["3_1_1"]
p3 = num2point[3][0]
cards = pointrank2cards[p3][0:3]
num = 0
for i in range(len(sorted_point) - 1, -1, -1):
p = sorted_point[i]
if p != p3:
cards.append(pointrank2cards[p][0])
num += 1
if num == 2: break
return (pattern,cards)
##2_2_1
if len(num2point[2]) >= 2:
pattern = AllCardsPattern["2_2_1"]
p21 = num2point[2][len(num2point[2]) - 1]
cards = []
for c in pointrank2cards[p21]:
cards.append(c)
p22 = num2point[2][len(num2point[2]) - 2]
for c in pointrank2cards[p22]:
cards.append(c)
flag = False
for i in range(len(sorted_point) - 1, -1, -1):
p = sorted_point[i]
if p != p21 and p != p22:
c = pointrank2cards[p][0]
cards.append(c)
flag = True
                if flag == True: break
return (pattern,cards)
##2_1_1_1
if len(num2point[2]) == 1:
pattern = AllCardsPattern["2_1_1_1"]
p2 = num2point[2][0]
cards = pointrank2cards[p2][0:2]
num = 0
for p in range(len(sorted_point) - 1, -1, -1):
p1 = sorted_point[p]
                if p1 != p2:
                    cards.append(pointrank2cards[p1][0])
                    num += 1
                    if num == 3: break
return (pattern,cards)
##1_1_1_1_1
pattern = AllCardsPattern["1_1_1_1_1"]
count = 0
cards = []
for i in range(len(sorted_point) - 1, -1, -1):
p = sorted_point[i]
for c in pointrank2cards[p]:
cards.append(c)
count += 1
if count == 5: break
if count == 5: break
return (pattern,cards)
@classmethod
def __compare_handcards__(cls, hand_card0, hand_card1, keep_cards):
pattern0 = TexasHoldemEnv.__cards2pattern_cards__(hand_card0, keep_cards)
pattern1 = TexasHoldemEnv.__cards2pattern_cards__(hand_card1, keep_cards)
diff = cls.__compare_patterns_cards__(pattern0, pattern1)
return diff
@classmethod
def compute_rank_pattern_cards(cls, pattern_cards):
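        # Encode (pattern rank, five card point ranks) as a single base-1000
        # integer so two patterns can be compared by plain subtraction.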
rank = pattern_cards[0][5] * 1000
for i in range(5):
rank *= 1000
rank += pattern_cards[1][i].point_rank
return rank
@classmethod
def __compare_patterns_cards__(cls, p1, p2):
return cls.compute_rank_pattern_cards(p1) - cls.compute_rank_pattern_cards(p2)
@classmethod
def available_actions(cls, public_state, person_state):
'''
Generate all valid actions given the public state and the person state
:param public_state:
:param person_state:
:return: all valid actions
'''
pu = public_state
pe = person_state
turn = pu.turn
key_actions = dict()
if pu.turn != pe.id:
return dict()
if pu.is_allin[turn] == True or pu.is_fold[turn] == True:
return dict()
if pu.chips[turn] == 0:
return dict()
## for fold
action = TexasHoldemAction.lookup(TexasHoldemAction.Fold + "_0")
#if cls.is_action_valid(action,public_state, person_state):
key_actions[action.key] = action
## for check
if pu.bets[turn] == pu.max_bet_sofar:
action = TexasHoldemAction.lookup(TexasHoldemAction.Check + "_0")
#if cls.is_action_valid(action, public_state, person_state):
key_actions[action.key] = action
## for call
if pu.bets[turn] != pu.max_bet_sofar and pu.chips[turn] > pu.max_bet_sofar - pu.bets[turn]:
action = TexasHoldemAction.lookup(TexasHoldemAction.Call + "_%d" % (pu.max_bet_sofar - pu.bets[turn]))
#if cls.is_action_valid(action, public_state, person_state):
key_actions[action.key] = action
## for raise
#if pu.bets[turn] != pu.max_bet_sofar and \
if pu.chips[turn] > pu.max_bet_sofar - pu.bets[turn] + pu.raise_account:
num = int((pu.chips[turn] - (pu.max_bet_sofar - pu.bets[turn])) / pu.raise_account)
for i in range(1, num + 1):
price = pu.max_bet_sofar - pu.bets[turn] + pu.raise_account * i
if price == pu.chips[pu.turn]: continue
action = TexasHoldemAction.lookup(TexasHoldemAction.Raise + "_%d" % (price))
#if cls.is_action_valid(action, public_state, person_state):
key_actions[action.key] = action
## for all in
action = TexasHoldemAction.lookup(TexasHoldemAction.AllIn + "_%d" % (pu.chips[turn]))
#if cls.is_action_valid(action, public_state, person_state):
key_actions[action.key] = action
return key_actions
@classmethod
def is_action_valid(cls, action, public_state, person_state):
"""
Args:
action:
public_state:
person_state:
Returns:
"""
'''
pu = public_state
if (not isinstance(public_state, TexasHoldemPublicState)) or (not isinstance(action, TexasHoldemAction)):
return False
if pu.is_allin[pu.turn] == True or pu.is_fold[pu.turn] == True:
return False
if pu.chips[pu.turn] == 0:
return False
if action.option == TexasHoldemAction.Fold:
return True
elif action.option == TexasHoldemAction.Check:
if pu.bets[pu.turn] == pu.max_bet_sofar:
return True
else:
return False
elif action.option == TexasHoldemAction.Call:
if action.price == pu.max_bet_sofar - pu.bets[pu.turn]:
return True
else:
return False
elif action.option == TexasHoldemAction.Raise:
raise_account = action.price - (pu.max_bet_sofar - pu.bets[pu.turn])
if raise_account == 0: return False
if raise_account % pu.raise_account == 0:
return True
else:
return False
elif action.option == TexasHoldemAction.AllIn:
if action.price == pu.chips[pu.turn]:
return True
else:
return False
else:
raise Exception("Invalid action.option" + action.option)
'''
return action.key in person_state.available_actions
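# Minimal usage sketch (not part of the original file; assumes
# TexasHoldemEnv() can be constructed without arguments, following the
# compete() pattern above): play one hand with uniformly random actions.
if __name__ == "__main__":
    env = TexasHoldemEnv()
    infos, public, persons, private = env.init({"num_players": 3})
    while public.is_terminal == False:
        turn = public.turn
        action = random.choice(list(persons[turn].available_actions.values()))
        infos, public, persons, private = env.forward(action)
    print("scores:", public.scores)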
|
[
"jichao6758@sina.com"
] |
jichao6758@sina.com
|