| column | type | lengths / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
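Each record below follows this schema, flattened one cell per line in the column order above. As a minimal sketch of how rows with this schema could be consumed (assuming the Hugging Face `datasets` library; the dataset id shown is a placeholder, not the real repository):

```python
from datasets import load_dataset

# Placeholder dataset id -- substitute the actual repository behind this dump.
ds = load_dataset("some-org/python-source-dump", split="train", streaming=True)

# Stream records and keep small, permissively licensed Python files.
for row in ds:
    if row["license_type"] == "permissive" and row["length_bytes"] < 10_000:
        print(row["repo_name"], row["path"], row["star_events_count"])
```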
b58b3abc2ae55fc7d7048be2b222fbd402839e47
|
1891968ff5d4157668c64f7c9414984f80d01d68
|
/orders/migrations/0007_auto_20180129_1601.py
|
5c39435a9b66fca6634c20d508deb1666ad54454
|
[] |
no_license
|
kaiforward/ecommerce
|
8bf06f22b4805ab73cc193b082f702741c752b5f
|
d2e791de9b731fe7599e1c2f2e9331790029bfb1
|
refs/heads/master
| 2022-12-13T12:25:53.474040
| 2018-02-01T01:54:35
| 2018-02-01T01:54:35
| 117,998,084
| 0
| 0
| null | 2022-12-08T01:00:41
| 2018-01-18T15:06:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,594
|
py
|
# Generated by Django 2.0 on 2018-01-29 16:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('orders', '0006_auto_20180125_1520'),
]
operations = [
migrations.CreateModel(
name='Shipping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='ShippingChoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=200)),
('price', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
('shipping', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='shipping_items', to='orders.Shipping')),
],
),
migrations.AddField(
model_name='customerorder',
name='shipping_type',
field=models.CharField(default='', max_length=100),
),
migrations.AddField(
model_name='customerorder',
name='stripe_id',
field=models.CharField(default='', max_length=100),
),
migrations.AlterField(
model_name='customerorder',
name='country',
field=models.CharField(max_length=200),
),
]
|
[
"kaiforward123@gmail.com"
] |
kaiforward123@gmail.com
|
356944b5eaa4a2018d0858ca1c35ac0c7787559f
|
c9b0dbd8055e914d1e3c7aba1bab81ab45282e3e
|
/onnxruntime/test/python/quantization/test_op_transpose.py
|
f1dd8a780e3b55511b2e0a28412ae63a2dbf3d28
|
[
"MIT"
] |
permissive
|
mosdav/onnxruntime
|
bcabacbb7cfbecf9e2d504e1d3eecff515eb6880
|
03276527b3b77db97f8a73fbce449ca0e0bcee50
|
refs/heads/master
| 2023-09-05T21:06:20.652278
| 2021-10-09T21:39:11
| 2021-10-09T21:39:11
| 275,559,914
| 1
| 0
|
MIT
| 2020-06-28T10:21:41
| 2020-06-28T10:21:40
| null |
UTF-8
|
Python
| false
| false
| 4,197
|
py
|
#!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
import onnx
import numpy as np
from onnx import helper, TensorProto
from onnxruntime.quantization import quantize_static, QuantFormat
from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_op_nodes
class TestOpTranspose(unittest.TestCase):
def input_feeds(self, n, name2shape):
input_data_list = []
for i in range(n):
inputs = {}
for name, shape in name2shape.items():
inputs.update({name: np.random.randint(-1, 2, shape).astype(np.float32)})
input_data_list.extend([inputs])
dr = TestDataFeeds(input_data_list)
return dr
def construct_model_matmul_transpose(self, output_model_path, input_shape, weight_shape, output_shape):
# (input)
# |
# MatMul
# |
# Transpose
# |
# (output)
input_name = 'input'
output_name = 'output'
initializers = []
# make MatMul node
weight_name = 'matmul_weight'
matmul_output_name = 'matmul_output'
matmul_inputs = [input_name, weight_name]
matmul_outputs = [matmul_output_name]
matmul_name = 'matmul_node'
matmul_weight_data = np.random.normal(0, 0.1, weight_shape).astype(np.float32)
initializers.append(onnx.numpy_helper.from_array(matmul_weight_data, name=weight_name))
matmul_node = onnx.helper.make_node('MatMul', matmul_inputs, matmul_outputs, name=matmul_name)
# make Transpose node
kwargs = {'perm': (1, 0)}
transpose_node = onnx.helper.make_node('Transpose', [matmul_output_name], [output_name], name="transpose_node", **kwargs)
# make graph
input_tensor = helper.make_tensor_value_info(input_name, TensorProto.FLOAT, input_shape)
output_tensor = helper.make_tensor_value_info(output_name, TensorProto.FLOAT, output_shape)
graph_name = 'Transpose_Quant_Test'
graph = helper.make_graph([matmul_node, transpose_node], graph_name,
[input_tensor], [output_tensor], initializer=initializers)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 11)])
model.ir_version = 7 # use stable onnx ir version
onnx.save(model, output_model_path)
def test_quantize_transpose(self):
np.random.seed(1)
model_fp32_path = 'transpose_fp32.onnx'
model_uint8_path = 'transpose_uint8.onnx'
model_uint8_qdq_path = 'transpose_uint8_qdq.onnx'
self.construct_model_matmul_transpose(model_fp32_path, [3, 7], [7, 5], [5, 3])
# Verify QOperator model
data_reader = self.input_feeds(1, {'input': [3, 7]})
quantize_static(model_fp32_path, model_uint8_path, data_reader)
# make sure Transpose now runs as a quantized (xint8) operator: its input name should no longer be the float 'matmul_output'
check_op_nodes(self, model_uint8_path, lambda node: (node.name != "transpose_node" or node.input[0] != 'matmul_output'))
qnode_counts = {'QLinearMatMul': 1, 'QuantizeLinear': 1, 'DequantizeLinear': 1, 'Transpose': 1}
check_op_type_count(self, model_uint8_path, **qnode_counts)
data_reader.rewind()
check_model_correctness(self, model_fp32_path, model_uint8_path, data_reader.get_next())
# Verify QDQ model
data_reader.rewind()
quantize_static(model_fp32_path, model_uint8_qdq_path, data_reader, quant_format=QuantFormat.QDQ)
qdqnode_counts = {'MatMul': 1, 'QuantizeLinear': 3, 'DequantizeLinear': 4, 'Transpose': 1}
check_op_type_count(self, model_uint8_qdq_path, **qdqnode_counts)
data_reader.rewind()
check_model_correctness(self, model_fp32_path, model_uint8_qdq_path, data_reader.get_next())
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
mosdav.noreply@github.com
|
3c8ae4cf2c045f3853a27ccc14d51a31e1c7f5b7
|
c03ce95669f17a648eb31cd2446e32357f339c94
|
/plotting/profiles/T_evolution/Tall_evolution/derived_field_functions.py
|
48b1c2d93116ecbc2d781f8d6883c531809bd320
|
[
"MIT"
] |
permissive
|
cavestruz/L500analysis
|
1d29e3787ae81d6b2a2760dba15c59bc5dbcbb4c
|
9af195f2426f9260145e3d55edc952eb06eccb78
|
refs/heads/master
| 2020-04-17T01:17:49.906420
| 2017-06-15T19:30:44
| 2017-06-15T19:30:44
| 52,736,854
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
from L500analysis.plotting.profiles.T_evolution.Tmw_evolution.derived_field_functions \
import *
from L500analysis.plotting.profiles.T_evolution.Tnt_evolution.derived_field_functions \
import *
from L500analysis.plotting.profiles.T_evolution.Ttot_evolution.derived_field_functions \
import *
from L500analysis.derived_fields.collections.peak_height.derived_field_functions \
import *
|
[
"Camille Avestruz"
] |
Camille Avestruz
|
4ef14a92c3f3e2a32d6da14d21387d800d7e2af7
|
12f47e6cfc8bd2314409d4e443d25d2573837c40
|
/fruitod/plot_scripts/dataset_plots.py
|
139ff32fe95fb3d1a495e80775b6bf0de2565a24
|
[] |
no_license
|
nilskk/fruit_tf_od_api
|
3933ce848d45c2e478bfe0b4d7e99e0648733229
|
c89b331dda6dafa4ce851ae45515d07e9298ec5d
|
refs/heads/master
| 2023-08-04T18:11:33.509010
| 2021-09-22T05:33:58
| 2021-09-22T05:33:58
| 309,675,250
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,665
|
py
|
from lxml import etree as ET
from pathlib import Path
import json
import os
import numpy as np
import seaborn as sns
import pandas as pd
from lxml import etree
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from object_detection.utils import dataset_util
if __name__ == '__main__':
data_dir_string = '/home/nilskk/rewe_project/data/penny'
# data_dir_string = '/data/voc_fruit_weights'
data_directory = Path(data_dir_string)
input_directory = Path(os.path.join(data_dir_string, 'dataset_information'))
output_directory = Path(os.path.join(data_dir_string, 'dataset_plots'))
output_directory.mkdir(exist_ok=True, parents=True)
file_dataframe = pd.read_pickle(os.path.join(input_directory, 'file_dataframe.pkl'))
object_dataframe = pd.read_pickle(os.path.join(input_directory, 'object_dataframe.pkl'))
sns.set_theme()
# Number of objects per class, train/test split
plt.figure(figsize=(8,8))
ax = sns.boxenplot(data=file_dataframe, x='class', y='objects', hue='set')
sns.move_legend(ax, loc='upper left', bbox_to_anchor=(1, 1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1.0))
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
plt.xticks(rotation=40, ha="right")
# plt.legend(bbox_to_anchor=(1.01, 1), borderaxespad=0)
plt.xlabel('Klasse')
plt.ylabel('Anzahl der Objekte pro Bild')
plt.savefig(os.path.join(output_directory, 'objects_per_class.png'), bbox_inches='tight')
# Number of images per class, train/test split
plt.figure(figsize=(8,8))
ax = sns.countplot(data=file_dataframe, x='class', hue='set')
sns.move_legend(ax, loc='upper left', bbox_to_anchor=(1, 1))
plt.xticks(rotation=40, ha="right")
# plt.legend(bbox_to_anchor=(1.01, 1), borderaxespad=0)
plt.xlabel('Klasse')
plt.ylabel('Anzahl der Bilder')
for p in ax.patches:
ax.annotate(str(p.get_height()), (p.get_x() * 1.005, p.get_height() * 1.005))
plt.savefig(os.path.join(output_directory, 'images_per_class.png'), bbox_inches='tight')
# Sizes of the boxes
plt.figure(figsize=(8,8))
markers = {"medium": "s", "large": "X"}
ax = sns.relplot(data=object_dataframe, x='width', y='height', col='class', col_wrap=3, hue='size')
# sns.move_legend(ax, loc='upper left', bbox_to_anchor=(1, 1))
ax.set_xlabels('Boxweite in Pixel', fontsize=15) # not set_label
ax.set_ylabels('Boxhöhe in Pixel', fontsize=15)
plt.legend(bbox_to_anchor=(1.01, 1), borderaxespad=0)
# plt.xlabel('Boxweite in Pixel')
# plt.ylabel('Boxhöhe in Pixel')
plt.savefig(os.path.join(output_directory, 'box_size.png'), bbox_inches='tight')
# Areas of the boxes
plt.figure(figsize=(9,8))
ax = sns.boxenplot(data=object_dataframe, x='class', y='area')
# sns.move_legend(ax, loc='upper left', bbox_to_anchor=(1, 1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(5000))
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.axhline(96**2, ls='--', c='blue')
ax.axhline(32**2, ls='--', c='orange')
plt.xticks(rotation=40, ha="right")
# plt.legend(bbox_to_anchor=(1.01, 1), borderaxespad=0)
plt.xlabel('Klasse')
plt.ylabel('Fläche in Pixelanzahl')
plt.savefig(os.path.join(output_directory, 'box_area.png'), bbox_inches='tight')
# Aspect ratios of the boxes
plt.figure(figsize=(9,8))
ax = sns.boxenplot(data=object_dataframe, x='class', y='ratio')
ax.set_yscale('log')
# sns.move_legend(ax, loc='upper left', bbox_to_anchor=(1, 1))
locs = [0.3, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2, 2.5, 3, 3.5, 4, 4.5, 5]
ax.yaxis.set_major_locator(ticker.FixedLocator(locs))
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
plt.xticks(rotation=40, ha="right")
# plt.legend(bbox_to_anchor=(1.01, 1), borderaxespad=0)
plt.xlabel('Klasse')
plt.ylabel('Seitenverhältnis Weite / Höhe')
plt.savefig(os.path.join(output_directory, 'box_ratio.png'), bbox_inches='tight')
# Weight as a function of the number of objects
plt.figure(figsize=(8,8))
g = sns.catplot(data=file_dataframe, y='weight', x='objects', hue='objects', col='class', col_wrap=4, jitter=0.4, alpha=0.8)
for ax in g.axes.flat:
ax.yaxis.set_major_locator(ticker.MultipleLocator(500))
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
# sns.move_legend(ax, loc='upper left', bbox_to_anchor=(1, 1))
g.set_ylabels('Gewicht in Gramm', fontsize=15) # not set_label
g.set_xlabels('Anzahl der Objekte', fontsize=15)
plt.ylabel('Gewicht in Gramm')
plt.xlabel('Anzahl der Objekte')
plt.savefig(os.path.join(output_directory, 'weight_number_of_objects.png'), bbox_inches='tight')
# Weight per object
plt.figure(figsize=(8,8))
ax = sns.boxenplot(data=object_dataframe, x='class', y='weight')
ax.yaxis.set_major_locator(ticker.MultipleLocator(200))
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
plt.xlabel('Klasse')
plt.ylabel('Gesamtgewicht / Anzahl der Objekte')
plt.xticks(rotation=40, ha="right")
plt.savefig(os.path.join(output_directory, 'weight_pro_object.png'), bbox_inches='tight')
# Weight per object (without outliers)
plt.figure(figsize=(16,8))
ax = sns.boxenplot(data=object_dataframe, x='class', y='weight', showfliers=False)
ax.yaxis.set_major_locator(ticker.MultipleLocator(25))
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
plt.xlabel('Klasse')
plt.ylabel('Gesamtgewicht / Anzahl der Objekte')
plt.xticks(rotation=40, ha="right")
plt.savefig(os.path.join(output_directory, 'weight_pro_object_wo_outlier.png'), bbox_inches='tight')
|
[
"nils.kleine.klausing@uni-duesseldorf.de"
] |
nils.kleine.klausing@uni-duesseldorf.de
|
973563f47c93e5795c8f564be07c9269e4e24c26
|
f9062ccd797042b4df5bc75a60127c639067cb8c
|
/archived_old_solutions/solutions in python/p40.py
|
b42fee26a8c45251020e0b94dbb5eaa828f39d27
|
[] |
no_license
|
azusa0127/ProjectEuler
|
c88aa1fde9be67fad20826364f5a2415cc34ab72
|
3c3b1d8b7e5a62aa624738976e3a00c39f959076
|
refs/heads/master
| 2021-01-18T23:40:41.974375
| 2016-05-12T12:32:53
| 2016-05-12T12:32:53
| 51,574,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
#coding: utf-8
'''
Problem 40: Champernowne's constant
An irrational decimal fraction is created by concatenating the positive integers:
0.123456789101112131415161718192021...
It can be seen that the 12th digit of the fractional part is 1.
If dn represents the nth digit of the fractional part, find the value of the following expression:
d1 × d10 × d100 × d1000 × d10000 × d100000 × d1000000
'''
import time
def doProblem():
soln = 1
df = 1
count = 0
currentIndex = 0
i = 0
while count < 7:
while currentIndex < df:
i += 1
currentIndex += len(str(i))
soln *= int(str(i)[-(currentIndex+1-df)])
df *= 10
count += 1
return soln
print('start')
start = time.time()
print(doProblem())
print('end')
print(time.time()-start)
|
[
"phoenixsong@outlook.com"
] |
phoenixsong@outlook.com
|
11792d5ee8f35e482aeb9aa3dfe58b8d761598d7
|
139ef8139237792f85b0b91c152c0ec34386a723
|
/model/document.py
|
eeb6d84eb75af8270b19a80ef1e3397dfefecc60
|
[] |
no_license
|
aj-fuentes/BrainApp
|
ab790451c80872e67acf54788da3698e1fa58770
|
753615f84b326b72010f8b9aab36650b6b939d1c
|
refs/heads/master
| 2020-04-25T21:57:09.862117
| 2019-02-28T11:05:59
| 2019-02-28T11:05:59
| 173,096,087
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
Python
| false
| false
| 12,195
|
py
|
#! -*- coding: cp1252 -*-
import itk
from model.confidence_connected_pipe import ConfidenceConnectedPipe
from model.voting_pipe import VotingPipe
from model.cast_pipe import CastPipe
from model.segment_pipe import SegmentPipe
from model.smooth_pipe import SmoothPipe
__author__ = 'Alvaro Javier'
##This class models the information needed to segment an image
#
# This class exists so that the information is not stored directly in the Application class,
# making it possible to have multiple documents in the same application, each with its own
# current image, in an MDI environment. For now the application is restricted to SDI,
# with a single instance of this class as the active document.
# This class stores the images to be processed and the processing information (filters
# to use, parameters, etc.)
#
# @yo
#
class Document(object):
##Initializer
# @selfdoc
def __init__(self):
##@var _images
# Dictionary with this document's images, keyed by image id
self._images = {}
##@var _current_image_id
# Id of the image currently used for processing
self._current_image_id = ""
##@var _processed_image
# Processed image
self._processed_image = None
##@var _up_cast_pipe
# reference to the pipe that converts the pixel type to itk.F (float)
self._up_cast_pipe = None
##@var _smooth_pipe
# reference to the pipe that smooths the image
self._smooth_pipe = None
##@var _segment_pipe
# reference to the pipe that segments the image
self._segment_pipe = None
##@var _confidence_connected_pipe
# reference to the pipe for statistical segmentation of the image
self._confidence_connected_pipe = None
##@var _down_cast_pipe
# reference to the pipe that converts the pixel type to itk.UC (unsigned char)
self._down_cast_pipe = None
##@var _voting_pipe
# reference to the pipe that refines the segmentation via pixel-based voting
self._voting_pipe = None
##This dictionary indicates which of the application's pipes will be used when processing
# the image
self._used_pipes = dict(
_up_cast_pipe = True, #whether the image-conversion pipe is in use
_smooth_pipe = True, #whether the denoising pipe is in use
_segment_pipe = True, #whether the segmentation pipe is in use
_confidence_connected_pipe = True, #whether statistical segmentation is in use
_down_cast_pipe = True, #whether the image-conversion pipe is in use
_voting_pipe = True #whether the voting pipe is in use
)
##Processing pipeline, with all the selected filters already connected
self._pipeline = []
## Returns whether the statistical segmentation filter is in use
# @selfdoc
# @return True if statistical segmentation is in use, otherwise False
def usingConfidenceConnected(self):
return self._used_pipes["_confidence_connected_pipe"]
##Toggles use of the statistical segmentation filter
# @selfdoc
# @param value True to use it, False otherwise
def useConfidenceConnected(self,value):
update = value != self._used_pipes["_confidence_connected_pipe"]
self._used_pipes["_confidence_connected_pipe"] = value
if update:
self._updatePipes()
## Returns whether the voting filter is in use
# @selfdoc
# @return True if voting is in use, otherwise False
def usingVoter(self):
return self._used_pipes["_voting_pipe"]
##Toggles use of the voting filter
# @selfdoc
# @param value True to use it, False otherwise
def useVoter(self,value):
update = value != self._used_pipes["_voting_pipe"]
self._used_pipes["_voting_pipe"] = value
if update:
self._updatePipes()
## Returns whether the smoothing filter is in use
# @selfdoc
# @return True if the smoother is in use, otherwise False
def usingSmoother(self):
return self._used_pipes["_smooth_pipe"]
##Toggles use of the smoothing filter
# @selfdoc
# @param value True to use it, False otherwise
def useSmoother(self,value):
update = value != self._used_pipes["_smooth_pipe"]
self._used_pipes["_smooth_pipe"] = value
if update:
self._updatePipes()
## Returns whether the segmentation filter is in use
# @selfdoc
# @return True if the segmentation filter is in use, otherwise False
def usingSegmentor(self):
return self._used_pipes["_segment_pipe"]
##Toggles use of the segmentation filter
# @selfdoc
# @param value True to use it, False otherwise
def useSegmentor(self,value):
update = value != self._used_pipes["_segment_pipe"]
self._used_pipes["_segment_pipe"] = value
if update:
self._updatePipes()
##Returns the images in this document (a dictionary indexed by image id)
# @selfdoc
# @return the dictionary with the images in this document (and in the application in general, for now)
@property
def Images(self):
return self._images
##Returns the processing pipeline
# @selfdoc
# @return the processing pipeline with the filters to be used connected together
@property
def Pipes(self):
return self._pipeline
##Returns the current image
# @selfdoc
# @return the current image to be used for processing
@property
def CurrentImage(self):
img = None
if self._current_image_id in self._images.keys():
img = self._images[self._current_image_id]
return img
##Returns the id of the current image
# @selfdoc
# @return the id of the current image to be used for processing
@property
def CurrentId(self):
return self._current_image_id
##Sets the id of the current image (and consequently the current image)
# @selfdoc
# @param id the id of the image to be used for processing
@CurrentId.setter
def CurrentId(self,id):
self._current_image_id = id
self._updatePipes() #update the pipes that will be used
#connect the pipeline input
self._pipeline[0].setInput(self.CurrentImage)
##Returns the processed image
# @selfdoc
# @return the image obtained as the result of processing the current image, or None if the image
# has not been processed yet
@property
def ProcessedImage(self):
return self._processed_image
##Updates the filter connections in the pipeline
#
# Updates the filter connections in the processing pipeline whenever the usage state
# of a filter (whether or not it is used in processing) changes
# @selfdoc
def _updateFixedPipes(self):
self._processed_image = None #the previous processed image is no longer valid
img = self.CurrentImage #image to process
#check that an image is selected
if img is None: return
#check whether it already exists or must be recreated because the image dimension changed
if self._smooth_pipe is None or self._smooth_pipe.Dimension != img.Dimension:
self._smooth_pipe = SmoothPipe(itk.F,img.Dimension,itk.F)
#connect the denoising pipe
if self._used_pipes["_smooth_pipe"]:
self._smooth_pipe.connect(self._pipeline[-1])
self._pipeline.append(self._smooth_pipe)
if self._segment_pipe is None or self._segment_pipe.Dimension != img.Dimension:
self._segment_pipe = SegmentPipe(itk.F,img.Dimension,itk.F)
#connect the segmentation pipe
if self._used_pipes["_segment_pipe"]:
#check whether it already exists or must be recreated because the image dimension changed
self._segment_pipe.connect(self._pipeline[-1])
self._pipeline.append(self._segment_pipe)
#check whether it already exists or must be recreated because the image dimension changed
if self._confidence_connected_pipe is None or\
self._confidence_connected_pipe.Dimension != img.Dimension:
self._confidence_connected_pipe = ConfidenceConnectedPipe(itk.F,img.Dimension,itk.F)
#connect the statistical segmentation pipe
if self._used_pipes["_confidence_connected_pipe"]:
self._confidence_connected_pipe.connect(self._pipeline[-1])
self._pipeline.append(self._confidence_connected_pipe)
#check whether it already exists or must be recreated because the image dimension changed
if self._voting_pipe is None or self._voting_pipe.Dimension != img.Dimension:
self._voting_pipe = VotingPipe(itk.F,img.Dimension,itk.F)
#connect the pipe in charge of voting
if self._used_pipes["_voting_pipe"]:
self._voting_pipe.connect(self._pipeline[-1])
self._pipeline.append(self._voting_pipe)
#connect the pipe that converts the image to unsigned char pixel type
self._down_cast_pipe.connect(self._pipeline[-1])
self._pipeline.append(self._down_cast_pipe)
##Update the input and output pipes of the processing pipeline
# @selfdoc
def _updateInOutPipes(self):
img = self.CurrentImage
if img is None: return #if there is no image, do nothing
#check whether the input filter has not been created yet
if self._up_cast_pipe is None:
self._up_cast_pipe = CastPipe(img.PixelType,img.Dimension,itk.F)
#check that the image type is compatible with the pipeline input
elif img.ImageType != self._up_cast_pipe.InputImageType:
self._up_cast_pipe = CastPipe(img.PixelType,img.Dimension,itk.F)
#check whether the output filter has not been created yet
if self._down_cast_pipe is None:
self._down_cast_pipe = CastPipe(itk.F,img.Dimension,itk.UC)
#check that the image dimension is compatible with the pipeline input
elif img.Dimension != self._down_cast_pipe.Dimension:
self._down_cast_pipe = CastPipe(itk.F,img.Dimension,itk.UC)
self._pipeline.append(self._up_cast_pipe) #add to the pipeline (at the beginning)
##Clears the pipeline, removing all of its filters
# @selfdoc
def clean_pipes(self):
self._confidence_connected_pipe = None
self._down_cast_pipe = None
self._up_cast_pipe = None
self._voting_pipe = None
self._segment_pipe = None
self._smooth_pipe = None
##Updates the processing pipes and the connections between them
# @param clean recreate the pipes from scratch, destroying the previous ones
# @selfdoc
def _updatePipes(self):
self._pipeline = [] #the pipeline is rebuilt from scratch
self._updateInOutPipes() #update the input/output filters
self._updateFixedPipes() #update the pipeline's fixed filters
img = self.CurrentImage #current image
if img is not None: #if there is a current image
self._pipeline[0].setInput(img) #connect it to the processing pipeline
##Processes the current image with the filter pipeline
# @selfdoc
def processImage(self):
for pipe in self._pipeline:
pipe.update()
self._processed_image = self._pipeline[-1].getOutput()
|
[
"alvaro.fuentes-suarez@inria.fr"
] |
alvaro.fuentes-suarez@inria.fr
|
0366444ae9eece2d51f67648df2afa3d8c684745
|
2b7c39b9a841d8fb67129c5dd62c0a7bba530224
|
/desktopRemoteControl/device/manager.py
|
c294d06e95d1379d0708b80da92f86213df73eb9
|
[
"ISC"
] |
permissive
|
dpm76/eaglebone
|
6be44f39a6a528e716675a90dde252da1d49a87c
|
46403d03359a780f385ccb1f05b462869eddff89
|
refs/heads/master
| 2020-04-12T02:27:08.450212
| 2018-11-22T19:11:57
| 2018-11-22T19:11:57
| 52,774,836
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,533
|
py
|
'''
Created on 22 May 2016
@author: david
'''
from contextlib import contextmanager
import os
try:
import pygame
_pygameLoaded = True
except:
_pygameLoaded = False
import sys
from threading import Thread
from time import sleep
from device.event import EventHook
from device.joystick import Joystick
class JoystickManager(object):
'''
Implements the device update loop.
'''
_instance = None
@staticmethod
def getInstance():
if JoystickManager._instance == None:
JoystickManager._instance = JoystickManager()
return JoystickManager._instance
@contextmanager
def suppressStdout(self):
old_stdout = sys.stdout
with open(os.devnull, "w") as devnull:
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
def __init__(self):
self._isRunning = False
self._pollingThread = None
self._joysticks = []
#Events
self.onStart = EventHook()
self.onStop = EventHook()
def start(self):
if _pygameLoaded and (self._pollingThread == None or not self._pollingThread.isAlive()):
# Initialize the joysticks
pygame.init()
pygame.joystick.init()
joystickCount = pygame.joystick.get_count()
if joystickCount != 0:
for joystickIndex in range(joystickCount):
joystick = pygame.joystick.Joystick(joystickIndex)
joystick.init()
#Get device parameters
name = joystick.get_name()
axes = joystick.get_numaxes()
buttons = joystick.get_numbuttons()
hats = joystick.get_numhats()
self._joysticks += [Joystick(name, joystickIndex, axes, buttons, hats)]
#Init thread
self._isRunning = True
self._pollingThread = Thread(target=self._doPolling)
self._pollingThread.start()
self.onStart.fire(self)
def stop(self):
self._isRunning = False
if self._pollingThread != None and self._pollingThread.isAlive():
self._pollingThread.join()
self.onStop.fire(self)
pygame.quit()
def getJoysticks(self):
return self._joysticks
def _doPolling(self):
while self._isRunning:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
self._isRunning=False
for joystick in self._joysticks:
#Read core data
with self.suppressStdout():
pygameJoystick = pygame.joystick.Joystick(joystick.getIndex())
#Axes
for axisIndex in range(joystick.axesCount()):
with self.suppressStdout():
axisValue = pygameJoystick.get_axis(axisIndex)
if joystick._axes[axisIndex] != axisValue:
joystick._setAxisValue(axisIndex, axisValue)
#Buttons
for buttonIndex in range(joystick.buttonsCount()):
with self.suppressStdout():
buttonValue = pygameJoystick.get_button(buttonIndex)
if joystick._buttons[buttonIndex] != buttonValue:
joystick._setButtonValue(buttonIndex, buttonValue)
#Hats
for hatIndex in range(joystick.hatsCount()):
with self.suppressStdout():
hatValue = pygameJoystick.get_hat(hatIndex)
if joystick._hats[hatIndex] != hatValue:
joystick._setHatValue(hatIndex, hatValue)
sleep(0.1)
|
[
"davidpm.itengineer@gmail.com"
] |
davidpm.itengineer@gmail.com
|
a05737228108409c7e76c33930a38d897451d19d
|
9f8fa729e0e5603bf7434839ab516b04151adb90
|
/MergeSort.py
|
5c73f5d59e6095cf85c836366fb85eb2eff5dbd9
|
[] |
no_license
|
rossmmurray/Algorithm-Implementations
|
64b5e4a6ab8631ea98330c34cd6651e3cebfc516
|
832a72068276a6451ac9b95941241655de7a1c29
|
refs/heads/master
| 2020-04-02T06:40:17.854944
| 2018-10-31T17:37:21
| 2018-10-31T17:37:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
def main():
list_a = [1, 5, 3, 7, 2, 6, 4, 8]
sorted_list = merge_sort(list_a)
# print(list_a[:len(list_a)//2])
print(sorted_list)
def merge_sort(unsorted_list):
if len(unsorted_list) == 1:
print(f'returning one element: {unsorted_list}')
return unsorted_list
else:
midpoint = len(unsorted_list)//2
print(f'Merge sorting Left: {unsorted_list[:midpoint]}')
u = merge_sort(unsorted_list[:midpoint])
print(f'Merge sorting Right: {unsorted_list[midpoint:]}')
v = merge_sort(unsorted_list[midpoint:])
# print(u, v)
return merge(u, v)
def merge(list_a, list_b):
print(f'merging: {list_a} and {list_b}')
sorted_sublist = []
while len(list_a) > 0 or len(list_b) > 0:
if len(list_a) == 0:
sorted_sublist.append(list_b.pop(0))  # pop from the front to keep ascending order
elif len(list_b) == 0:
sorted_sublist.append(list_a.pop(0))  # pop from the front to keep ascending order
elif list_a[0] < list_b[0]:
sorted_sublist.append(list_a.pop(0))
else:
sorted_sublist.append(list_b.pop(0))
print(f'Merged {sorted_sublist}')
return sorted_sublist
if __name__ == '__main__':
main()
|
[
"rossmichaelm@gmail.com"
] |
rossmichaelm@gmail.com
|
59e44ba1af2617fe935446a0cabcd0aaf090e9a3
|
b210d58b724c7199f43ddde031eba40996c82257
|
/submissions/sm_107_hasmuddin/week_18/day_4/Blog_application/blueprintAuth.py
|
3393f731b0a6a6e9eeb6a28707293268034a9771
|
[] |
no_license
|
Tanmay53/cohort_3
|
0447efd2dc8b3c1284b03da7326b35393fdf5f93
|
351fb6e1d0c29995fb5cb3b6af411dbcf0ced64c
|
refs/heads/master
| 2021-05-23T17:55:36.508185
| 2020-04-05T12:58:07
| 2020-04-05T12:58:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,463
|
py
|
from flask import request
from flask import Blueprint
import os
import hashlib
import json
import csv
import base64
import jwt
auth = Blueprint("auth", __name__)
fieldnames = ["id", "name", "email", "salt", "password_hash"]
path = "Data/users.csv"
def sha1_hash(string):
hashing = hashlib.sha1()
hashing.update(string.encode('utf-8'))
return hashing.hexdigest()
def salt_generate():
salt = os.urandom(16)
return base64.b64encode(salt)
def password_generator(password, salt):
# iteratively re-hash the salted password so the 30 rounds chain together
pswd = password
for _ in range(30):
pswd = sha1_hash(salt + pswd)
return pswd
def csv_reader():
data = []
with open(path) as user_data:
reader = csv.DictReader(user_data, fieldnames=fieldnames)
for line in reader:
data.append(line)
return data
def get_user(email):
data = csv_reader()
for line in data:
if line["email"] == email:
return line
def id_generate():
data = csv_reader()
if len(data) <= 1:
return 1
else:
last_id = data[-1]["id"]
return int(last_id)+1
def check_email(email):
data = csv_reader()
for file in data:
if file["email"] == email:
return True
return False
def user_find(email):
data = csv_reader()
for file in data:
if file["email"]==email:
return {"name":file["name"], "id": file["id"]}
return "not found"
def user_details_find(id):
data = csv_reader()
for file in data:
if file["id"] == id:
return file
return "not found"
def write_csv(**data):
with open(path, "a") as users:
writer = csv.DictWriter(users, fieldnames=fieldnames)
writer.writerow(data)
return json.dumps({"error": "false", "message": "Registration Success"})
@auth.route("/signup", methods=['POST'])
def sign_up():
id = id_generate()
name = request.json["name"]
email = request.json["email"]
password = request.json["password"]
salt = str(salt_generate())
password_hash = password_generator(str(password), salt)
if check_email(email):
return json.dumps({"error": "true", "message": "Email Already Exist"})
else:
message = write_csv(id=id, name=name, email=email, salt=salt, password_hash=password_hash)
return message
@auth.route("/login", methods=["POST"])
def login():
email = request.json["email"]
password = request.json["password"]
if check_email(email):
user = get_user(email)
password_hash = password_generator(str(password), user["salt"])
if password_hash == user["password_hash"]:
user = user_find(email)
encode_data = jwt.encode(user, 'ayaan', algorithm='HS256')
return json.dumps({"error": "false", "message": "login Successfull", "token": str(encode_data, "utf-8")})
else:
return json.dumps({"error":"true", "message":"Incorrect Password"})
else:
return json.dumps({"error": "true", "message": "Invalid login Creadentials"})
@auth.route("/details", methods=['POST'])
def get_data():
auth_header = request.headers.get('Authorization')
token_encoded = auth_header.split(' ')[1]
decode_data = jwt.decode(token_encoded, 'ayaan', algorithms=['HS256'])
id = decode_data['id']
user_data = user_details_find(id)
return json.dumps({"name": user_data["name"], 'email':user_data["email"]})
|
[
"mdaliansari33@gmail.com"
] |
mdaliansari33@gmail.com
|
70cdf6817a27dd3b5244f57b32546ab0cb108d97
|
0cc30330fc06c2b23d322d0dedc3c558b648e380
|
/blog/migrations/0005_post_views.py
|
f642aadf8710b1dd396b3bb2b047ffab195be820
|
[] |
no_license
|
jamu1570/iCoder
|
01f49ec316257e6dc28d3f707f36b109cc065650
|
9bb3c7d1f35465f6d9649deb75211922f149133e
|
refs/heads/master
| 2022-11-28T01:41:23.158484
| 2020-08-03T16:57:24
| 2020-08-03T16:57:24
| 284,758,150
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
# Generated by Django 3.0 on 2020-05-05 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20200421_1625'),
]
operations = [
migrations.AddField(
model_name='post',
name='views',
field=models.IntegerField(default=0),
),
]
|
[
"jayminmakwana157@gmail.com"
] |
jayminmakwana157@gmail.com
|
8b5d14b684dae7b7eac4615d179cbcb649f3b839
|
43108b48303f898e17740d57e61190238c876c56
|
/file_hosting/app.py
|
61ce52bc4fe327866ebd7a4103b6c3f2f302c94a
|
[] |
no_license
|
ivdunin/sample_file_hosting
|
957c8e67f3e0f55383355c1ac0bbff900555388a
|
6b912f0695772c808a26c60a7e1d80f4ddf37f49
|
refs/heads/master
| 2021-06-17T09:25:32.969739
| 2020-09-04T06:13:18
| 2020-09-04T06:13:18
| 171,557,130
| 0
| 0
| null | 2021-03-20T00:36:27
| 2019-02-19T22:02:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
#!/usr/bin/env python
# Copyright (c) 2018 Dunin Ilya.
""" Main Service file """
from os import path, makedirs
from flask import Flask
from flask_restful import Api
from file_hosting.resources.file_hosting import FileHosting
app = Flask(__name__)
app.config.from_object('file_hosting.settings.development')
api = Api(app)
api.add_resource(FileHosting, '/files', '/files/<fid>')
if __name__ == '__main__':
context = None # pylint: disable=invalid-name
if not app.config.get('DEBUG'):
context = (app.config.get('ROOT_CERT'), app.config.get('PRIVATE_KEY')) # pylint: disable=invalid-name
if not path.exists(path.join(app.root_path, app.config.get('UPLOAD_FOLDER'))):
makedirs(path.join(app.root_path, app.config.get('UPLOAD_FOLDER')))
try:
app.run(
host=app.config.get('HOST'),
port=app.config.get('PORT'),
debug=app.config.get('DEBUG'),
ssl_context=context
)
except FileNotFoundError as exc:
app.logger.error('Cannot load certificate: {}'
'\nCheck settings/production.json "ROOT_CERT" and "PRIVATE_KEY" values'.format(exc))
exit(1)
|
[
"ilya.mirea@gmail.com"
] |
ilya.mirea@gmail.com
|
daf354cea52cc0b38c1452a5d1fd3ee9ff365979
|
13b803636ea92b634a9d6afc2b17bd7df2de8fc5
|
/f_codes/PingFS/ping_server.py
|
e7ad13b04d3584305c3843f5b4dc642e080ad2e0
|
[] |
no_license
|
atupal/snippet
|
0e00d9fadf8ae50797ddddc4c6ee09941f25c695
|
bda9e154aa0acd9a5c23706bf740db6542479ed9
|
refs/heads/master
| 2022-06-22T09:39:15.729319
| 2022-06-06T07:06:57
| 2022-06-06T07:06:57
| 10,927,197
| 23
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,172
|
py
|
import ping, threading, time, socket, select, sys, struct, Queue
import binascii, collections, math, random, logging
import ping_reporter
log = ping_reporter.setup_log('PingServer')
class PingTimer(threading.Thread): # helper class for PingServer to manage timeouts
def __init__(self, event):
self.queue = Queue.PriorityQueue()
threading.Thread.__init__(self)
self.running = False
self.event = event
def stop(self):
log.debug('PingTimeout terminating')
self.running = False
self.event.set()
def run(self):
self.running = True
log.debug('PingTimeout starting')
while self.running:
timeout = None
self.event.clear()
timeout = self.process()
self.event.wait(timeout)
#time.sleep(0.1)
def process(self):
while self.queue.qsize() > 0:
try: expire,event,callback,cb_args = item = self.queue.get_nowait()
except Queue.Empty: break # our qsize check isn't guaranteed to prevent this
if event.is_set(): continue # event was completed; ignore it
if expire > time.time():
self.queue.put(item)
return expire - time.time()
callback(*cb_args)
event.set() # make sure no one executes it
return None
def add_callback(self, timeout, handler, args):
event = threading.Event()
item = (time.time()+timeout,event,handler,args)
self.queue.put(item)
self.event.set()
return event
def mult_callback(self, count, timeout, handler, args):
events = []
timeout += time.time() + 1
for i in range(0,count): events.append(threading.Event())
for i in range(0,count): self.queue.put((timeout,events[i],handler,args[i]))
self.event.set()
return events
class PingServer(threading.Thread):
def __init__(self, d_addr, block_size=1024, initial_timeout=2):
self.block_size = block_size # default; use setup for exact
self.server = d_addr,socket.gethostbyname(d_addr)
self.running_timeout = initial_timeout
threading.Thread.__init__(self)
self.listeners = []
self.debug = 0
# timeout events are queued and executed in a separate thread
self.timer_event = threading.Event()
self.timer = PingTimer(self.timer_event)
self.blocks = 0
self.running = False
self.socket = ping.build_socket()
self.empty_block = self.null_block()
self.queued_events = collections.defaultdict(collections.deque)
def timeout(self): return 2.0/5.0 # self.running_timeout
def safe_timeout(self): return 3 * self.timeout()
def setup_timeout(self, ID=0):
Time = time.time()
Times = struct.pack('d',Time)
if ID == 0: ID = random.getrandbits(32) # ID size in bits
ping.data_ping(self.socket,self.server[1],ID,Times)
msg = ping.read_ping(self.socket,self.timeout())
if not msg: raise Exception('PingServer::setup_timeout: no valid response from '+self.server[0])
addr,rID,data = msg['address'],msg['ID'],msg['payload']
log.debug("Addr=%s rID=%d Data=%d bytes"%(addr[0],rID,len(data)))
if len(data) == 0: raise Exception('PingServer::setup_timeout: null response from '+self.server[0])
if rID != ID: raise Exception('PingServer::setup_timeout: invalid response id from '+self.server[0])
if data != Times: raise Exception('PingServer::setup_timeout: invalid response data from '+self.server[0])
if addr[0] != self.server[1]: raise Exception('PingServer::setup_timeout: invalid response server from '+self.server[0])
delay = time.time() - Time
log.notice('echo delay: %.02fms'%(1000*delay))
def setup_block(self, ID = 0):
if ID == 0: ID = random.getrandbits(32) # ID size in bits
Fill = chr(random.getrandbits(8)) # repeated data
Filler = self.block_size * Fill
ping.data_ping(self.socket,self.server[1],ID,Filler)
msg = ping.read_ping(self.socket,self.timeout())
if not msg: raise Exception('PingServer::setup_block: no valid response from '+self.server[0])
addr,rID,data = msg['address'],msg['ID'],msg['payload']
log.debug("Addr=%s rID=%d Data=%d bytes"%(addr[0],rID,len(data)))
if len(data) == 0: raise Exception('PingServer::setup_block: null response from '+self.server[0])
if rID != ID: raise Exception('PingServer::setup_block: invalid response id from '+self.server[0])
if data != len(data)*Fill: raise Exception('PingServer::setup_block: invalid response data from '+self.server[0])
if addr[0] != self.server[1]: raise Exception('PingServer::setup_block: invalid response server from '+self.server[0])
self.block_size = len(data)
self.empty_block = self.null_block()
log.notice('echo length: %d bytes'%self.block_size)
def setup(self):
log.trace('PingServer::setup: testing server "%s"'%self.server[0])
ID = random.getrandbits(32)
self.setup_timeout(ID)
self.setup_block(ID)
self.strip_counter = 0
self.strip_interval = 97
def stop(self):
self.running = False
log.info('PingServer terminating')
self.timer.stop()
def run(self):
self.running = True
log.notice('PingServer starting')
self.timer.start()
while self.running:
start_blocks = self.blocks # updated asynchronously
ready = select.select([self.socket], [], [], self.timeout())
if ready[0] == []: # timeout
if start_blocks != 0 and self.blocks != 0:
log.error('%s timed out'%self.server[0])
try:
msg = ping.recv_ping(self.socket,self.timeout())
if not msg: continue
except:
continue
addr,block_id,data = msg['address'],msg['ID'],msg['payload']
if block_id == 0:
import binascii
raise Exception('received packet w/ ID 0 packet: '+binascii.hexlify(msg['raw']))
self.process_block(addr[0],block_id,data)
def process_block(self, addr, ID, data):
if ID == 0: raise Exception('server responded with ID 0 packet')
while len(self.queued_events[ID]):
handler,event,args = self.queued_events[ID].popleft()
if event.is_set(): continue
if handler == self.write_block_timeout:
if self.debug: log.trace('%s (block %d) updated'%(self.server[0],ID))
data = args[1]
elif handler == self.read_block_timeout:
if self.debug: log.trace('%s (block %d) read'%(self.server[0],ID))
callback,cb_args = args[1],args[2]
if len(data) > 0: callback(ID,data,*cb_args)
else: callback(ID,self.null_block(),*cb_args)
elif handler == self.delete_block_timeout:
if self.debug: log.trace('%s (block %d) deleted'%(self.server[0],ID))
data = ''
event.set()
self.strip_counter += 1
if len(data) and not self.strip_counter % self.strip_interval:
data = data.rstrip('\0')
if len(data) == 0:
self.blocks = self.blocks - 1
return
if len(self.listeners): self.process_listeners(addr, ID, data)
#log.trace('%s: sending %d bytes from block %d'%(self.server[0],len(data),ID))
ping.send_ping(self.socket, addr, ID, data)
def process_listeners(self, addr, ID, data):
if not self.listeners: raise Exception('process_listeners invoked without valid listeners on ID=%d'%ID)
self.listeners = [l for l in self.listeners if l[0] >= time.time()] # clean the listeners
for x in self.listeners:
expire,handler,cb_args = x
handler(ID, addr, data, *cb_args)
def add_listener(self, handler, timeout, args):
log.debug('add_listener: timeout=%d handler=%s'%(timeout,handler))
expire = time.time() + timeout
self.listeners.append((expire,handler,args))
def null_block(self, length=None):
if length: return length * '\0'
return self.block_size * '\0'
def event_insert(self, ID, handler, args):
event = self.timer.add_callback(self.timeout(), handler, args)
self.queued_events[ID].append((handler,event,args))
return event
# read / write / delete a single block
def write_block(self, ID, data, blocking = False):
# add a block to the queue (or delete if equivalent)
#log.trace('PingServer::write_block: ID=%d bytes=%d blocking=%s'%(ID,len(data),blocking))
if ID == 0: raise Exception('write_block: invalid block ID (0)')
if data == self.null_block()[:len(data)]: return self.delete_block(ID,blocking)
event = self.event_insert(ID,self.write_block_timeout,[ID,data[:self.block_size]])
if blocking: event.wait()
return event
def write_blocks(self, IDs, datas, blocking = False):
log.trace('PingServer::write_blocks: IDs=%d-%d blocking=%s'%(IDs[0],IDs[-1],blocking))
args = []
handler = self.write_block_timeout
for i in range(0,len(IDs)): args.append([IDs[i],datas[i][:self.block_size]])
events = self.timer.mult_callback(len(args),self.timeout(), handler, args)
for i in range(0,len(IDs)): self.queued_events[IDs[i]].append((handler,events[i],args[i]))
if blocking:
for e in events: e.wait()
return events
def delete_block(self, ID, blocking = False):
log.trace('PingServer::delete_block: ID=%d blocking=%s'%(ID,blocking))
if ID == 0: raise Exception('delete_block: invalid block ID (0)')
t = self.event_insert(ID,self.delete_block_timeout,[ID])
if blocking: t.wait()
return t
def read_block(self, ID, callback, cb_args = [], blocking = False):
log.trace('PingServer::read_block: ID=%d blocking=%s'%(ID,blocking))
if ID == 0: raise Exception('read_block: invalid block ID (0)')
t = self.event_insert(ID,self.read_block_timeout,[ID,callback,cb_args])
if blocking: t.wait()
return t
def read_blocks(self, IDs, callback, cb_args, blocking = False):
log.trace('PingServer::read_blocks: IDs=%d-%d blocking=%s'%(IDs[0],IDs[-1],blocking))
args = []
handler = self.read_block_timeout
for ID in IDs: args.append([ID,callback,cb_args])
events = self.timer.mult_callback(len(args),self.timeout(), handler, args)
for i in range(0,len(IDs)): self.queued_events[IDs[i]].append((handler,events[i],args[i]))
if blocking:
for e in events: e.wait()
return events
def read_block_timeout(self, ID, callback, cb_args):
log.debug('PingServer::read_block_timeout: ID=%d callback=%s'%(ID,callback.__name__))
callback(ID,self.null_block(),*cb_args)
def delete_block_timeout(self, ID):
log.debug('PingServer::delete_block_timeout: ID=%d'%ID)
# do nothing; we're marked invalid anyhow
pass
def write_block_timeout(self, ID, data):
log.trace('PingServer::write_block_timeout: ID=%d bytes=%d'%(ID,len(data)))
self.blocks = self.blocks + 1
# force update queue (as if packet arrived)
if ID == 0: raise Exception('write_block_timeout: ID == 0')
self.process_block(self.server[1], ID, data)
def print_block(ID, data):
print '----- print block -----'
print 'block',ID,'bytes',len(data)
print data
print '----- print block -----'
def __live_blocks(ID, addr, data, datastore):
datastore[ID] = 1
def live_blocks(PServer, timeout=None):
store = {}
if not timeout: timeout = PServer.safe_timeout()
PServer.add_listener(__live_blocks,timeout,[store])
time.sleep(timeout)
return store
def used_blocks(blocks):
result,lookup = {},{}
for x in blocks:
if x-1 in lookup:
lookup[x] = lookup[x-1]
result[lookup[x]] += 1
else:
lookup[x] = x
result[x] = 1
return result
def free_blocks(blocks):
result = {}
if 1 not in blocks:
if not blocks: result[1] = 0
elif len(blocks) == 0: result[1] = 0
else: result[1] = min(blocks.keys())-1
for x in blocks:
if not x+1 in blocks: result[x+1] = 0
if not x-1 in blocks:
if not len(result): continue
block = max(result.keys())
result[block] = x-block
return result
if __name__ == "__main__":
ping_reporter.start_log(log,logging.DEBUG)
server = ping.select_server(log,1)
from ping_reporter import humanize_bytes
try:
PS = PingServer(server,4)
PS.debug = 1
PS.setup()
PS.start()
if 0:
print 'traffic:',ping.ping_count,'pings ('+humanize_bytes(ping.ping_bandwidth)+')'
PS.read_block(2,print_block)
time.sleep(2)
PS.write_block(2,'coconut')
time.sleep(1)
print 'traffic:',ping.ping_count,'pings ('+humanize_bytes(ping.ping_bandwidth)+')'
PS.write_block(1,'apples')
PS.read_block(1,print_block)
PS.read_block(1,print_block)
time.sleep(2)
print 'traffic:',ping.ping_count,'pings ('+humanize_bytes(ping.ping_bandwidth)+')'
log.info('testing block metrics')
blocks = live_blocks(PS)
log.debug('blocks: %s'%blocks)
log.debug('--used: %s'%used_blocks(blocks))
log.debug('--free: %s'%free_blocks(blocks))
PS.delete_block(1)
time.sleep(2)
print 'traffic:',ping.ping_count,'pings ('+humanize_bytes(ping.ping_bandwidth)+')'
PS.write_block(1,'apples')
time.sleep(2)
PS.read_block(1,print_block)
time.sleep(4)
PS.read_block(1,print_block)
time.sleep(1)
PS.write_block(1,'bananas')
time.sleep(1)
PS.read_block(1,print_block)
time.sleep(1)
PS.read_block(1,print_block)
PS.read_block(1,print_block)
time.sleep(1)
PS.delete_block(1)
print 'traffic:',ping.ping_count,'pings ('+humanize_bytes(ping.ping_bandwidth)+')'
while True:
time.sleep(1)
if 1:
a = 2500
time.sleep(2)
log.debug('%s: writing %d blocks'%(time.time(),a))
events = PS.write_blocks(range(1,a+1),['AAAA']*a,False)
log.debug('%s: inserted writes'%(time.time()))
#import ping_reporter
#PR = ping_reporter.PingReporter(log,PS,5)
#PR.start()
def __read_callback(ID, data, data_store):
data_store[ID] = data
def test_block_read(PS,rData,count):
data = {}
blob = ''
log.debug('%s: reading %d blocks'%(time.time(),a))
PS.read_blocks(range(1,a+1), __read_callback, [data], True)
log.debug('%s: completed read'%(time.time()))
for i in range(1,a+1): blob = blob + data[i]
missing = 0
for i in range(1,a+1):
if data[i] == '\0'*len(data[i]):
missing += 1
if blob == rData:
log.trace('block read successfully')
return True
else:
log.error('block read failed: data corrupted')
log.error('%d of %d blocks missing'%(missing,count))
return False
data = 'A'*4*a
for i in range(12):
time.sleep(5)
if not test_block_read(PS,data,a): break
print 'terminate'
except KeyboardInterrupt:
print "Keyboard Interrupt"
except Exception:
print 'General Exception'
from traceback import print_exc
print_exc()
finally:
PS.stop()
print 'traffic:',ping.ping_count,'pings ('+humanize_bytes(ping.ping_bandwidth)+')'
sys.exit(1)
|
[
"me@atupal.org"
] |
me@atupal.org
|
2ca1ed615108497115477b80afd5d2fecac50e40
|
82cc99e8fdbfe91d67f6f491706d252e72e531c2
|
/manage.py
|
314cee692ce6893d679d86908af0c8969992e72a
|
[] |
no_license
|
PeterLiao/FindTrain
|
cf418d10dc0503c57e8299238ff3b985b8a54065
|
4ee3f9683e8b7b7b3e6a6c474ec4b5cb4184c199
|
refs/heads/master
| 2021-01-10T19:58:03.561788
| 2014-07-22T08:32:44
| 2014-07-22T08:32:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "FindTrain.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"s9356016@gmail.com"
] |
s9356016@gmail.com
|
40d1268160ff039fb4eb6c0e18a1719ad3baac13
|
769814957131fec3f519d4b8d7a6ed44201fabcf
|
/video_prediction/prediction_model4.py
|
58f8e1b915035e04cfbed16215b6ac28c96ad9c4
|
[
"Apache-2.0"
] |
permissive
|
wangyang59/tf_models
|
79d8c8e4de449a124bbca358f75c4acf0301a520
|
852339e6ebeab7f7587a36d5a81ddea876c5800f
|
refs/heads/master
| 2022-10-18T09:34:26.539574
| 2017-09-12T18:37:45
| 2017-09-12T18:37:45
| 88,771,570
| 0
| 2
|
Apache-2.0
| 2022-10-11T19:00:49
| 2017-04-19T17:22:30
|
Python
|
UTF-8
|
Python
| false
| false
| 18,482
|
py
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model architecture for predictive model, including CDNA, DNA, and STP."""
"""prediction_model3 + normalized total masks/bg_only"""
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python import layers as tf_layers
from lstm_ops import basic_conv_lstm_cell
from tensorflow.python.ops import init_ops
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def sample_gumbel(shape, eps=1e-20):
"""Sample from Gumbel(0, 1)"""
U = tf.random_uniform(shape,minval=0,maxval=1)
return -tf.log(-tf.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
""" Draw a sample from the Gumbel-Softmax distribution"""
y = logits + sample_gumbel(tf.shape(logits))
return tf.nn.softmax( y / temperature)
def gumbel_softmax(logits, temperature, hard=False):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, temperature)
if hard:
#k = tf.shape(logits)[-1]
#y_hard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype)
y_hard = tf.cast(tf.equal(y,tf.reduce_max(y,1,keep_dims=True)),y.dtype)
y = tf.stop_gradient(y_hard - y) + y
return y
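# Illustrative usage sketch (an assumption, using the TF1-style graph execution
# seen throughout this file):
#   logits = tf.log(tf.constant([[0.1, 0.7, 0.2]]))
#   y = gumbel_softmax(logits, temperature=0.5, hard=True)
#   with tf.Session() as sess:
#       print(sess.run(y))  # one-hot-like sample, e.g. [[0., 1., 0.]]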
def construct_model(images,
actions=None,
states=None,
iter_num=-1.0,
k=-1,
use_state=True,
num_masks=10,
stp=False,
cdna=True,
dna=False,
context_frames=2,
global_shift=0):
"""Build convolutional lstm video predictor using STP, CDNA, or DNA.
Args:
images: tensor of ground truth image sequences
actions: tensor of action sequences
states: tensor of ground truth state sequences
iter_num: tensor of the current training iteration (for sched. sampling)
k: constant used for scheduled sampling. -1 to feed in own prediction.
use_state: True to include state and action in prediction
num_masks: the number of different pixel motion predictions (and
the number of masks for each of those predictions)
stp: True to use Spatial Transformer Predictor (STP)
cdna: True to use Convolutional Dynamic Neural Advection (CDNA)
dna: True to use Dynamic Neural Advection (DNA)
context_frames: number of ground truth frames to pass in before
feeding in own predictions
Returns:
gen_images: predicted future image frames
gen_states: predicted future states
Raises:
ValueError: if more than one network option specified or more than 1 mask
specified for DNA model.
"""
batch_size, img_height, img_width, color_channels = images[0].get_shape()[0:4]
lstm_func = basic_conv_lstm_cell
# Generated robot states and images.
gen_states, gen_images = [], []
current_state = states[0]
shifted_masks = []
mask_lists = []
entropy_losses = []
poss_move_masks = []
if k == -1:
feedself = True
else:
# Scheduled sampling:
# Calculate number of ground-truth frames to pass in.
num_ground_truth = tf.to_int32(
tf.round(tf.to_float(batch_size) * (k / (k + tf.exp(iter_num / k)))))
feedself = False
# LSTM state sizes and states.
lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 64, 32]))
lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
lstm_state5, lstm_state6, lstm_state7 = None, None, None
for image, action in zip(images[:-1], actions[:-1]):
# Reuse variables after the first timestep.
reuse = bool(gen_images)
done_warm_start = len(gen_images) > context_frames - 1
with slim.arg_scope(
[lstm_func, slim.layers.conv2d, slim.layers.fully_connected,
tf_layers.layer_norm, slim.layers.conv2d_transpose],
reuse=reuse):
if k == 0:
prev_image = image
else:
if feedself and done_warm_start:
# Feed in generated image.
prev_image = gen_images[-1]
elif done_warm_start:
# Scheduled sampling
prev_image = scheduled_sample(image, gen_images[-1], batch_size,
num_ground_truth)
else:
# Always feed in ground_truth
prev_image = image
# Predicted state is always fed back in
state_action = tf.concat(axis=1, values=[action, current_state])
enc0 = slim.layers.conv2d(
prev_image,
32, [5, 5],
stride=2,
scope='scale1_conv1',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm1'})
hidden1, lstm_state1 = lstm_func(
enc0, lstm_state1, lstm_size[0], scope='state1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
hidden2, lstm_state2 = lstm_func(
hidden1, lstm_state2, lstm_size[1], scope='state2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
enc1 = slim.layers.conv2d(
hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv2')
hidden3, lstm_state3 = lstm_func(
enc1, lstm_state3, lstm_size[2], scope='state3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4')
hidden4, lstm_state4 = lstm_func(
hidden3, lstm_state4, lstm_size[3], scope='state4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm5')
enc2 = slim.layers.conv2d(
hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv3')
# Pass in state and action.
smear = tf.reshape(
state_action,
[int(batch_size), 1, 1, int(state_action.get_shape()[1])])
smear = tf.tile(
smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1])
if use_state:
enc2 = tf.concat(axis=3, values=[enc2, smear])
enc3 = slim.layers.conv2d(
enc2, hidden4.get_shape()[3], [1, 1], stride=1, scope='conv4')
hidden5, lstm_state5 = lstm_func(
enc3, lstm_state5, lstm_size[4], scope='state5') # last 8x8
hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6')
# enc4 = slim.layers.conv2d_transpose(
# hidden5, hidden5.get_shape()[3], 4, stride=2, scope='convt1')
#
# hidden6, lstm_state6 = lstm_func(
# enc4, lstm_state6, lstm_size[5], scope='state6') # 16x16
# hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7')
# # Skip connection.
# hidden6 = tf.concat(axis=3, values=[hidden6, enc1]) # both 16x16
#
# enc5 = slim.layers.conv2d_transpose(
# hidden6, hidden6.get_shape()[3], 4, stride=2, scope='convt2')
# hidden7, lstm_state7 = lstm_func(
# enc5, lstm_state7, lstm_size[6], scope='state7') # 32x32
# hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8')
# Skip connection.
# hidden7 = tf.concat(axis=3, values=[hidden7, enc0]) # both 32x32
# enc6 = slim.layers.conv2d_transpose(
# hidden7,
# hidden7.get_shape()[3], 4, stride=2, scope='convt3',
# normalizer_fn=tf_layers.layer_norm,
# normalizer_params={'scope': 'layer_norm9'})
# Using largest hidden state for predicting a new image layer.
# enc7 = slim.layers.conv2d_transpose(
# enc6, color_channels, 1, stride=1, scope='convt4')
# # This allows the network to also generate one image from scratch,
# # which is useful when regions of the image become unoccluded.
# guessed = tf.nn.sigmoid(enc7)
stp_input0 = tf.reshape(hidden5, [int(batch_size), -1])
stp_input1 = slim.layers.fully_connected(stp_input0, 100, scope='fc_stp')
# masks = slim.layers.conv2d_transpose(
# enc6, num_masks, 1, stride=1, scope='convt7')
# masks_probs = tf.nn.softmax(tf.reshape(masks, [-1, num_masks]))
# entropy_losses.append(tf.reduce_mean(-tf.reduce_sum(masks_probs * tf.log(masks_probs + 1e-10), [1])))
# masks = tf.reshape(
# masks_probs,
# #gumbel_softmax(tf.reshape(masks, [-1, num_masks]), TEMP, hard=False),
# [int(batch_size), int(img_height), int(img_width), num_masks])
# mask_list = tf.split(axis=3, num_or_size_splits=num_masks, value=masks)
#############################
enc0_s = slim.layers.conv2d(
prev_image,
32, [5, 5],
stride=2,
scope='scale1_conv1_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm1_s'})
hidden1_s = slim.layers.conv2d(
enc0_s,
lstm_size[0], [5, 5],
stride=1,
scope='state1_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm2_s'})
hidden2_s = slim.layers.conv2d(
hidden1_s,
lstm_size[1], [5, 5],
stride=1,
scope='state2_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm3_s'})
enc1_s = slim.layers.conv2d(
hidden2_s, hidden2_s.get_shape()[3], [3, 3], stride=2, scope='conv2_s')
hidden3_s = slim.layers.conv2d(
enc1_s,
lstm_size[2], [5, 5],
stride=1,
scope='state3_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm4_s'})
hidden4_s = slim.layers.conv2d(
hidden3_s,
lstm_size[3], [5, 5],
stride=1,
scope='state4_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm5_s'})
enc2_s = slim.layers.conv2d(
hidden4_s, hidden4_s.get_shape()[3], [3, 3], stride=2, scope='conv3_s')
enc3_s = slim.layers.conv2d(
enc2_s, hidden4_s.get_shape()[3], [1, 1], stride=1, scope='conv4_s')
hidden5_s = slim.layers.conv2d(
enc3_s,
lstm_size[4], [5, 5],
stride=1,
scope='state5_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm6_s'})
enc4_s = slim.layers.conv2d_transpose(
hidden5_s, hidden5_s.get_shape()[3], 4, stride=2, scope='convt1_s')
hidden6_s = slim.layers.conv2d(
enc4_s,
lstm_size[5], [5, 5],
stride=1,
scope='state6_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm7_s'})
# Skip connection.
hidden6_s = tf.concat(axis=3, values=[hidden6_s, enc1_s]) # both 16x16
enc5_s = slim.layers.conv2d_transpose(
hidden6_s, hidden6_s.get_shape()[3], 4, stride=2, scope='convt2_s')
hidden7_s = slim.layers.conv2d(
enc5_s,
lstm_size[6], [5, 5],
stride=1,
scope='state7_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm8_s'})
# Skip connection.
      hidden7_s = tf.concat(axis=3, values=[hidden7_s, enc0_s])  # both 32x32
enc6_s = slim.layers.conv2d_transpose(
hidden7_s,
hidden7_s.get_shape()[3], 4, stride=2, scope='convt3_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm9_s'})
masks_s = slim.layers.conv2d_transpose(
enc6_s, 2, 1, stride=1, scope='convt7_s')
masks_probs_s = tf.nn.softmax(tf.reshape(masks_s, [-1, 2]))
#entropy_losses.append(tf.reduce_mean(-tf.reduce_sum(masks_probs * tf.log(masks_probs + 1e-10), [1])))
masks_s = tf.reshape(
masks_probs_s,
#gumbel_softmax(tf.reshape(masks, [-1, num_masks]), TEMP, hard=False),
[int(batch_size), int(img_height), int(img_width), 2])
poss_move_mask, bg_mask = tf.split(axis=3, num_or_size_splits=2, value=masks_s)
poss_move_masks.append(poss_move_mask)
#############################
#mask_list = mask_list[0:2] + [mask * poss_move_mask for mask in mask_list[2:]]
#mask_lists.append(mask_list)
output, shifted_mask = my_transformation2(prev_image, int(color_channels), poss_move_mask, bg_mask, stp_input1)
gen_images.append(output)
shifted_masks.append(shifted_mask)
current_state = slim.layers.fully_connected(
state_action,
int(current_state.get_shape()[1]),
scope='state_pred',
activation_fn=None)
gen_states.append(current_state)
return gen_images, gen_states, shifted_masks, mask_lists, entropy_losses, poss_move_masks
## Utility functions
def stp_transformation(prev_image, bg_mask, stp_input):
"""Apply spatial transformer predictor (STP) to previous image.
Args:
prev_image: previous image to be transformed.
stp_input: hidden layer to be used for computing STN parameters.
num_masks: number of masks and hence the number of STP transformations.
Returns:
List of images transformed by the predicted STP parameters.
"""
# Only import spatial transformer if needed.
from spatial_transformer import transformer
img_height, img_width = prev_image.get_shape()[1:3]
identity_params = tf.convert_to_tensor(
np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
params = slim.layers.fully_connected(
stp_input, 6, scope='stp_params',
activation_fn=None, weights_initializer=init_ops.zeros_initializer()) + identity_params
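  # Added commentary: zero-initialized weights plus identity_params mean the
  # predicted warp starts as the identity; the STP learns only deviations from it.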
return transformer(prev_image, params, (int(img_height), int(img_width))), transformer(bg_mask, params, (int(img_height), int(img_width)))
# def do_global_shift(prev_image, cdna_input, color_channels):
# batch_size = int(cdna_input.get_shape()[0])
#
# # Predict kernels using linear function of last hidden layer.
# cdna_kerns = slim.layers.fully_connected(
# cdna_input,
# DNA_KERN_SIZE * DNA_KERN_SIZE,
# scope='global_shift_params',
# activation_fn=None)
#
# # Reshape and normalize.
# cdna_kerns = tf.reshape(
# cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, 1])
# cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
# norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
# cdna_kerns /= norm_factor
# cdna_kerns_output = cdna_kerns
#
# cdna_kerns = tf.tile(cdna_kerns, [1, 1, 1, color_channels, 1])
# cdna_kerns = tf.split(axis=0, num_or_size_splits=batch_size, value=cdna_kerns)
# prev_images = tf.split(axis=0, num_or_size_splits=batch_size, value=prev_image)
#
# # Transform image.
# transformed = []
# for kernel, preimg in zip(cdna_kerns, prev_images):
# kernel = tf.squeeze(kernel)
# if len(kernel.get_shape()) == 3:
# kernel = tf.expand_dims(kernel, -1)
# transformed.append(
# tf.nn.depthwise_conv2d(preimg, kernel, [1, 1, 1, 1], 'SAME'))
# transformed = tf.concat(axis=0, values=transformed)
# return transformed, tf.squeeze(cdna_kerns_output)
def my_transformation2(prev_image, color_channels, fg_mask, bg_mask, stp_input):
# kernels = []
# for i in xrange(DNA_KERN_SIZE * DNA_KERN_SIZE):
# if i != DNA_KERN_SIZE * DNA_KERN_SIZE / 2:
# kernel = np.zeros((DNA_KERN_SIZE * DNA_KERN_SIZE), dtype=np.float32)
# kernel[i] = 1.0
# kernel = kernel.reshape((DNA_KERN_SIZE, DNA_KERN_SIZE, 1, 1))
# kernel = tf.constant(kernel, shape=(DNA_KERN_SIZE, DNA_KERN_SIZE, 1, 1),
# name='kernel'+str(i), verify_shape=True)
# kernels.append(kernel)
#
# assert len(kernels) == len(mask_list) - 2
# mask[0] indicates stay, mask[1] indicates disappear
# fg_image = prev_image * fg_mask
bg_image = prev_image * bg_mask
bg_image, bg_mask = stp_transformation(bg_image, bg_mask, stp_input)
# output = bg_image + fg_image * mask_list[0]
# shifted_mask = bg_mask + fg_mask*mask_list[0] + fg_mask*mask_list[1]
# for kernel, mask in zip(kernels, mask_list[2:]):
# tmp_mask = tf.nn.depthwise_conv2d(mask, kernel, [1, 1, 1, 1], 'SAME')
# output += tmp_mask * tf.nn.depthwise_conv2d(fg_image, tf.tile(kernel, [1, 1, color_channels, 1]),
# [1, 1, 1, 1], 'SAME')
# shifted_mask += fg_mask*tmp_mask
#
# output += guessed * tf.nn.relu(1.0 - shifted_mask)
# return output, shifted_mask
return bg_image, bg_mask
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
"""Sample batch with specified mix of ground truth and generated data points.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
num_ground_truth: number of ground-truth examples to include in batch.
Returns:
New batch with num_ground_truth sampled from ground_truth_x and the rest
from generated_x.
"""
idx = tf.random_shuffle(tf.range(int(batch_size)))
ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))
ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
generated_examps = tf.gather(generated_x, generated_idx)
return tf.dynamic_stitch([ground_truth_idx, generated_idx],
[ground_truth_examps, generated_examps])
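# Worked example (illustrative numbers, not from the original code): with
# batch_size=4 and num_ground_truth=2, idx may shuffle to [2, 0, 3, 1]; rows 2
# and 0 are then taken from ground_truth_x and rows 3 and 1 from generated_x,
# and dynamic_stitch reassembles them in the original row order.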
|
[
"wangyang59@baidu.com"
] |
wangyang59@baidu.com
|
3424d90576cff18381aa8d2aa7290caf6fff993e
|
a19ab80e2aebe88da038d6900d0532a0ac030f58
|
/backend/chat/models.py
|
b91c1fa676e0d9c7ede651c840fe200c079b1759
|
[] |
no_license
|
crowdbotics-apps/test-project-18237
|
1975ce9949626582c017baebe1ebd56a8bd0d99a
|
27101ee2a0a2cdfb60bf4437191d2dfb08475dac
|
refs/heads/master
| 2022-11-21T21:11:14.642037
| 2020-07-20T00:30:15
| 2020-07-20T00:30:15
| 273,353,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,946
|
py
|
from django.conf import settings
from django.db import models
class ThreadAction(models.Model):
"Generated Model"
action = models.CharField(max_length=7,)
thread = models.ForeignKey(
"chat.Thread", on_delete=models.CASCADE, related_name="threadaction_thread",
)
profile = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="threadaction_profile",
)
timestamp_action = models.DateTimeField(auto_now_add=True,)
class Thread(models.Model):
"Generated Model"
name = models.CharField(max_length=255,)
thread_photo = models.URLField()
timestamp_created = models.DateTimeField(auto_now_add=True,)
class ThreadMember(models.Model):
"Generated Model"
profile = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="threadmember_profile",
)
thread = models.ForeignKey(
"chat.Thread", on_delete=models.CASCADE, related_name="threadmember_thread",
)
is_admin = models.BooleanField()
timestamp_joined = models.DateTimeField(auto_now_add=True,)
timestamp_left = models.DateTimeField()
last_rejoined = models.DateTimeField()
class MessageAction(models.Model):
"Generated Model"
action = models.CharField(max_length=7,)
message = models.ForeignKey(
"chat.Message", on_delete=models.CASCADE, related_name="messageaction_message",
)
profile = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="messageaction_profile",
)
timestamp_action = models.DateTimeField(auto_now_add=True,)
class ForwardedMessage(models.Model):
"Generated Model"
message = models.ForeignKey(
"chat.Message",
on_delete=models.CASCADE,
related_name="forwardedmessage_message",
)
forwarded_by = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="forwardedmessage_forwarded_by",
)
forwarded_to = models.ForeignKey(
"chat.Thread",
on_delete=models.CASCADE,
related_name="forwardedmessage_forwarded_to",
)
timestamp_forwarded = models.DateTimeField(auto_now_add=True,)
class Message(models.Model):
"Generated Model"
message = models.TextField()
thread = models.ForeignKey(
"chat.Thread", on_delete=models.CASCADE, related_name="message_thread",
)
sent_by = models.ForeignKey(
"chat.ThreadMember", on_delete=models.CASCADE, related_name="message_sent_by",
)
attachment = models.URLField()
is_draft = models.BooleanField()
is_delivered = models.BooleanField()
is_read = models.BooleanField()
timestamp_created = models.DateTimeField(auto_now_add=True,)
timestamp_delivered = models.DateTimeField()
timestamp_read = models.DateTimeField()
# Create your models here.
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
1a4fbbd353f863b050ded6155f139a22e17735b2
|
4384609f3b300375e0cfb82d2b04a6c675d6ac63
|
/analysisinHPC/v6.2_17.12.2018/CNN_test_mva_16.6_80.5_VGG16_truemva_except_456/pattern_0/ISS_anylsis/loadmodel_test.py
|
db7d2560181d54e7d0767676bc62ce9bf19a31ab
|
[] |
no_license
|
season0112/antiproton_to_proton_ratio
|
b5fc178886adffec0c9ed3ba21447e4f553b8eed
|
c35c1834b7113946bbc45ca25131a80604834790
|
refs/heads/master
| 2020-03-25T11:50:40.347777
| 2019-01-11T12:17:26
| 2019-01-11T12:17:26
| 143,750,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,775
|
py
|
#!/usr/bin/env python
########################################################################
# #
# Toy Model for Pronton Charge Confusion with Dense Layer #
# version 1.0 07.03.2018 #
# #
########################################################################
########################## packages ####################################
from __future__ import division
import numpy as np
import math
import json
import collections
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import LogNorm
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.layers import Dense, Conv2D, MaxPooling2D
from keras.models import Sequential, Model, model_from_json
from keras.optimizers import SGD, RMSprop, Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras import initializers
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
plt.switch_backend('agg')
# alternative binning for a higher rigidity range, kept for reference
# (immediately overridden by the binning actually used below):
# binnings = np.array([45.1, 48.5, 52.2, 56.1, 60.3, 64.8, 69.7, 74.9, 80.5])
binnings = np.array([24.7, 26.7, 28.8, 31.1, 33.5, 36.1, 38.9, 41.9])
for binleft in range(binnings.shape[0]-1):
########################## Free Parameters #############################
binnumber=150
EcalBDTCut = True
EcalBDTCutvalue_select_proton = 0.0 ## -1 denote proton, 1 denote electron
EcalBDTCutvalue_select_electron = 0.0 ## -1 denote proton, 1 denote electron
TrdProtonHeliumCut = False
TrdProtonHeliumCutValue = 0.3
########################## Properties Label ############################
MvAresult=16
TrdLogLikelihoodRatioProtonHeliumTracker=17
######################### Test events ##################################
testset1 = np.load('/hpcwork/jara0052/sichen/analysis_6.2/B1042_pr.pl1.flux.l1o9.2016000_7.6_all/16.6-147GeV/transferdata/positive/'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'GeV/pattern0/positive_'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'_pattern_0.npy')
testset2 = np.load('/hpcwork/jara0052/sichen/analysis_6.2/B1042_pr.pl1.flux.l1a9.2016000_7.6_all/16.6-147GeV/transferdata/negative/'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'GeV/pattern0/negative_'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'_pattern_0.npy')
testset3 = np.load('/hpcwork/jara0052/sichen/analysis_6.2/B1091_el.pl1.0_25200_7.6_all/16.6-147GeV/transferdata/negative/'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'GeV/pattern0/negative_'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'_pattern_0.npy')
if (EcalBDTCut):
testset1 = testset1[np.where((testset1[:,-5]>-2) & (testset1[:,-5] < EcalBDTCutvalue_select_proton))[0],:]
testset2 = testset2[np.where((testset2[:,-5]>-2) & (testset2[:,-5] < EcalBDTCutvalue_select_proton))[0],:]
testset3 = testset3[np.where((testset3[:,-5]>-2) & (testset3[:,-5] > EcalBDTCutvalue_select_electron))[0],:]
if (TrdProtonHeliumCut):
testset1 = testset1[np.where(testset1[:,TrdLogLikelihoodRatioProtonHeliumTracker] < TrdProtonHeliumCutValue)[0],:]
testset2 = testset2[np.where(testset2[:,TrdLogLikelihoodRatioProtonHeliumTracker] < TrdProtonHeliumCutValue)[0],:]
testset3 = testset3[np.where(testset3[:,TrdLogLikelihoodRatioProtonHeliumTracker] < TrdProtonHeliumCutValue)[0],:]
##################################################################################
testset = np.r_[testset1,testset2,testset3]
testsetMvA = testset[:,MvAresult] ## get the CCProton MvA results
testset = np.expand_dims(testset, axis=2)
inputlist = np.arange(0,16)
testset = testset[:,inputlist,:]
testset[:,4] = np.fabs(testset[:,4])
testset[:,5] = np.fabs(testset[:,5])
testset[:,6] = np.fabs(testset[:,6])
features = 16
############## model prediction #################
import CNN_models
model = CNN_models.VGG16(features)
model.load_weights('VGG16.h5')
y_pred = model.predict(testset)
######################## save template data ################################
    fplot = open('data/plot_positive_rigidity'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'.npy','wb')
    np.save(fplot, np.transpose([ y_pred[0:testset1.shape[0],1], testset1[:,0]]) )
    fplot.close()
    fplot = open('data/plot_negative_rigidity'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'.npy','wb')
    np.save(fplot, np.transpose([ y_pred[testset1.shape[0]:(testset1.shape[0]+testset2.shape[0]),1], testset2[:,0]]) )
    fplot.close()
    fplot = open('data/electron_negative'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'.npy','wb')
    np.save(fplot, np.transpose([ y_pred[(testset1.shape[0]+testset2.shape[0]):,1], testset3[:,0]]) )
    fplot.close()
    fplot = open('data/plot_positive_rigidity_CCMVA'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'.npy','wb')
    np.save(fplot, np.transpose([ testsetMvA[0:testset1.shape[0]], testset1[:,0] ]))
    fplot.close()
    fplot = open('data/plot_negative_rigidity_CCMVA'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'.npy','wb')
    np.save(fplot, np.transpose([ testsetMvA[testset1.shape[0]:(testset1.shape[0]+testset2.shape[0])], testset2[:,0]]))
    fplot.close()
    fplot = open('data/electron_negative_CCMVA'+str(binnings[binleft])+'_'+str(binnings[binleft+1])+'.npy','wb')
    np.save(fplot, np.transpose([ testsetMvA[(testset1.shape[0]+testset2.shape[0]):], testset3[:,0]]))
    fplot.close()
'''
########################## NN Prediction ##################################
y_pred = model.predict(testset)
plt.figure(figsize=(18,18))
plt.hist(y_pred[0:testset1.shape[0],1],bins=binnumber,range=(0,1),log=True,alpha=0.5,label='ChargeCorrect',facecolor='blue',edgecolor='black' )
plt.hist(y_pred[testset1.shape[0]:y_pred.shape[0],1],bins=binnumber,range=(0,1),log=True,alpha=0.5,label='ChargeConfused',facecolor='green',edgecolor='black' )
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel('Estimator $_{CC}$',fontsize=22)
plt.ylabel('Count',fontsize=22)
plt.legend(loc='upper center',fontsize=30)
plt.savefig('plot/ML_test.png')
######################## MvA Prediction ##############################
plt.figure(figsize=(18,18))
plt.hist(testsetMvA[0:testset1.shape[0]],binnumber,range=(0,1),log=True, alpha=0.5,label='ChargeCorrect',facecolor='blue',edgecolor='black' )
plt.hist(testsetMvA[testset1.shape[0]:y_pred.shape[0]],binnumber,range=(0,1),log=True, alpha=0.5,label='ChargeConfused',facecolor='green',edgecolor='black' )
plt.xlabel('Estimator $_{CC}$',fontsize=30)
plt.ylabel('Count',fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.legend(loc='upper center',fontsize=30)
plt.savefig('plot/CCMVA_test.png')
'''
|
[
"li8@j3l04.juropa3"
] |
li8@j3l04.juropa3
|
ee3e78f5244773102201d16a3406150e4742e9ef
|
4e2a1197b47d0c861be98e1c574062c8b9988253
|
/gendiff/constants.py
|
2fc2aa753152c66da526fe12aa4b668431a3c75a
|
[] |
no_license
|
gabady13/python-project-lvl2-1
|
a72a0e45c5e6dc97d4a23e4c034994fab63b12bd
|
77a62ecb0eea9a62d476a1bc93307dcc43d8566a
|
refs/heads/master
| 2023-08-11T02:20:20.943323
| 2021-10-10T19:59:34
| 2021-10-10T20:17:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
"""Common constants."""
INDENT = ' '
ADDED = '+'
REMOVED = '-'
NESTED = 'nested'
CHANGED = 'changed'
UNCHANGED = ' '
SIMPLE = 'simple'
COMPLEX = 'complex value'
|
[
"me@sergeykalistratov.com"
] |
me@sergeykalistratov.com
|
6769adc1d325b19d2cf6a7305fc677a6b55f1df4
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/amplify_read_3/job_get.py
|
7bf63412c69332288cc54d8318983d71ee50cc82
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import execute_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/amplify/get-job.html
if __name__ == '__main__':
"""
delete-job : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/amplify/delete-job.html
list-jobs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/amplify/list-jobs.html
start-job : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/amplify/start-job.html
stop-job : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/amplify/stop-job.html
"""
parameter_display_string = """
# app-id : The unique ID for an Amplify app.
# branch-name : The branch name for the job.
"""
execute_two_parameter("amplify", "get-job", "app-id", "branch-name", parameter_display_string)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
15a0108c1ecf3548cee46e39c0c89ad95d9d0228
|
5f3f2377b7bd1865177c0c681df30bd06fa8e71d
|
/likes/serializers.py
|
1ce1590f8cd0daa3ca8ccf3bfce1e62c05827fc8
|
[] |
no_license
|
mr-fibonacci/drf-sts-codealong
|
d1ffaaf8c26d92166d5877e6a90d25649a6f31fd
|
59bbd944d0695fbee18cd10671b75870931f9dd3
|
refs/heads/master
| 2023-05-10T11:04:33.023936
| 2021-06-09T11:26:30
| 2021-06-09T11:26:30
| 368,148,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
from django.db import IntegrityError
from rest_framework import serializers
from likes.models import Like
class LikeSerializer(serializers.ModelSerializer):
"""
Serializer for the Like model
The create method handles the unique constraint on 'owner' and 'post'
"""
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Like
fields = ['id', 'created_at', 'owner', 'post']
def create(self, validated_data):
try:
return super().create(validated_data)
except IntegrityError:
raise serializers.ValidationError({'detail': 'possible duplicate'})
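# Note (added commentary): a second like by the same owner on the same post
# violates the unique constraint on 'owner' and 'post', so the IntegrityError
# is surfaced as a 400 validation error ('possible duplicate') instead of a 500.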
|
[
"adamlapinski1@gmail.com"
] |
adamlapinski1@gmail.com
|
ae226e84b9e094aaaf017cb12423793840a31af4
|
c12d468c6bef229a58033a6a40ea3925a52adae6
|
/python/pns.py
|
46552cfbb60f07e6db7e251fedeff5efbe67b650
|
[] |
no_license
|
aajarven/TM-harjoitustyo
|
4b65558ee5216f7919d1a9436635148be26cc2e4
|
09a2cf73f7420c8041c5b4d77feef3f3dd8487ac
|
refs/heads/master
| 2021-01-10T14:46:07.753756
| 2015-10-25T18:40:05
| 2015-10-25T18:40:05
| 44,267,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import sys
from utils import *
if len(sys.argv) != 5:
    print "Give the degree of the curve to fit and the files containing the matrices X, Y and W as command-line arguments"
    sys.exit(1)
aste = int(sys.argv[1])
xHavainto = lueMatriisi(sys.argv[2])
Y = lueMatriisi(sys.argv[3])
W = lueMatriisi(sys.argv[4])
if len(np.transpose(xHavainto)) == 1: # if the given X matrix has only one column, assume an nth-degree curve y=f(x) and build the X matrix accordingly
X = np.ones((len(xHavainto), aste+1))
for i in range(0,len(xHavainto)):
for j in range(1, aste+1):
X[i][j] = xHavainto[i]**j
else: # otherwise use the given X matrix as-is
X = xHavainto
if len(X) != len(Y) or len(Y) != len(W):
print "Annettujen matriisien dimensiot eivät täsmää"
sys.exit(1)
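# Weighted least squares solution: beta = (X^T W X)^(-1) X^T W Y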
beta = np.dot( np.dot( np.dot( np.linalg.inv( np.dot( np.dot( np.transpose(X), W), X)), np.transpose(X)), W), Y)
print "kerroinmatriisi beta:"
print beta
|
[
"anni.jarvenpaa@gmail.com"
] |
anni.jarvenpaa@gmail.com
|
b9154c9c4115909fbb3e5991ec61067d7f7aae77
|
2a32ba95aa3b5da7b7376f7a7a4df5bc932c6b90
|
/Stack and Queue/BSTSortedOrder.py
|
dd0a8e0aba651a94dd5281f6889844f37468abc3
|
[] |
no_license
|
maruichen2004/EPI
|
33cb4f1860ca294c9aba460ac7f22e25c2c9b210
|
2379e83536bdbeaa7f21ceeb8f1e369a90f434a0
|
refs/heads/master
| 2016-09-06T03:32:04.412640
| 2014-11-23T05:29:59
| 2014-11-23T05:29:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
from Util.BST import TreeNode
from Util.BST import BST
class Solution:
# Time: O(n)
# Space: O(h)
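    # Approach: iterative inorder traversal -- push the left spine onto the
    # stack, then pop a node, record its value, and descend into its right subtree.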
def bstSortedOrder(self, node):
stack, cur, res = [], node, []
if node is None: return []
while cur or stack:
if cur:
stack.append(cur)
cur = cur.left
else:
parent = stack.pop()
res.append(parent.val)
cur = parent.right
return res
if __name__ == "__main__":
bst = BST()
t = Solution()
print t.bstSortedOrder(bst.getRoot())
|
[
"ruichenma@RUICHENs-MacBook-Pro.local"
] |
ruichenma@RUICHENs-MacBook-Pro.local
|
ffc0fbbe473bc936a32f0d705a41d7ef23a20216
|
8ac5fa643e54f6aac8af600eddbeb4c074095b2b
|
/tspdb/src/algorithms/pymf/bnmf.py
|
c70343667fa65184f5fee1099128dc41cf369245
|
[
"Apache-2.0"
] |
permissive
|
AbdullahO/tspdb
|
00acca3b29a054f02ac07a0243c04ba575af0a19
|
6ba75833a128d4036caad488d144fb6b0ba682e6
|
refs/heads/master
| 2023-05-25T09:44:19.856796
| 2023-05-18T17:25:48
| 2023-05-18T17:25:48
| 210,507,080
| 179
| 60
|
Apache-2.0
| 2023-02-10T23:10:00
| 2019-09-24T03:51:25
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,665
|
py
|
# Authors: Christian Thurau
# License: BSD 3 Clause
"""
PyMF Binary Matrix Factorization [1]
BNMF(NMF) : Class for binary matrix factorization
[1]Z. Zhang, T. Li, C. H. Q. Ding, X. Zhang: Binary Matrix Factorization with
Applications. ICDM 2007
"""
import numpy as np
from base import PyMFBase
__all__ = ["BNMF"]
class BNMF(PyMFBase):
"""
    BNMF(data, num_bases=4)
Binary Matrix Factorization. Factorize a data matrix into two matrices s.t.
F = | data - W*H | is minimal. H and W are restricted to binary values.
Parameters
----------
data : array_like, shape (_data_dimension, _num_samples)
the input data
num_bases: int, optional
Number of bases to compute (column rank of W and row rank of H).
4 (default)
Attributes
----------
W : "data_dimension x num_bases" matrix of basis vectors
H : "num bases x num_samples" matrix of coefficients
ferr : frobenius norm (after calling .factorize())
Example
-------
Applying BNMF to some rather stupid data set:
>>> import numpy as np
>>> from bnmf import BNMF
>>> data = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
Use 2 basis vectors -> W shape(data_dimension, 2).
>>> bnmf_mdl = BNMF(data, num_bases=2)
Set number of iterations to 5 and start computing the factorization.
>>> bnmf_mdl.factorize(niter=5)
The basis vectors are now stored in bnmf_mdl.W, the coefficients in bnmf_mdl.H.
To compute coefficients for an existing set of basis vectors simply copy W
to bnmf_mdl.W, and set compute_w to False:
>>> data = np.array([[0.0], [1.0]])
>>> W = np.array([[1.0, 0.0], [0.0, 1.0]])
>>> bnmf_mdl = BNMF(data, num_bases=2)
>>> bnmf_mdl.W = W
>>> bnmf_mdl.factorize(niter=10, compute_w=False)
The result is a set of coefficients bnmf_mdl.H, s.t. data = W * bnmf_mdl.H.
"""
    # controls how fast lambda should increase:
    # this influences convergence to binary values during the update. A value
    # <1 will result in non-binary decompositions as the update rule effectively
    # is a conventional nmf update rule. Values >1 give more weight to making the
    # factorization binary with increasing iterations.
    # Setting either W or H to 0 makes the resulting matrix non-binary.
_LAMB_INCREASE_W = 1.1
_LAMB_INCREASE_H = 1.1
def _update_h(self):
"""
"""
H1 = np.dot(self.W.T, self.data[:,:]) + 3.0*self._lamb_H*(self.H**2)
H2 = np.dot(np.dot(self.W.T,self.W), self.H) + 2*self._lamb_H*(self.H**3) + self._lamb_H*self.H + 10**-9
self.H *= H1/H2
self._lamb_W = self._LAMB_INCREASE_W * self._lamb_W
self._lamb_H = self._LAMB_INCREASE_H * self._lamb_H
def _update_w(self):
W1 = np.dot(self.data[:,:], self.H.T) + 3.0*self._lamb_W*(self.W**2)
W2 = np.dot(self.W, np.dot(self.H, self.H.T)) + 2.0*self._lamb_W*(self.W**3) + self._lamb_W*self.W + 10**-9
self.W *= W1/W2
def factorize(self,
niter=10,
compute_w=True,
compute_h=True,
show_progress=False,
compute_err=True):
""" Factorize s.t. WH = data
Parameters
----------
niter : int
number of iterations.
show_progress : bool
print some extra information to stdout.
compute_h : bool
iteratively update values for H.
compute_w : bool
iteratively update values for W.
compute_err : bool
compute Frobenius norm |data-WH| after each update and store
it to .ferr[k].
Updated Values
--------------
.W : updated values for W.
.H : updated values for H.
.ferr : Frobenius norm |data-WH| for each iteration.
"""
# init some learning parameters
self._lamb_W = 1.0/niter
self._lamb_H = 1.0/niter
PyMFBase.factorize(self, niter=niter, compute_w=compute_w,
compute_h=compute_h, show_progress=show_progress,
compute_err=compute_err)
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
[
"aalomar@mit.edu"
] |
aalomar@mit.edu
|
c219ee966f58b6da567f9e09dd8d46c6f2c96b6a
|
04e8267f3f7137c4f41fc9c8c72e1a9ab27ee2ff
|
/NLP_Assignment-3/dxp170020_memm.py
|
502e9583ca6dfd0d6de582f684a22075d2345e67
|
[] |
no_license
|
ashutoshsenapati/NLP-Projects
|
a6dab578b9dfcfe807bba52e8464ad1f16c2eb9a
|
76941d6ca297c290c3534feb37138e461bede2bc
|
refs/heads/master
| 2022-03-06T02:47:43.528851
| 2019-10-28T19:48:33
| 2019-10-28T19:48:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,929
|
py
|
import nltk
nltk.download('brown')
nltk.download('universal_tagset')
import numpy as np
from nltk.corpus import brown
import collections
import re
import sys
from scipy.sparse import csr_matrix
from sklearn.linear_model import LogisticRegression
import csv
import itertools
from itertools import groupby
############ creating the set of rare words that occur fewer than 5 times ############################
rare_words = set()
### tag dictionary global variable ###
tag_dict = {}
## feature dictionary global variable ###
feature_dict = {}
def word_ngram_features(i, words):
''' The argument i (int) is the index of the current word to generate features for, and words is a list containing all the words
in the sentence (ie. words[i] is the current word wi). The function should return a list containing the following features:
• ‘prevbigram-[x]’, where [x] is the previous word wi−1
• ‘nextbigram-[x]’, where [x] is the next word wi+1
• ‘prevskip-[x]’, where [x] is the next previous word wi−2
• ‘nextskip-[x]’, where [x] is the next next word wi+2
• ‘prevtrigram-[x]-[y]’, where [x] is wi−1 and [y] is wi−2
• ‘nexttrigram-[x]-[y]’, where [x] is wi+1 and [y] is wi+2
• ‘centertrigram-[x]-[y]’, where [x] is wi−1 and [y] is wi+1
    You will need to check for corner cases where i ± 1 and/or i ± 2 are not
    valid indices; in those cases, use the meta-words '<s>' and '</s>'. '''
list_ngram_features = list()
list_len = len(words)
#### checking for prevbigram #######
prevbigram = ""
if i == 0:
prevbigram = "<s>"
else :
prevbigram = words[i-1]
list_ngram_features.append("prevbigram" + "-" + prevbigram)
### checking the next bigram ########
nextbigram = ""
if i+1 == list_len:
nextbigram = "</s>"
else:
nextbigram = words[i+1]
list_ngram_features.append("nextbigram" + "-" + nextbigram)
### checking for the prev skip ######
prevskip = ""
if i < 2:
prevskip = "<s>"
else:
prevskip = words[i-2]
list_ngram_features.append("prevskip" + "-" + prevskip)
### checking fro the next skip ######
nextskip = ""
if i+2 >= len(words):
nextskip = "</s>"
else:
nextskip = words[i+2]
list_ngram_features.append("nextskip" + "-" + nextskip)
### checking for prevtrigram #######
prevtrigram1 = ""
prevtrigram2 = ""
if i == 0:
prevtrigram1 = "<s>"
prevtrigram2 = "<s>"
elif i == 1:
prevtrigram1 = words[i-1]
prevtrigram2 = "<s>"
else:
prevtrigram1 = words[i-1]
prevtrigram2 = words[i-2]
list_ngram_features.append("prevtrigram" + "-" + prevtrigram1 + "-" + prevtrigram2)
### checking for nexttrigram ######
nexttrigram1 = ""
nexttrigram2 = ""
if (i + 1) >= len(words):
nexttrigram1 = "</s>"
nexttrigram2 = "</s>"
elif (i+1) >= len(words) - 1:
nexttrigram1 = words[i+1]
nexttrigram2 = "</s>"
else:
nexttrigram1 = words[i+1]
nexttrigram2 = words[i+2]
list_ngram_features.append("nexttrigram" + "-" + nexttrigram1 + "-" + nexttrigram2)
### checking for center trigram ###
centertrigram1 = ""
centertrigram2 = ""
if len(words) == 1:
centertrigram1 = "<s>"
centertrigram2 = "</s>"
elif i == 0:
centertrigram1 = "<s>"
centertrigram2 = words[i+1]
elif (i+1) >= len(words):
centertrigram1 = words[i-1]
centertrigram2 = "</s>"
else:
centertrigram1 = words[i-1]
centertrigram2 = words[i+1]
list_ngram_features.append("centertrigram" + "-" + centertrigram1 + "-" + centertrigram2)
return list_ngram_features
def word_features(word, rare_words):
''' The argument word is the current word wi , and rare words is the set of rare words we made in the previous section.
The function should return a list containing the following features:
• ‘word-[word]’, only if word is not in rare words
• ‘capital’, only if word is capitalized
• ‘number’, only if word contains a digit
• ‘hyphen’, only if word contains a hyphen
    • ‘prefix[j]-[x]’, where [j] ranges from 1 to 4 and [x] is the substring containing
    the first [j] letters of word
    • ‘suffix[j]-[x]’, where [j] ranges from 1 to 4 and [x] is the substring containing
    the last [j] letters of word '''
word_features_list = list()
### check if word not in rare word ###
if word not in rare_words:
        word_features_list.append("word-" + word)
### check if word is capitalized ###
if word[0].isupper():
word_features_list.append("capital")
### check if word contains digit ####
if re.search("\d", word):
word_features_list.append("number")
### check if word contains hyphen ###
if '-' in word:
word_features_list.append("hyphen")
### prefix[j]-[x] where j ranges from 1 to 4 and [x] is the substring containing the first [j] letters of word ###
j = 1
while(j <= 4) and j <= len(word):
word_features_list.append("prefix" + str(j) + "-" + word[0:j])
j = j + 1
### suffix[j] - [x] where j ranges from 1 to 4 and [x] is the substring containing the last [j] letters of word ###
j = 1
while(j <= 4 and j <= len(word)):
word_features_list.append("suffix" + str(j) + "-" + word[-1*j:])
j = j + 1
return word_features_list
def get_features(i, words, prevtag, rare_words):
''' training features[0][0] should be the list of features for the first word in the first
sentence of the corpus. You will need to check for corner cases where i = 0 and there is
no prevtag; in these cases, use the meta-tag ‘<S>’ .'''
list_word_ngram_features = word_ngram_features(i,words)
list_word_features = word_features(words[i],rare_words)
features = list_word_ngram_features + list_word_features
### adding the tagbigram-[prevtag] to the final feature list ######
features.append("tagbigram" + "-" + prevtag)
########## extra credit part for the HW3. Question 6 ##############
features.append("word-" + words[i] + "-prevtag-" + prevtag)
#### converting the features to lower case #########
lower_features = list(map(lambda x:x.lower(),features))
### the below features do not need to be converted to lowercase ###########
#### check all caps ######
if words[i].isupper():
lower_features.append("allcaps")
####### wordshape feature #########
abstract_version = ""
for character in words[i]:
if character.islower():
abstract_version = abstract_version + "x"
elif character.isupper():
abstract_version = abstract_version + "X"
elif character.isdigit():
abstract_version = abstract_version + "d"
if len(abstract_version) > 0:
lower_features.append("wordshape" + "-" + abstract_version)
### short wordshape feature #######
short_word_shape = ''.join(ch for ch, _ in itertools.groupby(abstract_version))
if len(short_word_shape) > 0:
lower_features.append("short-wordshape-" + short_word_shape)
    #### all caps, at least 1 hyphen, at least 1 digit #####
if words[i].isupper() and '-' in words[i] and any(ch.isdigit() for ch in words[i]):
lower_features.append("allcaps-digit-hyphen")
######## capital-followed by co feature #########
if words[i][0].isupper():
for indice in range(i+1,i+3):
if indice < len(words):
if words[indice] == "Co." or words[indice] == "Inc.":
lower_features.append("capital-followedby-co")
break
else:
break
return lower_features
def remove_rare_features(features, n):
''' The argument features is a list of lists of feature lists (ie. training features), and n is the number of times a
feature must occur in order for us to keep it in the feature set '''
### creating a vocabulary that will store the feature and its count ####
feature_vocabulary = {}
for sentence_feature in features:
for word_features in sentence_feature:
for feature in word_features:
feature_vocabulary[feature] = feature_vocabulary.get(feature,0) + 1
###### creating two sets for rare and non rare feature vocab #############
rare_features = set()
non_rare_features = set()
for feature,feature_count in feature_vocabulary.items():
if feature_count < n:
rare_features.add(feature)
else:
non_rare_features.add(feature)
########## removing the rare features from the training features set ############
updated_training_features = list()
for sentence_feature in features:
word_features_list = list()
for word_features in sentence_feature:
word_list = list()
for word_feature in word_features:
if word_feature not in rare_features:
word_list.append(word_feature)
word_features_list.append(word_list)
updated_training_features.append(word_features_list)
return (updated_training_features,non_rare_features)
def build_Y(tags):
''' build train_Y for training data and return numpy array '''
Y = list()
for sentence_tag in tags:
for word_tag in sentence_tag:
Y.append(tag_dict[word_tag])
return np.array(Y)
def build_X(features):
    ''' construct a sparse matrix using three lists, row, col, and value, to indicate which positions are not 0:
    X[row[i]][col[i]] = value[i], and all other positions in X are assumed to be 0. Of course, all non-0 positions
    have value 1 for us, so value would just be a list of 1s '''
examples = []
feature_index = []
i=0
#### some theory here #####
#### here , feature refers to the individual features for each word. So , for every word , we have a feature list
#### we append the number of times index i to the examples list for each word feature in word feature list. since we
#### want to keep track of count of features in each word. so if a word has features like [A,B,C] , we will append, index i
### to the example list , 3 times.
for sentence_features in features:
for word_features in sentence_features:
for feature in word_features:
if feature in feature_dict:
examples.append(i)
feature_index.append(feature_dict[feature])
i += 1
values = [1] * len(examples)
examples = np.asarray(examples)
feature_index = np.asarray(feature_index)
values = np.asarray(values)
sparse_matrix = csr_matrix((values, (examples, feature_index)), shape = (i, len(feature_dict)), dtype = np.int8)
return sparse_matrix
def viterbi(Y_start, Y_pred):
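    # Log-space Viterbi decoding (recurrence noted as added commentary):
    #   V[i+1][k] = max_j (V[i][j] + log P(tag k | prev tag j, word i)),
    # with BP[i+1][k] storing the argmax j for backtracking.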
N = np.shape(Y_pred)[0] + 1
T = len(tag_dict)
V = np.empty((N, T))
BP = np.empty((N, T))
for j in range(T):
V[0][j] = Y_start[0][j]
BP[0][j] = -1
for i, row in enumerate(Y_pred):
for k in range(T):
            scores = V[i, :] + Y_pred[i, :, k]
            V[i + 1, k] = max(scores)
            BP[i + 1, k] = int(np.argmax(scores))
backward_indices = []
index = np.argmax(V[N-1])
backward_indices.append(index)
for n in range(N - 1, 0, -1):
index = BP[n, int(index)]
backward_indices.append(index)
for key,value in tag_dict.items():
i = 0
for bindex in backward_indices:
if bindex == value:
backward_indices[i] = key
i = i + 1
return list(reversed(backward_indices))
def load_test(filename):
''' The function should return a list of lists, where each sublist is a sentence (a line in the test file), and each item
in the sentence is a word. '''
final_list = list()
with open(filename, encoding='utf-8', errors='ignore') as file:
file_reader = csv.reader(file, delimiter='\t')
for row in file_reader:
updated_list = list()
for word in row[0].split():
updated_list.append(word.strip())
final_list.append(updated_list)
return final_list
def get_predictions(test_sentence, model):
''' The argument test sentence is a list containing a single list of words (we continue to use a nested list
because the functions we wrote for generating features for the training data expect a list
of list(s)), and model is a trained LogisticRegression model'''
n = len(test_sentence)
T = len(tag_dict)
Y_pred = np.empty((n-1,T,T))
Y_start = np.empty((1, T))
index = 0
for word in test_sentence:
if index == 0:
X = build_X([[get_features(index, test_sentence, "<S>", rare_words)]])
Y_start = model.predict_log_proba(X)
else:
for prev_tag in tag_dict.keys():
j = tag_dict[prev_tag]
X = build_X([[get_features(index, test_sentence, prev_tag, rare_words)]])
Y_pred[index-1][j] = model.predict_log_proba(X)
index += 1
return (Y_pred, Y_start)
def main():
brown_sentences = brown.tagged_sents(tagset='universal')
train_sentences = list()
train_tags = list()
count = 0
vocabulary_count = {}
for sentence in brown_sentences:
sentence_list = list()
label_list = list()
for tags in sentence:
tags = list(tags)
### adding new key to vocabulary counts dictionary, if the key exists add the count to it ######
vocabulary_count[tags[0]] = vocabulary_count.get(tags[0],0) + 1
sentence_list.append(tags[0])
label_list.append(tags[1])
train_sentences.append(sentence_list)
train_tags.append(label_list)
for word,count in vocabulary_count.items():
if count < 5:
rare_words.add(word)
######## training part 3 ####################
training_features = list()
for train_sentence in train_sentences:
indx = train_sentences.index(train_sentence)
word_feature_list = list()
i = 0
for word in train_sentence:
if i == 0:
prev_tag = '<S>'
else:
prev_tag = train_tags[indx][i-1]
word_feature = get_features(i,train_sentence,prev_tag,rare_words)
word_feature_list.append(word_feature)
i = i + 1
training_features.append(word_feature_list)
### calling the remove rare features #####
n = 5
remove_rare_output = remove_rare_features(training_features,n)
training_features_updated = remove_rare_output[0]
non_rare_set = remove_rare_output[1]
################ printing the feature dictionary for the non rare words ##############
index = 0
for feature_word in non_rare_set:
feature_dict[feature_word] = index
index = index + 1
tag_vocabulary = set(x for l in train_tags for x in l)
#### creating tag dictionary where the keys are the 17 tags and the values are the indices assigned to the tag ###
tag_idx = 0
for tag in tag_vocabulary:
tag_dict[tag] = tag_idx
tag_idx = tag_idx + 1
### calling the build_Y function using the tag_dict #####
Y_train = build_Y(train_tags)
##### calling build_X features using the training features #####
X_train = build_X(training_features_updated)
model = LogisticRegression(class_weight='balanced',solver='saga',multi_class='multinomial').fit(X_train, Y_train)
########## starting part 4 ################
### calling the load_test function to read the test file #########
test_data = load_test('test.txt')
#### iterating through the test sentences and calling get_predictions for each test_sentence ###########
## iterate through each test sentence, using get predictions() and viterbi() to decode the highest-probability sequence of tags
for test_sentence in test_data:
y_output = get_predictions(test_sentence,model)
y_start = y_output[1]
y_pred = y_output[0]
predicted_tag_sequence = viterbi(y_start, y_pred)
print(predicted_tag_sequence)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
ashutoshsenapati.noreply@github.com
|
381258816ff5fe8a9eeb8adbf2c8f7912bb17f52
|
103b8a33ba4d39443ae43e25132797ee2b2a7658
|
/sign/views.py
|
ab3d8098105296bc4539af9deeee67084f13eed0
|
[] |
no_license
|
qj1106/qj_test
|
2c624861c1c04b9d11e213c22481fa56a5aac3f6
|
a6ac2660345989ffb43141fbd5261274f8ea36a9
|
refs/heads/master
| 2021-01-23T22:31:25.834968
| 2017-09-09T09:29:24
| 2017-09-09T09:29:24
| 102,941,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,533
|
py
|
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.contrib import auth
from django.views.decorators.csrf import csrf_exempt
from sign.models import Event, Guest
# Create your views here.
def index(request):
# return HttpResponse("Hello Django!")
return render (request, "index.html")
def qj(request):
return render (request, "qj.html")
def login_action(request):
# if request.method == 'get':
# username = request.get.get ('username', '')
# password = request.get.get ('password', '')
# if username == 'admin' and password == '123':
# # return HttpResponse ('login success')
# return render (request, 'index.html', {'error': 'login success'})
# else:
    # return render (request, 'index.html', {'error': 'username or password error!'})
if request.method == 'GET':
username = request.GET.get ('username', '')
password = request.GET.get ('password', '')
user = auth.authenticate (username=username, password=password)
if user is not None:
            auth.login (request, user)  # log in
# if username == 'admin' and password == '123':
# return HttpResponse ('login success')
# return HttpResponseRedirect ('/event_manage/')
response = HttpResponseRedirect ('/event_manage/')
            # response.set_cookie ('user', username, 3600)  # set a browser cookie
            request.session['user'] = username  # record the session info in the browser
return response
else:
return render (request, 'index.html', {'error': 'username or password error!'})
else:
return HttpResponse ('login error')
# management page shown after successful login
# @login_required  # restrict page access
def event_manage(request):
    # query all event objects (data)
event_list = Event.objects.all ()
    # username = request.COOKIES.get ('user', '')  # read the cookie
    username = request.session.get ('user', '')  # read the session
return render (request, 'event_manage.html', {'user': username, 'events': event_list})
# return render (request, 'event_manage.html')
# search events by name
# @login_required
def search_name(request):
username = request.session.get ('user', '')
search_name = request.GET.get ('name', '')
    # if the search term is empty, return all data
if search_name == '':
event_lists = Event.objects.all()
return render (request, 'event_manage.html',{'events':event_lists})
else:
        # fuzzy match on the event name
event_list = Event.objects.filter (name__contains=search_name)
return render (request, 'event_manage.html', {'user': username, 'events': event_list})
# guest management
# @login_required
def guest_manage(request):
username = request.session.get ('user', '')
guest_list = Guest.objects.all ()
    # pagination, 5 per page: put the queried guest list guest_list into the Paginator class
paginator = Paginator (guest_list, 5)
    # get which page to display from the GET request
page = request.GET.get ('page')
    # get the data for the requested page; if page is not an integer, PageNotAnInteger is raised and the first page is returned; if it exceeds the maximum page, EmptyPage is raised and the last page is returned
try:
contacts = paginator.page (page)
except PageNotAnInteger:
contacts = paginator.page (1)
except EmptyPage:
contacts = paginator.page (paginator.num_pages)
return render (request, 'guest_manage.html', {'user': username, 'guests': contacts})
# guest page search
# @login_required
def guest_search(request):
username = request.session.get ('user', '')
guest_search = request.GET.get ('phone', '')
    # if guest_search is None:
    # return render (request, 'guest_manage.html', {'hint': 'please enter a phone number'})
guest_list = Guest.objects.filter (phone=guest_search)
paginator = Paginator (guest_list, 2)
page = request.GET.get ('page')
try:
contacts = paginator.page (page)
except PageNotAnInteger:
contacts = paginator.page (1)
except EmptyPage:
contacts = paginator.page (paginator.num_pages)
# contacts = paginator.page (page)
return render (request, 'guest_manage.html', {'user': username, 'guests': contacts})
# sign-in page
# @login_required
def sign_index(request, event_id):
event = get_object_or_404 (Event, id=event_id)
num = Guest.objects.filter (event_id=event_id).count ()
sign_num = Guest.objects.filter (event_id=event_id, sign=1).count ()
return render (request, 'sign_index.html', {'event': event, 'num': num, 'sign_num': sign_num})
# sign-in action
@csrf_exempt
# @login_required
def sign_index_action(request, event_id):
event = get_object_or_404 (Event, id=event_id)
phone = request.POST.get ('phone', '')
    # initial query
    num = Guest.objects.filter (event_id=event_id).count ()
    sign_num = Guest.objects.filter (event_id=event_id, sign=1).count ()
    # check whether the phone number exists
    result = Guest.objects.filter (phone=phone)
    if not result:
        return render (request, 'sign_index.html', {'event': event, 'hint': 'Invalid phone number! Please re-enter', 'num': num, 'sign_num': sign_num})
    # check whether this event and phone number match
    result = Guest.objects.filter (phone=phone, event_id=event_id)
    if not result:
        return render (request, 'sign_index.html', {'event': event, 'hint': 'This user is not registered for this event! Please verify', 'num': num, 'sign_num': sign_num})
    # check the sign-in status
    result = Guest.objects.get (phone=phone, event_id=event_id)
    if result.sign:
        return render (request, 'sign_index.html', {'event': event, 'hint': "User has already signed in!", 'num': num, 'sign_num': sign_num})
    else:
        Guest.objects.filter (phone=phone, event_id=event_id).update (sign='1')
        # query again after the update
        num = Guest.objects.filter (event_id=event_id).count ()
        sign_num = Guest.objects.filter (event_id=event_id, sign=1).count ()
        return render (request, 'sign_index.html', {'event': event, 'hint': 'Sign-in successful!', 'guest': result, 'num': num, 'sign_num': sign_num})
# log out
@login_required
def logout(request):
    auth.logout (request)  # log out of the session
response = HttpResponseRedirect ('/index/')
return response
|
[
"familyqj@163.com"
] |
familyqj@163.com
|
f54e21805de615c80002759056ee8b84f8513514
|
a8e9b9915ee14caf88097b77a9f4b36e9ea34995
|
/continuousMedian.py
|
bbce229b723c567c4a5e460fd90ca3c182fb3db8
|
[] |
no_license
|
pratyushgarg15/CodingPractice
|
b69f37c94dfd5033ea33c80bc1943a0c90717258
|
cd70ec50c52bbefdfed972fd230ff6a563bc4704
|
refs/heads/master
| 2020-09-07T14:48:51.033534
| 2019-11-10T16:34:09
| 2019-11-10T16:34:09
| 220,815,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,287
|
py
|
import math
class MinHeap:
def __init__(self, array):
# Do not edit the line below.
self.heap = self.buildHeap(array)
def buildHeap(self, array):
# Write your code here.
self.heap = array
for i in range(len(self.heap)//2,-1,-1):
self.minHeapify(i)
return array
def minHeapify(self,i):
minimum = i
if(self.left(i) < len(self.heap) and self.heap[self.left(i)] < self.heap[i] ):
minimum = self.left(i)
else:
minimum = i
if(self.right(i) < len(self.heap) and self.heap[self.right(i)] < self.heap[minimum] ):
minimum = self.right(i)
if(minimum != i):
self.heap[i], self.heap[minimum] = self.heap[minimum], self.heap[i]
self.minHeapify(minimum)
def siftDown(self):
self.minHeapify(0)
def siftUp(self):
i= len(self.heap)-1
while(self.parent(i)>=0 and self.heap[i] < self.heap[self.parent(i)]):
self.heap[i], self.heap[self.parent(i)] = self.heap[self.parent(i)], self.heap[i]
i = self.parent(i)
def peek(self):
return self.heap[0]
def remove(self):
self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
val = self.heap.pop()
self.siftDown()
return val
def insert(self, value):
self.heap.append(value)
self.siftUp()
def heapSort(self):
arr = []
while(len(self.heap) != 0):
arr.append(self.remove())
print(arr)
def parent(self, i):
return math.floor((i-1)/2)
def left(self, i):
return 2*i + 1
def right(self, i):
return 2*i + 2
def size(self):
return len(self.heap)
class MaxHeap:
def __init__(self, array):
# Do not edit the line below.
self.heap = self.buildHeap(array)
def buildHeap(self, array):
# Write your code here.
self.heap = array
for i in range(len(self.heap)//2,-1,-1):
self.maxHeapify(i)
return array
def maxHeapify(self,i):
maximum = i
if(self.left(i) < len(self.heap) and self.heap[self.left(i)] > self.heap[i] ):
maximum = self.left(i)
else:
maximum = i
if(self.right(i) < len(self.heap) and self.heap[self.right(i)] > self.heap[maximum] ):
maximum = self.right(i)
if(maximum != i):
self.heap[i], self.heap[maximum] = self.heap[maximum], self.heap[i]
self.maxHeapify(maximum)
def siftDown(self):
self.maxHeapify(0)
def siftUp(self):
i= len(self.heap)-1
while(self.parent(i)>=0 and self.heap[i] > self.heap[self.parent(i)]):
self.heap[i], self.heap[self.parent(i)] = self.heap[self.parent(i)], self.heap[i]
i = self.parent(i)
def peek(self):
return self.heap[0]
def remove(self):
self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
val = self.heap.pop()
self.siftDown()
return val
def insert(self, value):
self.heap.append(value)
self.siftUp()
def heapSort(self):
arr = []
while(len(self.heap) != 0):
arr.append(self.remove())
print(arr)
def parent(self, i):
return math.floor((i-1)/2)
def left(self, i):
return 2*i + 1
def right(self, i):
return 2*i + 2
def size(self):
return len(self.heap)
class continuousMedianHandler:
def __init__(self):
self.median = None
self.lowerHalf = MaxHeap([])
self.upperHalf = MinHeap([])
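    # Invariant (added commentary): lowerHalf is a max-heap on the smaller half
    # of the values and upperHalf a min-heap on the larger half; insertVal keeps
    # their sizes within one of each other, so the median sits at a heap root.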
def insertVal(self, val):
if(self.lowerHalf.size() == 0):
self.lowerHalf.insert(val)
elif(val > self.lowerHalf.peek()):
self.upperHalf.insert(val)
else:
self.lowerHalf.insert(val)
        # Balancing the two halves
if(self.lowerHalf.size() - self.upperHalf.size() >= 2):
self.upperHalf.insert(self.lowerHalf.remove())
if(self.upperHalf.size() - self.lowerHalf.size() >= 2):
self.lowerHalf.insert(self.upperHalf.remove())
# Calculating Median
if(self.lowerHalf.size() < self.upperHalf.size()):
self.median = self.upperHalf.peek()
elif(self.lowerHalf.size() > self.upperHalf.size()):
self.median = self.lowerHalf.peek()
else:
#self.median = (self.lowerHalf.peek() + self.upperHalf.peek())/2
self.median = self.lowerHalf.peek()
def getMedian(self):
return self.median
if __name__ == '__main__':
with open('Median.txt') as file:
arr = [int(line) for line in file]
med = continuousMedianHandler()
res = 0
for i in range(len(arr)):
med.insertVal(arr[i])
res += med.getMedian()
#print(med.getMedian(),end =',')
print(res%10000)
|
[
"pratyushgarg2017@gmail.com"
] |
pratyushgarg2017@gmail.com
|
b5a7140d7dc130dcdf39d4584f59b48f6e8762d8
|
df901b7165323f1b03bc690d639c603bbeac9d5c
|
/airflow/providers/papermill/operators/papermill.py
|
8e5cfe6beb326cd1ba26c3823497e9dcfd3121ff
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
nikspatel03/airflow
|
04d81ccbe0a53a95e058afb3f6fc98427a65ff4b
|
39375c01ced008a715bb2a540df31fe2cf908bbb
|
refs/heads/master
| 2020-12-27T01:20:18.614360
| 2020-02-01T17:11:35
| 2020-02-01T17:11:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,574
|
py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional
import attr
import papermill as pm
from airflow.lineage.entities import File
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
@attr.s(auto_attribs=True)
class NoteBook(File):
type_hint: Optional[str] = "jupyter_notebook"
parameters: Optional[Dict] = {}
meta_schema: str = __name__ + '.NoteBook'
class PapermillOperator(BaseOperator):
"""
Executes a jupyter notebook through papermill that is annotated with parameters
:param input_nb: input notebook (can also be a NoteBook or a File inlet)
:type input_nb: str
:param output_nb: output notebook (can also be a NoteBook or File outlet)
:type output_nb: str
:param parameters: the notebook parameters to set
:type parameters: dict
"""
supports_lineage = True
@apply_defaults
def __init__(self,
input_nb: Optional[str] = None,
output_nb: Optional[str] = None,
parameters: Optional[Dict] = None,
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if input_nb:
self.inlets.append(NoteBook(url=input_nb,
parameters=parameters))
if output_nb:
self.outlets.append(NoteBook(url=output_nb))
def execute(self, context):
if not self.inlets or not self.outlets:
raise ValueError("Input notebook or output notebook is not specified")
for i in range(len(self.inlets)):
pm.execute_notebook(self.inlets[i].url, self.outlets[i].url,
parameters=self.inlets[i].parameters,
                                progress_bar=False, report_mode=True)
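# A minimal usage sketch (hypothetical DAG and notebook paths, not part of this module):
#
#     from airflow import DAG
#     from airflow.utils.dates import days_ago
#
#     with DAG("papermill_example", schedule_interval=None, start_date=days_ago(1)) as dag:
#         run_nb = PapermillOperator(
#             task_id="run_notebook",
#             input_nb="/tmp/hello_world.ipynb",
#             output_nb="/tmp/out-{{ execution_date }}.ipynb",
#             parameters={"msgs": "Ran from Airflow at {{ execution_date }}!"},
#         )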
|
[
"noreply@github.com"
] |
nikspatel03.noreply@github.com
|
f6b9d1c4d566ee024922349938ea97c3771a96fc
|
c8fcb8561ca9037969329b8aa7c5650a3405ff4a
|
/mhb-py/mhb_createjson_09-ratio_registr.py
|
e3391b79ab232a21c674acaafb522ee815264aa3
|
[] |
no_license
|
dbrechet/moocheartbeat
|
b0d15ad89f5c301abad302b0cea88651a1b34dd8
|
af2f8652c27d37a26ee72a3e9930d32e1931e2c6
|
refs/heads/master
| 2021-01-02T09:43:29.673584
| 2015-01-12T10:12:35
| 2015-01-12T10:12:35
| 24,980,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,021
|
py
|
#!/usr/bin/env python
#title :mhb_createjson_ratio_registr.py
#description :This will make a query to the database and create a json file to store the data. The data is the total/unique ratio per activity type.
#author :dbrechet
#date :20141014
#version :0.1
#usage :python mhb_createjson_ratio_registr.py
#notes :
#python_version :2.7.6
#==============================================================================
# Import the modules needed to run the script.
import sys
import os
import MySQLdb
import io
import json
from datetime import datetime
from datetime import timedelta
from time import mktime
from array import *
# Create dict structure of the json file.
data_dict_name =[]
data_dict_value = []
# genneral info
data_dict_name.append("type")
data_dict_value.append("serial")
data_dict_name.append("theme")
data_dict_value.append("none")
data_dict_name.append("pathToImages")
data_dict_value.append("http://www.amcharts.com/lib/3/images/")
#legend
item_name = []
item_value = []
item_name.append("equalWidths")
item_value.append("true")
item_name.append("periodValueText")
item_value.append("total: [[value.sum]]")
item_name.append("position")
item_value.append("top")
item_name.append("valueAlign")
item_value.append("left")
item_name.append("valueWidth")
item_value.append(100)
legend_dict = dict(zip(item_name, item_value))
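# Illustration: legend_dict now holds
# {'equalWidths': 'true', 'periodValueText': 'total: [[value.sum]]',
#  'position': 'top', 'valueAlign': 'left', 'valueWidth': 100}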
data_dict_name.append("legend")
data_dict_value.append(legend_dict)
# Open database connection
db = MySQLdb.connect(host='localhost', user='mhbuser', passwd='mhb23!', db='moocheartbeat')
# prepare a cursor object using cursor() method
cursor = db.cursor()
#"graphs"
dict_graphs =[]
sql_sessions = "SELECT distinct(session) FROM mhb_coursera WHERE 1"
cursor.execute(sql_sessions)
rows_sessions = cursor.fetchall()
for row_ses in rows_sessions:
item_graph_name = []
item_graph_value = []
item_graph_name.append("balloonText")
item_graph_value.append("<span style='font-size:10px; color:#000000;'><b>[[title]]: [[value]]</b></span>")
item_graph_name.append("fillAlphas")
item_graph_value.append(0.6)
item_graph_name.append("lineAlpha")
item_graph_value.append(0.4)
item_graph_name.append("title")
item_graph_value.append(row_ses[0])
item_graph_name.append("type")
item_graph_value.append("column")
item_graph_name.append("valueField")
item_graph_value.append(row_ses[0])
dict_graph = dict(zip(item_graph_name,item_graph_value))
dict_graphs.append(dict_graph)
data_dict_name.append("graphs")
data_dict_value.append(dict_graphs)
data_dict_name.append("plotAreaBorderAlpha")
data_dict_value.append(0)
data_dict_name.append("marginTop")
data_dict_value.append(10)
data_dict_name.append("marginLeft")
data_dict_value.append(0)
data_dict_name.append("marginBottom")
data_dict_value.append(0)
# dataProvider
datadict = []
sql_item_min_max = """SELECT min(item) as min_item, max(item) as max_item
FROM mhb_coursera
WHERE 1"""
cursor.execute(sql_item_min_max)
rows_minmax = cursor.fetchall()
for r in rows_minmax:
min_i = r[0]
max_i = r[1]
d = min_i
while d <= max_i:
#sql_item = """SELECT item
# FROM mhb_coursera
# WHERE 1
# GROUP BY item ORDER BY item ASC"""
#cursor.execute(sql_item)
#rows = cursor.fetchall()
#for row in rows:
data_item = d.isoformat()
dw = d + timedelta(days=7)
data_item_week = dw.isoformat()
sql_session = """SELECT session, registrations, assignment_total/assignment_unique as assignment_ratio_registr,
forum_comment_total/forum_comment_unique as forum_comment_ratio_registr,
forum_post_total/forum_post_unique as forum_post_ratio_registr,
lecture_download_total/lecture_download_unique as lecture_download_ratio_registr,
lecture_view_total/lecture_view_unique as lecture_view_ratio_registr,
quiz_exam_total/quiz_exam_unique as quiz_exam_ratio_registr,
quiz_homework_total/quiz_homework_unique as quiz_homework_ratio_registr,
quiz_quiz_total/quiz_quiz_unique as quiz_quiz_ratio_registr,
quiz_survey_total/quiz_survey_unique as quiz_survey_ratio_registr,
quiz_video_total/quiz_video_unique as quiz_video_ratio_registr FROM mhb_coursera
WHERE item between '""" + data_item + "' AND '" + data_item_week + "' ORDER BY session ASC"
cursor.execute(sql_session)
rows_session = cursor.fetchall()
sessions = []
regs = []
sessions.append ('item')
regs.append (mktime(d.timetuple()))
for row_s in rows_session:
nvrow = 0
for row_c in row_s:
if row_c != row_s[0]:
if row_c is None:
row_c = 0
row_c= float(row_c)
nvrow = nvrow + row_c
if nvrow != 0:
sessions.append(row_s[0])
regs.append (round(nvrow,2))
sess_dict = dict(zip(sessions, regs))
datadict.append(sess_dict)
d = dw
data_dict_name.append("dataProvider")
data_dict_value.append(datadict)
#valueAxes
list_va = []
item_va_name = []
item_va_value = []
item_va_name.append("stackType")
item_va_value.append("regular")
item_va_name.append("gridAlpha")
item_va_value.append(0.07)
item_va_name.append("position")
item_va_value.append("left")
item_va_name.append("title")
item_va_value.append("Ratio + Registration")
va_dict = dict(zip(item_va_name, item_va_value))
list_va.append(va_dict)
data_dict_name.append("valueAxes")
data_dict_value.append(list_va)
#"chartScrollbar"
item_chartScrollbar_name = []
item_chartScrollbar_value = []
dict_chartScrollbar = dict(zip(item_chartScrollbar_name, item_chartScrollbar_value))
data_dict_name.append("chartScrollbar")
data_dict_value.append(dict_chartScrollbar)
#"chartCursor"
item_chartCursor_name = ["cursorAlpha"]
item_chartCursor_value = [0]
dict_chartCursor = dict(zip(item_chartCursor_name, item_chartCursor_value))
data_dict_name.append("chartCursor")
data_dict_value.append(dict_chartCursor)
data_dict_name.append("categoryField")
data_dict_value.append("item")
#"categoryAxis"
item_categoryAxis_name = []
item_categoryAxis_value = []
item_categoryAxis_name.append("startOnAxis")
item_categoryAxis_value.append("true")
item_categoryAxis_name.append("axisColor")
item_categoryAxis_value.append("#DADADA")
item_categoryAxis_name.append("gridAlpha")
item_categoryAxis_value.append(0.07)
item_categoryAxis_name.append("equalSpacing")
item_categoryAxis_value.append("true")
item_categoryAxis_name.append("parseDates")
item_categoryAxis_value.append("true")
item_categoryAxis_name.append("minPeriod")
item_categoryAxis_value.append("7DD")
item_categoryAxis_name.append("twoLineMode")
item_categoryAxis_value.append("true")
dict_categoryAxis = dict(zip(item_categoryAxis_name, item_categoryAxis_value))
data_dict_name.append("categoryAxis")
data_dict_value.append(dict_categoryAxis)
data_dict = dict(zip(data_dict_name, data_dict_value))
#print json.dumps(data_dict, sort_keys=True, indent=4, separators=(',', ': '))
with io.open('mhb-data/09_courses-ratio-registr.json', 'w', encoding='utf-8') as f:
f.write(unicode(json.dumps(data_dict, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)))
jsfilename ='mhb-js/09_courses-ratio-registr.js'
jsmessage = """
var data = d3.json("mhb-data/09_courses-ratio-registr.json", function(error, data){
var chart = AmCharts.makeChart("chartdiv", data);
});"""
with io.open(jsfilename, 'w', encoding='utf-8') as fj:
fj.write(unicode(jsmessage))
htmlfilename = '09_test-courses-ratio-registr.html'
htmlmessage = """
<!DOCTYPE html>
<html>
<head>
<title>Iter 03: 09, Courses Ratio | amCharts</title>
<meta name="description" content="chart created using amCharts live editor" />
<!-- amCharts javascript sources -->
<script type="text/javascript" src="mhb-libscripts/amcharts.js"></script>
<script type="text/javascript" src="mhb-libscripts/serial.js"></script>
<script type="text/javascript" src="mhb-libscripts/none.js"></script>
<script type="text/javascript" src="mhb-libscripts/d3.min.js"></script>
<!-- amCharts javascript code -->
<script type="text/javascript" src="mhb-js/09_courses-ratio-registr.js"></script>
<link rel="stylesheet" type="text/css" href="mhb-css/AMChart.css" media="screen" />
</head>
<body>
<a href="index.html">Back</a>
<div id="chartdiv"></div>
</body>
</html>
"""
with io.open(htmlfilename, 'w', encoding='utf-8') as fh:
fh.write(unicode(htmlmessage))
#data_dict_name.append()
#data_dict_value.append()
# disconnect from server
db.close()
|
[
"david.brechet@epfl.ch"
] |
david.brechet@epfl.ch
|
f3a14d75cb6f74deff6199083aa4c00c43f44e3c
|
52084ad3ce08eed62fb8286964624eec4f775fb3
|
/gig_finder/musicians/apps.py
|
460a778a3f1c8289055aecfd4d1aee0b10494ebf
|
[] |
no_license
|
armoredpigeon1111/gig_finder_backend
|
efd954cb384b3fad0099a5f2094b7785b3179e41
|
97aa8f6ce19e700b2e6a906e43fbd33d4cae364c
|
refs/heads/main
| 2023-09-01T14:24:29.862928
| 2021-10-12T23:19:39
| 2021-10-12T23:19:39
| 414,647,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django.apps import AppConfig
class MusiciansConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'musicians'
|
[
"armoredpigeon1111@outlook.com"
] |
armoredpigeon1111@outlook.com
|
dfb8e76f0fccedb60e1e39eeb3046d19447bd0fb
|
dad17e34a71a097ccf9b6237b34ee733699a8f83
|
/exercise_16.py
|
2bd473f80df200368fd1ccf1bcc2ffd30d394c57
|
[] |
no_license
|
artazm/practice-python-sol
|
0cba85c31f9f45f0c935729f24299ae06ef8523c
|
299971cc07811dcf07d9b8f7c9683a1054c3f45d
|
refs/heads/master
| 2021-01-16T06:51:24.129086
| 2020-02-25T14:13:36
| 2020-02-25T14:13:36
| 243,014,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# Password Generator
# strength levels: weak (1-3), medium (4-6), strong (7-9) items from the pool
import random
passw = ['write', 'a', 'password', 'generator', 'python', 'creative', 'mix']
num = [str(i) for i in range(0, 10)]  # digits '0'-'9'; list(str(range(0, 10))) would yield the characters of "range(0, 10)"
symbols = ['!', '@', '#', '$', '%', '&', '*']
pass_gen = passw + symbols + num
i = 0
strength = input('How strong do you want your password? ')
if strength == 'weak':
i = random.randint(1, 3)
elif strength == 'medium':
i = random.randint(4, 6)
elif strength == 'strong':
i = random.randint(7, 9)
gen_pass = random.sample(pass_gen, i)
print(''.join(gen_pass))
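# Example run (illustrative only; output varies with the random sampling):
#   How strong do you want your password? medium
#   mix%7a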
|
[
"noreply@github.com"
] |
artazm.noreply@github.com
|
c7a690e0922c0f2a1823e095494aaedf862b75aa
|
1e297bf3ed06537471d5d3f7109c6a9c8caf0827
|
/HelloWorld/oop_init.py
|
bf168233ee1be972f9188c360481b5c930df3396
|
[] |
no_license
|
holyhan/Python-practice
|
0ac0e1c44982c2e241e964024416439e5b27d863
|
82423550445b1f3d61594fb2afd99cbef57795e4
|
refs/heads/master
| 2020-03-19T08:00:41.405680
| 2018-09-12T08:46:50
| 2018-09-12T08:46:50
| 136,168,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
class Person:
def __init__(self, name):
self.name = name
def say_hi(self):
print('Hello, my name is', self.name)
p = Person('Swaroop')
p.say_hi()
# The previous two lines can also be written as
# Person('Swaroop').say_hi()
|
[
"hanyang08@meituan.com"
] |
hanyang08@meituan.com
|
97c119f4d923746ea4284cbf0f78cfa18e90c6e2
|
dcd6eb3161a4fd906f15c36b92bb562edc330f33
|
/PycharmProjects/Matplotlib/test/test_1.py
|
51c9b7921856c8a53f16bf9d0442a4d237020e95
|
[] |
no_license
|
ZHANGDINGREN123/numpy-pandas
|
50c0ed8a0b6e496a6f9ca365078370a5324eb47a
|
61c2b312f6a43c15996f6189dcfc4083a7be501c
|
refs/heads/master
| 2021-09-14T04:13:09.570371
| 2018-05-08T09:22:53
| 2018-05-08T09:22:53
| 106,893,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
a = [1, 2, 3, 4, 3, 2, 1, 1]
a.append(0)  # append 0
a.insert(2, 100)
a.remove(1)
print(a[len(a) - 2])
print("length of array a:", len(a))
print(a)
print(a[-1])
print("index of 4:", a.index(4))
print("occurrences of 1:", a.count(1))
a.sort()  # sort in ascending order
print("sorted ascending:", a)
|
[
"877905831@qq.com"
] |
877905831@qq.com
|
fd98185f7e9703dffba7abb5f1031c7a0ea7b835
|
da96d29b457eb123c01274efea562448df105fc6
|
/chapter4/st8.py
|
8eaec17aec53c648c569598be9a899ae03e3fa6e
|
[] |
no_license
|
Alonsovau/sketches
|
a1336f1a7909ad059744c4613ab992c8361264f5
|
dfb072086cc813d7409fa11393ebaad6e26db180
|
refs/heads/master
| 2021-01-19T22:29:15.827896
| 2017-10-19T15:37:28
| 2017-10-19T15:37:28
| 88,761,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Skipping the first part of an iterable
with open('somefile.txt') as f:
for line in f:
print(line, end='')
print()
from itertools import dropwhile
with open('somefile.txt') as f:
for line in dropwhile(lambda l: l.startswith('#'), f):
print(line, end='')
print()
from itertools import islice
items = ['a', 'b', 'c', 1, 2, 4]
for x in islice(items, 3, None):
print(x)
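# Expected output of the islice example above: 1, 2, 4 (everything from index 3 on);
# dropwhile, by contrast, only skips the leading lines that start with '#'.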
|
[
"alonsovau@outlook.com"
] |
alonsovau@outlook.com
|
1cfac28232ffbf3f5da95399128712f4d11ba2de
|
7ed748584247a743c30de0c790ccbcb60ffef574
|
/Crawler/study/class8.py
|
b7d0248be2c0072ec458ea7acd938c98e06ee272
|
[] |
no_license
|
quqiao/eshare
|
18f28640c9814d21ec44a69a3acb6c0968243daa
|
fe221e4187a26da9b5f93d5f68bf6ff64d581d73
|
refs/heads/master
| 2020-04-01T19:31:09.484384
| 2019-04-25T01:52:37
| 2019-04-25T01:52:37
| 153,551,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
# coding=utf-8
'''
Created on 2019-04-12
@author: quqiao
Lesson 8: a first look at processes and threads
'''
# Process: a program cannot run on its own; only once it is loaded into memory and the
# system has allocated resources to it can it run, and such an executing program is a process.
# Program vs. process: a program is a collection of instructions, a static textual description
# of a process; a process is one execution of a program, a dynamic concept.
# A process is the operating system's abstraction of a running program, i.e. an abstraction
# of the processor, main memory and I/O devices. The OS can run many processes at once,
# and each one appears to have exclusive use of the hardware.
# Concurrent execution: the instructions of one process are interleaved with those of another.
# Context switch: the CPU appears to run several processes concurrently; this is achieved by
# the processor switching between processes, a mechanism the OS calls a context switch.
# Thread: the smallest unit of execution the operating system can schedule; it is contained
# in a process and is the actual working unit inside it.
# A process can consist of several thread execution units; each thread runs in the context
# of its process and shares the same code and global data.
# Concurrency vs. parallelism:
# Concurrency: a system in which several activities are in progress at the same time.
# Parallelism: using concurrency to make a system run faster; parallelism can be exploited
# at several levels of abstraction in an operating system.
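# A minimal sketch of the ideas above (an assumed example, not part of the original lesson):
import os
import threading


def work(n):
    # every thread runs inside the same process and shares its memory
    print('thread %d running in process %d' % (n, os.getpid()))


threads = [threading.Thread(target=work, args=(i,)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()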
|
[
"trnwk8r@dingtalk.com"
] |
trnwk8r@dingtalk.com
|
ba34b1efd290267f29bc5f2d582adaa0c9e66d9f
|
f5a351cb77d5c8e1e1f4d53fd24e9654d26e55f7
|
/examples/bokeh/bokeh_plot_violin.py
|
98b569b926785c40ad79922fb4b5fb2357318d3e
|
[
"Apache-2.0"
] |
permissive
|
JulianPedro/arviz
|
f8e37e03eb07247dbdd77908d8bb5d8321319249
|
92f9e5bcda03468c228735fe16a52c2f7e616762
|
refs/heads/main
| 2023-08-11T17:45:17.427084
| 2021-10-03T10:43:26
| 2021-10-03T10:43:26
| 413,251,708
| 1
| 0
|
Apache-2.0
| 2021-10-04T02:33:59
| 2021-10-04T02:33:58
| null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
"""
Violinplot
==========
_thumb: .2, .8
"""
import arviz as az
data = az.load_arviz_data("non_centered_eight")
ax = az.plot_violin(data, var_names=["mu", "tau"], backend="bokeh")
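# ax holds the bokeh figure(s); passing show=True to az.plot_violin should render them
# directly in a browser (assuming a recent arviz with bokeh support installed).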
|
[
"noreply@github.com"
] |
JulianPedro.noreply@github.com
|
d7d72f77c945542c4ce6e6ffd6dd52fc758c5f4a
|
1bcb966740f47c0edc23e9b05afec55f2bcae36a
|
/app/share/dbopear/dbItems.py
|
91bb279962de43eaf9ac657f7002bf23c5741bd5
|
[] |
no_license
|
East196/diabloworld
|
0d2e9dbf650aa86fcc7b9fc1ef49912e79adb954
|
d7a83a21287ed66aea690ecb6b73588569478be6
|
refs/heads/master
| 2021-05-09T12:15:31.640065
| 2018-02-04T15:16:54
| 2018-02-04T15:16:54
| 119,007,609
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
#coding:utf8
'''
Created on 2011-7-14
Operations on the item table (tb_item)
@author: lan (www.9miao.com)
'''
from gfirefly.dbentrust.dbpool import dbpool
from pymysql.cursors import DictCursor
all_ItemTemplate = {}  # all item-template information
ALL_SETINFO = {}
def getAll_ItemTemplate():
"""获取所有的物品信息
"""
global all_ItemTemplate
sql="select * from `tb_item_template`"
conn = dbpool.connection()
cursor = conn.cursor(DictCursor)
cursor.execute(sql)
result=cursor.fetchall()
cursor.close()
conn.close()
for _item in result:
all_ItemTemplate[_item['id']] = _item
def getAllsetInfo():
    '''Fetch all equipment-set information
'''
global ALL_SETINFO
sql = "SELECT * from tb_equipmentset;"
conn = dbpool.connection()
cursor = conn.cursor(DictCursor)
cursor.execute(sql)
result = cursor.fetchall()
cursor.close()
conn.close()
for setinfo in result:
ALL_SETINFO[setinfo['id']] = setinfo
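# Usage sketch (assumes the gfirefly dbpool has been initialized elsewhere with the
# connection settings, e.g. via dbpool.initPool(...)):
# getAll_ItemTemplate()
# getAllsetInfo()
# print(all_ItemTemplate.get(1), ALL_SETINFO.get(1))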
|
[
"2901180515@qq.com"
] |
2901180515@qq.com
|
0b3934663c0b429b89e7064ed4f2eb1e29285a06
|
ea85259d8dff0fd8257e9132bdd4bf9d7b506a64
|
/conf.py
|
78e9dcbf9f97f8a410e92af41fd9f1ccdc48807a
|
[] |
no_license
|
KartavyaKothari/DOCSFOR-
|
bd15fc6ea7a92ac5e7f0310693d2dc204f89c88c
|
59ebbdf4dc5d181de00722e82460279bf5a6e730
|
refs/heads/master
| 2022-11-24T06:48:49.859648
| 2019-11-27T02:58:53
| 2019-11-27T02:58:53
| 224,326,527
| 0
| 0
| null | 2022-11-21T22:41:22
| 2019-11-27T02:23:55
|
TeX
|
UTF-8
|
Python
| false
| false
| 1,942
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Automated Essay Grader'
copyright = '2019, Kartavya, Aditya, Shreyansh'
author = 'Kartavya, Aditya, Shreyansh'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
|
[
"kartavya@cse.iitb.ac.in"
] |
kartavya@cse.iitb.ac.in
|
5405576a4d430f4961545624fd7cb1ee1c920862
|
2e816dc7654fc8e124fd9adfe26a6c9adcf92542
|
/parsetext.py
|
07f96d6eb31f43e8cfba36707bafa788ea1b2aaa
|
[] |
no_license
|
carlosmcastro/PlanetAPP-BACKEND-
|
9db8d458a5d388dcd3e7b66737ed98675ebbe57d
|
583511fe3d5f7295f2120a22c3d208b84b21bd0a
|
refs/heads/master
| 2021-02-11T09:02:11.033396
| 2020-03-11T17:45:57
| 2020-03-11T17:45:57
| 244,475,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,625
|
py
|
#encoding: utf-8
from container_constantes import ASTRO_DATA, EQUIVALENCIAS
from exoplanets import concor
import pandas as pd
data=pd.read_csv(ASTRO_DATA)
#Data from circumbinary stellar systems (pl_cbflag == 1) has been removed.
data=data.drop(data.loc[data['pl_cbflag']==1].index)
#Main search function.
def whopl(who_type, elec, elec_filt):
    filt_data = data.copy()  # copy
    elec = [concor(i, EQUIVALENCIAS) for i in elec]  # if a key was mistyped, it is corrected.
elecciones = {EQUIVALENCIAS[i] : i for i in elec}
if who_type=='equal':
for i in elec_filt:
filt_data=filt_data[filt_data[EQUIVALENCIAS[concor(i, EQUIVALENCIAS)]]==elec_filt[i]]
else:
for i in elec_filt:
filt_data=filt_data[(filt_data[EQUIVALENCIAS[concor(i, EQUIVALENCIAS)]]>elec_filt[i][0]) &
(filt_data[EQUIVALENCIAS[concor(i, EQUIVALENCIAS)]]<elec_filt[i][1])]
return filt_data[elecciones.keys()].drop_duplicates().rename(columns=elecciones).to_dict('list')
#Finds the planets that satisfy the exact condition.
#Example: whopl_equal('nombre_planeta', 'edad_estrella', edad_estrella=1.60)
def whopl_equal(*args ,**kargs):
return whopl('equal', args, kargs)
#Finds the planets whose values fall inside an interval.
#Example 1: whopl_inequal('nombre_planeta', 'distancia_estrella', edad_estrella=(1,2), distancia_estrella=(2, 25))
#Example 2: whopl_inequal('nombre_planeta', 'distancia_estrella', distancia_estrella=(7000, float('inf')))
#Use float('-inf') or float('inf') to set a lower or upper bound.
def whopl_inequal(*args, **kargs):
return whopl('inequal', args, kargs)
|
[
"noreply@github.com"
] |
carlosmcastro.noreply@github.com
|
a268bbdbc9b761a6f089807becb2a5f481bbd7be
|
ecfd331cfc18ca5cdddabea142ba1be8cc933e05
|
/battle/classes/magic.py
|
8766bea58303a38b5478fc7203a2b0b7c34f6381
|
[] |
no_license
|
jaybane-git/phython-source
|
c232a65c007256f0676ba61119186fcb04738516
|
45697595a91778cf830c1f4c89c3aac679b8e9ee
|
refs/heads/main
| 2023-01-21T03:17:34.712333
| 2020-12-01T17:37:29
| 2020-12-01T17:37:29
| 317,559,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
class Spell:
def __init__(self, name, cost, dmg, type):
self.name = name
self.cost = cost
self.dmg = dmg
self.type = type
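# Example (hypothetical values):
#   fireball = Spell('Fireball', cost=10, dmg=25, type='black')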
|
[
"jaybane@hotmail.com"
] |
jaybane@hotmail.com
|
1b91fdde98fe3757f8f4dd5d71b99236dbfdcc6c
|
1cc37210c56ad03dfa2d8364fa448797b69d68e6
|
/Wettbewerbe/migrations/0002_auto_20170311_1954.py
|
c5ca12147dc0ecc5d4314cbe1be8dc94163825a9
|
[
"MIT"
] |
permissive
|
wmles/olymp
|
f536ad8a2054930995301e9110add74fcb824259
|
97b1a256982c2a75c39ba3a855b63a147d4409c5
|
refs/heads/master
| 2020-05-18T14:40:26.216562
| 2017-08-27T13:46:44
| 2017-08-27T13:48:35
| 84,254,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,612
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-11 19:54
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Wettbewerbe', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='artkategorie',
old_name='datum_erstellt',
new_name='zeit_erstellt',
),
migrations.RenameField(
model_name='artteilnahme',
old_name='datum_erstellt',
new_name='zeit_erstellt',
),
migrations.RenameField(
model_name='artveranstaltung',
old_name='datum_erstellt',
new_name='zeit_erstellt',
),
migrations.RenameField(
model_name='person',
old_name='datum_erstellt',
new_name='zeit_erstellt',
),
migrations.RenameField(
model_name='teilnahme',
old_name='datum_erstellt',
new_name='zeit_erstellt',
),
migrations.RenameField(
model_name='unterseite',
old_name='datum_erstellt',
new_name='zeit_erstellt',
),
migrations.RenameField(
model_name='veranstaltung',
old_name='datum_erstellt',
new_name='zeit_erstellt',
),
migrations.RenameField(
model_name='wettbewerbskategorie',
old_name='datum_erstellt',
new_name='zeit_erstellt',
),
migrations.AddField(
model_name='artkategorie',
name='slug',
field=models.SlugField(blank=True, max_length=30),
),
migrations.AddField(
model_name='artkategorie',
name='zeit_geaendert',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='artteilnahme',
name='slug',
field=models.SlugField(blank=True, max_length=30),
),
migrations.AddField(
model_name='artteilnahme',
name='zeit_geaendert',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='artveranstaltung',
name='slug',
field=models.SlugField(blank=True, max_length=30),
),
migrations.AddField(
model_name='artveranstaltung',
name='zeit_geaendert',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='person',
name='nutzer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='person',
name='slug',
field=models.SlugField(blank=True, max_length=30),
),
migrations.AddField(
model_name='person',
name='zeit_geaendert',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='teilnahme',
name='slug',
field=models.SlugField(blank=True, max_length=30),
),
migrations.AddField(
model_name='teilnahme',
name='zeit_geaendert',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='unterseite',
name='slug',
field=models.SlugField(blank=True, max_length=30),
),
migrations.AddField(
model_name='unterseite',
name='zeit_geaendert',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='veranstaltung',
name='slug',
field=models.SlugField(blank=True, max_length=30),
),
migrations.AddField(
model_name='veranstaltung',
name='zeit_geaendert',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='wettbewerbskategorie',
name='slug',
field=models.SlugField(blank=True, max_length=30),
),
migrations.AddField(
model_name='wettbewerbskategorie',
name='zeit_geaendert',
field=models.DateTimeField(auto_now=True),
),
]
|
[
"ilja.goethel@arcor.de"
] |
ilja.goethel@arcor.de
|
dad300ee7448615a5b4d43887b4c87e13ffb1cc0
|
80d9d8263a62fbbd730a7d44cc926a0cb791f2c8
|
/Solver.py
|
19bc4e2617a362442332c010f32fd225d451c5c2
|
[] |
no_license
|
fazzadr/Skyline_in_Crowdsourcing_with_Imprecise_Comparisons
|
8fc02d5607b6a2770ac21385540d127447b2b42c
|
90aa96acb3dd97e1f26d451affa45f2cebc32b55
|
refs/heads/main
| 2023-07-12T04:22:08.918828
| 2021-08-17T17:12:17
| 2021-08-17T17:12:17
| 397,330,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41,076
|
py
|
import math
import numpy as np
import random
import itertools as it
import pprint as pp
class Solver:
# crowdsourcing comparisons made by one sky algorithm
###### sky_comparisons = set()
# set_comparisons__point_1__point_2__dim = set()
# first sorted dim
# first_sorted_dimension = []
# #comparisons needed to sort the first dimension
# set_comparisons__first_dim = set()
def __init__(self):
#
self.set_comparisons__point_1__point_2__dim = set()
#
self.num_crowd_phases = 0
#
self.first_sorted_dimension = []
self.set_comparisons__first_dim = set()
self.num_crowd_phases__first_dim = 0
#
#
return
#
##
###
def get_num_comparisons_performed_by_the_last_method(self):
return len(self.set_comparisons__point_1__point_2__dim)
#
##
###
def round_robin_tournament_scores(self, list__point, d, worker):
"""
        RR-Tournament: count, for every point, its wins over all pairwise comparisons on dimension d.
:param list__point:
:param d:
:return:
"""
map__point__num_wins = dict(zip(list__point, [0 for x in range(len(list__point))]))
for i in range(len(list__point)):
for j in range(i + 1, len(list__point)):
if worker.is_point_1_smaller_than_point_2(list__point[i], list__point[j], d,
self.set_comparisons__point_1__point_2__dim):
map__point__num_wins[list__point[j]] += 1
else:
map__point__num_wins[list__point[i]] += 1
return map__point__num_wins
#
def get_theoretical_num_comps_in_a_single_round_in_WC_scenario(self, n):
#
theoretical_best_value_for_s_in_WC_scenario = 64 if n <= 64 else math.floor(math.sqrt(2 * n))
#
theoretical_num_comps_in_this_round_in_WC_scenario = theoretical_best_value_for_s_in_WC_scenario * (
theoretical_best_value_for_s_in_WC_scenario - 1) / 2
theoretical_num_comps_in_this_round_in_WC_scenario += (n - theoretical_best_value_for_s_in_WC_scenario)
#
return theoretical_num_comps_in_this_round_in_WC_scenario
#
##
def twoSort(self, list__point, worker, d, map__num_points__best_s=None, try_random_quicksort=False,
total_amount_of_allowed_comparisons=float("+inf"), tare_of_num_comparisons=0):
"""
The 2Sort Algorithm by Ajtai at al.
:param points:
:param d:
:param map__num_points__best_s:
:return: Sorted list of points in DESCENDING order.
"""
#
s = math.floor(math.sqrt(2 * len(list__point)))
if map__num_points__best_s is not None and len(list__point) in map__num_points__best_s:
s = map__num_points__best_s[len(list__point)]
#
#
if try_random_quicksort and total_amount_of_allowed_comparisons > 0:
s = 1
#
#
c_num_crowd_phases = 0
#
#
if (len(list__point) > s):
#
#
predicted_num_comparisons_current_round = ((s * (s - 1)) / 2) + (len(list__point) - s)
#
##############################################################
num_comparisons_performed_so_far = len(
self.set_comparisons__point_1__point_2__dim) - tare_of_num_comparisons
#
if try_random_quicksort and total_amount_of_allowed_comparisons < num_comparisons_performed_so_far + predicted_num_comparisons_current_round:
if (map__num_points__best_s is not None) and (len(list__point) in map__num_points__best_s):
s = map__num_points__best_s[len(list__point)]
else:
s = math.floor(math.sqrt(2 * len(list__point)))
##############################################################
#
num_comps_t0 = len(self.set_comparisons__point_1__point_2__dim)
#
random_subset_of_points = np.random.choice(a=list__point, size=s, replace=False)
sorted_list_points_desc_num_wins = [x[0] for x in
sorted(self.round_robin_tournament_scores(random_subset_of_points, d,
worker).items(),
key=lambda kv: -kv[1])]
pivot = sorted_list_points_desc_num_wins[round(s / 2.)]
#
num_comps_t1 = len(self.set_comparisons__point_1__point_2__dim)
if num_comps_t1 > num_comps_t0: c_num_crowd_phases += 1
#
#
num_comps_t0 = len(self.set_comparisons__point_1__point_2__dim)
#
(list__point_1, list__point_2) = self.pivot_splitting(list__point, worker, pivot, d)
#
num_comps_t1 = len(self.set_comparisons__point_1__point_2__dim)
if num_comps_t1 > num_comps_t0: c_num_crowd_phases += 1
#
#
list__point_1, num_crowd_phases_1 = self.twoSort(list__point_1, worker,
d,
map__num_points__best_s,
try_random_quicksort,
total_amount_of_allowed_comparisons=total_amount_of_allowed_comparisons,
tare_of_num_comparisons=tare_of_num_comparisons)
list__point_2, num_crowd_phases_2 = self.twoSort(list__point_2, worker,
d,
map__num_points__best_s,
try_random_quicksort,
total_amount_of_allowed_comparisons=total_amount_of_allowed_comparisons,
tare_of_num_comparisons=tare_of_num_comparisons)
#
c_num_crowd_phases += max(num_crowd_phases_1, num_crowd_phases_2)
#
#
return list__point_2 + [pivot] + list__point_1, c_num_crowd_phases
#
#
#
#
num_comps_t0 = len(self.set_comparisons__point_1__point_2__dim)
#
final_result = [x[0] for x in
sorted(self.round_robin_tournament_scores(list__point, d, worker).items(),
key=lambda kv: (-kv[1], kv[0].at(d)))]
#
num_comps_t1 = len(self.set_comparisons__point_1__point_2__dim)
if num_comps_t1 > num_comps_t0: c_num_crowd_phases += 1
#
#
return final_result, c_num_crowd_phases
#
def pivot_splitting(self, list__point, worker, pivot, d):
"""
        Return two lists (S1, S2) such that S1 <=_d pivot and S2 >_d pivot.
:param points:
:param pivot:
:param d:
:return:
"""
list__point_1 = []
list__point_2 = []
for point in list__point:
#
if point == pivot:
continue
#
if worker.is_point_1_smaller_than_point_2(pivot, point, d, self.set_comparisons__point_1__point_2__dim):
list__point_1.append(point)
else:
list__point_2.append(point)
return (list__point_2, list__point_1)
#
##
def twoMaxFind(self, list__point, worker, d,
map__num_points__best_s=None,
try_random_max=False,
total_amount_of_allowed_comparisons=float("+inf"), tare_of_num_comparisons=0):
"""
:param list__point:
:param worker:
:param d:
:param map__num_points__best_s:
:param try_random_max:
:param total_amount_of_allowed_comparisons:
:param tare_of_num_comparisons:
:return:
"""
#
set__max = set()
#
#
initial_s = math.ceil(math.sqrt(len(list__point)))
s = initial_s
if map__num_points__best_s is not None and len(list__point) in map__num_points__best_s:
s = map__num_points__best_s[len(list__point)]
#
#
if try_random_max and total_amount_of_allowed_comparisons > 0:
s = 1
#
#
c_num_crowd_phases = 0
#
#
set_candidates_as_list_to_max__point = list(list__point)
while len(set_candidates_as_list_to_max__point) > s:
#
#
predicted_num_comparisons_current_round = ((s * (s - 1)) / 2) + (
len(set_candidates_as_list_to_max__point) - s)
#
##############################################################
num_comparisons_performed_so_far = len(
self.set_comparisons__point_1__point_2__dim) - tare_of_num_comparisons
#
if try_random_max and total_amount_of_allowed_comparisons < num_comparisons_performed_so_far + predicted_num_comparisons_current_round:
if (map__num_points__best_s is not None) and (
len(set_candidates_as_list_to_max__point) in map__num_points__best_s):
s = map__num_points__best_s[len(set_candidates_as_list_to_max__point)]
else:
s = initial_s
##############################################################
#
num_comps_t0 = len(self.set_comparisons__point_1__point_2__dim)
#
# extract a random subset of candidates to be the max of size 's'
random_subset_candidates_as_list_to_max__point = np.random.choice(a=set_candidates_as_list_to_max__point,
size=s, replace=False)
#
# perform RR on the extracted random subset of candidates to be the max of size 's'
map__point__RR_num_wins = self.round_robin_tournament_scores(random_subset_candidates_as_list_to_max__point,
d, worker)
#
num_comps_t1 = len(self.set_comparisons__point_1__point_2__dim)
if num_comps_t1 > num_comps_t0: c_num_crowd_phases += 1
#
#
            # select the point with the maximum number of wins in the extracted random subset of candidates of size 's'
local_max_point = max([(point, num_wins) for point, num_wins in map__point__RR_num_wins.items()],
key=lambda x: x[1])[0]
#
#
num_comps_t0 = len(self.set_comparisons__point_1__point_2__dim)
#
            # compare the local max to all candidates and eliminate every element that loses to it.
set_candidates_as_list_to_max__point = self.update_set_of_candidates_for_the_max(local_max_point,
set_candidates_as_list_to_max__point,
d, worker)
num_comps_t1 = len(self.set_comparisons__point_1__point_2__dim)
if num_comps_t1 > num_comps_t0: c_num_crowd_phases += 1
#
#
#
#
#
num_comps_t0 = len(self.set_comparisons__point_1__point_2__dim)
#
# perform RR on the final set of candidates to be the max of size <='s'
map__point__RR_num_wins = self.round_robin_tournament_scores(set_candidates_as_list_to_max__point,
d, worker)
num_comps_t1 = len(self.set_comparisons__point_1__point_2__dim)
if num_comps_t1 > num_comps_t0: c_num_crowd_phases += 1
#
        # take the maximum number of wins over the final candidate set, then keep every point that attains it
num_wins_of_max_point = max([(point, num_wins) for point, num_wins in map__point__RR_num_wins.items()],
key=lambda x: x[1])[1]
set__max = set(
[point for point, num_wins in map__point__RR_num_wins.items() if num_wins == num_wins_of_max_point])
#
return set__max, c_num_crowd_phases
#
def update_set_of_candidates_for_the_max(self, point, set_candidates_as_list_to_max__point, d, worker):
#
new_set_candidates_as_list_to_max__point = [point]
#
for index in range(len(set_candidates_as_list_to_max__point)):
#
c_point = set_candidates_as_list_to_max__point[index]
#
if point == c_point:
continue
#
if not (
worker.is_point_1_smaller_than_point_2(c_point, point, d,
self.set_comparisons__point_1__point_2__dim)):
new_set_candidates_as_list_to_max__point.append(c_point)
#
return new_set_candidates_as_list_to_max__point
#
##
### OLD
def all_play_all(self, list__point, worker, use_complete_comparisons=False, reset_set_comparisons=True):
"""
APA algorithm for skln computation.
:param list__point:
:param use_complete_comparisons:
:return:
"""
#
if reset_set_comparisons:
self.set_comparisons__point_1__point_2__dim = set()
#
set_of_dominated_indexes = set()
self.num_crowd_phases = 1
#
for i in range(len(list__point)):
#
point_i = list__point[i]
#
for j in range(len(list__point)):
if j == i: continue
#
point_j = list__point[j]
#
#
if worker.does_point_1_dominate_point_2(point_j, point_i,
self.set_comparisons__point_1__point_2__dim,
perform_all_comparisons=use_complete_comparisons):
set_of_dominated_indexes.add(i)
#
skln = []
for i in range(len(list__point)):
if i not in set_of_dominated_indexes:
skln.append(list__point[i])
#
return skln
#
##
### OLD
def all_play_all_2(self, list__point, worker, use_complete_comparisons=False, reset_set_comparisons=True):
"""
APA algorithm for skln computation.
:param list__point:
:param use_complete_comparisons:
:return:
"""
#
if reset_set_comparisons:
self.set_comparisons__point_1__point_2__dim = set()
#
set_of_dominated_indexes = set()
self.num_crowd_phases = 1
#
list_of_i_indexes = list(range(len(list__point)))
list_of_j_indexes = list(list_of_i_indexes)
#
random.shuffle(list_of_i_indexes)
for i in list_of_i_indexes:
#
point_i = list__point[i]
#
#
random.shuffle(list_of_j_indexes)
for j in list_of_j_indexes:
if j == i: continue
#
point_j = list__point[j]
#
#
if worker.does_point_1_dominate_point_2(point_j, point_i,
self.set_comparisons__point_1__point_2__dim,
perform_all_comparisons=use_complete_comparisons):
set_of_dominated_indexes.add(i)
break # BrEaK!!!
#
#
skln = []
for i in range(len(list__point)):
if i not in set_of_dominated_indexes:
skln.append(list__point[i])
#
return skln
#
##
###
def repeated_twoMaxFind_for_skln(self, list__point, worker, reset_set_comparisons=True):
"""
Computes skln by repeating the 2MaxFind algorithm.
:param list__point:
:param worker:
:param map__num_points__best_s:
:return:
"""
skln = []
#
skln_as_set = set()
#
if reset_set_comparisons:
self.set_comparisons__point_1__point_2__dim = set()
#
# mapping point_id to the number of dimensions
# the point has been selected as maximum ;)
map__point_id__num_dimensions_in_which_it_has_been_selected_as_max = {}
temp_list__point = None
set__dimensions_out_of_the_skln = set()
num_dimensions = list__point[0].n_dimensions
map__dimension__inner_list__point = {c_dim: list(list__point) for c_dim in range(num_dimensions)}
# while len(set__dimensions_out_of_the_skln) < num_dimensions:
must_continue = True
while must_continue:
for d in range(num_dimensions):
#
# if d in set__dimensions_out_of_the_skln:
# continue
#
# 2MF
set__max, c_num_crowd_phases = self.twoMaxFind(map__dimension__inner_list__point[d], worker, d)
#
# print("----")
# print(" repeated_twoMaxFind_for_skln ", " dim ", d, " |set__max|", len(set__max))
# for pppp in set__max:
# print(" repeated_twoMaxFind_for_skln ", " dim ", pppp)
# pp.pprint(map__point_id__num_dimensions_in_which_it_has_been_selected_as_max)
#
# there_is_at_least_one_max_out_of_skln = False
for c_max in set__max:
#
# check if c_max is dominated by at least one point in the current skln
c_max_out_of_the_skln = worker.does_exist_a_point_in_collection_that_dominates_the_input_point(
skln,
c_max, self.set_comparisons__point_1__point_2__dim)
#
#
# is the c_max part of the skln?
if not c_max_out_of_the_skln:
#
# add c_max to the current skln
if c_max not in skln_as_set:
skln.append(c_max)
skln_as_set.add(c_max)
# else:
# must_continue = False
#
#
if c_max.ID not in map__point_id__num_dimensions_in_which_it_has_been_selected_as_max:
map__point_id__num_dimensions_in_which_it_has_been_selected_as_max[c_max.ID] = 0
map__point_id__num_dimensions_in_which_it_has_been_selected_as_max[c_max.ID] += 1
if num_dimensions == map__point_id__num_dimensions_in_which_it_has_been_selected_as_max[
c_max.ID]:
must_continue = False
# pp.pprint(map__point_id__num_dimensions_in_which_it_has_been_selected_as_max)
# remove all c_maxS from the input collection of points
temp_list__point = [point for point in map__dimension__inner_list__point[d] if point not in set__max]
map__dimension__inner_list__point[d] = temp_list__point
#
#
#
return skln
#
##
def compute_skln_using_all_sorted_components(self, sorted_components):
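        # The head of the dimension-0 list is maximal on that dimension, so nothing dominates
        # it and it joins the skyline; every point at or after it in ALL of the (descending)
        # sorted lists is <= it on every dimension, i.e. dominated, and is dropped before
        # the next pass.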
skln = []
#
dimensions = len(sorted_components)
while len(sorted_components[0]) > 0:
new_skln_element = sorted_components[0][0]
skln.append(new_skln_element)
set_dominated__point = set(sorted_components[0])
for d in range(1, dimensions):
index = sorted_components[d].index(new_skln_element)
set_dominated__point = set_dominated__point.intersection(set(sorted_components[d][index:]))
for d in range(0, dimensions):
sorted_components[d] = [x for x in sorted_components[d] if x not in set_dominated__point]
#
return skln
#
##
###
def compute_deterministic_skln(self, list__point):
skln = []
#
sorted_components = []
#
        # Sort each component independently.
dimensions = list__point[0].n_dimensions
for d in range(dimensions):
#
sorted_dimension = sorted(list__point, key=lambda x: (x.at(d)), reverse=True)
sorted_components.append(sorted_dimension)
#
#
# Compute skln using all sorted components.
skln = self.compute_skln_using_all_sorted_components(sorted_components)
#
return skln
#
##
###
def compute_deterministic_sklnS_of_all_ORDERS(self, list__point):
map__order__collection_of_points = {}
map__point_id__order = {}
list__point_id__order = []
#
inner_set__point = set(list__point)
#
c_order = 0
while len(inner_set__point) > 0:
#
c_order += 1
inner_list__point = list(inner_set__point)
#
c_skln = self.compute_deterministic_skln(inner_list__point)
#
map__order__collection_of_points[c_order] = c_skln
#
for c_point in c_skln:
inner_set__point.remove(c_point)
#
for c_order, c_collection_of_points in map__order__collection_of_points.items():
for c_point in c_collection_of_points:
map__point_id__order[c_point.ID] = c_order
list__point_id__order.append((c_point.ID, c_order))
#
list__point_id__order.sort(key=lambda x: (x[1], x[0]))
return list__point_id__order, map__order__collection_of_points, map__point_id__order
#
##
###
def OLD__random_skln(self, list__point, worker, reset_set_comparisons=True):
"""
        No guarantees on the error!
:param list__point:
:param worker:
:param reset_set_comparisons:
:return:
"""
skln = []
#
if reset_set_comparisons:
self.set_comparisons__point_1__point_2__dim = set()
#
list_candidates__point = list__point.copy()
while len(list_candidates__point) > 0:
#
s = random.choice(list_candidates__point)
#
s_eliminated = False
list_candidates__point.remove(s)
#
deleted_points = []
for c_candidate in list_candidates__point:
if worker.does_point_1_dominate_point_2(s, c_candidate, self.set_comparisons__point_1__point_2__dim):
deleted_points.append(c_candidate)
elif worker.does_point_1_dominate_point_2(c_candidate, s, self.set_comparisons__point_1__point_2__dim):
s_eliminated = True
#
for deleted_point in deleted_points:
list_candidates__point.remove(deleted_point)
if not (s_eliminated):
skln.append(s)
#
return skln
#
##
###
####
def divede_et_impera_skln(self, list__point, worker, num_partitions=10, sub_method="2sort_skln",
super_method="2sort_skln", map__parameter__value={}, reset_set_comparisons=True):
skln = []
#
if reset_set_comparisons:
self.set_comparisons__point_1__point_2__dim = set()
#
random.shuffle(list__point)
#
sigle_partition_size_float = len(list__point) / num_partitions
sigle_partition_size = math.floor(len(list__point) / num_partitions)
if sigle_partition_size_float > sigle_partition_size:
sigle_partition_size += 1
#
second_order_list__point = []
for index in range(0, len(list__point), sigle_partition_size):
#
c_chunk_list__point = list__point[index:index + sigle_partition_size]
#
c_chunk_skln = []
if sub_method == "_random_skln":
c_chunk_skln = self.OLD__random_skln(c_chunk_list__point, worker, reset_set_comparisons=False)
if sub_method == "2sort_skln":
#
map__num_points__best_s = None
if "sub_method__map__num_points__best_s" in map__parameter__value:
map__num_points__best_s = map__parameter__value["sub_method__map__num_points__best_s"]
#
c_chunk_skln = self.twoSort_skln(c_chunk_list__point, worker,
map__num_points__best_s=map__num_points__best_s,
reset_set_comparisons=False)
if sub_method == "2lex_skln":
#
map__num_points__best_s = None
if "sub_method__map__num_points__best_s" in map__parameter__value:
map__num_points__best_s = map__parameter__value["sub_method__map__num_points__best_s"]
#
c_chunk_skln = self.lexicographic_skln(c_chunk_list__point, worker,
inital_set_comparisons__point_1__point_2__dim=set(),
map__num_points__best_s=map__num_points__best_s,
reset_first_sorted_dimension=True, reset_set_comparisons=False)
#
second_order_list__point.extend(c_chunk_skln)
#
# print()
# print(" |c_chunk_list__point|", str(len(c_chunk_list__point)))
# print(" |c_chunk_skln|", str(len(c_chunk_skln)))
# print(" |self.set_comparisons__point_1__point_2__dim|=",
# len(self.set_comparisons__point_1__point_2__dim))
# print()
#
#
#
# print()
# print("|second_order_list__point|", str(len(second_order_list__point)))
# if len(second_order_list__point) != len(set(second_order_list__point)):
# print()
# print("len(second_order_list__point) != len(set(second_order_list__point)) !!!!!!")
# print("len(second_order_list__point)", len(second_order_list__point))
# print("len(set(second_order_list__point))", len(set(second_order_list__point)))
# exit(-1)
# print()
# print()
if super_method == "_random_skln":
skln = self.OLD__random_skln(second_order_list__point, worker, reset_set_comparisons=False)
if super_method == "2sort_skln":
#
map__num_points__best_s = None
if "super_method__map__num_points__best_s" in map__parameter__value:
map__num_points__best_s = map__parameter__value["super_method__map__num_points__best_s"]
#
skln = self.twoSort_skln(second_order_list__point, worker, map__num_points__best_s=map__num_points__best_s,
reset_set_comparisons=False)
if super_method == "2lex_skln":
#
map__num_points__best_s = None
if "super_method__map__num_points__best_s" in map__parameter__value:
map__num_points__best_s = map__parameter__value["super_method__map__num_points__best_s"]
#
skln = self.lexicographic_skln(second_order_list__point, worker,
inital_set_comparisons__point_1__point_2__dim=set(),
map__num_points__best_s=map__num_points__best_s,
reset_first_sorted_dimension=True, reset_set_comparisons=False)
#
return skln
#####################################################################################
#####################################################################################
#####################################################################################
#####################################################################################
#####################################################################################
#
##
###
def all_play_all_method_for_skln(self, list__point, worker, use_complete_comparisons=False,
reset_set_comparisons=True):
"""
        Compare each element to every other element to isolate the ones
        that are not dominated.
        Any error guarantee? No.
:param list__point:
:param worker:
:param use_complete_comparisons:
:param reset_set_comparisons:
:return:
"""
#
if reset_set_comparisons:
self.set_comparisons__point_1__point_2__dim = set()
#
set_of_dominated_indexes = set()
self.num_crowd_phases = 0
#
list_of_indexes = list(range(len(list__point)))
#
random.shuffle(list_of_indexes)
for i, j in it.combinations(list_of_indexes, 2):
#
point_i = list__point[i]
point_j = list__point[j]
#
if worker.does_point_1_dominate_point_2(point_j, point_i,
self.set_comparisons__point_1__point_2__dim,
perform_all_comparisons=use_complete_comparisons):
set_of_dominated_indexes.add(i)
elif worker.does_point_1_dominate_point_2(point_i, point_j,
self.set_comparisons__point_1__point_2__dim,
perform_all_comparisons=use_complete_comparisons):
set_of_dominated_indexes.add(j)
#
#
#
skln = []
for i in range(len(list__point)):
if i not in set_of_dominated_indexes:
skln.append(list__point[i])
#
self.num_crowd_phases = 1
#
return skln
#
##
###
def naive_method_for_skln(self, list__point, worker, use_complete_comparisons=False,
reset_set_comparisons=True):
"""
Scan the set of points to isolate the ones that are dominated
by other points.
        Any error guarantee? No.
:param list__point:
:param worker:
:param use_complete_comparisons:
:param reset_set_comparisons:
:return:
"""
#
if reset_set_comparisons:
self.set_comparisons__point_1__point_2__dim = set()
#
set_of_dominated_indexes = set()
self.num_crowd_phases = 0
#
list_of_i_indexes = list(range(len(list__point)))
list_of_j_indexes = list(list_of_i_indexes)
#
random.shuffle(list_of_i_indexes)
for i in list_of_i_indexes:
#
if i in set_of_dominated_indexes:
continue
#
point_i = list__point[i]
#
#
random.shuffle(list_of_j_indexes)
num_comps_t0 = len(self.set_comparisons__point_1__point_2__dim)
for j in list_of_j_indexes:
if j == i: continue
#
if j in set_of_dominated_indexes:
continue
#
#
point_j = list__point[j]
#
#
if worker.does_point_1_dominate_point_2(point_j, point_i,
self.set_comparisons__point_1__point_2__dim,
perform_all_comparisons=use_complete_comparisons):
set_of_dominated_indexes.add(i)
elif worker.does_point_1_dominate_point_2(point_i, point_j,
self.set_comparisons__point_1__point_2__dim,
perform_all_comparisons=use_complete_comparisons):
set_of_dominated_indexes.add(j)
#
num_comps_t1 = len(self.set_comparisons__point_1__point_2__dim)
if num_comps_t1 > num_comps_t0: self.num_crowd_phases += 1
#
#
#
skln = []
for i in range(len(list__point)):
if i not in set_of_dominated_indexes:
skln.append(list__point[i])
#
return skln
#
##
###
def lexicographic_skln(self, list__point, worker,
inital_set_comparisons__point_1__point_2__dim=set(),
map__num_points__best_s=None,
reset_first_sorted_dimension=False, reset_set_comparisons=True,
try_random_quicksort=False):
"""
        Solve the skln problem under the threshold-error model using
        a lexicographic-sort-like method.
:param list__point:
:param worker:
:param inital_set_comparisons__point_1__point_2__dim:
:param map__num_points__best_s:
:param reset_first_sorted_dimension:
:return:
"""
#
skln = []
#
self.num_crowd_phases = 0
#
if reset_set_comparisons:
self.set_comparisons__point_1__point_2__dim = inital_set_comparisons__point_1__point_2__dim
#
list_candidates__point = list__point.copy()
#
if reset_first_sorted_dimension:
self.first_sorted_dimension = []
self.num_crowd_phases__first_dim = 0
#
#
if (len(self.first_sorted_dimension) > 0):
#
# print("self.num_crowd_phases__first_dim", self.num_crowd_phases__first_dim)
self.num_crowd_phases = self.num_crowd_phases__first_dim
#
if (len(self.set_comparisons__point_1__point_2__dim) == 0):
list_candidates__point = self.first_sorted_dimension.copy()
self.set_comparisons__point_1__point_2__dim = self.set_comparisons__first_dim.copy()
else:
# We already have the points sorted according to the first dimension.
# We already have some performed comparison.
# We must initialize the list of candidates according to
# the order in the first dimension.
set_of_input_points = set(list__point)
list_candidates__point = []
for point in self.first_sorted_dimension:
if point in set_of_input_points:
list_candidates__point.append(point)
#
else:
#
total_amount_of_allowed_comparisons = float("+inf")
if try_random_quicksort:
total_amount_of_allowed_comparisons = int(4 * len(list_candidates__point) ** 1.5) + 1
#
tare_of_num_comparisons = len(self.set_comparisons__point_1__point_2__dim)
list_candidates__point, num_crowd_phases, = self.twoSort(
list_candidates__point, worker, 0,
map__num_points__best_s, try_random_quicksort,
total_amount_of_allowed_comparisons=total_amount_of_allowed_comparisons,
tare_of_num_comparisons=0)
#
num_comparisons_performed_by_2Sort = len(
self.set_comparisons__point_1__point_2__dim) - tare_of_num_comparisons
# print("zxcvbnm ", "num_comparisons_performed_by_2Sort=", num_comparisons_performed_by_2Sort, "4n**1.5=", int(4 * len(list_candidates__point) ** 1.5))
            if (num_comparisons_performed_by_2Sort >= int(4 * len(list_candidates__point) ** 1.5)):
                print("WARNING:",
                      "2Sort used at least 4 * n ** 1.5 comparisons")
self.num_crowd_phases__first_dim = num_crowd_phases
self.num_crowd_phases = num_crowd_phases
#
# print("lexicographic_skln", "dim", 0, self.num_crowd_phases)
#
while (len(list_candidates__point) > 0):
#
num_comps_t0 = len(self.set_comparisons__point_1__point_2__dim)
#
m = list_candidates__point[0]
skln.append(m)
deleted_pp = [m]
for i in range(1, len(list_candidates__point)):
if worker.does_point_1_dominate_point_2(m, list_candidates__point[i],
self.set_comparisons__point_1__point_2__dim):
deleted_pp.append(list_candidates__point[i])
for p in deleted_pp:
list_candidates__point.remove(p)
#
num_comps_t1 = len(self.set_comparisons__point_1__point_2__dim)
if num_comps_t1 > num_comps_t0: self.num_crowd_phases += 1
# print("lexicographic_skln", "phase_2", self.num_crowd_phases)
#
#
return skln
#
##
###
def twoSort_skln(self, list__point, worker, map__num_points__best_s=None, reset_set_comparisons=True,
try_random_quicksort=False):
"""
Compute the skln performing 'd' distinct 2Sort algorithms.
:param list__point:
:param map__num_points__best_s:
:return: skln
"""
#
if reset_set_comparisons:
self.set_comparisons__point_1__point_2__dim = set()
#
self.set_comparisons__first_dim = set()
skln = []
sorted_components = []
#
self.num_crowd_phases = 0
max__num_crowd_phases = 0
#
#
total_amount_of_allowed_comparisons = float("+inf")
if try_random_quicksort:
total_amount_of_allowed_comparisons = int(4 * len(list__point) ** 1.5) + 1
#
        # apply 2Sort to each component independently.
dimensions = list__point[0].n_dimensions
for d in range(dimensions):
#
tare_of_num_comparisons = len(self.set_comparisons__point_1__point_2__dim)
sorted_dimension, c_dim__num_crowd_phases = self.twoSort(list__point,
worker, d,
map__num_points__best_s,
try_random_quicksort,
total_amount_of_allowed_comparisons=total_amount_of_allowed_comparisons,
tare_of_num_comparisons=tare_of_num_comparisons)
max__num_crowd_phases = c_dim__num_crowd_phases if c_dim__num_crowd_phases > max__num_crowd_phases else max__num_crowd_phases
sorted_components.append(sorted_dimension)
if d == 0:
self.set_comparisons__first_dim = self.set_comparisons__point_1__point_2__dim.copy()
self.num_crowd_phases__first_dim = c_dim__num_crowd_phases
#
#
num_comparisons_performed_by_2Sort = len(
self.set_comparisons__point_1__point_2__dim) - tare_of_num_comparisons
# print("zxcvbnm ", "num_comparisons_performed_by_2Sort=", num_comparisons_performed_by_2Sort, "4n**1.5=", int(4 * len(list__point) ** 1.5))
            if (num_comparisons_performed_by_2Sort >= int(4 * len(list__point) ** 1.5)):
                print("WARNING:",
                      "2Sort used at least 4 * n ** 1.5 comparisons")
#
# print("twoSort_skln", "dim", d, self.num_crowd_phases)
# print("twoSort_skln", "self.num_crowd_phases__first_dim", self.num_crowd_phases__first_dim)
#
self.first_sorted_dimension = sorted_components[0].copy()
self.num_crowd_phases = max__num_crowd_phases
#
# Compute skln using all sorted components.
skln = self.compute_skln_using_all_sorted_components(sorted_components)
#
return skln
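# A minimal usage sketch of the deterministic baseline above. The Point class here is a
# stub with the interface this module assumes (.ID, .n_dimensions, .at(d)); it is not
# part of the original code.
#
# class Point:
#     def __init__(self, ID, coords):
#         self.ID, self.coords, self.n_dimensions = ID, coords, len(coords)
#     def at(self, d):
#         return self.coords[d]
#
# points = [Point(0, (1, 3)), Point(1, (2, 2)), Point(2, (3, 1)), Point(3, (1, 1))]
# print([p.ID for p in Solver().compute_deterministic_skln(points)])  # -> [2, 1, 0]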
|
[
"noreply@github.com"
] |
fazzadr.noreply@github.com
|
ba95e11df98a4b488b08b1f488845db12aa99289
|
9ca2f63f855906695b4b587f137257a7793a75bb
|
/DBASE/AppConfig/options/Gauss/Pythia8_Pt0Ref2.6_CT09MCS.py
|
8d11b32e024d1f48e86b8da772ab0bbb161b9bee
|
[] |
no_license
|
kidaak/lhcbsoft
|
a59ffb6c884e5b6e2d28740551d3c49f8338ec3d
|
b22a1dfea5d4d572e318d1e5d05dfb7484da5f3d
|
refs/heads/master
| 2021-01-22T16:53:18.266147
| 2015-12-10T14:40:48
| 2015-12-10T14:40:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
from Configurables import Generation, MinimumBias, Pythia8Production
from Configurables import Inclusive, SignalPlain, SignalRepeatedHadronization
from Configurables import Special
Pythia8TurnOffFragmentation = [ "HadronLevel:all = off" ]
gen = Generation()
gen.addTool( MinimumBias , name = "MinimumBias" )
gen.MinimumBias.ProductionTool = "Pythia8Production"
gen.MinimumBias.addTool( Pythia8Production , name = "Pythia8Production" )
gen.MinimumBias.Pythia8Production.Tuning = "LHCbPt0Ref2.6_CT09MCS.cmd"
gen.addTool( Inclusive , name = "Inclusive" )
gen.Inclusive.ProductionTool = "Pythia8Production"
gen.Inclusive.addTool( Pythia8Production , name = "Pythia8Production" )
gen.Inclusive.Pythia8Production.Tuning = "LHCbPt0Ref2.6_CT09MCS.cmd"
gen.addTool( SignalPlain , name = "SignalPlain" )
gen.SignalPlain.ProductionTool = "Pythia8Production"
gen.SignalPlain.addTool( Pythia8Production , name = "Pythia8Production" )
gen.SignalPlain.Pythia8Production.Tuning = "LHCbPt0Ref2.6_CT09MCS.cmd"
gen.addTool( SignalRepeatedHadronization , name = "SignalRepeatedHadronization" )
gen.SignalRepeatedHadronization.ProductionTool = "Pythia8Production"
gen.SignalRepeatedHadronization.addTool( Pythia8Production , name = "Pythia8Production" )
gen.SignalRepeatedHadronization.Pythia8Production.Tuning = "LHCbPt0Ref2.6_CT09MCS.cmd"
gen.SignalRepeatedHadronization.Pythia8Production.Commands += Pythia8TurnOffFragmentation
gen.addTool( Special , name = "Special" )
gen.Special.ProductionTool = "Pythia8Production"
gen.Special.addTool( Pythia8Production , name = "Pythia8Production" )
gen.Special.Pythia8Production.Tuning = "LHCbPt0Ref2.6_CT09MCS.cmd"
|
[
"gcorti@4525493e-7705-40b1-a816-d608a930855b"
] |
gcorti@4525493e-7705-40b1-a816-d608a930855b
|
aafcd623cb57d0e5dc03351f6215fe2f34b4b783
|
26b93ed4b18392f5f577e85c1d06ad6252eff477
|
/2021. 3./5543.py
|
1bad76ca356dd5fdea7f1fd7411bd12e634edbd3
|
[] |
no_license
|
chengxxi/dailyBOJ
|
9d245da99cae1972255701cbcb16209f454cd3a3
|
06c1e35d1873d3e39de956cacc88d9e92869cb5b
|
refs/heads/master
| 2023-04-13T18:12:51.677637
| 2021-04-19T12:19:07
| 2021-04-19T12:19:07
| 335,271,521
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
# 5543: Sanggeunald (상근날드)
burgers = [int(input()) for _ in range(3)]
beverages = [int(input()) for _ in range(2)]
bur = min(burgers)
bev = min(beverages)
print(bur+bev-50)
"""
상근날드에서 가장 잘 팔리는 메뉴는 세트 메뉴이다. 주문할 때, 자신이 원하는 햄버거와 음료를 하나씩 골라, 세트로 구매하면,
가격의 합계에서 50원을 뺀 가격이 세트 메뉴의 가격이 된다.
햄버거는 총 3종류 상덕버거, 중덕버거, 하덕버거가 있고, 음료는 콜라와 사이다 두 종류가 있다.
햄버거와 음료의 가격이 주어졌을 때, 가장 싼 세트 메뉴의 가격을 출력하는 프로그램을 작성하시오.
"""
|
[
"noreply@github.com"
] |
chengxxi.noreply@github.com
|
a4904e6b0e4a67294faf226389fbd709ba1892d5
|
9613845558fa43a9302fb9821e333375966aa7b8
|
/decorator.py
|
cb9a50e28f0570ca8d10984ed5b14991c5575185
|
[] |
no_license
|
Hex-s/python
|
56d892048ec97ca519ae76bb72f57364cb5c256c
|
499fc3e487807d320c3140767901703aa6a7d1c4
|
refs/heads/master
| 2021-01-10T17:49:24.436894
| 2016-03-07T07:01:15
| 2016-03-07T07:01:15
| 49,856,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
#!/usr/bin/python
#-*-coding:utf-8-*-
import time
def log(func):
def wrapper(*args,**kw):
print('call: %s' % func.__name__)
return func(*args,**kw)
return wrapper
@log
def now():
print('now:%s' % time.time())
#now()
#print(now.__name__) #wrapper
'''
If the decorator itself needs arguments, you have to write a higher-order
function that returns a decorator, which is more involved.
'''
def log2(text):
def decorate(func):
def wrapper(*args,**kw):
print('text:%s,call:%s' % (text,func.__name__))
return func(*args,**kw)
return wrapper
return decorate
@log2('haha')
def nows():
print('now:%s' % (time.time()))
#nows()
#print(nows.__name__) #wrapper
'''
To avoid changing the wrapped function's __name__, use functools.wraps.
'''
import functools
def log3(func):
    @functools.wraps(func)
    def wrapper(*args,**kw):
        print('logs')
        return func(*args,**kw)
    return wrapper
@log3
def now3():
print('now:%s' % (time.time()))
#now3()
#print(now3.__name__)
def log4(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
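# Illustration only (not in the original file): applying log4 both logs the
# call and, via functools.wraps, preserves the wrapped function's metadata.
@log4
def now4():
    print('now:%s' % time.time())
#now4()                 # prints "call now4():" followed by the current time
#print(now4.__name__)   # 'now4', not 'wrapper'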
|
[
"syztobenum1@163.com"
] |
syztobenum1@163.com
|
3a795c578b70dbbb9d542086dbca989104d9bee0
|
e17ffff5120e6746672080750298e9384ecc2434
|
/app.py
|
df78519ebca275ae18efb1c8850b9605558fa3bf
|
[
"MIT"
] |
permissive
|
Maker-Mark/Flask-SQLAlchemy-REST
|
71274bca05f3159ddd0ae16b9bdb33fa8ccfd499
|
12d956c3ec2ec711fc118fd8a1891245b401065a
|
refs/heads/master
| 2023-03-23T11:16:37.884643
| 2019-07-22T14:46:11
| 2019-07-22T14:46:11
| 197,855,881
| 0
| 0
|
MIT
| 2021-03-20T01:20:04
| 2019-07-19T23:43:31
|
Python
|
UTF-8
|
Python
| false
| false
| 4,497
|
py
|
## Use this as our entry-point
from flask import Flask, request,render_template_string, jsonify #Jsonify allows us to take python dicts into json
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import os
#init our app
app = Flask(__name__)
#Get our DB file/URI
basedir= os.path.abspath(os.path.dirname(__file__)) #This is our base/current directory from this file
#Configs the app to have the DB attached that will be looked for in the base dir and named 'db.sqlite'
app.config['SQLALCHEMY_DATABASE_URI'] ='sqlite:///'+os.path.join(basedir, 'db.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False #Stop the console yelling about modifications
#Initialize the DB
db = SQLAlchemy(app)
#Init Marsh
ma = Marshmallow(app)
#Make a Model/Class #db is our SQLAlchemy and Model that gives us some predefined methods
class Product(db.Model):
id = db.Column(db.Integer, primary_key=True) #Making a id field using db.Column(dataType, PrimaryKey?, Autoincrement?, Limit?...)
name = db.Column(db.String(100), unique=True)
description = db.Column(db.String(200))
price = db.Column(db.Float)
qty = db.Column(db.Integer)
#Our initializer/constructor
    #When the params are passed in, assign them to the instance
def __init__(self, name, description, price,qty):
self.name=name
self.description=description
self.price=price
self.qty=qty
# Product Schema:Blueprint specifying what fields will be present
class ProductSchema(ma.Schema):
class Meta:
fields = ('id','name','description','price','qty')
# Init our schema #Avoid warning
product_schema = ProductSchema(strict=True) #Single product
products_schema = ProductSchema(many=True, strict=True)
# Create a Product
@app.route('/product',methods=['POST'])
#Grab the data that is being sent in (i.e. from React, Postman, etc.)
def add_product():
name = request.json['name']
description = request.json['description']
price = request.json['price']
qty = request.json['qty']
new_product = Product(name, description, price, qty) #Instantiate the product
db.session.add(new_product) #Add the product to this session
db.session.commit() #Commit this session to our db to store the new record
print(product_schema)
print(new_product) #
result = product_schema.dump(new_product)
return jsonify(result.data) #Send back a json representation of our new product!
# Get all products
@app.route('/product', methods=["GET"])
def get_products():
all_products= Product.query.all() #This gives us back all the Products (thank you SQLAlchemy)
result = products_schema.dump(all_products) #Dump all the products, raw parsing
return jsonify(result.data) #Json representation of the result's data
# Get single product, angle bracket gives us a var
@app.route('/product/<id>', methods=["GET"])
def get_product(id):
product= Product.query.get(id) #This gives us back the product that matches the id given (thank you SQLAlchemy)
return product_schema.jsonify(product) #Json representation of the result's data
# Update a product
#We need to know what product, so we need the ID
# Use the PUT verb for updating
@app.route('/product/<id>',methods=['PUT'])
def update_product(id):
product = Product.query.get(id) #Get the product by id
#Grab the request's data
name = request.json['name']
description = request.json['description']
price = request.json['price']
qty = request.json['qty']
#Build out the product with updated info
product.name=name
product.description=description
product.price=price
product.qty=qty
db.session.commit() #Commit this session to our db to store the new record
return product_schema.jsonify(product) #Send back a json representation of our new product!
# Delete a product/record
@app.route('/product/<id>', methods=["DELETE"])
def delete_product(id):
product= Product.query.get(id) #This gives us back the product that matches the id given (thank you SQLAlchemy)
db.session.delete(product) #delete the product
db.session.commit() #Save the actions taken
return product_schema.jsonify(product) #Json representation of the result's data
@app.route('/', methods=['GET'])
def get():
return jsonify({'msg':'hello, world'})
#Run the server --> Uncomment if you want to run the module directly via a `python app.py` command
# if __name__ =='__main__':
# app.run(debug=True)
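# Example requests (illustrative; assumes the server runs on localhost:5000):
# curl -X POST -H "Content-Type: application/json" \
#      -d '{"name":"Widget","description":"A widget","price":9.99,"qty":3}' \
#      http://localhost:5000/product
# curl http://localhost:5000/product   # list all products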
|
[
"markgoldstein12@gmail.com"
] |
markgoldstein12@gmail.com
|
89fd0f1a7fee51b6e6d58cf67f040186f6cf26bb
|
80867cdd8f7ea11cef96ebe0c894b87f9fc7afbb
|
/112_reverse_of_a_list.py
|
a52c82f565689f706a368ab893114e96c93bada3
|
[] |
no_license
|
srekan/pyLearn
|
fa1f56104adca625be196bc3ac975ea415b5c630
|
bf7c2350fc7ca57777b84addc35cd10b404eaf86
|
refs/heads/master
| 2022-10-05T05:20:23.262597
| 2020-06-08T10:31:30
| 2020-06-08T10:31:30
| 265,237,788
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# 112_reverse_of_a_list.py
my_list = [
34, 5, 77, 87, 556, 99, 98, 347, 33, 232
]
rev_list = []
last_index = len(my_list) - 1
for i in range(last_index, -1, -1):
rev_list.append(my_list[i])
print("Reverse: ", rev_list)
|
[
"sravan.rekandar@drishya.ai"
] |
sravan.rekandar@drishya.ai
|
5998a3b039fda2f1608a7d35025afa24e81ee8f7
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_4/tama.eguchi/solve.py
|
cd6330c9e4e87932964281aeb28f5552d708afab
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Problem D. Fractiles
# https://code.google.com/codejam/contest/6254486/dashboard#s=p3
#
import sys
import random
def solve(K, C, S):
    # One clean reveals whether G is present at C of the original positions.
    # With S cleans we can learn about at most C*S positions,
    # so if K exceeds C*S the arrangement cannot be determined.
if K > C * S:
return 'IMPOSSIBLE'
    checkpos = list(range(K))
clean = []
while checkpos:
cleanpos = 0
for c in range(C):
cleanpos += (K ** c) * checkpos.pop(0)
if not checkpos:
break
        clean.append(min(cleanpos + 1, K ** C))  # 1-based index
return ' '.join(map(str, sorted(clean)))
def main(IN, OUT):
T = int(IN.readline())
for index in range(T):
K, C, S = map(int, IN.readline().strip().split())
OUT.write('Case #{}: {}\n'.format(index + 1, solve(K, C, S)))
def makesample(T=100, Kmax=100, Cmax=100):
    print(T)
for index in range(T):
K = random.randint(1, Kmax)
C = random.randint(1, Cmax)
S = random.randint(1, K)
while K ** C > 10 ** 18:
C = random.randint(1, Cmax)
        print('{} {} {}'.format(K, C, S))
if __name__ == '__main__':
if '-makesample' in sys.argv[1:]:
makesample()
else:
main(sys.stdin, sys.stdout)
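# Worked example (illustrative): solve(4, 2, 2) returns "5 15" -- clean tile 5
# encodes original tiles 1 and 2, tile 15 encodes tiles 3 and 4 (1-based).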
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
b63e1759752e61c1e109cfd8b7f4a57b4b9b6145
|
c75f82516c4c54c6dca82acf15210a5fcc952b0b
|
/server/bufsm_location.py
|
353d0bbff245e1a08501684b2eb891287158de75
|
[
"MIT"
] |
permissive
|
IotGod/bufsm
|
e4c7988fe43636d1fef257fc4c6e367c7cab8b3d
|
d4b54b8872488de91a9caa769cd6aea60c246a37
|
refs/heads/master
| 2020-12-18T22:54:20.777760
| 2019-08-10T14:22:49
| 2019-08-10T14:22:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
from datetime import datetime
from pytz import timezone
import paho.mqtt.client as mqtt
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("bufsm/p")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
#print(msg.topic, ":", msg.payload.decode())
payload = msg.payload.decode()
with open('bufsm_locations.csv', 'a') as f:
line = str(datetime.now(timezone('America/Sao_Paulo'))) + ',' + str(payload)+ '\n'
f.write(line)
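        # Each appended CSV row is "<America/Sao_Paulo timestamp>,<raw payload>".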
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("mqtt.solveiot.com.br", 1883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
|
[
"matheusdalmago10@hotmail.com"
] |
matheusdalmago10@hotmail.com
|
0e145319ab43967d522e00aa463c3ecd9b256441
|
2afdbbd7c09f376a93ef3c730a627e7efe14e7d5
|
/cortex-alertmanager.dashboard.py
|
cf978c1b28f1aca09de973c3f2b537804af1a8da
|
[
"Apache-2.0"
] |
permissive
|
oprietop/cortex-dashboards
|
4a8efe857113375786c707a393276fa70b2192b1
|
edde28bd6950557fdfe45cdd3df0de7f213f7a18
|
refs/heads/master
| 2022-12-09T21:30:32.669595
| 2020-09-03T09:48:05
| 2020-09-03T09:48:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,268
|
py
|
# -*- mode: python; python-indent-offset: 2 -*-
import grafanalib.core as G
import common
dashboard = common.Dashboard(
uid='am',
title="Cortex > Alertmanager",
rows=[
G.Row(
title='Operations',
panels=[
common.PromGraph(
title="Alerts",
expressions=[
(
"{{instance}} {{status}}",
'sum by (instance, status)(rate(alertmanager_alerts_received_total{job="cortex/alertmanager"}[2m]))'
),
(
"{{instance}} invalid",
'sum by (instance, status)(rate(alertmanager_alerts_invalid_total{job="cortex/alertmanager"}[2m]))'
),
],
yAxes=common.OPS_AXIS,
),
common.PromGraph(
title="Notifications",
expressions=[
(
"{{integration}}",
'sum by (integration)(rate(alertmanager_notifications_total{job="cortex/alertmanager"}[2m]))'
),
],
yAxes=common.OPS_AXIS,
),
]
),
G.Row(
title='Alertmanager fetching configs',
collapse=True,
panels=[
common.QPSGraph('cortex_configs', 'Configs', 'cortex/alertmanager'),
common.PromGraph(
title="Configs Latency",
expressions=[
(
"99th centile",
'histogram_quantile(0.99, sum(rate(cortex_configs_request_duration_seconds_bucket{job="cortex/alertmanager"}[2m])) by (le)) * 1e3'
),
(
"50th centile",
'histogram_quantile(0.50, sum(rate(cortex_configs_request_duration_seconds_bucket{job="cortex/alertmanager"}[2m])) by (le)) * 1e3'
),
(
"Mean",
'sum(rate(cortex_configs_request_duration_seconds_sum{job="cortex/alertmanager"}[2m])) / sum(rate(cortex_configs_request_duration_seconds_count{job="cortex/alertmanager"}[2m])) * 1e3'
),
],
yAxes=common.LATENCY_AXES,
),
]
),
common.REDRow('cortex', 'Alertmanager', 'cortex/alertmanager'),
G.Row(
[
common.PromGraph(
title="Known Configurations",
expressions=[
("{{instance}}", 'cortex_alertmanager_configs_total{job="cortex/alertmanager"}'),
],
),
common.PromGraph(
title="Cluster Members",
expressions=[
("{{instance}}", 'sum(alertmanager_cluster_members{job="cortex/alertmanager"}) by (instance)'),
],
),
]
),
],
)
|
[
"bjboreham@gmail.com"
] |
bjboreham@gmail.com
|
06120a3ffdc7b3e67b51502bdd604159274fa0fa
|
11a246743073e9d2cb550f9144f59b95afebf195
|
/acmsgu/193.py
|
768cb75ea9152b431d5c7a3f66250a9afdbd9dea
|
[] |
no_license
|
ankitpriyarup/online-judge
|
b5b779c26439369cedc05c045af5511cbc3c980f
|
8a00ec141142c129bfa13a68dbf704091eae9588
|
refs/heads/master
| 2020-09-05T02:46:56.377213
| 2019-10-27T20:12:25
| 2019-10-27T20:12:25
| 219,959,932
| 0
| 1
| null | 2019-11-06T09:30:58
| 2019-11-06T09:30:57
| null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
from math import gcd
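# Find the largest k <= n//2 that is coprime with n, i.e. the largest
# numerator for which the fraction k/n is already in lowest terms.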
n = int(input())
k = n // 2
while gcd(n, k) > 1:
k -= 1
print(k)
|
[
"arnavsastry@gmail.com"
] |
arnavsastry@gmail.com
|
8a1554d203e48a6a1c62cac70266748ea0e9521b
|
e770376d6538ad11891defb2200b3a9bd56416ee
|
/comments/forms.py
|
539ad3e4a8ed00a9ad422e53b1fda90f8853efa0
|
[] |
no_license
|
konradkrasno/travelblog
|
812e7da34db97891784b572446dc37ed6ce35936
|
eae532bc144eb9150defddb3cbe64bb32a1dba14
|
refs/heads/master
| 2023-04-15T20:23:15.354088
| 2021-04-24T15:49:44
| 2021-04-24T15:49:44
| 349,385,938
| 0
| 0
| null | 2021-04-24T15:49:45
| 2021-03-19T10:35:17
|
Python
|
UTF-8
|
Python
| false
| false
| 350
|
py
|
from django import forms
from .models import Comment, SubComment
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ["body"]
class SubCommentForm(forms.ModelForm):
class Meta:
model = SubComment
fields = ["body", "main_comment"]
widgets = {"main_comment": forms.HiddenInput}
|
[
"konradkrasno@gmail.com"
] |
konradkrasno@gmail.com
|
86109bf27331bf14ac7ddb352964c41a784eb8c7
|
03380a2cf46385b0d971e150078d992cd7840980
|
/Python/pywarpx/PSATD.py
|
0cd3038336a6bea77a29ae91a1787be2315e3a46
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause-LBNL"
] |
permissive
|
ECP-WarpX/WarpX
|
0cfc85ce306cc5d5caa3108e8e9aefe4fda44cd3
|
e052c6d1994e4edb66fc27763d72fcf08c3a112a
|
refs/heads/development
| 2023-08-19T07:50:12.539926
| 2023-08-17T20:01:41
| 2023-08-17T20:01:41
| 150,626,842
| 210
| 147
|
NOASSERTION
| 2023-09-13T23:38:13
| 2018-09-27T17:53:35
|
C++
|
UTF-8
|
Python
| false
| false
| 145
|
py
|
# Copyright 2016 David Grote
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
from .Bucket import Bucket
psatd = Bucket('psatd')
|
[
"noreply@github.com"
] |
ECP-WarpX.noreply@github.com
|
4c6d69cd4dd555e87bd94a6e5ae50b59b3b33462
|
1306c798b27f17972165daf51dae54ae71da0ddb
|
/tests/unit/utils/test_throttle.py
|
f25175c7ca5bb6dad0cf4ccbec95c8511187de6b
|
[] |
no_license
|
Delaunay/track
|
0dc8dbbc438011a26d06c06edadaa2daad1ce1a7
|
c92247e28ef554f125ed7c8e6cd334327ec6600e
|
refs/heads/master
| 2020-05-26T03:22:20.756722
| 2020-01-10T02:36:40
| 2020-01-10T02:36:40
| 188,088,865
| 0
| 1
| null | 2019-10-16T14:49:39
| 2019-05-22T17:59:00
|
Python
|
UTF-8
|
Python
| false
| false
| 602
|
py
|
from track.utils.throttle import throttled
import time
class Inc:
sum = 0
def add(self):
self.sum += 1
def test_throttle_time():
inc = Inc()
inc_throttled = throttled(inc.add, every=0.05)
for i in range(0, 100):
inc_throttled()
time.sleep(0.01)
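    # 100 iterations * 0.01 s sleep ~= 1 s total; with every=0.05 s the
    # throttled add() should have fired about 1 / 0.05 = 20 times.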
assert inc.sum == int(0.01 * 100 / 0.05)
def test_throttle_count():
inc = Inc()
inc_throttled = throttled(inc.add, throttle=5)
for i in range(0, 100):
inc_throttled()
assert inc.sum == 100 // 5
if __name__ == '__main__':
test_throttle_time()
test_throttle_count()
|
[
"noreply@github.com"
] |
Delaunay.noreply@github.com
|
929a68a4552f9a5a766212acbc4403159e965110
|
b0748a6302d19cf6cf67e9258345d64506f28e12
|
/dlltest.py
|
470018ba4250b11becf35d4c8155c849c9de0bcb
|
[] |
no_license
|
whitexiong/ymjh
|
44077aec1671224ae016e0555e35b0d2fbcaa0c7
|
1a5f854c319a9661b3e7dbdc727b8c634787567d
|
refs/heads/master
| 2022-04-14T08:59:26.859384
| 2020-04-14T12:27:35
| 2020-04-14T12:27:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
import ctypes
if __name__ == "__main__":
tmp_cv = ctypes.CDLL('./libopencv_imgproc420.dll')
tmp_cv.matchTemplate()
print(123)
|
[
"talkand@sina.com"
] |
talkand@sina.com
|
b62ad75b919cf48d96d7ab9b9ea96cddc8c3c163
|
066ea30676154a8eddadbf72488d35cb19e4f508
|
/cars2.py
|
61f64750e505a90b1c95b419f276ab5b7e8f98a3
|
[] |
no_license
|
Xiecf/Exercises
|
1d5bf54f031958436dba9841b4e8569297aa7e6e
|
2d9db3d14e6ca0f079da8c22e5e8403cde904786
|
refs/heads/master
| 2021-09-02T08:25:23.427122
| 2017-12-31T23:35:18
| 2017-12-31T23:35:18
| 108,338,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
cars = ['bmw','audi','toyota','subaru']
for car in cars:
if car == 'bmw':
print(car.upper())
else:
print(car.title())
car = 'bwm'
print(car == 'bwm')
print(car == 'BWM')
print(car.upper() == 'BWM')
print(car == 'audi')
a = 'xiechaofan'
b = 'Xiechaofan'
c = 'gaojinlei'
d = ['xiechaofan','Xiechaofan','gaojinlei']
for i in d:
    if i == b:
        print('same')
    else:
        print('different')
for i in d:
    if i == b.lower():
        print('same')
    else:
        print('different')
numbers = [1,2,3,4,5]
for number in numbers:
if number == 3:
print(str(number) + '=' + '3')
if number < 3:
print(str(number) + '<' + '3')
if number > 3:
print(str(number) + '>' + '3')
|
[
"x405139708@gmail.com"
] |
x405139708@gmail.com
|
2e81b9016cec6a4b5615e50c923ca887fdc1e699
|
42d12cbc8ab91caa84eb2bf279fb9bb8898ea8e3
|
/chatbot.py
|
29e85ddcfee8c0013f9fa8796998d7165faf5377
|
[] |
no_license
|
gjason98/hi1
|
8d561404f8f4d4012284a2a2c3d1618139e7046b
|
7becb9cc167d16e6d6098606cae35c97e5e78b06
|
refs/heads/master
| 2020-05-03T12:04:24.371429
| 2019-03-30T21:55:30
| 2019-03-30T21:55:30
| 178,615,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,820
|
py
|
import sqlite3
import json
from datetime import datetime
timeframe = '2010-01'
sql_transaction = []
connection = sqlite3.connect('{}.db'.format(timeframe))
c = connection.cursor()
def create_table():
c.execute(
"CREATE TABLE IF NOT EXISTS parent_reply(parent_id TEXT PRIMARY KEY, comment_id TEXT UNIQUE, parent TEXT, comment TEXT, subreddit TEXT, unix INT, score INT)")
def format_data(data):
data = data.replace('\n', ' newlinechar ').replace('\r', ' newlinechar ').replace('"', "'")
return data
def transaction_bldr(sql):
global sql_transaction
sql_transaction.append(sql)
if len(sql_transaction) > 1000:
c.execute('BEGIN TRANSACTION')
for s in sql_transaction:
try:
c.execute(s)
except:
pass
connection.commit()
sql_transaction = []
def sql_insert_replace_comment(commentid, parentid, parent, comment, subreddit, time, score):
try:
sql = """UPDATE parent_reply SET parent_id = ?, comment_id = ?, parent = ?, comment = ?, subreddit = ?, unix = ?, score = ? WHERE parent_id =?;""".format(
parentid, commentid, parent, comment, subreddit, int(time), score, parentid)
transaction_bldr(sql)
except Exception as e:
print('s0 insertion', str(e))
def sql_insert_has_parent(commentid, parentid, parent, comment, subreddit, time, score):
try:
sql = """INSERT INTO parent_reply (parent_id, comment_id, parent, comment, subreddit, unix, score) VALUES ("{}","{}","{}","{}","{}",{},{});""".format(
parentid, commentid, parent, comment, subreddit, int(time), score)
transaction_bldr(sql)
except Exception as e:
print('s0 insertion', str(e))
def sql_insert_no_parent(commentid, parentid, comment, subreddit, time, score):
try:
sql = """INSERT INTO parent_reply (parent_id, comment_id, comment, subreddit, unix, score) VALUES ("{}","{}","{}","{}",{},{});""".format(
parentid, commentid, comment, subreddit, int(time), score)
transaction_bldr(sql)
except Exception as e:
print('s0 insertion', str(e))
def acceptable(data):
if len(data.split(' ')) > 50 or len(data) < 1:
return False
elif len(data) > 1000:
return False
elif data == '[deleted]':
return False
elif data == '[removed]':
return False
else:
return True
def find_parent(pid):
try:
sql = "SELECT comment FROM parent_reply WHERE comment_id = '{}' LIMIT 1".format(pid)
c.execute(sql)
result = c.fetchone()
if result != None:
return result[0]
else:
return False
except Exception as e:
# print(str(e))
return False
def find_existing_score(pid):
try:
sql = "SELECT score FROM parent_reply WHERE parent_id = '{}' LIMIT 1".format(pid)
c.execute(sql)
result = c.fetchone()
if result != None:
return result[0]
else:
return False
except Exception as e:
# print(str(e))
return False
if __name__ == '__main__':
create_table()
row_counter = 0
paired_rows = 0
with open('d:/Data/{}/RC_{}'.format(timeframe.split('-')[0], timeframe), buffering=1000) as f:
for row in f:
row_counter += 1
row = json.loads(row)
parent_id = row['parent_id']
body = format_data(row['body'])
created_utc = row['created_utc']
score = row['score']
comment_id = row['name']
subreddit = row['subreddit']
parent_data = find_parent(parent_id)
if score >= 2:
existing_comment_score = find_existing_score(parent_id)
if existing_comment_score:
if score > existing_comment_score:
if acceptable(body):
sql_insert_replace_comment(comment_id, parent_id, parent_data, body, subreddit, created_utc,
score)
else:
if acceptable(body):
if parent_data:
sql_insert_has_parent(comment_id, parent_id, parent_data, body, subreddit, created_utc,
score)
paired_rows += 1
else:
sql_insert_no_parent(comment_id, parent_id, body, subreddit, created_utc, score)
if row_counter % 100000 == 0:
print('Total Rows Read: {}, Paired Rows: {}, Time: {}'.format(row_counter, paired_rows,
str(datetime.now())))
|
[
"gouthamjason@gmail.com"
] |
gouthamjason@gmail.com
|
83604841e247e2351977a2bf84d6d4fba795a011
|
6ba9e90cfd2a3d03806f730fbbca574c2f8de9f8
|
/extensions/.stubs/clrclasses/__clrclasses__/Autodesk/AutoCAD/ApplicationServices/Core/__init__.py
|
16b83275e04a59474443da8ef2844a1a0301b5d0
|
[] |
no_license
|
anshe80/Pycad
|
696932a2e1eb720fec83790966b64cc4ff821426
|
8c238b23269fe70279a88626e9776a9b1ae9b9a2
|
refs/heads/master
| 2023-01-29T11:17:33.286871
| 2020-12-12T05:33:34
| 2020-12-12T05:33:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,289
|
py
|
from __clrclasses__.Autodesk.AutoCAD.ApplicationServices import BeginDoubleClickEventHandler as _n_0_t_0
from __clrclasses__.Autodesk.AutoCAD.ApplicationServices import PreTranslateMessageEventHandler as _n_0_t_1
from __clrclasses__.Autodesk.AutoCAD.ApplicationServices import SystemVariableChangedEventHandler as _n_0_t_2
from __clrclasses__.Autodesk.AutoCAD.ApplicationServices import SystemVariableChangingEventHandler as _n_0_t_3
from __clrclasses__.Autodesk.AutoCAD.ApplicationServices import DocumentCollection as _n_0_t_4
from __clrclasses__.Autodesk.AutoCAD.ApplicationServices import LongTransactionManager as _n_0_t_5
from __clrclasses__.Autodesk.AutoCAD.ApplicationServices import RecentDocumentCollection as _n_0_t_6
from __clrclasses__.Autodesk.AutoCAD.ApplicationServices import UnmanagedResources as _n_0_t_7
from __clrclasses__.Autodesk.AutoCAD.ApplicationServices import UserConfigurationManager as _n_0_t_8
from __clrclasses__.Autodesk.AutoCAD.ApplicationServices import WhoHasInfo as _n_0_t_9
from __clrclasses__.Autodesk.AutoCAD.DatabaseServices import ResultBuffer as _n_1_t_0
from __clrclasses__.Autodesk.AutoCAD.Publishing import Publisher as _n_2_t_0
from __clrclasses__.Autodesk.AutoCAD.Windows import Window as _n_3_t_0
from __clrclasses__.System import EventHandler as _n_4_t_0
from __clrclasses__.System import Version as _n_4_t_1
from __clrclasses__.System import Uri as _n_4_t_2
from __clrclasses__.System import Nullable as _n_4_t_3
from __clrclasses__.System import IntPtr as _n_4_t_4
from __clrclasses__.System import MulticastDelegate as _n_4_t_5
from __clrclasses__.System import ICloneable as _n_4_t_6
from __clrclasses__.System import IAsyncResult as _n_4_t_7
from __clrclasses__.System import Type as _n_4_t_8
from __clrclasses__.System import AsyncCallback as _n_4_t_9
from __clrclasses__.System.Runtime.Serialization import ISerializable as _n_5_t_0
from __clrclasses__.System.Windows import Window as _n_6_t_0
import typing
class Application(object):
@property
def DocumentManager(self) -> _n_0_t_4:"""DocumentManager { get; } -> DocumentCollection"""
@property
def HasPickFirst(self) -> bool:"""HasPickFirst { get; } -> bool"""
@property
def IsInBackgroundMode(self) -> bool:"""IsInBackgroundMode { get; } -> bool"""
@property
def IsInPlaceServer(self) -> bool:"""IsInPlaceServer { get; } -> bool"""
@property
def IsQuiescent(self) -> bool:"""IsQuiescent { get; } -> bool"""
@property
def LongTransactionManager(self) -> _n_0_t_5:"""LongTransactionManager { get; } -> LongTransactionManager"""
@property
def MainWindow(self) -> _n_3_t_0:"""MainWindow { get; } -> Window"""
@property
def Publisher(self) -> _n_2_t_0:"""Publisher { get; } -> Publisher"""
@property
def RecentDocuments(self) -> _n_0_t_6:"""RecentDocuments { get; } -> RecentDocumentCollection"""
@property
def UnmanagedResources(self) -> _n_0_t_7:"""UnmanagedResources { get; } -> UnmanagedResources"""
@property
def UserConfigurationManager(self) -> _n_0_t_8:"""UserConfigurationManager { get; } -> UserConfigurationManager"""
@property
def Version(self) -> _n_4_t_1:"""Version { get; } -> Version"""
@property
def BeginDoubleClick(self) -> _n_0_t_0:
"""BeginDoubleClick Event: BeginDoubleClickEventHandler"""
@property
def BeginQuit(self) -> _n_4_t_0:
"""BeginQuit Event: EventHandler"""
@property
def EnterModal(self) -> _n_4_t_0:
"""EnterModal Event: EventHandler"""
@property
def Idle(self) -> _n_4_t_0:
"""Idle Event: EventHandler"""
@property
def LeaveModal(self) -> _n_4_t_0:
"""LeaveModal Event: EventHandler"""
@property
def PreTranslateMessage(self) -> _n_0_t_1:
"""PreTranslateMessage Event: PreTranslateMessageEventHandler"""
@property
def QuitAborted(self) -> _n_4_t_0:
"""QuitAborted Event: EventHandler"""
@property
def QuitWillStart(self) -> _n_4_t_0:
"""QuitWillStart Event: EventHandler"""
@property
def SystemVariableChanged(self) -> _n_0_t_2:
"""SystemVariableChanged Event: SystemVariableChangedEventHandler"""
@property
def SystemVariableChanging(self) -> _n_0_t_3:
"""SystemVariableChanging Event: SystemVariableChangingEventHandler"""
@staticmethod
def EvaluateDiesel(dieselCmd: str) -> str:...
@staticmethod
def GetSystemVariable(name: str) -> object:...
@staticmethod
def GetWhoHasInfo(pathname: str) -> _n_0_t_9:...
@staticmethod
def Invoke(args: _n_1_t_0) -> _n_1_t_0:...
@staticmethod
def IsFileLocked(pathname: str) -> bool:...
@staticmethod
def LoadJSScript(urlJSFile: _n_4_t_2):...
@staticmethod
def Quit():...
@staticmethod
def SetSystemVariable(name: str, value: object):...
@staticmethod
def ShowAlertDialog(message: str):...
@staticmethod
def ShowModalWindow(owner: _n_4_t_4, formToShow: _n_6_t_0, persistSizeAndPosition: bool) -> _n_4_t_3[bool]:...
@staticmethod
def ShowModalWindow(owner: _n_6_t_0, formToShow: _n_6_t_0, persistSizeAndPosition: bool) -> _n_4_t_3[bool]:...
@staticmethod
def ShowModalWindow(owner: _n_4_t_4, formToShow: _n_6_t_0) -> _n_4_t_3[bool]:...
@staticmethod
def ShowModalWindow(owner: _n_6_t_0, formToShow: _n_6_t_0) -> _n_4_t_3[bool]:...
@staticmethod
def ShowModalWindow(formToShow: _n_6_t_0) -> _n_4_t_3[bool]:...
@staticmethod
def ShowModelessWindow(owner: _n_4_t_4, formToShow: _n_6_t_0, persistSizeAndPosition: bool):...
@staticmethod
def ShowModelessWindow(owner: _n_6_t_0, formToShow: _n_6_t_0, persistSizeAndPosition: bool):...
@staticmethod
def ShowModelessWindow(owner: _n_4_t_4, formToShow: _n_6_t_0):...
@staticmethod
def ShowModelessWindow(owner: _n_6_t_0, formToShow: _n_6_t_0):...
@staticmethod
def ShowModelessWindow(formToShow: _n_6_t_0):...
@staticmethod
def TryGetSystemVariable(name: str) -> object:...
@staticmethod
def UpdateScreen():...
class CreateContextMenuHandler(_n_4_t_5, _n_4_t_6, _n_5_t_0):
def __init__(self, A_0: object, A_1: _n_4_t_4) -> CreateContextMenuHandler:...
def BeginInvoke(self, type: _n_4_t_8, callback: _n_4_t_9, obj: object) -> _n_4_t_7:...
def EndInvoke(self, result: _n_4_t_7) -> object:...
def Invoke(self, type: _n_4_t_8) -> object:...
class EnableWinFormHandler(_n_4_t_5, _n_4_t_6, _n_5_t_0):
def __init__(self, A_0: object, A_1: _n_4_t_4) -> EnableWinFormHandler:...
def BeginInvoke(self, o: object, enable: bool, callback: _n_4_t_9, obj: object) -> _n_4_t_7:...
def EndInvoke(self, result: _n_4_t_7) -> bool:...
def Invoke(self, o: object, enable: bool) -> bool:...
class FilterMessageWinFormsHandler(_n_4_t_5, _n_4_t_6, _n_5_t_0):
def __init__(self, A_0: object, A_1: _n_4_t_4) -> FilterMessageWinFormsHandler:...
def BeginInvoke(self, msg: object, callback: _n_4_t_9, obj: object) -> _n_4_t_7:...
def EndInvoke(self, result: _n_4_t_7) -> bool:...
def Invoke(self, msg: object) -> bool:...
class OnIdleWinFormsHandler(_n_4_t_5, _n_4_t_6, _n_5_t_0):
def __init__(self, A_0: object, A_1: _n_4_t_4) -> OnIdleWinFormsHandler:...
def BeginInvoke(self, callback: _n_4_t_9, obj: object) -> _n_4_t_7:...
def EndInvoke(self, result: _n_4_t_7):...
def Invoke(self):...
class OnWinFormsLoadedHandler(_n_4_t_5, _n_4_t_6, _n_5_t_0):
def __init__(self, A_0: object, A_1: _n_4_t_4) -> OnWinFormsLoadedHandler:...
def BeginInvoke(self, callback: _n_4_t_9, obj: object) -> _n_4_t_7:...
def EndInvoke(self, result: _n_4_t_7):...
def Invoke(self):...
class OnWpfLoadedHandler(_n_4_t_5, _n_4_t_6, _n_5_t_0):
def __init__(self, A_0: object, A_1: _n_4_t_4) -> OnWpfLoadedHandler:...
def BeginInvoke(self, callback: _n_4_t_9, obj: object) -> _n_4_t_7:...
def EndInvoke(self, result: _n_4_t_7):...
def Invoke(self):...
class PreProcessMessageWinFormsHandler(_n_4_t_5, _n_4_t_6, _n_5_t_0):
def __init__(self, A_0: object, A_1: _n_4_t_4) -> PreProcessMessageWinFormsHandler:...
def BeginInvoke(self, msg: object, callback: _n_4_t_9, obj: object) -> _n_4_t_7:...
def EndInvoke(self, result: _n_4_t_7) -> bool:...
def Invoke(self, msg: object) -> bool:...
|
[
"34087817+xsfhlzh@users.noreply.github.com"
] |
34087817+xsfhlzh@users.noreply.github.com
|
dd7ac29d40bea1f4506785804167ad263dad33c7
|
46ccf20b5e9e03ece36c260ee910e0838796fa6b
|
/technology/computerBasis/algorithms/others/简单递归.py
|
6110623678eeaebe3e964dfe695c11149385f1db
|
[] |
no_license
|
alexwufeihao/Learn
|
2d34f24695e7152d29d120821394b00d628355b5
|
f8e0d7e05df7be3800aaca8ee53c726899bcdad1
|
refs/heads/master
| 2020-09-15T18:17:03.945239
| 2019-11-15T10:22:51
| 2019-11-15T10:22:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
def sumList(numList) :
if len(numList) == 1 :
return numList[0]
else :
return numList[0] + sumList(numList[1:])
def systemConversion(n, base) :
convertStr = "0123456789ABCDEF"
if n < base :
return convertStr[n]
else :
        return systemConversion(n//base, base) + convertStr[n%base]
# Tower of Hanoi
def moveTower( height, fromPole, withPole, toPole ) :
if height >= 1 :
moveTower( height-1, fromPole, toPole, withPole )
moveDisk( fromPole, toPole )
moveTower( height-1, withPole, fromPole, toPole )
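# A tower of height h takes 2**h - 1 single-disk moves (7 moves for h = 3).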
def moveDisk( fromPole, toPole ) :
print ( fromPole + " -> " + toPole )
if __name__ == "__main__" :
print ( sumList([1,2,3,4,5]) )
print ( systemConversion(1453,16) )
    moveTower( 3, "A", "B", "C" )
|
[
"alex@Alexs-MacBook-Pro.local"
] |
alex@Alexs-MacBook-Pro.local
|
c7ac74822eca60f7e35c153feddfccd92762e0d1
|
7ef302e0d3a9579740c822d4e4fd806ed43e21a7
|
/web/app/controllers/indicators_handler.py
|
ae4f0cf78f4246076db60501defb8a811e1ef694
|
[] |
no_license
|
gcinnovate/mtracpro
|
d9b7327f4a02894621978fc65cf12b13e495e358
|
7220211e0af0c31991e4d456fd94a2b4e843859a
|
refs/heads/master
| 2023-05-25T07:01:41.074148
| 2023-05-11T13:07:40
| 2023-05-11T13:07:40
| 157,218,976
| 0
| 0
| null | 2018-11-12T13:37:21
| 2018-11-12T13:37:21
| null |
UTF-8
|
Python
| false
| false
| 5,874
|
py
|
import web
from . import csrf_protected, db, require_login, render, get_session
from app.tools.utils import audit_log, default, lit
from app.tools.pagination2 import doquery, countquery, getPaginationString
from settings import PAGE_LIMIT
class Indicators:
@require_login
def GET(self):
params = web.input(page=1, ed="", d_id="")
allow_edit = False
try:
edit_val = int(params.ed)
allow_edit = True
except ValueError:
pass
try:
page = int(params.page)
except:
page = 1
limit = PAGE_LIMIT
start = (page - 1) * limit if page > 0 else 0
if params.ed and allow_edit:
res = db.query(
"SELECT id, description, shortname, slug, cmd, form, form_order, dataset, dataelement, "
"category_combo, threshold FROM dhis2_mtrack_indicators_mapping WHERE id = $id",
{'id': edit_val})
if res:
r = res[0]
name = r.description
shortname = r.shortname
slug = r.slug
cmd = r.cmd
form = r.form
form_order = r.form_order
dataset = r.dataset
dataelement = r.dataelement
category_combo = r.category_combo
threshold = r.threshold
dic = lit(
relations="dhis2_mtrack_indicators_mapping",
fields=(
"id, description, shortname, slug, form, form_order, cmd, dataset, "
"dataelement, category_combo "),
criteria="",
order="dataset, form, form_order",
limit=limit, offset=start
)
mappings = doquery(db, dic)
count = countquery(db, dic)
pagination_str = getPaginationString(default(page, 0), count, limit, 2, "indicators", "?page=")
l = locals()
del l['self']
return render.indicators(**l)
@csrf_protected
@require_login
def POST(self):
params = web.input(
name="", shortname="", form="", form_order="", slug="", cmd="",
dataset="", dataelement="", category_combo="", threshold="", page="1", ed="", d_id="")
session = get_session()
allow_edit = False
try:
edit_val = int(params.ed)
allow_edit = True
except:
pass
with db.transaction():
if params.ed and allow_edit:
r = db.query(
"UPDATE dhis2_mtrack_indicators_mapping SET "
"(description, shortname, form, form_order, slug, cmd, dataset, dataelement, category_combo, threshold) "
"= ($descr, $shortname, $form, $form_order, $slug, $cmd, $dataset, $dataelement, $category_combo, $threshold) "
"WHERE id = $id", {
'descr': params.name, 'shortname': params.shortname, 'form': params.form,
'form_order': params.form_order, 'slug': params.slug,
'cmd': params.cmd, 'dataset': params.dataset,
'threshold': None if not params.threshold else params.threshold,
'dataelement': params.dataelement, 'category_combo': params.category_combo,
'id': params.ed}
)
log_dict = {
'logtype': 'Web', 'action': 'Update', 'actor': session.username,
'ip': web.ctx['ip'],
'descr': 'Updated Indicator Mapping. id:%s => Dataset %s: Form:%s Cmd:%s)' % (
params.ed, params.dataset, params.form, params.cmd),
'user': session.sesid
}
audit_log(db, log_dict)
return web.seeother("/indicators")
else:
has_indicator = db.query(
"SELECT id FROM dhis2_mtrack_indicators_mapping "
"WHERE form=$form AND cmd=$cmd AND dataset=$dataset",
{'form': params.form, 'cmd': params.cmd, 'dataset': params.dataset})
                if has_indicator:
                    session.idata_err = (
                        "Indicator with Dataset:%s, Form:%s, Command:%s "
                        "already registered" % (params.dataset, params.form, params.cmd)
                    )
                    return web.seeother("/indicators")
                session.idata_err = ""
r = db.query(
"INSERT INTO dhis2_mtrack_indicators_mapping (description, shortname, form, form_order, "
"slug, cmd, dataset, dataelement, category_combo, threshold) VALUES "
"($descr, $shortname, $form, $form_order, $slug, $cmd, $dataset, $dataelement, "
"$category_combo, $threshold) RETURNING id", {
'descr': params.name, 'shortname': params.shortname, 'form': params.form,
'form_order': params.form_order, 'slug': params.slug,
'cmd': params.cmd, 'dataset': params.dataset,
'threshold': None if not params.threshold else params.threshold,
'dataelement': params.dataelement, 'category_combo': params.category_combo}
)
log_dict = {
'logtype': 'Web', 'action': 'Create', 'actor': session.username,
'ip': web.ctx['ip'],
'descr': 'Created Indicator Mapping. Dataset %s: Form:%s Cmd:%s)' % (
params.dataset, params.form, params.cmd),
'user': session.sesid
}
audit_log(db, log_dict)
return web.seeother("/indicators")
l = locals()
del l['self']
return render.indicators(**l)
|
[
"sekiskylink@gmail.com"
] |
sekiskylink@gmail.com
|
663d22f51d1e0e55f0fe03f62605da3846c2db14
|
44f52bd8972633f4c4904352e3e1fbbc86f0dff4
|
/series_2/Invincible/invincible.py
|
0dc92fdeaf6a13c3dab980d60d2e0ba97a2ff0a0
|
[] |
no_license
|
MmahdiM79/Algorithms_Design_Sprig2021
|
ca364fc59870960c755e334b48d080cf26237405
|
52ef00e2e3b882b698aadd5b1cfd8d1d767d3eb0
|
refs/heads/main
| 2023-06-29T04:11:59.860984
| 2021-07-27T12:48:21
| 2021-07-27T12:48:21
| 362,185,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
if __name__ == "__main__":
n, m, k = [int(num) for num in input().split(' ')]
buckets = {}
for i in range(m//k):
buckets[i] = []
for num in input().split():
buckets[int(num)//k].append(int(num))
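    # Bucket i collects the values in [i*k, (i+1)*k); the answer printed
    # below is any member of the most populated bucket.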
largest_bucket = (-1, [])
for bucket in buckets.items():
if len(bucket[1]) > len(largest_bucket[1]):
largest_bucket = bucket
print(largest_bucket[1][0])
|
[
"MmahdiM79@users.noreply.github.com"
] |
MmahdiM79@users.noreply.github.com
|
17958f36120e426c6d30503b1e0896488c0ec86c
|
4dbfe46fc45348845f059bf3191c409e4a3a31ef
|
/codeforces/158A.py
|
5b2e8aa9f3525fe95f07fdcd6122d109992be901
|
[] |
no_license
|
chinatip/problem-solving
|
b5b1ac40d354322599bbc39daed00a9b1e6280f8
|
7cff855a4bfd1a02815e15c98cf5ba2378bb1f9c
|
refs/heads/master
| 2021-07-04T02:20:19.956352
| 2021-06-07T16:57:15
| 2021-06-07T16:57:15
| 83,819,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
from sys import stdin, stdout
n, k = map(int, stdin.readline().rstrip().split())
count = 0
list_a = list(map(int, stdin.readline().rstrip().split()))
threshold = list_a[k-1]
for x in list_a:
    if x >= threshold and x > 0:
        count += 1
print(count)
|
[
"chinatip.v@ku.th"
] |
chinatip.v@ku.th
|
3e1d0e7d3a78fc1ec942c840368eb6532468301d
|
d80dd843b3d4ed1fff6e192695837a14d322c831
|
/utils/crawl_rq.py
|
4a58a3270465a9a4ef27eb975f76137279828ce0
|
[] |
no_license
|
scottliyq/convertible_bond
|
9faea4e1949b56e0efce6aec3c81d3b01a83f222
|
d01a5dad4a6edb34adc066d7a239e9dccd7ea1c0
|
refs/heads/main
| 2023-08-25T23:10:58.114982
| 2021-11-02T00:12:03
| 2021-11-02T00:12:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
#!/usr/bin/env python3
import json
import pathlib
from absl import app, flags
from datetime import date, timedelta
from conbond import ricequant
import pandas as pd
from tqdm import tqdm
import logging
FLAGS = flags.FLAGS
flags.DEFINE_string('cache_dir', None, 'Cache directory')
flags.mark_flag_as_required('cache_dir')
flags.DEFINE_string('start_date',
date.today().strftime('%Y-%m-%d'), 'Date to start')
flags.DEFINE_string('end_date',
date.today().strftime('%Y-%m-%d'), 'Date to end')
def main(argv):
username = None
password = None
auth_file = pathlib.Path('.auth.json')
auth = json.loads(auth_file.open('r').read())
username = auth['rqdata']['username']
password = auth['rqdata']['password']
df_trading_dates = pd.read_excel('trading_dates.xlsx')
df_trading_dates = df_trading_dates[
df_trading_dates.trading_date.dt.date >= date.fromisoformat(
FLAGS.start_date)]
df_trading_dates = df_trading_dates[df_trading_dates.trading_date.dt.date
<= date.fromisoformat(FLAGS.end_date)]
ricequant.auth(username, password)
dates = df_trading_dates.trading_date.to_list()
for i in tqdm(range(0, len(dates))):
ricequant.fetch(dates[i], FLAGS.cache_dir, logging)
if __name__ == '__main__':
app.run(main)
|
[
"paulhybryant@gmail.com"
] |
paulhybryant@gmail.com
|
5e5b7725a3819c8115be977c155635c73eee8e85
|
7316b60dcb8de197f075863c5014ff5b9c1ef3ce
|
/manage.py
|
2bf2aff58f8266d586673c52452e4f99c969f314
|
[
"MIT"
] |
permissive
|
jirah/django-metrics-dashboard
|
944c154222968c6f9d45dfd96ce1062f446bbd4d
|
f4275398960b286a087af94b3929397d1d8e173f
|
refs/heads/master
| 2020-12-25T12:18:08.151073
| 2013-03-06T15:07:14
| 2013-03-06T15:07:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE', 'metrics_dashboard.tests.south_settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"mbrochh@gmail.com"
] |
mbrochh@gmail.com
|
229ce162d2cfd193e01b36085ab0dc6132abacd2
|
b8e6e35123911bb57027b751272f566ca2811a2f
|
/OpenCV-Ex4-Haar-ROI.py
|
498e16670197891fa7c69e5e7e71a6f6af804eb8
|
[] |
no_license
|
amaxwong/mbs-3523
|
113ca7df3224b3ebd5e0722d9ca777ea6f3041b2
|
a62f91c7f5b676a3cbe665789379829cd345ca42
|
refs/heads/main
| 2023-06-17T09:07:16.362774
| 2021-07-20T06:18:31
| 2021-07-20T06:18:31
| 335,199,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
# Save this file to your Github as OpenCV-Ex4-Haar-ROI.py
import cv2
import numpy as np
print(cv2.__version__)
faceCascade = cv2.CascadeClassifier('Resources/haarcascade_frontalface_default.xml')
capture = cv2.VideoCapture('Resources/IU-edited.mp4')
# capture = cv2.VideoCapture(0)
capture.set(3,640)
capture.set(4,480)
font = cv2.FONT_HERSHEY_PLAIN
while True:
success, img = capture.read()
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# print(np.shape(imgGray))
imgGray = cv2.cvtColor(imgGray, cv2.COLOR_GRAY2BGR)
# print(np.shape(imgGray))
faces = faceCascade.detectMultiScale(imgGray, 1.2, 10)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
roiImg = img[y:y + h, x:x + w].copy()
imgGray[y:y + h, x:x + w]=roiImg
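    # Net effect: the displayed frame is grayscale everywhere except the
    # detected face rectangles, which are pasted back in full colour.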
cv2.imshow('Frame', imgGray)
#cv2.moveWindow('Frame', 100,20)
if cv2.waitKey(1) == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
amaxwong.noreply@github.com
|
07a65541dd334256a2a5e69f6d0ad3c139b8da01
|
ab1dd7005b4a4273da3771665250c0b3fcf9d0c1
|
/scripts/RoutVsTime.py
|
3ddaa9962fb4d9f26f7bc34980e3e75af476b258
|
[] |
no_license
|
jordan-stone/Disks
|
5388eb29e1bc38382ddb2cd25b921e31decba9bc
|
8083c5e1c17e22356a396dfd7c08ed3df5fae605
|
refs/heads/master
| 2021-01-17T04:43:33.635093
| 2016-07-01T21:55:33
| 2016-07-01T21:55:33
| 61,930,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,208
|
py
|
import numpy as np
import matplotlib.pyplot as mpl
from Disks import *
from Disks.evolve import j_mdot_new_rout, Offner_McKee_dotm_2CTC, J0, J_of_m
from Disks import Q
mf=0.05
j0=J0(1.5,mf,100)#units of specific angular momentum
mstar =[0.001]
mdisks=[0.0001]
routs=[0.5]
t=[0]
deltat=[0]
minfs=[0]
Js=[]
Ltots=[]
oneMore=False
while True:
minfs.append(Offner_McKee_dotm_2CTC(mstar[-1]+mdisks[-1],mf,50))#m_current should be total star+disk mass
mdot_star=minfs[-1]*0.9
deltat.append(0.01*(mdisks[-1]+mstar[-1])/(minfs[-1]-mdot_star))
Js.append(J_of_m((mstar[-1]+mdisks[-1]),mf,j0,1.5))#specific
rout0,mstar0,mdisk0,Ltot0=j_mdot_new_rout(mstar[-1],mdisks[-1],
mdot_star,Js[-1],minfs[-1],deltat[-1],
rout_0=routs[-1],returnMstarMdisk=True)
routs.append(rout0)
mdisks.append(mdisk0)
mstar.append(mstar0)
Ltots.append(Ltot0)
t.append(t[-1]+deltat[-1])
#deltat.append((1/Q.Omega(mstar[-1],routs[-1]))/year2second)
if oneMore:
break
if (mstar[-1]+mdisks[-1]) > mf:
oneMore=True
from plottingTools.fixplotsImproved import compModern, tickFont
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter, MaxNLocator
f=mpl.figure(figsize=(16,8))
a=f.add_subplot(121)
a2=f.add_subplot(122)
#formatter=ScalarFormatter()
#formatter.set_powerlimits((-3,3))
#formatter.set_scientific(True)
a.loglog(t,routs,linewidth=3)
a.set_xlabel('Time [years]',fontproperties=compModern(25))
a.set_ylabel('R$_{\mathrm{out}}$ [AU]',fontproperties=compModern(25))
#formatter=FormatStrFormatter('%6.1e')
#locator=MaxNLocator(nbins=3,prune='both')
#a.xaxis.set_major_formatter(formatter)
#a.xaxis.set_major_locator(locator)
tickFont(a,'x',compModern(18))
tickFont(a,'y',compModern(18))
a2.loglog(t[1:],Ltots,linewidth=3)
a2.loglog(t[1:],np.cumsum(np.array(Js)*np.array(minfs[1:])*Msolar2g*np.array(deltat[1:])),linewidth=3)
a2.set_xlabel('Time [years]',fontproperties=compModern(25))
a2.set_ylabel('L',fontproperties=compModern(25))
tickFont(a2,'x',compModern(18))
tickFont(a2,'y',compModern(18))
mpl.tight_layout()
mpl.show()
|
[
"jstone@as.arizona.edu"
] |
jstone@as.arizona.edu
|
cb4922bf9a019bc69e5ac01be3bd96a2cc2222da
|
3a94c9aab15768da927c1f4a8edfb94b41f8c9bf
|
/api/views.py
|
816d65a7c8c763ed0ff43b2d308ec9693938734c
|
[] |
no_license
|
humanbeing-dev/wb_todo-ex3
|
6ed7c8b2d9b9a1b2d033bfe1449e8971749aacde
|
ec4c6bcecf88492c22401158a15ff6659a240608
|
refs/heads/master
| 2022-12-03T16:55:03.466409
| 2020-08-22T09:05:59
| 2020-08-22T09:05:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,452
|
py
|
from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import TaskSerializer
from .models import Task
@api_view(['GET'])
def api_overview(request):
api_urls = {
'List': '/task-list/',
'Detail View': '/task-detail/<str:pk>',
'Create': '/task-create/',
'Update': '/task-update/<str:pk>',
'Delete': '/task-delete/<str:pk>'
}
return Response(api_urls)
@api_view(['GET'])
def task_list(request):
tasks = Task.objects.all()
serializers = TaskSerializer(tasks, many=True)
return Response(serializers.data)
@api_view(['GET'])
def task_detail(request, pk):
tasks = Task.objects.get(id=pk)
serializers = TaskSerializer(tasks, many=False)
return Response(serializers.data)
@api_view(['POST'])
def task_create(request):
serializer = TaskSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
@api_view(['POST'])
def task_update(request, pk):
task = Task.objects.get(id=pk)
serializer = TaskSerializer(instance=task, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
@api_view(['DELETE'])
def task_delete(request, pk):
task = Task.objects.get(id=pk)
task.delete()
return Response("DELETED")
|
[
"maciej.sitowski@gmail.com"
] |
maciej.sitowski@gmail.com
|
f746a4ebd5b0ac7fc775267895f58a74ed2423be
|
f736d31cfaceb87e123b41ffdc2d02a344a18854
|
/PM_Interview/16_2.py
|
c300ca8c00d982f592d3d8f2dfee83176b5cb838
|
[] |
no_license
|
jackiezeng01/Coding_Practice
|
4aeddb38f986c31330851b4fc2f38ddcd3b6d863
|
670904585c3c9538832c6042e46e0bfb298b1cfd
|
refs/heads/master
| 2023-07-08T21:45:14.447494
| 2021-08-02T02:27:30
| 2021-08-02T02:27:30
| 306,463,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
'''
Reverse the order of elements in an array (without creating a new array)
'''
def reverse_array(arr):
p1 = 0
p2 = len(arr)-1
while p1 < p2:
arr[p1], arr[p2] = arr[p2], arr[p1]
p1+=1
p2-=1
return arr
if __name__ == "__main__":
arr = [1,2,3,4]
print(arr)
print("reversed: ", reverse_array(arr))
|
[
"dolphin.pool01@gmail.com"
] |
dolphin.pool01@gmail.com
|
d347704bb2848e440833e1b735926887eb7f9704
|
4af6edfc1d581ff8f2aaaafbb7628461e4d3a7e6
|
/pictomood/conf/oea_less_cross_val_all.py
|
530429237facacd029c9e58b5815d5c6c0451048
|
[
"MIT"
] |
permissive
|
pic2mood/pictomood
|
ec6a91f3e9e0c8ce0ef3b4a50e7c760df1d2ef66
|
3878e12002b008991571d63820f60154f500ce24
|
refs/heads/master
| 2021-05-09T12:32:55.441895
| 2018-03-03T17:02:01
| 2018-03-03T17:02:01
| 119,013,693
| 0
| 1
|
MIT
| 2018-03-03T08:06:17
| 2018-01-26T06:16:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
from pictomood.config import *
model = {
'name': 'oea_less',
'validator': 'cross_val',
'emotions': 'all',
'ext': '.pkl'
}
trainer = {
'dataset': path_as([
'data',
model['name'],
name_generator(model, suffix='dataset')
]),
'model': path_as([
'data',
model['name'],
name_generator(model, suffix='model')
]),
'raw_images_dataset': os.path.join(
os.getcwd(),
'training_p2m',
'data',
dir_generator(model)
),
'raw_images_testset': os.path.join(
os.getcwd(),
'training_p2m',
'data',
dir_generator(model)
),
'features': {
'top_colors': Palette.dominant_colors,
'colorfulness': Color.scaled_colorfulness,
'texture': Texture.texture
},
'columns': [
'Image Path',
'Top Color 1st',
'Top Color 2nd',
'Top Color 3rd',
'Colorfulness',
'Texture',
'Emotion Tag',
'Emotion Value'
]
}
|
[
"franciscoraymel@gmail.com"
] |
franciscoraymel@gmail.com
|
dca7fb7cb747b50c4973e0758058bca3cef0d6a0
|
eef17f98a29cec4e432df8b45e148117c75363a2
|
/apps/apartments/admin.py
|
a39b26434d68bbeee3e0d2091f6bba10586a69df
|
[] |
no_license
|
patriciohc1234/season_booking2
|
e59e4b6cf63343ba0c157f3427e8aa41ab4da148
|
a9a9032df5f3c47fcccbdf0b8799bd4e6ead8cd5
|
refs/heads/main
| 2023-09-04T21:38:27.099649
| 2021-11-25T16:02:38
| 2021-11-25T16:02:38
| 431,904,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
from django.contrib import admin
from .models import Apartment
admin.site.register(Apartment)
|
[
"p.huentemil@alumnos.duoc.cl"
] |
p.huentemil@alumnos.duoc.cl
|
fce32e3a6c06c5cdd01be9d8fd1b7129fe988b18
|
a6c07cba0aa8f35e169291ceb0f6ff1a71087016
|
/TareaTony.py
|
61cec593205fce9d2353710feb52d00d09bef570
|
[] |
no_license
|
parra320/Python
|
fea52b46fb469f76cec2b3d451075c99b2bb1f93
|
a691423fbbbbc0e4a00852d7074e8a4a6e1e93e4
|
refs/heads/main
| 2023-05-05T06:41:36.064912
| 2021-05-28T07:58:20
| 2021-05-28T07:58:20
| 371,624,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,797
|
py
|
# Program to compute presence on social networks
AbrirArchivo= open("presenciaderedes.csv", "r")
Contenido= AbrirArchivo.readlines()
AbrirArchivo.close()
valor=[]
valor.append(Contenido[3])
print(type(valor))
print("\n\t¿Que operación quiere realizar?: ")
print("1)Calcular diferencia de cierto aspecto comparando 2 meses \n2)Calcular tasa de crecimiento de una red social")
Operacion= input("Elija una opcion: ")
Meses= ["Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Septiembre", "Octubre", "Noviembre", "Diciembre"]
if Operacion=="1":
print("\n\tElija el primer mes para comparar: ")
print("1)Enero \t2)Febrero \t3)Marzo \n4)Abril \t5)Mayo \t\t6)Junio \n7)Julio \t8)Agosto \t9)Septiembre \n10)Octubre \t11)Noviembre \t12)Diciembre")
PrimerMes= input("Elija una Opcion: ")
print("\n\tElija el segundo mes para comparar: ")
SegundoMes= input("Elija una Opcion: ")
if (0<int(PrimerMes) and int(PrimerMes)<=12):
if (0<int(SegundoMes) and int(SegundoMes)<=12):
print("\n\t¿De que red social desea conocer la información?")
print("1)Facebook \t2)Twitter \t3)Youtube")
RedSocial= input("Elija una opcion: ")
if RedSocial=="1":
print("\n\t\tContenedores: \n1)Seguidores \t\t\t2)Porcentaje de Crecimiento \n3)Publicaciones \t\t4)Me gusta en publicaciones \n5)Publicaciones compartidas \t6)Comentarios\n")
Contenedor= input("Elija una opcion: ")
elif RedSocial=="2":
print("\n\t\tContenedores: \n1)Seguidores \t\t2)Porcentaje de Crecimiento \n3)Publicaciones \t4)Retuits \n5)Me Gusta \t\t6)Impactos \n7)Videos \t\t8)Visualizaciones \n9)Comentarios \t\t10)Me Gusta\n")
elif RedSocial=="3":
print("\n\t\tContenedores: \n1)Videos \t2)Visualizaciones \n3)Comentarios \t4)Me Gusta")
else:
print("Opcion no valida")
else:
print("Opcion no valida")
elif Operacion=="2":
print("\n\tElija el primer mes para comparar: ")
print("1)Enero \t2)Febrero \t3)Marzo \n4)Abril \t5)Mayo \t\t6)Junio \n7)Julio \t8)Agosto \t9)Septiembre \n10)Octubre \t11)Noviembre \t12)Diciembre")
PrimerMes= input("Elija una Opcion: ")
print("\n\tElija el segundo mes para comparar: ")
SegundoMes= input("Elija una Opcion: ")
if (0<int(PrimerMes) and int(PrimerMes)<=12):
if (0<int(SegundoMes) and int(SegundoMes)<=12):
print("\n\t¿De que red social desea conocer la información?")
print("1)Facebook \t2)Twitter")
RedSocial= input("Elija una opcion: ")
if RedSocial=="1":
MesInicial= Meses[int(PrimerMes)-1]
MesFinal= Meses[int(SegundoMes)-1]
                ValorDeMesInicial= int(Contenido[1].split(",")[int(PrimerMes)+2])
                ValorDeMesFinal= int(Contenido[1].split(",")[int(SegundoMes)+2])
print(ValorDeMesFinal)
TasaDeIncremento= float(((ValorDeMesFinal-ValorDeMesInicial)/ValorDeMesInicial)*100)
print("La Tasa de Incremento entre el mes de",MesInicial,"y el mes de",MesFinal,"es ",TasaDeIncremento)
elif RedSocial=="2":
print("cuack2")
else:
print("Opcion no valida")
else:
print("Opcion no valida")
else:
print("Opcion no valida")
else:
print("Opcion no valida")
"""print("\n\tElija el primer mes para comparar: ")
print("1)Enero \t2)Febrero \t3)Marzo \n4)Abril \t5)Mayo \t\t6)Junio \n7)Julio \t8)Agosto \t9)Septiembre \n10)Octubre \t11)Noviembre \t12)Diciembre")
PrimerMes= input("Elija una Opcion: ")
print("\n\tElija el segundo mes para comparar: ")
SegundoMes= input("Elija una Opcion: ")
if (0<int(PrimerMes) and int(PrimerMes)<=12):
if (0<int(SegundoMes) and int(SegundoMes)<=12):
print("\n\t¿De que red social desea conocer información?")
print("1)Facebook \t2)Twitter \t3)Youtube")
RedSocial= input("Elija una opcion: ")
if RedSocial=="1":
print("\n\t\tContenedores: \n1)Seguidores \t\t\t2)Porcentaje de Crecimiento \n3)Publicaciones \t\t4)Me gusta en publicaciones \n5)Publicaciones compartidas \t6)Comentarios\n")
Contenedor= input("Elija una opcion: ")
elif RedSocial=="2":
print("\n\t\tContenedores: \n1)Seguidores \t\t2)Porcentaje de Crecimiento \n3)Publicaciones \t4)Retuits \n5)Me Gusta \t\t6)Impactos \n7)Videos \t\t8)Visualizaciones \n9)Comentarios \t\t10)Me Gusta\n")
elif RedSocial=="3":
print("\n\t\tContenedores: \n1)Videos \t2)Visualizaciones \n3)Comentarios \t4)Me Gusta")
else:
print("Opcion no valida")
else:
print("Opcion no valida")
else:
print("Opcion no valida")"""
|
[
"luis.deathofnote@gmail.com"
] |
luis.deathofnote@gmail.com
|
f707c226507a04668fafe52bd8750d1ee2f28d0e
|
a4c9bbc5f46973111a52cb9cfe952fafe962ba9e
|
/src/board.py
|
2740c4db1d99a2593b222914ef4733fe1d1e88ae
|
[] |
no_license
|
aksalcido/connect-four-ai
|
a29654bc8cefef79099f44c4daf680e2c14ebf7a
|
f6698dbd7f139089106eb1f40b98d61635e90003
|
refs/heads/main
| 2023-05-04T11:31:40.037358
| 2021-05-22T01:00:17
| 2021-05-22T01:00:17
| 369,395,643
| 0
| 0
| null | 2021-05-22T01:00:18
| 2021-05-21T02:49:04
|
Python
|
UTF-8
|
Python
| false
| false
| 8,616
|
py
|
from python_settings import settings
class Board:
def __init__(self):
'''
Initializes a board object. This object contains the contents for the current game state of the board.
It essentially does the moving and winner checks but does not contain any of the game attributes
(e.g: players, current_turn, etc).
'''
self.board = self.initialize_new_board()
self.winning_color = None
def move(self, player_move: int, player_color: int):
'''
Makes a move based off player_move and player_color. Error checking is done before this
function is called, so row is guaranteed to be available and the board[row][player_move - 1] is updated
to reflect the move.
'''
row = self.column_available(player_move)
self.board[row][player_move - 1] = player_color
def all_moves(self) -> list:
'''
Gets all of the moves that are possible on the current board. Meaning an index of each column that is not
        full and can receive a piece.
Returns a list of these indexes starting at 0
'''
moves = []
for i in range(settings.COLS):
if self.board[0][i] == settings.EMPTY:
moves.append(i + 1)
return moves
# ===== Move Validation Checking ===== #
def valid_move(self, player_move) -> bool:
'''
Returns a bool:
True if the player_move is a valid move that can be made. The requirements for validity
is to be a column integer that ranges from [1-7] and the column has to be available,
meaning the column can not be full. False otherwise.
'''
return player_move and player_move.isnumeric() and \
self.in_bounds(int(player_move)) and self.column_available(int(player_move)) != settings.INVALID_COLUMN
def in_bounds(self, player_move) -> bool:
'''
Returns a bool:
True if the player_move integer is within the range of [1-7], meaning it is a valid
column argument. False otherwise.
'''
return player_move > 0 and player_move <= settings.COLS
def column_available(self, column: int) -> int:
'''
Returns an int representing either a valid row that the player_move can be made in or INVALID_COLUMN
to represent that the move can not be made in this column.
'''
for i in range(1, settings.ROWS + 1):
if self.board[settings.ROWS - i][column - 1] == settings.EMPTY:
return settings.ROWS - i
return settings.INVALID_COLUMN
# ===== Winner/Tie Checks ===== #
def winner(self) -> bool:
'''
Returns a bool:
True if any of the four in a row checks pass, meaning there is a winner on the board.
'''
return self.check_vertical() or self.check_horizontal() or self.check_diagonal_left() or self.check_diagonal_right()
def tie(self) -> bool:
'''
Returns a bool:
True if there is a tie on the board, meaning there is no winner which is checked by self.winner() before this function
is checked. A tie in Connect Four means every slot is full and there is no four in a row anywhere on the board.
This function does that by checking if every piece on the top row is not empty, if it is not then we know there is
no more available columns for moves.
False otherwise.
'''
for j in range(settings.COLS):
if self.board[0][j] == settings.EMPTY:
return False
return True
def gameover(self) -> bool:
'''
Returns a bool:
True if there is a winner or tie on the board.
False otherwise.
This function is utility to check various board states for the AI to base its next move off of. A board resulting in gameover
can net the biggest gain/loss depending on who wins/loses.
'''
return self.winner() or self.tie()
def get_winner(self) -> int:
'''
Returns the winning color of the board or None. Used to check if winner is found when looking for
best move for AI.
'''
return self.winning_color
# ===== Four in a Row Checks ===== #
def check_vertical(self) -> bool:
'''
Returns a bool:
True if there is four in a row -- in a vertical line in the board that is not an empty slot but a piece color.
False otherwise.
'''
for i in range(settings.ROWS - 3):
for j in range(settings.COLS):
if self.board[i][j] != settings.EMPTY and self.board[i][j] == self.board[i + 1][j] == self.board[i + 2][j] == self.board[i + 3][j]:
self.winning_color = self.board[i][j]
return True
return False
def check_horizontal(self) -> bool:
'''
Returns a bool:
True if there is four in a row -- in a horizontal line in the board that is not an empty slot but a piece color.
False otherwise.
'''
for i in range(settings.ROWS):
for j in range(settings.COLS - 3):
if self.board[i][j] != settings.EMPTY and self.board[i][j] == self.board[i][j + 1] == self.board[i][j + 2] == self.board[i][j + 3]:
self.winning_color = self.board[i][j]
return True
return False
def check_diagonal_right(self) -> bool:
'''
Returns a bool:
True if there is four in a row -- in a diagonal right line towards the bottom right of the board; that is not an empty slot but a piece color.
False otherwise.
'''
for i in range(settings.ROWS - 3):
for j in range(settings.COLS - 3):
if self.board[i][j] != settings.EMPTY and self.board[i][j] == self.board[i + 1][j + 1] == self.board[i + 2][j + 2] == self.board[i + 3][j + 3]:
self.winning_color = self.board[i][j]
return True
return False
def check_diagonal_left(self) -> bool:
'''
Returns a bool:
True if there is four in a row -- in a diagonal left line towards the bottom left of the board; that is not an empty slot but a piece color.
False otherwise.
'''
for i in range(3, settings.ROWS):
for j in range(settings.COLS - 3):
if self.board[i][j] != settings.EMPTY and self.board[i][j] == self.board[i - 1][j + 1] == self.board[i - 2][j + 2] == self.board[i - 3][j + 3]:
self.winning_color = self.board[i][j]
return True
return False
# ===== Display Methods ===== #
def display(self) -> None:
'''
Prints the current board and displays to the user.
'''
print(self)
# Initialization Method #
def initialize_new_board(self) -> list:
'''
Initializes a new board and assigns to self.board. This function is called at initialization
and when the player wishes to play again.
'''
new_board = []
for i in range(settings.ROWS):
new_row = []
for j in range(settings.COLS):
new_row.append(settings.EMPTY)
new_board.append(new_row)
self.board = new_board
self.winning_color = None
return self.board
# ===== Override Methods ===== #
def __getitem__(self, coords: tuple) -> int:
'''
Overrides the __getitem__ method so that we do not have to access the self.board attribute
from outside of this class. Instead we can directly index this board object with coords (tuple).
'''
x, y = coords
return self.board[x][y]
def __repr__(self) -> str:
'''
Override the __repr__ method so that we are able to print a displayable board
for the user.
'''
board_str = ""
for i in range(settings.ROWS):
row_str = "|"
for j in range(settings.COLS):
if self.board[i][j] == settings.RED:
row_str += 'R'
elif self.board[i][j] == settings.YELLOW:
row_str += 'Y'
else:
row_str += ' '
row_str += '|'
board_str += f'{row_str}\n'
board_str += '---------------'
return board_str
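# Minimal usage sketch (assumes python_settings is configured with a settings
# module defining ROWS, COLS, EMPTY, RED and YELLOW, as this class expects):
#     board = Board()
#     board.move(4, settings.RED)      # drop a red piece into column 4
#     board.move(4, settings.YELLOW)   # yellow stacks on top of it
#     print(board.all_moves())         # columns that can still take a piece
#     board.display()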
|
[
"aksalcid@uci.edu"
] |
aksalcid@uci.edu
|
f4e55afb4d540776177b889d78b3b62e4ea7ed2e
|
d99f3d21700b56d0b9f2fd30cccf23d3bebda3fa
|
/rdfdatabank/tools/broadcastDatasets.py
|
18d32cd594872fbe1b5a6736dc80047f507b9336
|
[
"MIT"
] |
permissive
|
dataflow/RDFDatabank
|
6e713044d8fd06644667b6346f63f57dee124a89
|
8a3abd28fefc62cbbfb9f77e7ddc920e23794f34
|
refs/heads/master
| 2021-01-18T07:44:31.110786
| 2015-01-28T15:48:56
| 2015-01-28T15:48:56
| 1,910,458
| 7
| 0
|
MIT
| 2018-12-05T16:20:21
| 2011-06-17T10:12:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,229
|
py
|
#-*- coding: utf-8 -*-
"""
Copyright (c) 2012 University of Oxford
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, --INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
from rdfdatabank.lib.broadcast import BroadcastToRedis
from pylons import config
def get_objs_in_dir(items_list, dirname, fnames):
for fname in fnames:
a = os.path.join(dirname,fname)
if fname == 'obj':
item = a.split('pairtree_root')[1].strip('/').split('obj')[0].replace('/', '')
silo = a.split('pairtree_root')[0].strip('/').split('/')[-1]
if not (silo, item) in items_list:
items_list.append((silo, item))
return
def broadcast_links(src_dir):
links_list = []
os.path.walk(src_dir,get_objs_in_dir,links_list)
b = BroadcastToRedis(config['redis.host'], config['broadcast.queue'])
for silo, item in links_list:
b.creation(silo, item)
return
src_dirs = [
'/silos',
]
for src_dir in src_dirs:
    print "starting", src_dir
    broadcast_links(src_dir)
|
[
"anusha.ranganathan@bodleian.ox.ac.uk"
] |
anusha.ranganathan@bodleian.ox.ac.uk
|
5b8aee1b33555ac5e21ed6566244155d7a42529c
|
33424e1a3cc65ac7d4d5f88c5897ae59a32ae91e
|
/train.py
|
b1bb0c57c8f3b5279b7e19fb55abc6d03e23db7b
|
[] |
no_license
|
file-campuran/Auto-Attendance-System-using-Deep-Learning
|
08d5888d2ac046a35554c86cd4a4c15a485500bb
|
495003e2dfe1a5ced097223dfaa1d328a44198a8
|
refs/heads/master
| 2023-03-22T23:11:28.366108
| 2020-09-25T20:39:29
| 2020-09-25T20:39:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
import cognitive_face as CF
from global_variables import personGroupId
Base_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0' # default endpoint for the free subscription tier
CF.BaseUrl.set(Base_url)
Key= '20c5c11712244ba4b340ac8bf949b8ef' # subscription key needed
CF.Key.set(Key)
res = CF.person_group.train(personGroupId)
print(res)
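# Optional follow-up (a sketch): training runs asynchronously, so its status
# can be polled afterwards via the cognitive_face SDK.
# status = CF.person_group.get_status(personGroupId)
# print(status)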
|
[
"nasirh831@gmail.com"
] |
nasirh831@gmail.com
|
d0d2b7a48aac0d51708a4b7cb8b413fae2d39bc3
|
bc1c3b7386b80c8aa1ee9660606084aacf499e93
|
/Bronze III/1267.py
|
ce2135f42a9bee7c2580012ad118b4898d82c210
|
[] |
no_license
|
Thesine/BOJ_Sine
|
ad93bcd47f622cb68ae794fbd70c3b73044c76aa
|
4bde63726c0f14e9dea30eeb5d262039509b9e8b
|
refs/heads/master
| 2022-11-05T01:22:55.674606
| 2020-07-15T07:14:49
| 2020-07-15T07:14:49
| 265,505,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
N = int(input())
a = input().split()
# Plan Y charges 10 won per started 30 s; plan M charges 15 won per started 60 s.
y_total = 0
m_total = 0
if len(a) == N:
    for i in range(N):
        y_total += (int(a[i]) // 30 + 1) * 10
        m_total += (int(a[i]) // 60 + 1) * 15
    if y_total < m_total:
        print('Y', y_total)
    elif m_total < y_total:
        print('M', m_total)
    else:
        print('Y', 'M', y_total)
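# Worked example: three 40-second calls.
# Plan Y: (40 // 30 + 1) * 10 = 20 won each -> 60 won total.
# Plan M: (40 // 60 + 1) * 15 = 15 won each -> 45 won total.
# Output: M 45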
|
[
"noreply@github.com"
] |
Thesine.noreply@github.com
|
0fc4dd5e9ee64a6900a149cc32b70bc8fced8207
|
43ff465e2f2818f859596a22d9987b9aa984c54e
|
/emojix.py
|
bbca135b93d5cb5894c28d6418fdd6862ad174b6
|
[] |
no_license
|
ANSH3LL/Chatz-UI
|
df99329eb693cd85dad059cfd7473a34e2ad5f5b
|
bea54ba4c8b8445f3d4fbd944488dcbb528825da
|
refs/heads/master
| 2020-12-04T14:43:02.170118
| 2020-01-27T07:07:31
| 2020-01-27T07:07:31
| 231,804,569
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,375
|
py
|
import os, sys
from PyQt4 import QtCore, QtGui
class EmojiLabel(QtGui.QLabel):
def __init__(self, parent):
QtGui.QLabel.__init__(self, parent)
def mouseReleaseEvent(self, event):
self.emit(QtCore.SIGNAL('clicked()'))
class EmojiFragment(QtGui.QScrollArea):
def __init__(self, emoji_hook):
QtGui.QScrollArea.__init__(self)
self.widget = QtGui.QWidget()
self.setWidget(self.widget)
self.setWidgetResizable(True)
self.setStyleSheet('''QScrollArea{border: none;}''')
self.widget.setStyleSheet('''QWidget{background: #fff;}''')
self.emoji = {'people':[], 'food':[], 'nature':[], 'things':[]}
self.populate()
self.emoji_hook = emoji_hook
def populate(self):
for item in self.emoji:
path = os.path.join('resources', 'emoji', 'lists', '{}.txt'.format(item))
with open(path, 'r') as handle:
self.emoji[item] = [line.strip() for line in handle]
    def get_rows(self, columns, length):
        # Ceiling division with explicit integer math (a plain "/" would yield
        # a float under Python 3 and break the range() calls below).
        rows = length // columns
        if length % columns > 0:
            rows += 1
        return rows
def ret_code(self, layout, pointer):
which = self.sender()
index = layout.indexOf(which)
fname = self.emoji[pointer][index]
self.emoji_hook(fname, pointer)
def load_emoji(self, pointer, cols):
counter = 0
length = len(self.emoji[pointer])
pith = os.path.join('resources', 'emoji', pointer)
layout = QtGui.QGridLayout(self.widget)
rows = self.get_rows(cols, length)
for row in range(rows):
for column in range(cols):
path = os.path.join(pith, self.emoji[pointer][counter])
pixmap = QtGui.QPixmap(path)
label = EmojiLabel(self)
label.setPixmap(pixmap)
label.setStyleSheet('''QLabel::hover{background: #dcdcdc;}''')
self.connect(label, QtCore.SIGNAL('clicked()'), lambda: self.ret_code(layout, pointer))
layout.addWidget(label, row, column)
if counter == length - 1: break
else: counter += 1
class EmojiWindow(QtGui.QTabWidget):
def __init__(self, receiver):
QtGui.QTabWidget.__init__(self)
self.setIconSize(QtCore.QSize(32, 32))
self.setStyleSheet('''QTabWidget::tab-bar{alignment: center;}QTabWidget::pane{border: none;}''')
self.load_emoji(receiver)
self.setup_tabs()
def load_emoji(self, receiver):
self.pfrag = EmojiFragment(receiver)
self.pfrag.load_emoji('people', 10)
self.ffrag = EmojiFragment(receiver)
self.ffrag.load_emoji('food', 10)
self.nfrag = EmojiFragment(receiver)
self.nfrag.load_emoji('nature', 10)
self.tfrag = EmojiFragment(receiver)
self.tfrag.load_emoji('things', 10)
def setup_tabs(self):
path = os.path.join('resources', 'emoji', 'icons')
self.addTab(self.pfrag, QtGui.QIcon(os.path.join(path, 'people.svg')), '')
self.addTab(self.ffrag, QtGui.QIcon(os.path.join(path, 'food.svg')), '')
self.addTab(self.nfrag, QtGui.QIcon(os.path.join(path, 'nature.svg')), '')
self.addTab(self.tfrag, QtGui.QIcon(os.path.join(path, 'things.svg')), '')
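# Usage sketch (hypothetical wiring; the receiver callback gets the emoji
# file name and its category, mirroring the emoji_hook call in ret_code):
# app = QtGui.QApplication(sys.argv)
# picker = EmojiWindow(lambda fname, category: sys.stdout.write(fname + '\n'))
# picker.show()
# sys.exit(app.exec_())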
|
[
"noreply@github.com"
] |
ANSH3LL.noreply@github.com
|
8c5d004e1a7e6632d8a036443c0040386507fabf
|
f2d189d9321f88d3f6104c81109dcebb2087578d
|
/wombot_teleop/src/joy_teleop.py
|
09372c54cb03b7acbf12c44402ccc046d59e358d
|
[] |
no_license
|
merlinwu/wombot
|
c7ec39a28cf745d1b9dd2edbe7982b2a27bd81ff
|
215a8b57e2b3483c89f625dbe8f7b64f7508ad11
|
refs/heads/master
| 2021-01-01T04:01:16.058403
| 2012-02-18T08:30:46
| 2012-02-18T08:30:46
| 56,123,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,653
|
py
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('wombot_teleop')
import rospy
from art_msgs.msg import SteeringCommand
from art_msgs.msg import ThrottleCommand
from art_msgs.msg import BrakeCommand
from sensor_msgs.msg import Joy
from wombot_msgs.msg import WombotVehicle
from wombot_msgs.msg import DrivetrainCommand
class JoyTeleop(object):
def __init__(self):
rospy.init_node('joy_teleop')
rospy.Subscriber("joy",Joy, self._JoyCallback)
self.steering_publisher = rospy.Publisher("steering/cmd",SteeringCommand)
self.throttle_publisher = rospy.Publisher("throttle/cmd",ThrottleCommand)
self.drivetrain_publisher = rospy.Publisher("drivetrain/cmd",DrivetrainCommand)
self.brake_publisher = rospy.Publisher("brake/cmd",BrakeCommand)
rospy.loginfo("Starting")
self.steering_axis = 0
self.throttle_axis = 1
self.brake_axis = 2
self.rear_diff_lock_button = 2
self.front_diff_lock_button = 3
self.gear_shift_button = 0
self._deadman_button = 5
self.last_angle = 0.0;
self.last_throttle_position = 1.0;
self.last_brake_position = 1.0;
self._current_gear = DrivetrainCommand.LowRange
self._fdiff_state = DrivetrainCommand.Unlock
self._rdiff_state = DrivetrainCommand.Unlock
self._last_gear_button_state = 0;
self._last_fdiff_button_state = 0;
self._last_rdiff_button_state = 0;
# clamp value to within given values
def _Clamp(self,value,minimum,maximum):
return min(maximum,max(minimum,value))
def _JoyCallback(self,joyMessage):
        # Dead-man switch (right shoulder button): if it is not pressed, ignore the joystick message.
if joyMessage.buttons[self._deadman_button] == 0:
return
# Steering
angle = WombotVehicle.max_steer_degrees * joyMessage.axes[self.steering_axis]
if angle != self.last_angle:
rospy.loginfo("Publishing angle: "+str(angle))
cmd = SteeringCommand()
cmd.request = SteeringCommand.Degrees
cmd.angle = -angle
self.steering_publisher.publish(cmd)
self.last_angle = angle
#Throttle
# The signal from the Xbox goes from 1 being released to -1 being fully depressed
# We need to adjust the range to 0.0-1.0 and invert to get the throttle position
# of 0 = Off and 1 = Full
#position = 1-((joyMessage.axes[self.throttle_axis] + 1.0)/2.0) # for a trigger
position = joyMessage.axes[self.throttle_axis]
if position >= -0.1 and position <= 0.1:
position = 0.0
if position != self.last_throttle_position:
rospy.loginfo("Publishing throttle position: "+str(position))
cmd = ThrottleCommand()
cmd.request = ThrottleCommand.Absolute
cmd.position = position
self.throttle_publisher.publish(cmd)
self.last_throttle_position = position
# Brake
# because the brake is simply running the motor in reverse (tick values lower than
# 1340, to treat this like a 'normal' brake will require active mixing with the current
# throttle point, which will be done in the future.
position = 1-((joyMessage.axes[self.brake_axis] + 1.0)/2.0)
if position != self.last_brake_position:
rospy.loginfo("Publishing brake position: "+str(position))
cmd = BrakeCommand()
cmd.request = BrakeCommand.Absolute
cmd.position = position
self.brake_publisher.publish(cmd)
self.last_brake_position = position
drivetrain_cmd = DrivetrainCommand()
drivetrain_cmd.gear = DrivetrainCommand.NoChange
drivetrain_cmd.front_diff = DrivetrainCommand.NoChange
drivetrain_cmd.rear_diff = DrivetrainCommand.NoChange
# Gear
if (joyMessage.buttons[self.gear_shift_button] == 1) and (self._last_gear_button_state == 0):
drivetrain_cmd.gear = (DrivetrainCommand.LowRange,DrivetrainCommand.HighRange)[self._current_gear == DrivetrainCommand.LowRange]
self._current_gear = drivetrain_cmd.gear
self._last_gear_button_state = joyMessage.buttons[self.gear_shift_button]
# Rear Diff
if (joyMessage.buttons[self.rear_diff_lock_button] == 1) and (self._last_rdiff_button_state == 0):
drivetrain_cmd.rear_diff = (DrivetrainCommand.Unlock,DrivetrainCommand.Lock)[self._rdiff_state == DrivetrainCommand.Unlock]
self._rdiff_state = drivetrain_cmd.rear_diff
self._last_rdiff_button_state = joyMessage.buttons[self.rear_diff_lock_button]
# Front Diff
if (joyMessage.buttons[self.front_diff_lock_button] == 1) and (self._last_fdiff_button_state == 0):
drivetrain_cmd.front_diff = (DrivetrainCommand.Unlock,DrivetrainCommand.Lock)[self._fdiff_state == DrivetrainCommand.Unlock]
self._fdiff_state = drivetrain_cmd.front_diff
self._last_fdiff_button_state = joyMessage.buttons[self.front_diff_lock_button]
if drivetrain_cmd.gear + drivetrain_cmd.rear_diff + drivetrain_cmd.front_diff > 0:
rospy.loginfo("Drivetrain Command:- gear: "+str(drivetrain_cmd.gear)+", rdiff: "+str(drivetrain_cmd.rear_diff)+", fdiff: "+str(drivetrain_cmd.front_diff))
self.drivetrain_publisher.publish(drivetrain_cmd)
#Init and run
if __name__ == '__main__':
joy_teleop = JoyTeleop()
rospy.spin()
|
[
"tim@scott.id.au"
] |
tim@scott.id.au
|
93f85d2a622e280d631d659e7b62ab562683d6e3
|
99297ea97c84625d5cbe28ea2c862c2944f3f781
|
/abstract_dump.py
|
0ce6245b2930f9192f46462a74f2347bb2ad378a
|
[
"Apache-2.0"
] |
permissive
|
subho007/apk_api_key_extractor
|
f7ad4fe8780586661f65526d871b1653651e1425
|
cbc2a8b02a5758886ac6aa08ed4b1b193e2b02aa
|
refs/heads/master
| 2020-12-22T20:23:46.901996
| 2019-09-02T09:19:04
| 2019-09-02T09:19:04
| 236,922,364
| 1
| 0
|
Apache-2.0
| 2020-01-29T06:57:54
| 2020-01-29T06:57:53
| null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from abc import ABC, abstractmethod
class AbstractDump(ABC):
@abstractmethod
def dump_apikeys(self, entries, package, version_code, version_name):
pass
@abstractmethod
def dump_strings(self, entries):
pass
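# Sketch of a concrete implementation (hypothetical class, for illustration):
class ConsoleDump(AbstractDump):
    def dump_apikeys(self, entries, package, version_code, version_name):
        # One line per detected key, tagged with the APK identity.
        for entry in entries:
            print(package, version_code, version_name, entry)
    def dump_strings(self, entries):
        for entry in entries:
            print(entry)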
|
[
"didiego.alessandro@gmail.com"
] |
didiego.alessandro@gmail.com
|
1ed9797eae96b6c837f11417f40e151d6b5c53e6
|
e717d3bc0d6c70f27f81d83d1adf43e71fd2b8bb
|
/molecule/default/tests/test_default.py
|
3ec76a5d8cb681195e7076edc03ece52da653cb3
|
[
"MIT"
] |
permissive
|
gabops/ansible-role-packages-os
|
85b178c645a6a497bf875ec1f21963335f99a599
|
72220dd4c9810f3971cdc23b0008a9df529a5478
|
refs/heads/master
| 2023-02-26T15:08:28.368881
| 2020-01-24T19:33:02
| 2020-01-24T19:33:02
| 205,724,504
| 4
| 0
|
MIT
| 2023-02-07T22:59:04
| 2019-09-01T19:45:22
|
Python
|
UTF-8
|
Python
| false
| false
| 216
|
py
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_pkg_installed(host):
pass
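# Minimal sketch of a concrete check; "curl" is a placeholder for whatever the
# role actually installs. The leading underscore keeps pytest from collecting it.
def _example_pkg_check(host):
    assert host.package("curl").is_installed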
|
[
"gabriel.suarez@solarwinds.com"
] |
gabriel.suarez@solarwinds.com
|
0088c054c40c1b257edbf5330063ca493ed72165
|
2eb86b328bfa24a826a1ced7b6e58db234b0f044
|
/algoritm_w2/p2.py
|
f8fc28d572a92370072cd95491dde7255b938c76
|
[] |
no_license
|
stkang9409/algorithm
|
ec965864f6226d5d82d0ef4d5d2e9264171ba5ca
|
2660f015ea3f9bfb61fa43238696f767a7d1a909
|
refs/heads/main
| 2023-02-28T18:08:02.797369
| 2021-02-08T02:08:24
| 2021-02-08T02:08:24
| 324,918,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
import sys
input = sys.stdin.readline
tree_num, need = map(int, input().split())
trees = list(map(int, input().split()))
def get_tree_amount(h):
amount = 0
for tree in trees:
amount += tree - h if tree - h > 0 else 0
return amount
def bin_search(need):
low = 0
high = max(trees)
result = 0
while low <= high:
middle = (low + high)//2
if get_tree_amount(middle) >= need:
result = middle
low = middle + 1
else:
high = middle - 1
return result
print(bin_search(need))
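# Worked example: trees of heights 20 15 10 17 with need = 7.
# Cutting at height 15 yields (20-15) + 0 + 0 + (17-15) = 7, while height 16
# yields only 5, so the binary search settles on 15.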
|
[
"stkang9409@gmail.com"
] |
stkang9409@gmail.com
|
44ec1bf6c464c84cdeb3ebabf286821591ab9118
|
c0f3714db335601d94172362b872142ab4cf14ed
|
/pos_receipt_invoice_number/__openerp__.py
|
18ebd32d45d9738a505a09f7a0dcb4504d4ebc2d
|
[] |
no_license
|
osmincano/number_invoice
|
6f0843fbd90332a4ce6d421b492cadcb879d04f3
|
28010945d57e0eb900beb9f6b089bd4467da252e
|
refs/heads/master
| 2020-05-09T14:55:15.457867
| 2019-04-16T06:41:32
| 2019-04-16T06:41:32
| 181,214,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
# -*- coding: utf-8 -*-
{
'name': "POS Receipt Show Invoice Number",
'version': '8.0.1.0.1',
'category': 'Point of Sale',
'author': ['TL Technology'],
'sequence': 0,
'depends': [
'point_of_sale'
],
'data': [
'template/import_library.xml',
'views/pos_config.xml',
],
'qweb': [
'static/src/xml/*.xml'
],
"external_dependencies": {
"python": [],
"bin": []
},
'images': ['static/description/icon.png'],
'installable': True,
'application': True,
}
|
[
"root@ubuntu.(none)"
] |
root@ubuntu.(none)
|
5c0be6abcd37ca54a235856fd740d48fbdaf07e5
|
d6e2a4c5a45762abe4cda59695d4c50da557dd32
|
/haming_fast.py
|
7cf2b5cefc4d477502065ece9db32a69111d70da
|
[] |
no_license
|
miskiewiczm/primers
|
388f9f9967c168b8673dc89ec57c785a74683c82
|
5985ff0eef9515fb1752e25acc2d82eaeda4d5de
|
refs/heads/main
| 2023-02-24T19:40:57.068003
| 2020-11-06T16:19:49
| 2020-11-06T16:19:49
| 307,627,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
primer1 = "AACCTTAAGGCCTTGGAA"
primer2 = "CCTTGGTTGGAAAATTGG"
dna_to_bin = {"A": "00", "C": "01", "T": "11", "G": "10"}
dna_B1 = ''.join(dna_to_bin[i] for i in primer1)
dna_B2 = ''.join(dna_to_bin[i] for i in primer2)
print(dna_B1)
print(dna_B2)
a = "".join(str(int(i == j)) for i, j in zip(dna_B1, dna_B2))
print(a.count("1"))
|
[
"noreply@github.com"
] |
miskiewiczm.noreply@github.com
|
ebf4d433e4477a4936756e073428420c5428f87c
|
3e50ed55208122b2f8b34e7f26f33c9ef70efce5
|
/python/requests_test/m1.py
|
d6f7f6b65b1ce61cd5e423cbf4700572f2ed54ae
|
[] |
no_license
|
brady-wang/mac_home
|
b8343da428a4e6696b89d0e6a53ff0dfc87ffd21
|
c56a739c31d3c0f62d26d8512fe1a90c036a1f96
|
refs/heads/master
| 2023-01-14T11:42:02.544322
| 2019-10-02T11:47:27
| 2019-10-02T11:47:27
| 193,177,718
| 0
| 0
| null | 2023-01-04T13:55:31
| 2019-06-22T01:27:10
|
PHP
|
UTF-8
|
Python
| false
| false
| 1,145
|
py
|
# *_*coding:utf-8 *_*
# Batch-download songs from NetEase Cloud Music
import requests
import urllib.request  # urlretrieve lives in urllib.request under Python 3
# Batch download from the charts
# r = requests.get('http://music.163.com/api/playlist/detail?id=2884035') # NetEase original songs chart
# r = requests.get('http://music.163.com/api/playlist/detail?id=19723756') # soaring songs chart
# r = requests.get('http://music.163.com/api/playlist/detail?id=3778678') # hot songs chart
r = requests.get('http://music.163.com/api/playlist/detail?id=3779629') # new songs chart
# Batch download from playlists
# r = requests.get('http://music.163.com/api/playlist/detail?id=123415635') # playlist: Chinese-style tunes, Chinese imprints
# r = requests.get('http://music.163.com/api/playlist/detail?id=122732380') # playlist: "that is not love, just a lie told by loneliness"
arr = r.json()['result']['tracks'] # the chart holds 100 songs in total
for i in range(10): # number of songs to download, from 1 to 100
    name = str(i+1) + ' ' + arr[i]['name'] + '.mp3'
    link = arr[i]['mp3Url']
    urllib.request.urlretrieve(link, '网易云音乐\\' + name) # the target folder must be created beforehand
    print(name + ' 下载完成')
|
[
"brady.wang@qq.com"
] |
brady.wang@qq.com
|
df9c3f26577d821ab512476e94dfcb5d9f813e80
|
6af5bfa49a4fa2a4a1cdc094fa55a5ac38f4baff
|
/homework/mp2-code/instances.py
|
542a47ee9beeeb06e504808bb6eeb3b4cd5cee01
|
[] |
no_license
|
milkshakeiii/simple_python_projects
|
8d89493df14884c739b77d1ca1d39bc5b1f0f8da
|
aeffff25d3d0e9210a2ca93149cceded9eec796d
|
refs/heads/master
| 2023-04-14T02:25:03.897211
| 2023-04-05T17:23:03
| 2023-04-05T17:23:03
| 69,521,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,691
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
uninos = [np.array([[i]]) for i in range(1,61)]
dominos = [np.array([[i],[i]]) for i in range(1,31)]
triominos = [np.array([[i,0],[i,i]]) for i in range(1,21)]
petnominos = [np.array([[0,1,1],
[1,1,0],
[0,1,0]]),
np.array([[2],
[2],
[2],
[2],
[2]]),
np.array([[3,0],
[3,0],
[3,0],
[3,3]]),
np.array([[0,4],
[0,4],
[4,4],
[4,0]]),
np.array([[5,5],
[5,5],
[5,0]]),
np.array([[6,6,6],
[0,6,0],
[0,6,0]]),
np.array([[7,0,7],
[7,7,7]]),
np.array([[8,0,0],
[8,0,0],
[8,8,8]]),
np.array([[9,0,0],
[9,9,0],
[0,9,9]]),
np.array([[0,10,0],
[10,10,10],
[0,10,0]]),
np.array([[0,11],
[11,11],
[0,11],
[0,11]]),
np.array([[12,12,0],
[0,12,0],
[0,12,12]])]
board_6x10 = np.ones((6,10))
board_5x12 = np.ones((5,12))
board_3x20 = np.ones((3,20))
empty_chessboard = np.ones((8,8))
empty_chessboard[3][3] = empty_chessboard[3][4] = empty_chessboard[4][3] = empty_chessboard[4][4] = 0
empty_chessboard2 = np.ones((8,8))
empty_chessboard2[0][0] = empty_chessboard2[7][7] = empty_chessboard2[7][6] = empty_chessboard2[1][0] = 0
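# Sanity check (a sketch): each of the twelve pentominoes covers 5 cells,
# exactly filling the 60-cell 6x10 board.
assert sum(int((piece > 0).sum()) for piece in petnominos) == int(board_6x10.sum()) == 60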
|
[
"milkshakeiii@gmail.com"
] |
milkshakeiii@gmail.com
|
30b6d0f6844219533ac55b79bf10342cfab2d660
|
921bc873efb33d32df249732ba77e32995b707a1
|
/poems.py
|
d2c4bf3ef36e785e2b7ae6989e10cfff43f76517
|
[] |
no_license
|
AtharvaTawde/larvabot
|
0df52bad4ae58390bf3701feff691fa8706ea3aa
|
ccf164f1fe42626ea491d691df14d66f7dfc5b69
|
refs/heads/master
| 2023-04-22T15:44:46.917354
| 2021-05-08T19:13:26
| 2021-05-08T19:13:26
| 363,477,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,880
|
py
|
import random
poems = {
"Moon and Sun":
"Oh, those days when the sun and moon were alike\nBut now the moon envies the sun for it is so bright\nThe sun envies the cool dark night",
"Lava":
"I was in lava\nMy body was roasting fast\nIn the orange lake",
"Ain":
"I am Ain\nI am pain\nI like plane\nI have no brain\nI am a big stain\nI have no brain\nI like plane\nI am pain\nI am Ain",
"What is that German building?????":
"Storm ze Reichstag\nPut Soviet phlag\nDrink some Molotov too!",
"Lavender":
"The purple flower\nSmells of just like morning dew\nOnly to wilt soon",
"Pizza":
"I love pizza\nwhen the crust is just\nbrown and crisp\nand the cheese is\na melty gold\nand the mushrooms are charred well\nI would not miss out\non the creamy, rich\ntomato sauce\nand the toppings make\nthe pizza a pizza",
"Autumn":
"It is autumn\nThe leaves have turned\nto a beautiful gold\nwhile some\nstubbornly stay green,\nor some change to red\norange and then brown\nclinging to their tree\nsoon the trees will bear\nno fruit or leaves\nbut I enjoy\nthe sunset view\nby the lake.",
"Zombies":
"I was\nRunning\nAway\nFrom\nThe rotting green\nZombies\nChasing me\nBegging for my\nBrain",
"Formula 1":
"The cars sped\nDown\nThe track\nFlashes of bright\nOrange, green, and blue\nWith\nEngine roars\nPast the\nFinish line.",
"Village":
"The people dressed in simple clothes\nWalked to the house\nMade of simple straw\nWith pitchforks in hand\n\nThey charged at the door\nAnd broke the measly\nwooden structure\nThey walked inside\nAnd searched the house of its goods\n\nAnd when the owner arrived\nHe stood there in shock\nSeeing the house intact\nAll but the door"
}
def output(message):
title, poem = random.choice(list(poems.items()))
return f'''```\nPresenting "{title}" by Aaryan Tawde\n{poem}```'''
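# Usage sketch: the message argument is currently unused, so any string works.
if __name__ == "__main__":
    print(output("!poem"))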
|
[
"atharvatawde894@gmail.com"
] |
atharvatawde894@gmail.com
|
41c6c69e9b8b7c35436dc0fb748fd21273bc0b83
|
9d388f338ccb52a818c345a6ccdc87775eb38c52
|
/src/cobwebs/cobwebs/exceptions.py
|
33797591c70f8b397e19e43ee8180df55a26355e
|
[
"Apache-2.0"
] |
permissive
|
asteroide/immo_spider
|
3750887d62f876ce35a70b9172caf4fd935a1487
|
864828c389173f6d6417392983bc8d39b5fd4ea2
|
refs/heads/master
| 2023-01-11T02:13:47.460448
| 2016-09-21T12:55:47
| 2016-09-21T12:55:47
| 56,408,229
| 0
| 0
|
Apache-2.0
| 2022-12-24T02:34:04
| 2016-04-16T22:22:37
|
Python
|
UTF-8
|
Python
| false
| false
| 622
|
py
|
class NotImplementedException(BaseException):
message_format = "This function is not implemented."
code = 500
title = 'Not implemented'
class UnknownCommandError(BaseException):
message_format = "This command is not authorized."
code = 500
title = 'Unknown Command'
class ConfigFileException(BaseException):
message_format = "Cannot find a usable configuration file."
code = 500
title = 'Config File Error'
class AuthException(BaseException):
message_format = "There is an error with the username or the password supplied..."
code = 401
title = 'Authentication Error'
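# Usage sketch: raising and handling one of these exception types.
if __name__ == "__main__":
    try:
        raise AuthException(AuthException.message_format)
    except AuthException as exc:
        print(AuthException.code, AuthException.title, "-", exc)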
|
[
"dt_github@domtombox.net"
] |
dt_github@domtombox.net
|
8dadd4adfb54d496723bb512cdb47c1749c4242a
|
99eba20f2b0a547acb106ab4bdd94a7df330ff6b
|
/Utils.py
|
7015c4b1152c05bb235ee4a8ddbd7ddadc1a8b5f
|
[] |
no_license
|
apitsaer/Deep-Artwork-Analysis
|
7e7d7836a4069dea674e99a6968717b8a3046bc4
|
b8a6056e43e3de8100eaefaeea72f87bb6fec5ae
|
refs/heads/master
| 2020-05-02T08:21:08.944920
| 2019-05-25T23:54:35
| 2019-05-25T23:54:35
| 177,840,557
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,409
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 3 16:11:22 2019
@author: admin
"""
import re
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def getAllTypeMat(AWTableTOP):
all_type = set()
all_mat = set()
for row in AWTableTOP.itertuples():
text_mat = re.split(";", row.Material_all)
for mat in text_mat:
if not mat == '':
all_mat.add(mat)
text_type = re.split(";", row.Type_all)
for typ in text_type:
if not typ == '':
all_type.add(typ)
return (all_type, all_mat)
def smooth_curve(points, factor=0.8):
smoothed_points = []
for point in points:
if smoothed_points:
previous = smoothed_points[-1]
smoothed_points.append(previous * factor + point * (1 - factor))
else:
smoothed_points.append(point)
return smoothed_points
def makePlots(DIR, MODEL):
INPUT_FILE = DIR + MODEL + '.log'
OUTPUT_FILE = DIR + MODEL + '_train_valid_metrics.png'
OUTPUT_FILE_L1 = DIR + MODEL + '_train_valid_loss.png'
OUTPUT_FILE_LS = DIR + MODEL + '_train_valid_losses_split.png'
train_logs = pd.read_csv(INPUT_FILE, keep_default_na=False)
plt.figure(0)
fig, axs = plt.subplots(2, 2, constrained_layout=True)
axs[0, 0].plot(train_logs.epoch, smooth_curve(train_logs.artist_weighted_categorical_accuracy), 'b', label='Train')
axs[0, 0].plot(train_logs.epoch, smooth_curve(train_logs.val_artist_weighted_categorical_accuracy), 'r', label='Val')
axs[0, 0].set_title('Artist')
axs[0, 0].set_ylabel('Avg Class Acc')
axs[0, 0].legend()
axs[0, 1].plot(train_logs.epoch, smooth_curve(train_logs.year_mean_absolute_error), 'b', label='Train')
axs[0, 1].plot(train_logs.epoch, smooth_curve(train_logs.val_year_mean_absolute_error), 'r', label='Val')
axs[0, 1].set_title('Year')
axs[0, 1].set_ylabel('MAE')
axs[0, 1].legend()
axs[1, 0].plot(train_logs.epoch, smooth_curve(train_logs.type_precision), 'b', label='Train')
axs[1, 0].plot(train_logs.epoch, smooth_curve(train_logs.val_type_precision), 'r', label='Val')
axs[1, 0].set_title('Type')
axs[1, 0].set_ylabel('Precision')
axs[1, 0].legend()
axs[1, 1].plot(train_logs.epoch, smooth_curve(train_logs.mat_precision), 'b', label='Train')
axs[1, 1].plot(train_logs.epoch, smooth_curve(train_logs.val_mat_precision), 'r', label='Val')
axs[1, 1].set_title('Material')
axs[1, 1].set_ylabel('Precision')
axs[1, 1].legend()
#fig.tight_layout()
for ax in axs.flat:
ax.set(xlabel='Epoch')
## Hide x labels and tick labels for top plots
#for ax in axs.flat:
# ax.label_outer()
fig.suptitle('Train & Val Metrics')
#plt.subplots_adjust(top=1.2, bottom=0.2, left=0.10, right=0.95, hspace=0.45, wspace=0.35)
plt.savefig(OUTPUT_FILE)
plt.show()
plt.figure(1)
fig, axs = plt.subplots(2, 2, constrained_layout=True)
axs[0, 0].plot(train_logs.epoch, smooth_curve(train_logs.artist_loss), 'b', label='Train')
axs[0, 0].plot(train_logs.epoch, smooth_curve(train_logs.val_artist_loss), 'r', label='Val')
axs[0, 0].set_title('Artist')
axs[0, 0].set_ylabel('Cat x entropy')
axs[0, 0].legend()
axs[0, 1].plot(train_logs.epoch, smooth_curve(train_logs.year_loss), 'b', label='Train')
axs[0, 1].plot(train_logs.epoch, smooth_curve(train_logs.val_year_loss), 'r', label='Val')
axs[0, 1].set_title('Year')
axs[0, 1].set_ylabel('MAE')
axs[0, 1].legend()
axs[1, 0].plot(train_logs.epoch, smooth_curve(train_logs.type_loss), 'b', label='Train')
axs[1, 0].plot(train_logs.epoch, smooth_curve(train_logs.val_type_loss), 'r', label='Val')
axs[1, 0].set_title('Type')
axs[1, 0].set_ylabel('Bin x entropy')
axs[1, 0].legend()
axs[1, 1].plot(train_logs.epoch, smooth_curve(train_logs.mat_loss), 'b', label='Train')
axs[1, 1].plot(train_logs.epoch, smooth_curve(train_logs.val_mat_loss), 'r', label='Val')
axs[1, 1].set_title('Material')
axs[1, 1].set_ylabel('Bin x entropy')
axs[1, 1].legend()
#fig.tight_layout()
for ax in axs.flat:
ax.set(xlabel='Epoch')
## Hide x labels and tick labels for top plots
#for ax in axs.flat:
# ax.label_outer()
#plt.subplots_adjust(top=1.2, bottom=0.2, left=0.10, right=0.95, hspace=0.65, wspace=0.55)
fig.suptitle('Train & Val Losses')
plt.savefig(OUTPUT_FILE_LS)
plt.show()
plt.figure(2)
plt.plot(train_logs.epoch, smooth_curve(train_logs.loss), 'b', label='Train')
plt.plot(train_logs.epoch, smooth_curve(train_logs.val_loss), 'r', label='Val')
plt.title('Total Loss')
plt.ylabel('Multi-task sum weighted loss')
plt.xlabel('Epoch')
plt.legend()
plt.savefig(OUTPUT_FILE_L1)
# get some statistics on the original image size in order to determine acceptable resizing
def get_IMG_size_statistics(DIR):
    # Assumed dependency: load_img/img_to_array are taken from Keras' image
    # utilities; imported lazily so the rest of the module works without Keras.
    from keras.preprocessing import image
    heights = []
    widths = []
    directory = DIR + 'original'
    for img in os.listdir(directory):
if(img.endswith('jpg')):
path = os.path.join(directory, img)
data = image.load_img(path)
data = image.img_to_array(data)
heights.append(data.shape[0])
widths.append(data.shape[1])
print(img)
avg_height = sum(heights) / len(heights)
avg_width = sum(widths) / len(widths)
print("Average Height: " + str(avg_height))
print("Max Height: " + str(max(heights)))
print("Min Height: " + str(min(heights)))
print('\n')
print("Average Width: " + str(avg_width))
print("Max Width: " + str(max(widths)))
print("Min Width: " + str(min(widths)))
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax
# The example below (from the scikit-learn confusion-matrix docs) references
# y_test / y_pred / class_names that are undefined in this module; it is kept
# commented out so the module remains importable.
# np.set_printoptions(precision=2)
# # Plot non-normalized confusion matrix
# plot_confusion_matrix(y_test, y_pred, classes=class_names,
#                       title='Confusion matrix, without normalization')
# # Plot normalized confusion matrix
# plot_confusion_matrix(y_test, y_pred, classes=class_names, normalize=True,
#                       title='Normalized confusion matrix')
# plt.show()
#makePlots('/Users/admin/Documents/AWImpl/logs/', 'RESNET')
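if __name__ == "__main__":
    # Self-contained demo of plot_confusion_matrix with synthetic labels (a sketch).
    y_true = np.array([0, 1, 2, 2, 1])
    y_pred = np.array([0, 2, 2, 2, 0])
    plot_confusion_matrix(y_true, y_pred, classes=np.array(["a", "b", "c"]),
                          normalize=True)
    plt.show()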
|
[
"apitsaer@gmail.com"
] |
apitsaer@gmail.com
|
9ddc24cc3be2b389b945332eb5b4ae28a086dae6
|
3374acf28d40e502b00680aacbcab00e111fa437
|
/cspp1-practice/m7/Functions - Square Exercise/square.py
|
55828979709d62de40a4c16a757b689d589473f7
|
[] |
no_license
|
saisrinivasreddypatlolla/6019_CSPP1
|
db2bd0ddb63b6a332b029340bf73f31017616d93
|
e4092709128bcae778a4ee2e6c765b919762a2e5
|
refs/heads/master
| 2020-03-24T20:36:55.394170
| 2018-08-25T07:27:08
| 2018-08-25T07:27:08
| 142,987,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
# Exercise: square
# Write a Python function, square, that takes in one number and returns the square of that number.
# This function takes in one number and returns one number.
def square(x):
    '''
    x: int or float.
    Returns the square of x.
    '''
    return x * x
def main():
data = input()
data = float(data)
temp = str(data).split('.')
if(temp[1] == '0'):
print(square(int(float(str(data)))))
else:
print(square(data))
if __name__== "__main__":
main()
|
[
"nivasreddy08@msitprogram.net"
] |
nivasreddy08@msitprogram.net
|
839a175c960224f1cd6c4f26e5f954b2b481bc55
|
337562baac02e9ef39acf48c0cb0d0e612f42372
|
/scripts/go.py
|
0ca910891f0ec0683de6ec4964c50e36e84c55a1
|
[] |
no_license
|
y-shiigi/task_editor
|
9d7a1ae3acd27faf0c00bf3ca582ae1d47874bd9
|
95f9e6f0e2d4812fe88bd38216d686499fa98392
|
refs/heads/master
| 2021-07-12T18:09:25.106358
| 2020-07-06T07:18:20
| 2020-07-06T07:18:20
| 174,313,759
| 0
| 0
| null | 2020-07-06T07:18:21
| 2019-03-07T09:30:14
|
Python
|
UTF-8
|
Python
| false
| false
| 5,154
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
import rospy
##-- for navigation
import tf
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import *
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import yaml
import math
##-- for find pkg
import rospkg
###########################################
## @brief Navigation-related helper class
class NaviAction:
    ## @brief Constructor: loads the waypoints and creates the move_base action client
def __init__(self):
rospack = rospkg.RosPack()
rospack.list()
path = rospack.get_path('task_editor')
        ## @brief Data of the loaded waypoints
self.config = yaml.load(file(path + "/config/waypoints.yaml"))
rospy.on_shutdown(self.shutdown)
        ## @brief Action client for /move_base
self.ac = actionlib.SimpleActionClient('move_base', MoveBaseAction)
while not self.ac.wait_for_server(rospy.Duration(5)):
rospy.loginfo("Waiting for the move_base action server to come up")
rospy.loginfo("The server comes up");
        ## @brief Goal of type MoveBaseGoal
self.goal = MoveBaseGoal()
self.vel_pub_ = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self.vel_ = Twist()
    ## @brief Set the goal position and start moving
    # @param _number waypoint index (an integer >= 0)
    # @return whether the robot reached the goal (succeeded or aborted)
def set_goal(self,_number):
rospy.on_shutdown(self.shutdown)
rev = dict(self.config[_number]) #List to Dictionary
self.goal.target_pose.header.frame_id = 'map'
self.goal.target_pose.header.stamp = rospy.Time.now()
self.goal.target_pose.pose.position.x = rev['pose']['position']['x']
self.goal.target_pose.pose.position.y = rev['pose']['position']['y']
self.goal.target_pose.pose.position.z = rev['pose']['position']['z']
self.goal.target_pose.pose.orientation.x = rev['pose']['orientation']['x']
self.goal.target_pose.pose.orientation.y = rev['pose']['orientation']['y']
self.goal.target_pose.pose.orientation.z = rev['pose']['orientation']['z']
self.goal.target_pose.pose.orientation.w = rev['pose']['orientation']['w']
rospy.loginfo('Sending goal')
self.ac.send_goal(self.goal)
succeeded = self.ac.wait_for_result(rospy.Duration(60));
state = self.ac.get_state();
if succeeded:
rospy.loginfo("Succeed")
return 'succeeded'
else:
rospy.loginfo("Failed")
return 'aborted'
    ## @brief Set a via point and start moving
    # @param _number waypoint index (an integer >= 0)
    # @return whether the robot got close to the goal (succeeded or aborted)
def set_via_point(self,_number):
rospy.on_shutdown(self.shutdown)
rev = dict(self.config[_number]) #List to Dictionary
self.goal.target_pose.header.frame_id = 'map'
self.goal.target_pose.header.stamp = rospy.Time.now()
self.goal.target_pose.pose.position.x = rev['pose']['position']['x']
self.goal.target_pose.pose.position.y = rev['pose']['position']['y']
self.goal.target_pose.pose.position.z = rev['pose']['position']['z']
self.goal.target_pose.pose.orientation.x = rev['pose']['orientation']['x']
self.goal.target_pose.pose.orientation.y = rev['pose']['orientation']['y']
self.goal.target_pose.pose.orientation.z = rev['pose']['orientation']['z']
self.goal.target_pose.pose.orientation.w = rev['pose']['orientation']['w']
rospy.loginfo('Sending goal')
self.ac.send_goal(self.goal)
listener = tf.TransformListener()
timeout = time.time() + 10 #[sec]
while True:
try:
(position, quaternion) = listener.lookupTransform('map', 'base_link', rospy.Time(0) )
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
            # Return 'succeeded' once the robot is within 0.5 m of the goal
if(math.sqrt((position[0]-self.goal.target_pose.pose.position.x)**2
+ (position[1]-self.goal.target_pose.pose.position.y)**2 ) <= 0.5):
rospy.loginfo("Succeed")
return 'succeeded'
elif (time.time() > timeout):
rospy.loginfo("Timeout")
return 'aborted'
def set_velocity(self,_x,_y,_theta,_time):
self.vel_.linear.x = _x
self.vel_.linear.y = _y
self.vel_.angular.z = _theta
end_time = time.time() + _time #[sec]
while(time.time() < end_time):
self.vel_pub_.publish(self.vel_)
self.vel_.linear.x = 0
self.vel_.linear.y = 0
self.vel_.angular.z = 0
self.vel_pub_.publish(self.vel_)
return 'succeeded'
def cancel(self):
self.ac.cancel_all_goals()
    ## @brief Shut down move_base
def shutdown(self):
self.ac.cancel_goal()
#==================================
#==================================
if __name__ == '__main__':
rospy.init_node('got_to_waypoint_server')
na = NaviAction()
if(len(sys.argv) != 2): na.cancel()
else:
waypoint = int(sys.argv[1])
na.set_goal(int(waypoint))
|
[
"y.shiigi@thk.co.jp"
] |
y.shiigi@thk.co.jp
|
c09a11b8e00247f2e94a1ba4ec0526088e30eb49
|
1f99ddccebb5edfd133d5579c1889425dc966a49
|
/grpc_interceptor_headers/header_manipulator_client_interceptor.py
|
39404a92a44d7e0d2cb677a64132a3fec9fdd178
|
[
"MIT"
] |
permissive
|
sempr/grpc-interceptor-headers
|
6452185e543e8bc6c499462c8017c372aed1e441
|
5ff36ccab2fc6f776db5c1fbbeb78538933bff9a
|
refs/heads/master
| 2022-10-18T19:07:08.366404
| 2022-09-24T06:41:44
| 2022-09-24T06:41:44
| 155,700,022
| 0
| 0
|
MIT
| 2022-09-24T06:41:45
| 2018-11-01T10:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interceptor that adds headers to outgoing requests."""
import collections
import grpc
from . import generic_client_interceptor
class _ClientCallDetails(
collections.namedtuple(
'_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials')),
grpc.ClientCallDetails):
pass
def header_adder_interceptor(header, value):
def intercept_call(client_call_details, request_iterator,
request_streaming, response_streaming):
metadata = []
if client_call_details.metadata is not None:
metadata = list(client_call_details.metadata)
metadata.append((
header,
value,
))
client_call_details = _ClientCallDetails(
client_call_details.method, client_call_details.timeout, metadata,
client_call_details.credentials)
return client_call_details, request_iterator, None
return generic_client_interceptor.create(intercept_call)
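# Usage sketch (target address is a placeholder):
# interceptor = header_adder_interceptor('one-time-password', '42')
# channel = grpc.intercept_channel(
#     grpc.insecure_channel('localhost:50051'), interceptor)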
|
[
"iamsempr@gmail.com"
] |
iamsempr@gmail.com
|
4299b17a8916546238e85ee8fc1d9d23471f5891
|
921b0e79421e58c09c2331365c48b7fe991c8c88
|
/feeder1.py
|
f632f9afcd73e331d5e106049b6f53ea5d0a9a67
|
[] |
no_license
|
ssaisanthosh/FishFeeder
|
0fdcd035a211afcb2a7c85eb398cef86f852c197
|
0ec6598f9391a7a06344ed0908c8b04c00dc105c
|
refs/heads/main
| 2023-01-21T10:31:38.526355
| 2020-12-06T20:51:14
| 2020-12-06T20:51:14
| 319,128,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
#!/usr/local/bin/python
import RPi.GPIO as GPIO
import time
#control = [5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10]
control = [5,15]
servo = 17
#GPIO.setmode(GPIO.BOARD)
GPIO.setmode(GPIO.BCM)
GPIO.setup(servo,GPIO.OUT)
# in servo motor,
# 1ms pulse for 0 degree (LEFT)
# 1.5ms pulse for 90 degree (MIDDLE)
# 2ms pulse for 180 degree (RIGHT)
# so for 50hz, one frequency is 20ms
# duty cycle for 0 degree = (1/20)*100 = 5%
# duty cycle for 90 degree = (1.5/20)*100 = 7.5%
# duty cycle for 180 degree = (2/20)*100 = 10%
p=GPIO.PWM(servo,50)# 50hz frequency
p.start(0)# starting duty cycle ( it set the servo to 0 degree )
#try:
# while True:
# for x in range(11):
time.sleep(0.3);
p.ChangeDutyCycle(15);
time.sleep(0.3);
# p.stop();
#p.start(7.5);
p.ChangeDutyCycle(4);
time.sleep(0.3);
# p.stop();
# GPIO.cleanup();
# duty = float(5) / 10.0 + 2.5
# p.ChangeDutyCycle(5);
# print duty;
# time.sleep(1);
# p.stop();
# print x
#
# for x in range(9,0,-1):
# for x in range(2):
# p.ChangeDutyCycle(control[x])
# time.sleep(0.03)
# print x
# p.stop()
# p.ChangeDutyCycle(control[0])
# time.sleep(0.05)
# print x
# p.ChangeDutyCycle(control[1])
# time.sleep(0.05)
# print x
# p.ChangeDutyCycle(control[0])
# time.sleep(0.05)
# print x
# p.ChangeDutyCycle(control[1])
# time.sleep(0.1)
# p.stop()
#except KeyboardInterrupt:
GPIO.cleanup()
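# Helper sketch derived from the duty-cycle math in the comments above:
# map a servo angle in [0, 180] degrees onto a duty cycle in [5, 10] percent.
def angle_to_duty(angle):
    return 5.0 + (angle / 180.0) * 5.0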
|
[
"me@ssaisanthosh.com"
] |
me@ssaisanthosh.com
|
6be45dd06c5be6fafefaef95a61ae6f1c20a4184
|
2f41d9b78a88a6ed4c5724eae4da2d6c58b70d55
|
/recipes/cairo/all/conanfile.py
|
1ee19505ae145d6335d0796747ff6e91a51b07e1
|
[
"MIT"
] |
permissive
|
hrantzsch/conan-center-index
|
378e951aab108d801be6f3c59a5481f443b31bab
|
057f0a81e4ab01944220478a3227c58445b184db
|
refs/heads/master
| 2023-07-06T02:22:46.043174
| 2023-07-04T13:53:53
| 2023-07-04T13:53:53
| 226,821,993
| 0
| 1
|
MIT
| 2022-01-29T16:05:40
| 2019-12-09T08:29:30
|
Python
|
UTF-8
|
Python
| false
| false
| 15,841
|
py
|
import os
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.apple import fix_apple_shared_install_name, is_apple_os
from conan.tools.build import cross_building
from conan.tools.env import VirtualBuildEnv, VirtualRunEnv
from conan.tools.files import (
apply_conandata_patches,
copy,
export_conandata_patches,
get,
replace_in_file,
rename,
rm,
rmdir
)
from conan.tools.gnu import PkgConfigDeps, Autotools, AutotoolsDeps, AutotoolsToolchain
from conan.tools.layout import basic_layout
from conan.tools.microsoft import is_msvc, unix_path
from conan.tools.scm import Version
required_conan_version = ">=1.54.0"
class CairoConan(ConanFile):
name = "cairo"
description = "Cairo is a 2D graphics library with support for multiple output devices"
topics = ("cairo", "graphics")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://cairographics.org/"
license = ("LGPL-2.1-only", "MPL-1.1")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_freetype": [True, False],
"with_fontconfig": [True, False],
"with_xlib": [True, False],
"with_xlib_xrender": [True, False],
"with_xcb": [True, False],
"with_glib": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_freetype": True,
"with_fontconfig": True,
"with_xlib": True,
"with_xlib_xrender": False,
"with_xcb": True,
"with_glib": True,
}
short_paths = True
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
del self.options.with_fontconfig
if is_msvc(self):
del self.options.with_freetype
del self.options.with_glib
if self.settings.os != "Linux":
del self.options.with_xlib
del self.options.with_xlib_xrender
del self.options.with_xcb
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.cppstd")
self.settings.rm_safe("compiler.libcxx")
def layout(self):
basic_layout(self, src_folder="src")
def requirements(self):
if self.options.get_safe("with_freetype", True):
self.requires("freetype/2.13.0")
if self.options.get_safe("with_fontconfig", False):
self.requires("fontconfig/2.13.93")
if self.settings.os == "Linux":
if self.options.with_xlib or self.options.with_xlib_xrender or self.options.with_xcb:
self.requires("xorg/system")
if self.options.get_safe("with_glib", True):
self.requires("glib/2.76.1")
self.requires("zlib/1.2.13")
self.requires("pixman/0.40.0")
self.requires("libpng/1.6.39")
def package_id(self):
if self.options.get_safe("with_glib") and not self.dependencies["glib"].options.shared:
self.info.requires["glib"].full_package_mode()
def validate(self):
if is_msvc(self):
# TODO autotools build results in LNK1127 error from a library in the WindowsSDK on CCI
# should be retested in case this is just a CCI environment issue
raise ConanInvalidConfiguration("MSVC autotools build is not supported. Use the Meson build instead.")
def build_requirements(self):
self.tool_requires("libtool/2.4.7")
if not self.conf.get("tools.gnu:pkg_config", default=False, check_type=str):
self.tool_requires("pkgconf/1.9.3")
if self._settings_build.os == "Windows":
self.win_bash = True
if not self.conf.get("tools.microsoft.bash:path", default=False, check_type=str):
self.tool_requires("msys2/cci.latest")
if is_msvc(self):
self.tool_requires("automake/1.16.5")
else:
self.tool_requires("gtk-doc-stub/cci.20181216")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def _create_toolchain(self, namespace, directory):
def is_enabled(value):
return "yes" if value else "no"
def dep_path(dependency):
return unix_path(self, self.deps_cpp_info[dependency].rootpath)
tc = AutotoolsToolchain(self, namespace=namespace)
tc.configure_args += [
f"--datarootdir={unix_path(self, os.path.join(self.package_folder, 'res'))}",
f"--enable-ft={is_enabled(self.options.get_safe('with_freetype', True))}",
f"--enable-gobject={is_enabled(self.options.get_safe('with_glib', True))}",
f"--enable-fc={is_enabled(self.options.get_safe('with_fontconfig'))}",
f"--enable-xlib={is_enabled(self.options.get_safe('with_xlib'))}",
f"--enable-xlib_xrender={is_enabled(self.options.get_safe('with_xlib_xrender'))}",
f"--enable-xcb={is_enabled(self.options.get_safe('xcb'))}",
"--disable-gtk-doc"
]
if is_msvc(self):
tc.make_args += [
"--directory", directory,
"-f", "Makefile.win32",
f"CFG={str(self.settings.build_type).lower()}",
"CAIRO_HAS_FC_FONT=0",
f"ZLIB_PATH={dep_path('zlib')}",
f"LIBPNG_PATH={dep_path('libpng')}",
f"PIXMAN_PATH={dep_path('pixman')}",
f"FREETYPE_PATH={dep_path('freetype')}",
f"GOBJECT_PATH={dep_path('glib')}"
]
tc.extra_cflags += ["-FS"]
if self.settings.compiler in ["gcc", "clang", "apple-clang"]:
tc.extra_cflags.append("-Wno-enum-conversion")
return tc
def generate(self):
VirtualBuildEnv(self).generate()
if not cross_building(self):
VirtualRunEnv(self).generate(scope="build")
tc_main = self._create_toolchain("main", unix_path(self, self.source_folder))
tc_main.generate()
if is_msvc(self):
tc_gobject = self._create_toolchain("gobject", unix_path(self, os.path.join(self.source_folder, "util", "cairo-gobject")))
tc_gobject.generate()
PkgConfigDeps(self).generate()
deps = AutotoolsDeps(self)
if is_msvc(self):
cppflags = deps.vars().get("CPPFLAGS")
deps.environment.append('CFLAGS', cppflags.replace("/I", "-I"))
ldflags = deps.vars().get("LDFLAGS")
deps.environment.define('LDFLAGS', ldflags.replace("/LIBPATH:", "-LIBPATH:"))
deps.environment.append('LDFLAGS', deps.vars().get("LIBS"))
deps.generate()
def _patch_sources(self):
apply_conandata_patches(self)
def fix_freetype_version():
replace_in_file(
self,
os.path.join(self.source_folder, "configure.ac"),
"FREETYPE_MIN_VERSION=9.7.3",
f"FREETYPE_MIN_VERSION={Version(self.dependencies['freetype'].ref.version)}"
)
def exclude_tests_and_docs_from_build():
makefile_am = os.path.join(self.source_folder, "Makefile.am")
replace_in_file(self, makefile_am, "SUBDIRS += boilerplate test perf", "")
replace_in_file(self, makefile_am, "SUBDIRS = src doc util", "SUBDIRS = src util")
fix_freetype_version()
exclude_tests_and_docs_from_build()
if self.options.get_safe("with_freetype"):
replace_in_file(self, os.path.join(self.source_folder, "src", "cairo-ft-font.c"),
"#if HAVE_UNISTD_H", "#ifdef HAVE_UNISTD_H")
if is_msvc(self):
# https://cairographics.org/end_to_end_build_for_win32/
win32_common = os.path.join(self.source_folder, "build", "Makefile.win32.common")
replace_in_file(self, win32_common, "-MD ", f"-{self.settings.compiler.runtime} ")
replace_in_file(self, win32_common, "-MDd ", f"-{self.settings.compiler.runtime} ")
replace_in_file(self, win32_common, "$(PIXMAN_PATH)/lib/pixman-1.lib",
self.deps_cpp_info["pixman"].libs[0] + ".lib")
replace_in_file(self, win32_common, "$(FREETYPE_PATH)/lib/freetype.lib",
self.deps_cpp_info["freetype"].libs[0] + ".lib")
replace_in_file(self, win32_common, "$(ZLIB_PATH)/lib/zlib1.lib",
self.deps_cpp_info["zlib"].libs[0] + ".lib")
replace_in_file(self, win32_common, "$(LIBPNG_PATH)/lib/libpng16.lib",
self.deps_cpp_info["libpng"].libs[0] + ".lib")
def build(self):
self._patch_sources()
autotools = Autotools(self, namespace="main")
if is_msvc(self):
autotools.make()
autotools_gobject = Autotools(self, namespace="gobject")
autotools_gobject.make()
else:
autotools.autoreconf()
autotools.configure()
autotools.make()
def package(self):
copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
if is_msvc(self):
src = os.path.join(self.source_folder, "src")
inc = os.path.join(self.package_folder, "include", "cairo")
copy(self, "cairo-version.h", (src if Version(self.version) >= "1.17.4" else self.source_folder), inc)
copy(self, "cairo-features.h", src, inc)
copy(self, "cairo.h", src, inc)
copy(self, "cairo-deprecated.h", src, inc)
copy(self, "cairo-win32.h", src, inc)
copy(self, "cairo-script.h", src, inc)
copy(self, "cairo-ft.h", src, inc)
copy(self, "cairo-ps.h", src, inc)
copy(self, "cairo-pdf.h", src, inc)
copy(self, "cairo-svg.h", src, inc)
copy(self, "cairo-gobject.h", inc, os.path.join(self.source_folder, "util", "cairo-gobject"))
config = str(self.settings.build_type).lower()
lib_src_path = os.path.join(self.source_folder, "src", config)
cairo_gobject_src_path = os.path.join(self.source_folder, "util", "cairo-gobject", config)
lib_dest_path = os.path.join(self.package_folder, "lib")
runtime_dest_path = os.path.join(self.package_folder, "bin")
if self.options.shared:
copy(self, "*cairo.lib", lib_src_path, lib_dest_path)
copy(self, "*cairo.dll", lib_src_path, runtime_dest_path)
copy(self, "*cairo-gobject.lib", cairo_gobject_src_path, lib_dest_path)
copy(self, "*cairo-gobject.dll", cairo_gobject_src_path, runtime_dest_path)
else:
copy(self, "cairo-static.lib", lib_src_path, lib_dest_path)
copy(self, "cairo-gobject.lib", cairo_gobject_src_path, lib_dest_path)
rename(self, os.path.join(lib_dest_path, "cairo-static.lib"), os.path.join(lib_dest_path, "cairo.lib"))
else:
autotools = Autotools(self, namespace="main")
autotools.install()
copy(self, "COPYING*", self.source_folder, os.path.join(self.package_folder, "licenses"))
rm(self, "*.la", self.package_folder, recursive=True)
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
fix_apple_shared_install_name(self)
def package_info(self):
self.cpp_info.set_property("pkg_config_name", "cairo-all-do-no-use")
self.cpp_info.components["cairo_"].set_property("pkg_config_name", "cairo")
self.cpp_info.components["cairo_"].libs = ["cairo"]
self.cpp_info.components["cairo_"].includedirs.insert(0, os.path.join("include", "cairo"))
self.cpp_info.components["cairo_"].requires = ["pixman::pixman", "libpng::libpng", "zlib::zlib"]
if self.options.get_safe("with_freetype", True):
self.cpp_info.components["cairo_"].requires.append("freetype::freetype")
if self.settings.os == "Windows":
self.cpp_info.components["cairo_"].system_libs.extend(["gdi32", "msimg32", "user32"])
if not self.options.shared:
self.cpp_info.components["cairo_"].defines.append("CAIRO_WIN32_STATIC_BUILD=1")
else:
if self.options.with_glib:
self.cpp_info.components["cairo_"].requires.extend(["glib::gobject-2.0", "glib::glib-2.0"])
if self.options.with_fontconfig:
self.cpp_info.components["cairo_"].requires.append("fontconfig::fontconfig")
if self.settings.os == "Linux":
self.cpp_info.components["cairo_"].system_libs = ["pthread", "rt"]
self.cpp_info.components["cairo_"].cflags = ["-pthread"]
self.cpp_info.components["cairo_"].cxxflags = ["-pthread"]
if self.options.with_xcb:
self.cpp_info.components["cairo_"].requires.extend(["xorg::xcb-shm", "xorg::xcb"])
if self.options.with_xlib_xrender:
self.cpp_info.components["cairo_"].requires.extend(["xorg::xcb-render"])
if self.options.with_xlib:
self.cpp_info.components["cairo_"].requires.extend(["xorg::x11", "xorg::xext"])
if is_apple_os(self):
self.cpp_info.components["cairo_"].frameworks.append("CoreGraphics")
if self.settings.os == "Windows":
self.cpp_info.components["cairo-win32"].set_property("pkg_config_name", "cairo-win32")
self.cpp_info.components["cairo-win32"].requires = ["cairo_", "pixman::pixman", "libpng::libpng"]
if self.options.get_safe("with_glib", True):
self.cpp_info.components["cairo-gobject"].set_property("pkg_config_name", "cairo-gobject")
self.cpp_info.components["cairo-gobject"].libs = ["cairo-gobject"]
self.cpp_info.components["cairo-gobject"].requires = ["cairo_", "glib::gobject-2.0", "glib::glib-2.0"]
if self.settings.os != "Windows":
if self.options.with_fontconfig:
self.cpp_info.components["cairo-fc"].set_property("pkg_config_name", "cairo-fc")
self.cpp_info.components["cairo-fc"].requires = ["cairo_", "fontconfig::fontconfig"]
if self.options.get_safe("with_freetype", True):
self.cpp_info.components["cairo-ft"].set_property("pkg_config_name", "cairo-ft")
self.cpp_info.components["cairo-ft"].requires = ["cairo_", "freetype::freetype"]
self.cpp_info.components["cairo-pdf"].set_property("pkg_config_name", "cairo-pdf")
self.cpp_info.components["cairo-pdf"].requires = ["cairo_", "zlib::zlib"]
if self.settings.os == "Linux":
if self.options.with_xlib:
self.cpp_info.components["cairo-xlib"].set_property("pkg_config_name", "cairo-xlib")
self.cpp_info.components["cairo-xlib"].requires = ["cairo_", "xorg::x11", "xorg::xext"]
if is_apple_os(self):
self.cpp_info.components["cairo-quartz"].set_property("pkg_config_name", "cairo-quartz")
self.cpp_info.components["cairo-quartz"].requires = ["cairo_"]
self.cpp_info.components["cairo-quartz"].frameworks.extend(["CoreFoundation", "CoreGraphics", "ApplicationServices"])
self.cpp_info.components["cairo-quartz-font"].set_property("pkg_config_name", "cairo-quartz-font")
self.cpp_info.components["cairo-quartz-font"].requires = ["cairo_"]
|
[
"noreply@github.com"
] |
hrantzsch.noreply@github.com
|
7db08649355d77be0d93b6eb24505fb3b1fcf3d6
|
1d2876eca2103bd67d8eb695b0096db4acaec1fe
|
/generate_face/utils.py
|
19b7696d79c586d5cf7da1333fd4166f93492e8e
|
[] |
no_license
|
liuaishan/FaceAdv
|
7a02e245593ac0d643a08da7e9d176f0fc5650c3
|
5e953d6c861991e715f3078c7c9426fca0551164
|
refs/heads/master
| 2020-05-15T05:12:06.661716
| 2019-04-28T15:31:03
| 2019-04-28T15:31:03
| 182,101,738
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,845
|
py
|
import random
import time
import datetime
import sys
import cv2
from torch.autograd import Variable
import torch
import numpy as np
from torchvision.utils import save_image
def tensor2image(tensor):
image = 127.5*(tensor[0].cpu().float().numpy() + 1.0)
if image.shape[0] == 1:
image = np.tile(image, (3,1,1))
return image.astype(np.uint8)
class Logger():
def __init__(self, n_epochs, batches_epoch, sample_interval, n_samples=5):
self.n_epochs = n_epochs
self.batches_epoch = batches_epoch
self.sample_interval = sample_interval
self.batches_done = 1
self.prev_time = time.time()
self.mean_period = 0
self.losses = {}
self.n_samples = n_samples
self.past_images = []
self.past_cifar = []
def log(self, losses=None, images=None, epoch=0, batch=0):
self.mean_period += (time.time() - self.prev_time)
self.prev_time = time.time()
epoch += 1
batch += 1
sys.stdout.write('\rEpoch %03d/%03d [%04d/%04d] -- ' % (epoch, self.n_epochs, batch, self.batches_epoch))
for i, loss_name in enumerate(losses.keys()):
if loss_name not in self.losses:
self.losses[loss_name] = 0
self.losses[loss_name] += losses[loss_name].item()
sys.stdout.write('%s: %.4f | ' % (loss_name, self.losses[loss_name]/self.batches_done))
batches_left = self.batches_epoch*(self.n_epochs - epoch) + self.batches_epoch - batch
sys.stdout.write('ETA: %s' % (datetime.timedelta(seconds=batches_left*self.mean_period/self.batches_done)))
# Save image sample
image_sample = torch.cat((images['real'].data, images['fake'].data), -2)
image_cifar = images['cifar'].data
self.past_images.append(image_sample)
self.past_cifar.append(image_cifar)
if len(self.past_images) > self.n_samples:
self.past_images.pop(0)
if len(self.past_cifar) > self.n_samples:
self.past_cifar.pop(0)
# If at sample interval save past samples
if self.batches_done % self.sample_interval == 0 and images is not None:
save_image(torch.cat(self.past_images, -1),
'./images/%d_gener.png' % self.batches_done,
normalize=True)
save_image(torch.cat(self.past_cifar, -1),
'./images/%d_cifar.png' % self.batches_done,
normalize=True)
self.batches_done += 1
class ReplayBuffer():
def __init__(self, max_size=50):
assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'
self.max_size = max_size
self.data = []
def push_and_pop(self, data):
to_return = []
for element in data.data:
element = torch.unsqueeze(element, 0)
if len(self.data) < self.max_size:
self.data.append(element)
to_return.append(element)
else:
if random.uniform(0,1) > 0.5:
i = random.randint(0, self.max_size-1)
to_return.append(self.data[i].clone())
self.data[i] = element
else:
to_return.append(element)
return Variable(torch.cat(to_return))
class LambdaLR():
def __init__(self, n_epochs, offset, decay_start_epoch):
assert ((n_epochs - decay_start_epoch) > 0), "Decay must start before the training session ends!"
self.n_epochs = n_epochs
self.offset = offset
self.decay_start_epoch = decay_start_epoch
def step(self, epoch):
return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch)/(self.n_epochs - self.decay_start_epoch)
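# Usage sketch (an assumption: paired with torch's scheduler, CycleGAN-style):
#   scheduler = torch.optim.lr_scheduler.LambdaLR(
#       optimizer, lr_lambda=LambdaLR(n_epochs=200, offset=0, decay_start_epoch=100).step)
# This keeps the learning rate flat for 100 epochs, then decays it linearly to 0.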
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
def possion(src, dst, center, mode = cv2.NORMAL_CLONE):
"""
@src: the pic which will be embed
shape = N W H C, requires element range form 0 to 255
@dst: the pic the src will be embed in
@center: where to embed the picture
@mode: how to embed
@return: composite picture
"""
src = src.astype(np.uint8)
dst = dst.astype(np.uint8)
shape = src[0].shape
out = []
mask = 255 * np.ones(shape, src.dtype)
mask = mask.astype(np.uint8)
for i in range(src.shape[0]):
out.append(cv2.seamlessClone(src[i], dst[i], mask, center[i], mode))
out = np.array(out).swapaxes(2,3).swapaxes(1,2)
return out
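# A minimal, self-contained sketch exercising possion() on random data; the
# shapes and centers below are assumptions for illustration, not from the repo.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    src = rng.integers(0, 255, size=(2, 32, 32, 3)).astype(np.float32)    # patches
    dst = rng.integers(0, 255, size=(2, 128, 128, 3)).astype(np.float32)  # canvases
    centers = [(64, 64), (64, 64)]        # paste location per image
    out = possion(src, dst, centers)      # seamless-clone each patch into its canvas
    print(out.shape)                      # (2, 3, 128, 128) after the axis swaps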
|
[
"157908876@qq.com"
] |
157908876@qq.com
|
89bd3418dcef3de5223cd4e346518cdd303334a8
|
6f0e0cb3b408f803bd31577959a8a05b90d37e42
|
/utils/serialization.py
|
f647db170958fbc2eeb22b535100ec04ee0ceadd
|
[] |
no_license
|
aralhekimoglu/ThesisImagePR
|
8860189bc2869aa03b19f36eaca283b41f7ef39f
|
3842e12312fa699cc0dc49e3a112aca453e2aae5
|
refs/heads/master
| 2022-12-10T10:06:59.324211
| 2020-01-05T15:49:12
| 2020-01-05T15:49:12
| 231,939,688
| 0
| 0
| null | 2022-12-07T23:54:12
| 2020-01-05T15:47:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
from __future__ import print_function, absolute_import
import json
import os.path as osp
import shutil
import torch
from torch.nn import Parameter
def read_json(fpath):
with open(fpath, 'r') as f:
obj = json.load(f)
return obj
def load_checkpoint(fpath):
if osp.isfile(fpath):
checkpoint = torch.load(fpath)
print("=> Loaded checkpoint '{}'".format(fpath))
return checkpoint
else:
raise ValueError("=> No checkpoint found at '{}'".format(fpath))
def copy_state_dict(state_dict, model, strip=None):
tgt_state = model.state_dict()
copied_names = set()
for name, param in state_dict.items():
if strip is not None and name.startswith(strip):
name = name[len(strip):]
if name not in tgt_state:
continue
if isinstance(param, Parameter):
param = param.data
if param.size() != tgt_state[name].size():
print('mismatch:', name, param.size(), tgt_state[name].size())
continue
tgt_state[name].copy_(param)
copied_names.add(name)
missing = set(tgt_state.keys()) - copied_names
if len(missing) > 0:
print("missing keys in state_dict:", missing)
return model
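# A minimal sketch (hypothetical model and checkpoint) showing how
# copy_state_dict() strips a DataParallel-style 'module.' prefix while copying:
if __name__ == "__main__":
    import torch.nn as nn
    net = nn.Linear(4, 2)
    saved = {'module.weight': torch.ones(2, 4), 'module.bias': torch.zeros(2)}
    net = copy_state_dict(saved, net, strip='module.')
    print(net.weight.data.sum().item())  # 8.0 -> every weight was copied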
|
[
"aral.hekimoglu@gmail.com"
] |
aral.hekimoglu@gmail.com
|
6edcf5b03150c8e943e7fa762917b36ca3391087
|
0fa00ecf2dd671515dc001d4b14049ec6a0c1f1c
|
/custom_components/icloud3/support/service_handler.py
|
ca716f98124a0c0f3ddcc8d976d93751544e5677
|
[
"Unlicense"
] |
permissive
|
bacco007/HomeAssistantConfig
|
d91a5368344f50abbea881bd1e6dfc57a0e456ca
|
8548d9999ddd54f13d6a307e013abcb8c897a74e
|
refs/heads/master
| 2023-08-30T07:07:33.571959
| 2023-08-29T20:00:00
| 2023-08-29T20:00:00
| 230,585,631
| 98
| 16
|
Unlicense
| 2023-09-09T08:28:39
| 2019-12-28T09:05:02
|
Python
|
UTF-8
|
Python
| false
| false
| 22,333
|
py
|
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#
# ICLOUD SERVICE HANDLER MODULE
#
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
import homeassistant.helpers.config_validation as cv
from homeassistant import data_entry_flow
import voluptuous as vol
import asyncio
from ..global_variables import GlobalVariables as Gb
from ..const import (DOMAIN,
HHMMSS_ZERO, HIGH_INTEGER, EVLOG_ALERT, EVLOG_ERROR,
WAZE,
CMD_RESET_PYICLOUD_SESSION,
LOCATION, NEXT_UPDATE_TIME, NEXT_UPDATE, INTERVAL,
CONF_DEVICENAME, CONF_ZONE, CONF_COMMAND, CONF_LOG_LEVEL,
ICLOUD_LOST_MODE_CAPABLE,
)
from ..support import config_file
from ..support import iosapp_interface
from ..support import start_ic3
from ..support import determine_interval as det_interval
from ..helpers.common import (instr, )
from ..helpers.messaging import (post_event, post_error_msg, post_monitor_msg,
write_ic3_log_recd, post_alert, clear_alert,
log_info_msg, log_debug_msg, log_exception,
open_ic3_log_file, close_ic3_log_file,
close_reopen_ic3_log_file, delete_open_log_file,
_trace, _traceha, )
from ..helpers.time_util import (secs_to_time, time_str_to_secs, datetime_now, secs_since,
time_now_secs, time_now, )
# from ..config_flow import ActionSettingsFlowManager
# from .. import config_flow
# EvLog Action Commands
CMD_ERROR = 'error'
CMD_PAUSE = 'pause'
CMD_RESUME = 'resume'
CMD_WAZE = 'waze'
CMD_REQUEST_LOCATION = 'location'
CMD_EXPORT_EVENT_LOG = 'export_event_log'
CMD_WAZEHIST_MAINTENANCE = 'wazehist_maint'
CMD_WAZEHIST_TRACK = 'wazehist_track'
CMD_DISPLAY_STARTUP_EVENTS = 'startuplog'
CMD_RESET_PYICLOUD_SESSION = 'reset_session'
CMD_LOG_LEVEL = 'log_level'
CMD_REFRESH_EVENT_LOG = 'refresh_event_log'
CMD_RESTART = 'restart'
CMD_CONFIG_FLOW = 'config_flow'
CMD_FIND_DEVICE_ALERT = 'find_alert'
CMD_LOCATE = 'locate'
REFRESH_EVLOG_FNAME = 'Refresh Event Log'
HIDE_TRACKING_MONITORS_FNAME = 'Hide Tracking Monitors'
SHOW_TRACKING_MONITORS_FNAME = 'Show Tracking Monitors'
GLOBAL_ACTIONS = [CMD_EXPORT_EVENT_LOG,
CMD_DISPLAY_STARTUP_EVENTS,
CMD_RESET_PYICLOUD_SESSION,
CMD_WAZE,
CMD_REFRESH_EVENT_LOG,
CMD_RESTART,
CMD_CONFIG_FLOW,
CMD_LOG_LEVEL,
CMD_WAZEHIST_MAINTENANCE,
CMD_WAZEHIST_TRACK,
'event_log_version',
]
DEVICE_ACTIONS = [CMD_REQUEST_LOCATION,
CMD_PAUSE,
CMD_RESUME,
CMD_FIND_DEVICE_ALERT,
CMD_LOCATE, ]
NO_EVLOG_ACTION_POST_EVENT = [
'Show Startup Log, Errors & Alerts',
REFRESH_EVLOG_FNAME,
HIDE_TRACKING_MONITORS_FNAME,
SHOW_TRACKING_MONITORS_FNAME,
CMD_DISPLAY_STARTUP_EVENTS,
'event_log_version',
'Event Log Version']
ACTION_FNAME_TO_ACTION = {
'Restart iCloud3': 'restart',
'Pause Tracking': 'pause',
'Resume Tracking': 'resume',
'Locate Device(s) using iCloud FamShr': 'locate',
'Send Locate Request to iOS App': 'locate iosapp'
}
SERVICE_SCHEMA = vol.Schema({
vol.Optional('command'): cv.string,
vol.Optional('action'): cv.string,
vol.Optional(CONF_DEVICENAME): cv.slugify,
vol.Optional('action_fname'): cv.string,
vol.Optional('number'): cv.string,
vol.Optional('message'): cv.string,
})
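# Illustrative payloads accepted by SERVICE_SCHEMA (the devicenames are
# hypothetical); voluptuous raises vol.Invalid for anything outside the schema:
#   SERVICE_SCHEMA({'action': 'locate 15 mins', 'devicename': 'gary_iphone'})
#   SERVICE_SCHEMA({'command': 'pause'})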
from homeassistant.util.location import distance
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#
# DEFINE THE PROCESS INVOKED BY THE HASS.SERVICES.REGISTER FOR EACH SERVICE
#
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def process_update_service_request(call):
""" icloud3.update service call request """
action = (call.data.get('command') or call.data.get('action') or '').lower()
action_fname = call.data.get('action_fname')
devicename = call.data.get(CONF_DEVICENAME)
action, devicename = resolve_action_devicename_values(action, devicename)
if action is not None:
update_service_handler(action, action_fname, devicename)
#--------------------------------------------------------------------
def process_restart_icloud3_service_request(call):
""" icloud3.restart service call request """
Gb.restart_icloud3_request_flag = True
#--------------------------------------------------------------------
def process_find_iphone_alert_service_request(call):
"""Call the find_iphone_alert to play a sound on the phone"""
devicename = call.data.get(CONF_DEVICENAME)
action, devicename = resolve_action_devicename_values("", devicename)
find_iphone_alert_service_handler(devicename)
#--------------------------------------------------------------------
def process_lost_device_alert_service_request(call):
"""Call the find_iphone_alert to play a sound on the phone"""
devicename = call.data.get(CONF_DEVICENAME)
number = call.data.get('number')
message = call.data.get('message')
action, devicename = resolve_action_devicename_values("", devicename)
try:
Device = Gb.Devices_by_devicename.get(devicename)
devicename = devicename or '?'
number = number or '?'
message = message or ('This Phone has been lost. '
                      'Please call this number to report it found.')
if Device is None:
result_msg = f"Failed, Unknown device_name-{devicename}"
elif devicename == '?' or number == '?' or message == '?' :
result_msg = ( f"Required field missing, device_name-{devicename}, "
f"number-{number}, message-{message}")
elif (Device.PyiCloud_RawData_famshr
and Device.PyiCloud_RawData_famshr.device_data
and Device.PyiCloud_RawData_famshr.device_data.get(ICLOUD_LOST_MODE_CAPABLE, False)):
lost_device_alert_service_handler(devicename, number, message)
result_msg = ( f"Alert Notification sent, Device-{Device.fname_devicename}, "
f"Number-{number}, Message-{message}")
else:
result_msg = f"Device {Device.fname_devicename} can not receive Lost Device Alerts"
except Exception as err:
log_exception(err)
result_msg = "Internal Error"
post_event(f"{EVLOG_ERROR}Lost Mode Alert > {result_msg}")
#--------------------------------------------------------------------
def resolve_action_devicename_values(action, devicename):
'''
Convert the action and devicenames to their actual internal values when they are being
executed from the Developer Tools/Services screen.
- action text description --> action parameter
- ha_device_id --> devicename parameter
Return the action and devicename values
'''
# Convert action and devicename to the real values if the service call
# is coming in from the Developer Tools/Services screen
if action in ACTION_FNAME_TO_ACTION:
action = ACTION_FNAME_TO_ACTION[action]
if devicename in Gb.Devices_by_ha_device_id:
devicename = Gb.Devices_by_ha_device_id[devicename].devicename
if devicename not in Gb.Devices_by_devicename:
devicename = None
return action, devicename
#--------------------------------------------------------------------
def _post_device_event_msg(devicename, msg):
if devicename:
post_event(devicename, msg)
else:
post_event(msg)
def _post_device_monitor_msg(devicename, msg):
if devicename:
post_monitor_msg(devicename, msg)
else:
post_monitor_msg(msg)
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#
# DEFINE THE PROCESS INVOKED BY THE HASS.SERVICES.REGISTER FOR EACH SERVICE
#
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def register_icloud3_services():
''' Register iCloud3 Service Call Handlers '''
try:
Gb.hass.services.register(DOMAIN, 'action',
process_update_service_request, schema=SERVICE_SCHEMA)
Gb.hass.services.register(DOMAIN, 'update',
process_update_service_request, schema=SERVICE_SCHEMA)
Gb.hass.services.register(DOMAIN, 'restart',
process_restart_icloud3_service_request, schema=SERVICE_SCHEMA)
Gb.hass.services.register(DOMAIN, 'find_iphone_alert',
process_find_iphone_alert_service_request, schema=SERVICE_SCHEMA)
Gb.hass.services.register(DOMAIN, 'lost_device_alert',
process_lost_device_alert_service_request, schema=SERVICE_SCHEMA)
return True
except Exception as err:
log_exception(err)
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#
# ROUTINES THAT HANDLE THE INDIVIDUAL SERVICE REQUESTS
#
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def update_service_handler(action_entry=None, action_fname=None, devicename=None):
"""
Authenticate against iCloud and scan for devices.
Actions:
- pause - stop polling for the devicename or all devices
- resume - resume polling devicename or all devices, reset
the interval override to normal interval
calculations
- pause-resume - same as above but toggles between pause and resume
- reset - reset everything and rescans all of the devices
- location - request location update from ios app
- locate x mins - locate in x minutes from FamShr or FmF
- locate iosapp - request location update from ios app
- config_flow - Display the Configure screens handled by the config_flow module
"""
# Ignore Action requests during startup. They are caused by the devicename changes
# to the EvLog attributes indicating the startup stage.
if (Gb.start_icloud3_inprocess_flag
or action_entry is None):
return
action = action_entry
if action == f"{CMD_REFRESH_EVENT_LOG}+clear_alerts":
action = CMD_REFRESH_EVENT_LOG
clear_alert()
if (action == CMD_REFRESH_EVENT_LOG
and Gb.EvLog.secs_since_refresh <= 2
and Gb.EvLog.last_refresh_devicename == devicename):
_post_device_monitor_msg(devicename, f"Service Action Ignored > {action_fname}, Action-{action_entry}")
return
if action_fname not in NO_EVLOG_ACTION_POST_EVENT:
_post_device_monitor_msg(devicename, f"Service Action Received > Action-{action_entry}")
action_entry = action_entry.replace('eventlog', 'monitor')
action_entry = action_entry.replace(':', '')
action = action_entry.split(' ')[0]
action_option = action_entry.replace(action, '').strip()
# EvLog version sent from the EvLog program already set, ignore the svc call
if action == 'event_log_version' and Gb.evlog_version == action_option:
return
devicename_msg = devicename if devicename in Gb.Devices_by_devicename else None
action_msg = action_fname if action_fname else f"{action.title()}"
event_msg = f"Service Action > Action-{action_msg}"
if action_option: event_msg += f", Options-{action_option}"
if devicename: event_msg += f", Device-{devicename}"
if action_fname not in NO_EVLOG_ACTION_POST_EVENT:
_post_device_event_msg(devicename_msg, event_msg)
if action in GLOBAL_ACTIONS:
_handle_global_action(action, action_option)
elif devicename == 'startup_log':
pass
elif action in DEVICE_ACTIONS:
if devicename:
Devices = [Gb.Devices_by_devicename[devicename]]
else:
Devices = [Device for Device in Gb.Devices_by_devicename.values()]
if action == CMD_PAUSE:
if devicename is None:
Gb.all_tracking_paused_flag = True
Gb.EvLog.display_user_message('Tracking is Paused', alert=True)
for Device in Devices:
Device.pause_tracking()
elif action == CMD_RESUME:
Gb.all_tracking_paused_flag = False
Gb.EvLog.display_user_message('', clear_alert=True)
for Device in Devices:
Device.resume_tracking()
elif action == CMD_LOCATE:
for Device in Devices:
_handle_action_device_locate(Device, action_option)
elif action == CMD_REQUEST_LOCATION:
for Device in Devices:
_handle_action_device_location_iosapp(Device)
elif action == 'delete_log':
delete_open_log_file()
else:
return
if devicename == 'startup_log':
pass
elif (Gb.EvLog.evlog_attrs['fname'] == 'Startup Events'
and action == 'log_level'
and action_option == 'monitor'):
devicename = 'startup_log'
Gb.EvLog.update_event_log_display(devicename)
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#
# HANDLE THE VARIOUS ACTION REQUESTS
#
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def _handle_global_action(global_action, action_option):
if global_action == CMD_RESTART:
Gb.log_debug_flag_restart = Gb.log_debug_flag
Gb.log_rawdata_flag_restart = Gb.log_rawdata_flag
Gb.restart_icloud3_request_flag = True
Gb.EvLog.display_user_message('iCloud3 is Restarting', clear_alert=True)
close_ic3_log_file()
open_ic3_log_file()
write_ic3_log_recd(f"\n{'-'*25} Opened by Event Log > Actions > Restart {'-'*25}")
return
elif global_action == CMD_EXPORT_EVENT_LOG:
Gb.EvLog.export_event_log()
return
elif global_action == CMD_REFRESH_EVENT_LOG:
return
elif global_action == CMD_CONFIG_FLOW:
_handle_action_config_flow_settings()
return
elif global_action == CMD_DISPLAY_STARTUP_EVENTS:
return
elif global_action == CMD_RESET_PYICLOUD_SESSION:
# This will be handled in the 5-second ic3 loop
Gb.evlog_action_request = CMD_RESET_PYICLOUD_SESSION
return
elif global_action == CMD_LOG_LEVEL:
handle_action_log_level(action_option)
return
elif global_action == CMD_WAZEHIST_MAINTENANCE:
event_msg = "Waze History > Recalculate Route Time/Distance "
if Gb.wazehist_recalculate_time_dist_flag:
event_msg += "Starting Immediately"
post_event(event_msg)
Gb.WazeHist.wazehist_recalculate_time_dist_all_zones()
else:
Gb.wazehist_recalculate_time_dist_flag = True
event_msg += "Scheduled to run tonight at Midnight"
post_event(event_msg)
elif global_action == CMD_WAZEHIST_TRACK:
event_msg = ("Waze History > Update Location Map Display Points "
"Scheduled for Midnight")
post_event(event_msg)
Gb.WazeHist.wazehist_update_track_sensor()
return
elif global_action == 'event_log_version':
# Gb.evlog_version = action_option
# Gb.EvLog.evlog_attrs["version_evlog"] = action_option
Gb.conf_profile['event_log_version'] = action_option
config_file.write_storage_icloud3_configuration_file()
#--------------------------------------------------------------------
def handle_action_log_level(action_option, change_conf_log_level=True):
if instr(action_option, 'monitor'):
Gb.evlog_trk_monitors_flag = (not Gb.evlog_trk_monitors_flag)
return
new_log_debug_flag = Gb.log_debug_flag
new_log_rawdata_flag = Gb.log_rawdata_flag
if instr(action_option, 'debug'):
new_log_debug_flag = (not Gb.log_debug_flag)
new_log_rawdata_flag = False
if instr(action_option, 'rawdata'):
new_log_rawdata_flag = (not Gb.log_rawdata_flag)
new_log_debug_flag = new_log_rawdata_flag
if new_log_rawdata_flag is False:
Gb.log_rawdata_flag_unfiltered = False
new_log_level = 'rawdata-auto-reset' if new_log_rawdata_flag \
else 'debug-auto-reset' if new_log_debug_flag \
else 'info'
start_ic3.set_log_level(new_log_level)
start_ic3.update_conf_file_log_level(new_log_level)
log_level_fname = new_log_level.replace('-', ' ').title()
event_msg = f"Debug Log Level > {log_level_fname}"
post_event(event_msg)
open_ic3_log_file()
write_ic3_log_recd(f"\n{'-'*25} Change Log Level to: {log_level_fname} {'-'*25}")
def _on_off_text(condition):
return 'On' if condition else 'Off'
#--------------------------------------------------------------------
def _handle_action_config_flow_settings():
'''
Handle displaying and updating the parameters using the config_flow screens
'''
try:
if Gb.SettingsFlowManager is not None:
Gb.hass.loop.create_task(Gb.SettingsFlowManager.async_show_menu_handler())
except Exception as err:
log_exception(err)
#--------------------------------------------------------------------
def _handle_action_device_location_iosapp(Device):
'''
Request ios app location from the EvLog > Actions
'''
Device.display_info_msg('Updating Location')
if Device.iosapp_monitor_flag:
Device.iosapp_data_change_reason = f"Location Requested@{time_now()}"
iosapp_interface.request_location(Device, force_request=True)
# Device.resume_tracking()
# Device.write_ha_sensor_state(NEXT_UPDATE, 'Locating')
#--------------------------------------------------------------------
def _handle_action_device_locate(Device, action_option):
'''
Set the next update time & interval from the Action > locate service call
'''
if action_option == 'iosapp':
_handle_action_device_location_iosapp(Device)
return
if Gb.primary_data_source_ICLOUD is False or Device.is_data_source_ICLOUD is False:
post_event(Device.devicename, "iCloud Location Tracking is not available")
return
try:
interval_secs = time_str_to_secs(action_option)
if interval_secs == 0:
interval_secs = 5
except:
interval_secs = 5
Gb.force_icloud_update_flag = True
det_interval.update_all_device_fm_zone_sensors_interval(Device, interval_secs)
Device.icloud_update_reason = f"Location Requested@{time_now()}"
post_event(Device.devicename, f"Location will be updated at {Device.sensors[NEXT_UPDATE_TIME]}")
Device.write_ha_sensors_state([NEXT_UPDATE, INTERVAL])
#--------------------------------------------------------------------
def set_ha_notification(title, message, issue=True):
'''
Format an HA Notification
'''
Gb.ha_notification = {
'title': title,
'message': f'{message}<br><br>*iCloud3 Notification {datetime_now()}*',
'notification_id': DOMAIN}
if issue:
issue_ha_notification()
#--------------------------------------------------------------------
def issue_ha_notification():
if Gb.ha_notification == {}:
return
Gb.hass.services.call("persistent_notification", "create", Gb.ha_notification)
Gb.ha_notification = {}
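# Usage sketch (hypothetical title/message): set_ha_notification() builds the
# persistent_notification payload and, with issue=True, posts it immediately:
#   set_ha_notification('iCloud3 Alert', 'Tracking paused for gary_iphone')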
#--------------------------------------------------------------------
def find_iphone_alert_service_handler(devicename):
"""
Call the lost iPhone function if using the FamShr tracking method.
Otherwise, send a notification to the iOS App
"""
Device = Gb.Devices_by_devicename[devicename]
if Device.is_data_source_FAMSHR:
device_id = Device.device_id_famshr
if device_id and Gb.PyiCloud and Gb.PyiCloud.FamilySharing:
Gb.PyiCloud.FamilySharing.play_sound(device_id, subject="Find My iPhone Alert")
post_event(devicename, "iCloud Find My iPhone Alert sent")
return
event_msg = ("iCloud Device not available, the alert will be sent to the iOS App")
post_event(devicename, event_msg)
message = {"message": "Find My iPhone Alert",
"data": {
"push": {
"sound": {
"name": "alarm.caf",
"critical": 1,
"volume": 1
}
}
}
}
iosapp_interface.send_message_to_device(Device, message)
#--------------------------------------------------------------------
def lost_device_alert_service_handler(devicename, number, message=None):
"""
Call the lost iPhone function if using the FamShr tracking method.
Otherwise, send a notification to the iOS App
"""
if message is None:
message = 'This Phone has been lost. Please call this number to report it found.'
Device = Gb.Devices_by_devicename[devicename]
if Device.is_data_source_FAMSHR:
device_id = Device.device_id_famshr
if device_id and Gb.PyiCloud and Gb.PyiCloud.FamilySharing:
Gb.PyiCloud.FamilySharing.lost_device(device_id, number=number, message=message)
post_event(devicename, "iCloud Lost Device Alert sent")
return
|
[
"thomas@thomasbaxter.info"
] |
thomas@thomasbaxter.info
|
48cc0d40c135bbab7009debb0fa2b4ef391485d5
|
d74afbb1cfd00786fda380eb44f9524dc3182fd5
|
/application/factory.py
|
416a557c3985b2803c0a90eeaa43000bb0ea8bf9
|
[
"MIT"
] |
permissive
|
kdknive/flask-api-template
|
95b9ea35ec9b0742e55459aa6b4934f2ffbea33f
|
f816d8af8633f5eabb9c8b611841a77cf6cd7c9b
|
refs/heads/main
| 2023-08-10T17:56:08.853642
| 2021-10-03T21:19:17
| 2021-10-03T21:19:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
from flask import Flask
from application.api.moon import moon
from application.api.sun import sun
def create_app():
app = Flask(__name__)
app.register_blueprint(moon, url_prefix=moon.url_prefix)
app.register_blueprint(sun, url_prefix=sun.url_prefix)
return app
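# Minimal run sketch; launching via the Flask dev server here is an
# illustration (an assumption), not part of the template itself:
if __name__ == "__main__":
    create_app().run(debug=True)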
|
[
"david.salter@outlook.com"
] |
david.salter@outlook.com
|
4492af906e447a4ad38c8eb08ed07bee989de5d3
|
cf646fadb13f645753b0a9d2353d5679929d22e8
|
/cricket_tournament/models/score_board.py
|
f9c93cc74ff0dbd2ec878a5d0ecf486282528809
|
[] |
no_license
|
vivektiwari-007/cricket_score_board
|
f84faefc152353f15a1612009c24e1d519311d0c
|
ab5b5231cae97198aabd2a93eaefa40fcf764a42
|
refs/heads/master
| 2022-04-08T07:46:38.602261
| 2020-02-18T13:18:53
| 2020-02-18T13:18:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,279
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class ScoreBoard(models.Model):
_name = "score.board"
_description = "Score Board"
company_id = fields.Many2one('res.company', required=True, default=lambda self: self.env.company)
over = fields.Float(string="Over", digits=(12, 1))
run = fields.Selection([('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7')])
ballresult = fields.Selection([('batted', 'Batted'), ('extra', 'Extra'), ('out', 'Out')], string="Ball Result")
selectruninextra = fields.Selection([('wide', 'Wide'), ('noball', 'Noball'), ('bye', 'Bye')], string="Select Run Extra")
selectbatsmanisout = fields.Selection([('bowled', 'Bowled'), ('catch', 'Catch')], string="Select Batsman Out")
stricker_id = fields.Many2one("user.detail", string="Striker", domain="[('user_id', 'in', tosswinnername)]")
nonstricker_id = fields.Many2one("user.detail", string="Non-Striker", domain="[('user_id', 'in', tosswinnername)]")
bowler_id = fields.Many2one("user.detail", string="Bowler", domain="[('user_id','not in',tosswinnername)]")
description = fields.Text(string="Commentry")
toss = fields.Many2one("toss.detail", string="toss")
tosswinnername = fields.Char(string="Batting Team")
totalrun = fields.Integer(compute="_compute_totalrun", store=True)
def button_run_0(self):
self.write({'run': "0"})
return True
def button_run_1(self):
self.write({'run': "1"})
return True
def button_run_2(self):
self.write({'run': "2"})
return True
def button_run_3(self):
self.write({'run': "3"})
return True
def button_run_4(self):
self.write({'run': "4"})
return True
def button_run_5(self):
self.write({'run': "5"})
return True
def button_run_6(self):
self.write({'run': "6"})
return True
def button_run_7(self):
self.write({'run': "7"})
return True
def button_run_out(self):
self.write({'ballresult': "out"})
return True
def button_run_wd(self):
self.write({'selectruninextra': "wide"})
return True
def button_run_nb(self):
self.write({'selectruninextra': "noball"})
return True
def button_run_bye(self):
self.write({'selectruninextra': "bye"})
return True
@api.depends('run')
def _compute_totalrun(self):
total = self.env['score.board'].search([])
score = 0
if total:
for t in total:
score = score + int(t.run)
for r in self:
r.totalrun = score
@api.onchange('toss')
def onchange_getData(self):
toss_data = self.env.context.get('current_id')
rec = self.env['toss.detail'].browse([toss_data])
if rec.decide == 'batting':
self.tosswinnername = rec.tosswinnername.name
else:
if rec.tosswinnername.name == rec.name.team2name_id.name:
self.tosswinnername = rec.name.team1name_id.name
else:
self.tosswinnername = rec.name.team2name_id.name
|
[
"tiwarivivek389.com"
] |
tiwarivivek389.com
|
bb4ad2746d26da85b8fdacf01f3f4797e6f2fe89
|
86f5d75d1bbbfa2d1f12c574d65415c9216a522f
|
/nairalirayoga/core/migrations/0013_auto_20160316_0242.py
|
1ee13901775281baa6621492c04d707d2618fe9c
|
[] |
no_license
|
joaosr/nairalirayoga
|
e5f260cc87348724ef8a8852655fc6ebd68172a4
|
4d222b14add9e832f217a8b9d14d6676eb560e03
|
refs/heads/master
| 2020-04-05T23:09:19.710486
| 2016-09-30T00:22:51
| 2016-09-30T00:22:51
| 58,102,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-16 02:42
from __future__ import unicode_literals
from django.db import migrations, models
import nairalirayoga.core.models
class Migration(migrations.Migration):
dependencies = [
('core', '0012_auto_20160316_0137'),
]
operations = [
migrations.AlterField(
model_name='professor',
name='foto',
field=models.ImageField(blank=True, default='no-img-professor.png', height_field='height_field', null=True, upload_to=nairalirayoga.core.models.upload_location, verbose_name='Foto', width_field='width_field'),
),
]
|
[
"eng.jmsoares@gmail.com"
] |
eng.jmsoares@gmail.com
|
7be7b33049864554e896f01363cd6ae90d48f9d6
|
d755c825cacbb60e9a23234fbaaf93de48c6a058
|
/June-CookOff-2019/multipleChoiceInput.py
|
dadd7cf19e50ae0fea6efb6676ab209a88a4c457
|
[] |
no_license
|
Subhash3/CodeChef
|
d82e4df751e6dd1e07205871b9e2c0b1202d0506
|
5301b0e4555aac55a72f175dfba8f786b4ea7bbd
|
refs/heads/master
| 2020-08-04T20:17:18.339602
| 2020-01-21T16:10:12
| 2020-01-21T16:10:12
| 212,267,130
| 2
| 3
| null | 2019-10-15T12:07:44
| 2019-10-02T06:07:41
|
Python
|
UTF-8
|
Python
| false
| false
| 559
|
py
|
#!/usr/bin/python3
import random
correct = ""
N = int(input())
for i in range(N) :
a = random.randint(1, 5)
if a == 1 :
correct += 'A'
elif a == 2 :
correct += 'B'
elif a == 3 :
correct += 'C'
else :
correct += 'D'
chef = ""
for i in range(N) :
a = random.randint(1, 6)
if a == 1 :
chef += 'A'
elif a == 2 :
chef+= 'B'
elif a == 3 :
chef += 'C'
elif a == 4 :
chef += 'D'
else :
chef += 'N'
print(1)
print(N)
print(correct)
print(chef)
|
[
"subhashsarangi123@gmail.com"
] |
subhashsarangi123@gmail.com
|
e1441f3392629cded5e0890daa2c8b20e8cb5e4c
|
dd405b65af6793d9df52b26a0d6d8ba2d07985f4
|
/trade/rl_train.py
|
b95cd1566cefc65877a340862b70ef908538a50e
|
[] |
no_license
|
bendavidsteel/trade-democratization
|
1965b16b38d77dcb860270c0f7dde3ba05ec056f
|
ea321f45b996f905db32535dd4d79902b350ce4a
|
refs/heads/master
| 2023-01-24T19:59:36.892975
| 2020-11-24T23:49:16
| 2020-11-24T23:49:16
| 265,402,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,306
|
py
|
import math
import torch
import tqdm
import nation_agent
import nation_env
import utils
GAMMA = 0.999
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 50
NUM_EPISODES = 100
REPLAY_CAPACITY = 20
NUM_COUNTRIES = 20
NUM_YEARS_PER_ROUND = 100
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
env = nation_env.NationEnvironment(NUM_COUNTRIES, device)
agents = nation_agent.InternationalAgentCollection(NUM_COUNTRIES, REPLAY_CAPACITY, env.num_foreign_actions, env.num_domestic_actions, GAMMA, device)
for i_episode in range(NUM_EPISODES):
# Initialize the environment and state
env.reset()
agents.reset(env.clusters, env.norm_initial_demo)
# reward stats
reward_mean = 0
reward_var = 0
with tqdm.tqdm(range(NUM_YEARS_PER_ROUND)) as years:
for year in years:
years.set_postfix(str="Reward Mean: %i, Reward Var: %i" % (reward_mean, reward_var))
# get state at start of round
state = env.norm_state
eps_threshold = EPS_END + (EPS_START - EPS_END) * \
math.exp(-1. * i_episode / EPS_DECAY)
# Select and perform an action
actions = agents.select_actions(env.norm_state, eps_threshold)
utils.apply_actions(actions, env)
# let environment take step
env.step()
# Observe new state
next_state = env.norm_state
# get the reward
rewards = env.get_rewards()
# Store the transition in memory
for agent_id in range(NUM_COUNTRIES):
reward = rewards[agent_id]
action = utils.Action(foreign = actions[agent_id][0],
domestic = actions[agent_id][1])
transition = utils.Transition(state = state,
action = action,
next_state = next_state,
reward = reward)
agents[agent_id].add_transition(transition)
reward_mean = torch.mean(rewards)
reward_var = torch.var(rewards)
# Perform one step of the optimization (on the target network)
agents.optimize()
|
[
"bendavidsteel@gmail.com"
] |
bendavidsteel@gmail.com
|
3fa553721e7d58259f8b91d2d8c2dda5cb91b569
|
27d92b640d3814fa5dc8040b79a99d077cba3aae
|
/cpython/Lib/wsgiref/validate.py
|
49eaa514cc5d0873f0bdcb9a4543cce71881803e
|
[
"GPL-1.0-or-later",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi"
] |
permissive
|
ms-iot/python
|
99a0f4d3dd3926703d49b75910c78c69cdb7aed7
|
a8f8fba1214289572713520f83409762a4446fea
|
refs/heads/develop
| 2022-12-07T23:26:31.339811
| 2017-11-17T02:24:32
| 2017-11-17T02:24:32
| 31,045,533
| 73
| 39
|
BSD-3-Clause
| 2022-11-16T20:24:24
| 2015-02-20T01:01:09
|
Python
|
UTF-8
|
Python
| false
| false
| 15,151
|
py
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Also licenced under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php
# Licensed to PSF under a Contributor Agreement
"""
Middleware to check for obedience to the WSGI specification.
Some of the things this checks:
* Signature of the application and start_response (including that
keyword arguments are not used).
* Environment checks:
- Environment is a dictionary (and not a subclass).
- That all the required keys are in the environment: REQUEST_METHOD,
SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
wsgi.multithread, wsgi.multiprocess, wsgi.run_once
- That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
environment (these headers should appear as CONTENT_LENGTH and
CONTENT_TYPE).
- Warns if QUERY_STRING is missing, as the cgi module acts
unpredictably in that case.
- That CGI-style variables (that don't contain a .) have
(non-unicode) string values
- That wsgi.version is a tuple
- That wsgi.url_scheme is 'http' or 'https' (@@: is this too
restrictive?)
- Warns if the REQUEST_METHOD is not known (@@: probably too
restrictive).
- That SCRIPT_NAME and PATH_INFO are empty or start with /
- That at least one of SCRIPT_NAME or PATH_INFO are set.
- That CONTENT_LENGTH is a positive integer.
- That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
be '/').
- That wsgi.input has the methods read, readline, readlines, and
__iter__
- That wsgi.errors has the methods flush, write, writelines
* The status is a string, contains a space, starts with an integer,
and that integer is in range (> 100).
* That the headers is a list (not a subclass, not another kind of
sequence).
* That the items of the headers are tuples of strings.
* That there is no 'status' header (that is used in CGI, but not in
WSGI).
* That the headers don't contain newlines or colons, end in _ or -, or
contain characters codes below 037.
* That Content-Type is given if there is content (CGI often has a
default content type, but WSGI does not).
* That no Content-Type is given when there is no content (@@: is this
too restrictive?)
* That the exc_info argument to start_response is a tuple or None.
* That all calls to the writer are with strings, and no other methods
on the writer are accessed.
* That wsgi.input is used properly:
- .read() is called with zero or one argument
- That it returns a string
- That readline, readlines, and __iter__ return strings
- That .close() is not called
- No other methods are provided
* That wsgi.errors is used properly:
- .write() and .writelines() is called with a string
- That .close() is not called, and no other methods are provided.
* The response iterator:
- That it is not a string (it should be a list of a single string; a
string will work, but perform horribly).
- That .__next__() returns a string
- That the iterator is not iterated over until start_response has
been called (that can signal either a server or application
error).
- That .close() is called (doesn't raise exception, only prints to
sys.stderr, because we only know it isn't called when the object
is garbage collected).
"""
__all__ = ['validator']
import re
import sys
import warnings
header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
bad_header_value_re = re.compile(r'[\000-\037]')
class WSGIWarning(Warning):
"""
Raised in response to WSGI-spec-related warnings
"""
def assert_(cond, *args):
if not cond:
raise AssertionError(*args)
def check_string_type(value, title):
if type (value) is str:
return value
raise AssertionError(
"{0} must be of type str (got {1})".format(title, repr(value)))
def validator(application):
"""
When applied between a WSGI server and a WSGI application, this
middleware will check for WSGI compliancy on a number of levels.
This middleware does not modify the request or response in any
way, but will raise an AssertionError if anything seems off
(except for a failure to close the application iterator, which
will be printed to stderr -- there's no way to raise an exception
at that point).
"""
def lint_app(*args, **kw):
assert_(len(args) == 2, "Two arguments required")
assert_(not kw, "No keyword arguments allowed")
environ, start_response = args
check_environ(environ)
# We use this to check if the application returns without
# calling start_response:
start_response_started = []
def start_response_wrapper(*args, **kw):
assert_(len(args) == 2 or len(args) == 3, (
"Invalid number of arguments: %s" % (args,)))
assert_(not kw, "No keyword arguments allowed")
status = args[0]
headers = args[1]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
check_status(status)
check_headers(headers)
check_content_type(status, headers)
check_exc_info(exc_info)
start_response_started.append(None)
return WriteWrapper(start_response(*args))
environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
iterator = application(environ, start_response_wrapper)
assert_(iterator is not None and iterator != False,
"The application must return an iterator, if only an empty list")
check_iterator(iterator)
return IteratorWrapper(iterator, start_response_started)
return lint_app
class InputWrapper:
def __init__(self, wsgi_input):
self.input = wsgi_input
def read(self, *args):
assert_(len(args) == 1)
v = self.input.read(*args)
assert_(type(v) is bytes)
return v
def readline(self, *args):
assert_(len(args) <= 1)
v = self.input.readline(*args)
assert_(type(v) is bytes)
return v
def readlines(self, *args):
assert_(len(args) <= 1)
lines = self.input.readlines(*args)
assert_(type(lines) is list)
for line in lines:
assert_(type(line) is bytes)
return lines
def __iter__(self):
while 1:
line = self.readline()
if not line:
return
yield line
def close(self):
assert_(0, "input.close() must not be called")
class ErrorWrapper:
def __init__(self, wsgi_errors):
self.errors = wsgi_errors
def write(self, s):
assert_(type(s) is str)
self.errors.write(s)
def flush(self):
self.errors.flush()
def writelines(self, seq):
for line in seq:
self.write(line)
def close(self):
assert_(0, "errors.close() must not be called")
class WriteWrapper:
def __init__(self, wsgi_writer):
self.writer = wsgi_writer
def __call__(self, s):
assert_(type(s) is bytes)
self.writer(s)
class PartialIteratorWrapper:
def __init__(self, wsgi_iterator):
self.iterator = wsgi_iterator
def __iter__(self):
# We want to make sure __iter__ is called
return IteratorWrapper(self.iterator, None)
class IteratorWrapper:
def __init__(self, wsgi_iterator, check_start_response):
self.original_iterator = wsgi_iterator
self.iterator = iter(wsgi_iterator)
self.closed = False
self.check_start_response = check_start_response
def __iter__(self):
return self
def __next__(self):
assert_(not self.closed,
"Iterator read after closed")
v = next(self.iterator)
if type(v) is not bytes:
assert_(False, "Iterator yielded non-bytestring (%r)" % (v,))
if self.check_start_response is not None:
assert_(self.check_start_response,
"The application returns and we started iterating over its body, but start_response has not yet been called")
self.check_start_response = None
return v
def close(self):
self.closed = True
if hasattr(self.original_iterator, 'close'):
self.original_iterator.close()
def __del__(self):
if not self.closed:
sys.stderr.write(
"Iterator garbage collected without being closed")
assert_(self.closed,
"Iterator garbage collected without being closed")
def check_environ(environ):
assert_(type(environ) is dict,
"Environment is not of the right type: %r (environment: %r)"
% (type(environ), environ))
for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once']:
assert_(key in environ,
"Environment missing required key: %r" % (key,))
for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
assert_(key not in environ,
"Environment should not have the key: %s "
"(use %s instead)" % (key, key[5:]))
if 'QUERY_STRING' not in environ:
warnings.warn(
'QUERY_STRING is not in the WSGI environment; the cgi '
'module will use sys.argv when this variable is missing, '
'so application errors are more likely',
WSGIWarning)
for key in environ.keys():
if '.' in key:
# Extension, we don't care about its type
continue
assert_(type(environ[key]) is str,
"Environmental variable %s is not a string: %r (value: %r)"
% (key, type(environ[key]), environ[key]))
assert_(type(environ['wsgi.version']) is tuple,
"wsgi.version should be a tuple (%r)" % (environ['wsgi.version'],))
assert_(environ['wsgi.url_scheme'] in ('http', 'https'),
"wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme'])
check_input(environ['wsgi.input'])
check_errors(environ['wsgi.errors'])
# @@: these need filling out:
if environ['REQUEST_METHOD'] not in (
'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
warnings.warn(
"Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
WSGIWarning)
assert_(not environ.get('SCRIPT_NAME')
or environ['SCRIPT_NAME'].startswith('/'),
"SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME'])
assert_(not environ.get('PATH_INFO')
or environ['PATH_INFO'].startswith('/'),
"PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
if environ.get('CONTENT_LENGTH'):
assert_(int(environ['CONTENT_LENGTH']) >= 0,
"Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH'])
if not environ.get('SCRIPT_NAME'):
assert_('PATH_INFO' in environ,
"One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO "
"should at least be '/' if SCRIPT_NAME is empty)")
assert_(environ.get('SCRIPT_NAME') != '/',
"SCRIPT_NAME cannot be '/'; it should instead be '', and "
"PATH_INFO should be '/'")
def check_input(wsgi_input):
for attr in ['read', 'readline', 'readlines', '__iter__']:
assert_(hasattr(wsgi_input, attr),
"wsgi.input (%r) doesn't have the attribute %s"
% (wsgi_input, attr))
def check_errors(wsgi_errors):
for attr in ['flush', 'write', 'writelines']:
assert_(hasattr(wsgi_errors, attr),
"wsgi.errors (%r) doesn't have the attribute %s"
% (wsgi_errors, attr))
def check_status(status):
status = check_string_type(status, "Status")
# Implicitly check that we can turn it into an integer:
status_code = status.split(None, 1)[0]
assert_(len(status_code) == 3,
"Status codes must be three characters: %r" % status_code)
status_int = int(status_code)
assert_(status_int >= 100, "Status code is invalid: %r" % status_int)
if len(status) < 4 or status[3] != ' ':
warnings.warn(
"The status string (%r) should be a three-digit integer "
"followed by a single space and a status explanation"
% status, WSGIWarning)
def check_headers(headers):
assert_(type(headers) is list,
"Headers (%r) must be of type list: %r"
% (headers, type(headers)))
header_names = {}
for item in headers:
assert_(type(item) is tuple,
"Individual headers (%r) must be of type tuple: %r"
% (item, type(item)))
assert_(len(item) == 2)
name, value = item
name = check_string_type(name, "Header name")
value = check_string_type(value, "Header value")
assert_(name.lower() != 'status',
"The Status header cannot be used; it conflicts with CGI "
"script, and HTTP status is not given through headers "
"(value: %r)." % value)
header_names[name.lower()] = None
assert_('\n' not in name and ':' not in name,
"Header names may not contain ':' or '\\n': %r" % name)
assert_(header_re.search(name), "Bad header name: %r" % name)
assert_(not name.endswith('-') and not name.endswith('_'),
"Names may not end in '-' or '_': %r" % name)
if bad_header_value_re.search(value):
assert_(0, "Bad header value: %r (bad char: %r)"
% (value, bad_header_value_re.search(value).group(0)))
def check_content_type(status, headers):
status = check_string_type(status, "Status")
code = int(status.split(None, 1)[0])
# @@: need one more person to verify this interpretation of RFC 2616
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
NO_MESSAGE_BODY = (204, 304)
for name, value in headers:
name = check_string_type(name, "Header name")
if name.lower() == 'content-type':
if code not in NO_MESSAGE_BODY:
return
assert_(0, ("Content-Type header found in a %s response, "
"which must not return content.") % code)
if code not in NO_MESSAGE_BODY:
assert_(0, "No Content-Type header found in headers (%s)" % headers)
def check_exc_info(exc_info):
assert_(exc_info is None or type(exc_info) is tuple,
"exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info)))
# More exc_info checks?
def check_iterator(iterator):
# Technically a bytestring is legal, which is why it's a really bad
# idea, because it may cause the response to be returned
# character-by-character
assert_(not isinstance(iterator, (str, bytes)),
"You should not return a string as your application iterator, "
"instead return a single-item list containing a bytestring.")
|
[
"juanyaw@exchange.microsoft.com"
] |
juanyaw@exchange.microsoft.com
|
985f2188c354d18e1eb3ea17b51af94488cee80a
|
5d4249cc7a738eacbd274bfb30c0088176e17eca
|
/client/config/__init__.py
|
cccefba868023df91107cac61a52f57473da872d
|
[] |
no_license
|
pstankiewicz/pihome
|
d194d14c376d28861bbf64da1ba9b3f6f80b1386
|
a930cd18767127c349cdcf1560e04619b3b02024
|
refs/heads/master
| 2023-01-02T06:12:28.839516
| 2020-10-26T06:28:52
| 2020-10-26T06:28:52
| 297,110,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
try:
from .local import *
except ImportError as e:
print(e)
from .base import *
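# Layout sketch (the setting names are hypothetical): base.py holds shared
# defaults and an optional, untracked local.py overrides them per deployment:
#   base.py:  MQTT_HOST = "localhost"
#   local.py: MQTT_HOST = "192.168.1.10"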
|
[
"piotr.stankiewicz@gmail.com"
] |
piotr.stankiewicz@gmail.com
|