Column schema for the records below, with each column's dtype and the observed value range or number of distinct classes:

| column | dtype | observed range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 69 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4 to 63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k to 686M, nullable (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2 to 10.3M |
| extension | string | 246 classes |
| content | string | length 2 to 10.3M |
| authors | list | length 1 |
| author_id | string | length 0 to 212 |
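
As a usage note for the schema above: a minimal sketch of filtering these records, assuming the rows are stored locally as a Parquet shard; the file name `records.parquet` is hypothetical, and `pandas` is only one of several readers that handle this layout:

```python
# Minimal sketch, assuming the records documented above sit in a local
# Parquet shard; "records.parquet" is a hypothetical file name.
import pandas as pd

df = pd.read_parquet("records.parquet")

# Keep permissively licensed, non-vendored, non-generated sources,
# using the license_type / is_vendor / is_generated columns above.
keep = df[
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
]

for row in keep.itertuples(index=False):
    print(row.repo_name, row.path, row.length_bytes)
```

For shards too large for memory, the same predicates can be pushed into a streaming reader such as pyarrow's dataset API.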

---

**mobillight/messagesApi · /sevenpro/messages/apps.py**

- blob_id: c482974fcf95ec121c26800275426be227eee387 · content_id: 4c403fc2847c5ba4325e0423b1648de344bcec4d · directory_id: 4cdc989c98a4217515456fc31c5bd622b2217be6
- snapshot_id: 9346b52098e274cf1198131f040cabea5d9db24f · revision_id: 728f082db3756bb817c95dfa61cba7f3211016ac · branch_name: refs/heads/master
- detected_licenses: [] · license_type: no_license
- visit_date: 2021-09-23T07:48:02.215101 · revision_date: 2020-01-16T07:06:15 · committer_date: 2020-01-16T09:16:27
- github_id: 234,257,355 · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: 2021-09-22T18:24:19 · gha_created_at: 2020-01-16T07:07:45 · gha_language: Python
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 150 · extension: py
- content:

```python
from django.apps import AppConfig
class MessagesConfig(AppConfig):
name = 'messages'
label = 'users_messages'
verbose_name = 'Messages'
```

- authors: ["alexey.babarykin@cruxlab.com"] · author_id: alexey.babarykin@cruxlab.com

---

**ioo11/survey · /startsurvey/trash_forms.py**

- blob_id: b956dcc45c3d0fef7b876d58b9ec71dc50767e7a · content_id: 2f3eb674d756c5c64181d7918eecbd95e964e214 · directory_id: a65100d799656b196c1288dc0fa7e24b6ce81d27
- snapshot_id: a612f2baa9d5e824a2db0d98876235a8cf7846d8 · revision_id: 6a038bdca78d806b5c4ee2bc1b612158f8b6eb8c · branch_name: refs/heads/master
- detected_licenses: [] · license_type: no_license
- visit_date: 2021-01-23T02:05:51.555431 · revision_date: 2017-03-23T15:16:47 · committer_date: 2017-03-23T15:16:47
- github_id: 85,964,946 · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 3,466 · extension: py
- content:

```python
from django import forms
from .models import Test, Question, SelectedRadioAnswer
class TestForm(forms.Form):
def __init__(self, name='', questions=[], *args, **kwargs):
super(TestForm, self).__init__()
QuestionFormset = forms.formset_factory(forms.CheckboxSelectMultiple, extra=0)
formset = QuestionFormset(initial={'choise':questions})
# for i, question in enumerate(questions):
# self.fields['question_%s' % i] = QuestionForm()
class QuestionForm(forms.Form):
def __init__(self, text='', answers=[], *args, **kwargs):
super(QuestionForm, self).__init__()
# text = forms.CharField(max_length=250)
self.fields['text'] = text
for i, answer in enumerate(answers):
self.fields['answer_%s' % i] = AnswerForm()
class AnswerForm(forms.Form):
def __init__(self, text='', *args, **kwargs):
super(AnswerForm, self).__init__()
# text = forms.CharField(max_length=250)
self.fields['text']= text
class FieldsetWidget(forms.Widget):
def render(self, name, value, attrs=None):
return self.attrs['form_html']
class FieldsetField(forms.Field):
def __init__(self, fieldset, *args, **kwargs):
widget = FieldsetWidget(attrs={
'form_html':'<div>%s</div>' % fieldset
})
kwargs.update({
'widget': widget,
'required': False
})
super(FieldsetField, self).__init__(*args, **kwargs)
# class TestForm(forms.Form):
# def __init__(self, name='', questions=[], *args, **kwargs):
# super(TestForm, self).__init__()
#
# # InlineFormSet = forms.formset_factory(QuestionForm, extra=0)
# # formset = InlineFormSet(prefix='formset', initial=questions)
# # self.fields['questions'] = FieldsetField(fieldset=formset, label=name)
# self.fields['text'] = forms.BooleanField(label=name)
# for i, question in enumerate(questions):
# # self.fields['question_%s' % i] = FieldsetField(fieldset=formset, label='test_form')
# self.fields['question_%s' % i] = FieldsetField(fieldset=question, label=question.get_label())
#
#
#
# class QuestionForm(forms.Form):
# def __init__(self, text='question', answers=[], *args, **kwargs):
# super(QuestionForm, self).__init__()
# self.text = text
# self.fields['answers'] = FieldsetField(fieldset=forms.CheckboxSelectMultiple(choices=answers))
# # self.fields['text'] = forms.CharField(max_length=30)
# # InlineFormSet = forms.formset_factory(AnswerForm)
# # formset = InlineFormSet(prefix='formset', initial=answers)
# # self.fields['answer'] = FieldsetField(fieldset=formset, label=text)
# # for i, answer in enumerate(answers):
# # # self.fields['answer_%s' % i] = FieldsetField(fieldset=formset, label='question_form')
# # self.fields['answer_%s' % i] = forms.BooleanField(label=answer.get_label())
# # # self.fields['answer_%s' % i] = FieldsetField(fieldset=answer, label=answer.get_label())
#
# def get_label(self):
# return self.text
#
#
# class AnswerForm(forms.Form):
# def __init__(self, text='answer', *args, **kwargs):
# super(AnswerForm, self).__init__()
# self.text = forms.BooleanField(label=text, required=False)
# # self.fields['text']= text
# def get_label(self):
# return self.text.label
```

- authors: ["lexxtwolexx@gmail.com"] · author_id: lexxtwolexx@gmail.com

---

**resilientred/skaffold · /django/admin.py**

- blob_id: 2baddaf2ccdf5370fad87703959844be2213d220 · content_id: 2b714a93d575f6a4ffca341d23ba232790ad46b9 · directory_id: 3adce822439943250c1a1578cb9edd285bfaf0ce
- snapshot_id: feceb71acfa9183db2866f0c00904f4c6e00b38b · revision_id: 0d705d3907bc05e781141f62002a981683813658 · branch_name: refs/heads/master
- detected_licenses: ["MIT", "Apache-2.0"] · license_type: permissive
- visit_date: 2021-05-02T01:42:43.529831 · revision_date: 2015-07-14T05:34:36 · committer_date: 2015-07-14T05:34:36
- github_id: 120,873,685 · star_events_count: 1 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: 2018-02-09T07:40:31 · gha_created_at: 2018-02-09T07:40:31 · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 158 · extension: py
- content:

```python
from django.contrib import admin
import models
{%% for model_name in all_models %%}
admin.site.register(models.{{{ model_name|capitalize }}})
{%% endfor %%}
```

- authors: ["dxdstudio@gmail.com"] · author_id: dxdstudio@gmail.com

---

**sarthak-2019/ROS--Navigation-Perception-Identification-Pick-Place · /build/moveit/moveit_ros/moveit_servo/catkin_generated/pkg.develspace.context.pc.py**

- blob_id: ec21a20054eb4224c7042040ebd63cd603f06293 · content_id: d128c85b80c72738f8380cd6078e88342ce607b3 · directory_id: f8f7f7a9cda48467690c91e576d5ed994fd36b4c
- snapshot_id: cd7d5a0a46f418d8422d98903a4caf1ea45956ad · revision_id: be462402874e5d07a6d553beb16ff7360efe0bd9 · branch_name: refs/heads/main
- detected_licenses: [] · license_type: no_license
- visit_date: 2023-06-10T11:19:57.172690 · revision_date: 2021-06-30T05:51:09 · committer_date: 2021-06-30T05:51:09
- github_id: 379,182,669 · star_events_count: 1 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 782 · extension: py
- content:

```python
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/sarthak/catkin_ws/src/moveit/moveit_ros/moveit_servo/include;/usr/include/eigen3".split(';') if "/home/sarthak/catkin_ws/src/moveit/moveit_ros/moveit_servo/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "control_msgs;control_toolbox;geometry_msgs;moveit_msgs;moveit_ros_planning_interface;rosparam_shortcuts;sensor_msgs;std_msgs;std_srvs;tf2_eigen;trajectory_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lpose_tracking;-lmoveit_servo_cpp_api".split(';') if "-lpose_tracking;-lmoveit_servo_cpp_api" != "" else []
PROJECT_NAME = "moveit_servo"
PROJECT_SPACE_DIR = "/home/sarthak/catkin_ws/devel"
PROJECT_VERSION = "1.0.7"
```

- authors: ["sarthak2019fractal@gmail.com"] · author_id: sarthak2019fractal@gmail.com

---

**theclonedude/Scraping_BeautifulSoup_phantomjs · /scrapers/seria_z_net.py**

- blob_id: e4c869e731d1c53899e7bdd15cc1ce7582ade55a · content_id: 48bd9a16c5f04339b89321ef9bdf39120c202b3f · directory_id: 0566cf76b456518875edecece15e763a36a4795f
- snapshot_id: 684b1f7a993e0d2555daa7a5455cf19bd29b0b1b · revision_id: faf653feae46c21a72d13b2123cdebdb2f7c05d8 · branch_name: refs/heads/master
- detected_licenses: [] · license_type: no_license
- visit_date: 2023-03-16T19:36:14.867361 · revision_date: 2018-06-14T14:21:02 · committer_date: 2018-06-14T14:21:02
- github_id: null · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 3,086 · extension: py
- content:

```python
# coding=utf-8
from sandcrawler.scraper import ScraperBase
from sandcrawler.scraper import VideoCaptureMixin, SimpleScraperBase
import re
import json
class SeriaZNet(SimpleScraperBase, VideoCaptureMixin):
BASE_URL = 'http://seria-z.net'
def setup(self):
self.register_scraper_type(ScraperBase.SCRAPER_TYPE_OSP)
self.search_term_language = 'rus'
# self.requires_webdriver = ('parse',)
self.register_media(ScraperBase.MEDIA_TYPE_TV)
self.register_media(ScraperBase.MEDIA_TYPE_FILM)
self.register_url(ScraperBase.URL_TYPE_SEARCH, self.BASE_URL)
self.register_url(ScraperBase.URL_TYPE_LISTING, self.BASE_URL)
def _fetch_no_results_text(self):
return u'Ничего не найдено'
def _fetch_search_url(self, search_term, media_type=None, start=1):
self.start = start
self.search_term = search_term
return self.BASE_URL + '/island/{}?keyword={}'.format(start, search_term)
def _fetch_next_button(self, soup):
link = None
try:
link = soup.find('a', text=u'»')['href']
except TypeError:
pass
return link if link else None
def _parse_search_results(self, soup):
no_results_text = self._fetch_no_results_text()
if no_results_text and unicode(soup).find(no_results_text) >= 0:
return self.submit_search_no_results()
self._parse_search_result_page(soup)
self.start += 1
next_button_link = self._fetch_search_url(self.search_term, start=self.start)
if next_button_link and self.can_fetch_next():
self._parse_search_results(
self.get_soup(
next_button_link
)
)
def _parse_search_result_page(self, soup):
for link in soup.find_all('a', itemprop='url'):
self.submit_search_result(
link_title=link['title'],
link_url=link.href
)
def _video_player_ids(self):
return ('playerarea',)
def _video_player_classes(self):
return ()
def _get_playlist(self, packet):
return None
def parse(self, page_url, **extra):
soup = self.get_soup(page_url)
index_page_title = self.util.get_page_title(soup)
script_text = soup.select_one('div.leftside script').text
hash_text = re.search("""hash = \'(.*)\'; globals.player_type""", script_text)
if hash_text:
hash_text = hash_text.group(1)
season_id = re.search("""season_id = \'(.*)\'; globals.hash""", script_text)
if season_id:
season_id = season_id.group(1)
play_list_soup = json.loads(self.get_soup('http://seria-z.net/upp/player/{}/{}/plfl.txt'.format(hash_text, season_id)).text)
play_list = play_list_soup['playlist']
for url in play_list:
self.submit_parse_result(
index_page_title=index_page_title,
link_title=url['comment'],
link_url=url['file'],
)
```

- authors: ["stryokka@gmail.com"] · author_id: stryokka@gmail.com

---

**ethaniz/tensorflow-the-master-way · /01_california_housing/single_feature.py**

- blob_id: 92a8563bcdcdf2c93edc2aa07ae6e7ffd8119716 · content_id: 4e362816b95ddf209fcafa85a09df5fe87809c25 · directory_id: 97b5af6650e391bd937d49c1580462dee990d4bc
- snapshot_id: 67a293328cd62abdd82bf8ac7537f7e2e6f392a0 · revision_id: 325fa5d00100d44f16bee76543dab2a2c76d0a85 · branch_name: refs/heads/master
- detected_licenses: [] · license_type: no_license
- visit_date: 2020-03-18T11:21:57.809957 · revision_date: 2018-06-08T03:16:23 · committer_date: 2018-06-08T03:16:23
- github_id: 134,666,748 · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 2,934 · extension: py
- content:

```python
# -*- coding:utf8 -*-
import math
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.python.data import Dataset
from sklearn import metrics
import matplotlib.pyplot as plt
tf.logging.set_verbosity(tf.logging.ERROR)
california_housing_dataframe = pd.read_csv(
"https://storage.googleapis.com/mledu-datasets/california_housing_train.csv",
sep=","
)
# Unlike np.random.shuffle, np.random.permutation returns a new object instead of shuffling in place
california_housing_dataframe = california_housing_dataframe.reindex(
np.random.permutation(california_housing_dataframe.index)
)
california_housing_dataframe['median_house_value'] /= 1000
#print(california_housing_dataframe)
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
features = {key: np.array(value) for key, value in dict(features).items()}
ds = Dataset.from_tensor_slices((features, targets))
ds = ds.batch(batch_size).repeat(num_epochs)
if shuffle:
ds = ds.shuffle(buffer_size=10000)
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
periods = 10
step_per_period = steps / periods
my_feature = input_feature
my_feature_data = california_housing_dataframe[[my_feature]]
my_label = 'median_house_value'
targets = california_housing_dataframe[my_label]
feature_columns = [tf.feature_column.numeric_column(my_feature)]
training_input_fn = lambda: my_input_fn(my_feature_data, targets, batch_size, shuffle=True, num_epochs=None)
prediction_input_fn = lambda: my_input_fn(my_feature_data, targets, 1, False, 1)
my_optimizer = tf.train.GradientDescentOptimizer(learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
linear_regressor = tf.estimator.LinearRegressor(
feature_columns=feature_columns,
optimizer=my_optimizer
)
print("Training model...")
print("RMSE (on training data):")
root_mean_squared_errors = []
for period in range(0, periods):
linear_regressor.train(
input_fn=training_input_fn,
steps=step_per_period
)
predictions = linear_regressor.predict(input_fn=prediction_input_fn)
predictions = np.array([item['predictions'][0] for item in predictions])
root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(predictions, targets)
)
print("period %02d: %0.2f" % (period, root_mean_squared_error))
root_mean_squared_errors.append(root_mean_squared_error)
print("Model training finished!")
#plt.subplot(1, 2, 2)
plt.ylabel("RMSE")
plt.xlabel("Periods")
plt.tight_layout()
plt.plot(root_mean_squared_errors)
plt.show()
train_model(
learning_rate=0.00003,
steps=500,
batch_size=5
)
```

- authors: ["ethan@yichendeMacBook-Pro.local"] · author_id: ethan@yichendeMacBook-Pro.local

---

**hlah/t2_orgb · /tests/configs/int_alus_4/systems/cpus/MyO3CPU.py**

- blob_id: c8c18d9577748710661843f93193ab7a9256315f · content_id: f583542bdaa8da8374cad74fe5786de3e54fe251 · directory_id: 84c59080e410c3b4af03f191f3964bb325f8bdc0
- snapshot_id: daffea466c52a4986c68b4b3095eb7ed6f959266 · revision_id: 209cc5268244ceea5c6fb6881b717b8b5efc2d97 · branch_name: refs/heads/master
- detected_licenses: [] · license_type: no_license
- visit_date: 2020-04-08T03:25:57.670644 · revision_date: 2018-11-26T22:28:45 · committer_date: 2018-11-26T22:28:45
- github_id: 158,975,408 · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 9,851 · extension: py
- content:

```python
# -*- coding: utf-8 -*-
######################################################################
######################################################################
##
## CPU configuration file
##
## It first defines a series of classes that represent the functional
## units. These classes describe the type of operation each functional
## unit executes (opClass), the latency or time the operation takes to
## complete (opLat), and the number of units of that type (count). It
## is also possible to model whether or not the functional unit is
## pipelined (the pipelined variable, True by default).
##
## Next, the MyO3CPU class instantiates the pool of functional units
## (MyFUPool) defined above and sets the remaining processor
## parameters. It is possible to change the width of the pipeline
## stages (the *Width variables), the latency of each stage (e.g. the
## variable fetchToDecodeDelay = 3 models a pipeline with Fetch split
## into 3 stages), and the number of slots in the buffers
## (*BufferSize, *QueueSize, *Entries).
##
## The instruction flow between the pipeline stages is:
##
## Fetch -> Decode -> Rename -> Dispatch,Issue,Execute,Writeback -> Commit.
##
## NOTE: The Dispatch, Issue, Execute and Writeback stages are grouped
## into a single stage, called IEW here.
##
######################################################################
######################################################################
import m5
from m5.objects import *
from m5.objects import BaseCache
from m5.objects import DDR3_1600_8x8
from m5.objects import DerivO3CPU
from m5.objects import System
from m5.objects import SystemXBar
###############################################################################
## Functional units
##
## Each class specifies one type of functional unit.
##
## The opList field specifies the operation types that the FU executes and
## the count field specifies the number of units of that type.
###############################################################################
class MyIntALU(FUDesc):
opList = [ OpDesc(opClass='IntAlu') ]
count = 4
class MyIntMultDiv(FUDesc):
opList = [ OpDesc(opClass='IntMult', opLat=3, pipelined=True),
OpDesc(opClass='IntDiv', opLat=16, pipelined=False) ]
# DIV and IDIV instructions in x86 are implemented using a loop which
# issues division microops. The latency of these microops should really be
# one (or a small number) cycle each since each of these computes one bit
# of the quotient.
if buildEnv['TARGET_ISA'] in ('x86'):
opList[1].opLat=1
count = 1
class My_FP_ALU(FUDesc):
opList = [ OpDesc(opClass='FloatAdd', opLat=2),
OpDesc(opClass='FloatCmp', opLat=2),
OpDesc(opClass='FloatCvt', opLat=2) ]
count = 1
class My_FP_MultDiv(FUDesc):
opList = [ OpDesc(opClass='FloatMult', opLat=4),
OpDesc(opClass='FloatDiv', opLat=12, pipelined=False),
OpDesc(opClass='FloatSqrt', opLat=24, pipelined=False) ]
count = 1
class My_SIMD_Unit(FUDesc):
opList = [ OpDesc(opClass='SimdAdd', opLat=2),
OpDesc(opClass='SimdAddAcc', opLat=2),
OpDesc(opClass='SimdAlu', opLat=2),
OpDesc(opClass='SimdCmp', opLat=2),
OpDesc(opClass='SimdCvt', opLat=2),
OpDesc(opClass='SimdMisc', opLat=2),
OpDesc(opClass='SimdMult', opLat=2),
OpDesc(opClass='SimdMultAcc', opLat=2),
OpDesc(opClass='SimdShift', opLat=2),
OpDesc(opClass='SimdShiftAcc', opLat=2),
OpDesc(opClass='SimdSqrt', opLat=2),
OpDesc(opClass='SimdFloatAdd', opLat=2),
OpDesc(opClass='SimdFloatAlu', opLat=2),
OpDesc(opClass='SimdFloatCmp', opLat=2),
OpDesc(opClass='SimdFloatCvt', opLat=2),
OpDesc(opClass='SimdFloatDiv', opLat=2),
OpDesc(opClass='SimdFloatMisc', opLat=2),
OpDesc(opClass='SimdFloatMult', opLat=2),
OpDesc(opClass='SimdFloatMultAcc', opLat=2),
OpDesc(opClass='SimdFloatSqrt', opLat=2) ]
count = 1
class MyMemUnit(FUDesc):
opList = [ OpDesc(opClass='MemRead'),
OpDesc(opClass='MemWrite'),
OpDesc(opClass='IprAccess', opLat = 2, pipelined = False) ]
count = 1
class MyFUPool(FUPool):
FUList = [ MyIntALU(), MyIntMultDiv(), My_FP_ALU(),
My_FP_MultDiv(), My_SIMD_Unit(),
MyMemUnit() ]
############################################################
## Processor
############################################################
class MyO3CPU(DerivO3CPU):
############################################################
## Branch predictor
############################################################
branchPred = LocalBP() # Branch Predictor
############################################################
## Latencies between the different pipeline stages.
## Can be used to simulate deeper pipelines.
############################################################
#### Forward latencies
fetchToDecodeDelay = 3 # Fetch to decode delay
decodeToRenameDelay = 2 # Decode to rename delay
renameToIEWDelay = 2 # Rename to Issue/Execute/Writeback delay
renameToROBDelay = 2 # Rename to reorder buffer delay
issueToExecuteDelay = 2 # Issue to execute delay internal to the IEW stage
iewToCommitDelay = 2 # Issue/Execute/Writeback to commit delay
#### Backward latencies
decodeToFetchDelay = 1 # Decode to fetch delay
renameToFetchDelay = 1 # Rename to fetch delay
renameToDecodeDelay = 1 # Rename to decode delay
iewToFetchDelay = 1 # Issue/Execute/Writeback to fetch delay
iewToDecodeDelay = 1 # Issue/Execute/Writeback to decode delay
iewToRenameDelay = 1 # Issue/Execute/Writeback to rename delay
commitToFetchDelay = 1 # Commit to fetch delay
commitToDecodeDelay = 1 # Commit to decode delay
commitToRenameDelay = 1 # Commit to rename delay
commitToIEWDelay = 1 # Commit to Issue/Execute/Writeback delay
############################################################
## Sizes of the pipeline structures. They affect how many
## instructions can be held in the buffers.
############################################################
fetchBufferSize = 64 # Fetch buffer size in bytes
fetchQueueSize = 32 # Fetch queue size in micro-ops per thread
numIQEntries = 32 # Number of instruction queue entries
numROBEntries = 96 # Number of reorder buffer entries
LQEntries = 20 # Number of load queue entries
SQEntries = 12 # Number of store queue entries
numPhysIntRegs = 96 # Number of physical integer registers
numPhysFloatRegs = 96 # Number of physical floating point registers
numRobs = 1 # Number of Reorder Buffers;
############################################################
## Widths of the pipeline structures. They affect how many
## instructions are processed per cycle in each stage.
############################################################
fetchWidth = 2 # Fetch width
decodeWidth = 2 # Decode width
renameWidth = 2 # Rename width
dispatchWidth = 2 # Dispatch width
issueWidth = 2 # Issue width
wbWidth = 2 # Writeback width
commitWidth = 2 # Commit width
squashWidth = 16 # Squash width
fuPool = MyFUPool() # Functional Unit pool
############################################################
## Other parameters. Suggestion: do not touch.
############################################################
LSQDepCheckShift = 4 # Number of places to shift addr before check
LSQCheckLoads = True # Should dependency violations be checked for
# loads & stores or just stores
store_set_clear_period = 250000 # Number of load/store insts before
# the dep predictor should be invalidated
LFSTSize = 1024 # Last fetched store table size
SSITSize = 1024 # Store set ID table size
# most ISAs don't use condition-code regs # so default is 0
_defaultNumPhysCCRegs = 0
# For x86, each CC reg is used to hold only a subset of the flags, so we
# need 4-5 times the number of CC regs as physical integer regs to be
# sure we don't run out. In typical real machines, CC regs are not
# explicitly renamed (it's a side effect of int reg renaming),
# so they should never be the bottleneck here.
_defaultNumPhysCCRegs = numPhysIntRegs * 5
numPhysCCRegs = _defaultNumPhysCCRegs # Number of physical cc registers
activity = 0 # Initial count
cacheStorePorts = 1 # Cache Store Ports
trapLatency = 10 # Trap latency
fetchTrapLatency = 1 # Fetch trap latency
backComSize = 32 # Time buffer size for backwards communication
forwardComSize = 32 # Time buffer size for forward communication
smtNumFetchingThreads = 1 # SMT Number of Fetching Threads
smtFetchPolicy = 'SingleThread' # SMT Fetch policy
smtLSQPolicy = 'Partitioned' # SMT LSQ Sharing Policy
smtLSQThreshold = 100 # SMT LSQ Threshold Sharing Parameter
smtIQPolicy = 'Partitioned' # SMT IQ Sharing Policy
smtIQThreshold = 100 # SMT IQ Threshold Sharing Parameter
smtROBPolicy = 'Partitioned' # SMT ROB Sharing Policy
smtROBThreshold = 100 # SMT ROB Threshold Sharing Parameter
smtCommitPolicy = 'RoundRobin' # SMT Commit Policy
needsTSO = True # Enable TSO Memory model
```

- authors: ["hlah_@hotmail.com"] · author_id: hlah_@hotmail.com

---

**fcurella/helpothers · /listings/migrations/0001_initial.py**

- blob_id: 1dc1b3c42fc07a6111c8afcd5ffc819647e66844 · content_id: 1444746d06c687eb47e8772ea5578ace9c18a604 · directory_id: 0d91f139b73b9151e3259d63393d24f18cf62e43
- snapshot_id: ea117dac964d55bd06f299c0a1dec8d32baabd14 · revision_id: 1c830a1ff11370a572d05289eb75c81af89cff0b · branch_name: refs/heads/master
- detected_licenses: ["Apache-2.0"] · license_type: permissive
- visit_date: 2020-12-28T17:11:30.111592 · revision_date: 2015-10-31T18:51:49 · committer_date: 2015-10-31T18:51:49
- github_id: 45,277,530 · star_events_count: 0 · fork_events_count: 0
- gha_license_id: Apache-2.0 · gha_event_created_at: 2020-05-27T22:54:12 · gha_created_at: 2015-10-30T21:30:21 · gha_language: CSS
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 3,156 · extension: py
- content:

```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import geoposition.fields
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='GatheringCenter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('location_name', models.CharField(default=b'', help_text='If this center has any special name', max_length=100, blank=True)),
('address', models.CharField(max_length=255)),
('geoposition', geoposition.fields.GeopositionField(max_length=42, null=True, blank=True)),
('description', models.TextField(default=b'', help_text='Any additional information about this specific gathering center', blank=True)),
('city', models.ForeignKey(to='listings.City')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=100)),
],
),
migrations.CreateModel(
name='Resource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('name', models.CharField(max_length=255)),
('description', models.TextField(default=b'', blank=True)),
('url', models.URLField(default=b'', max_length=500, blank=True)),
('sticky', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='city',
name='region',
field=models.ForeignKey(blank=True, to='listings.Region', null=True),
),
migrations.AlterUniqueTogether(
name='city',
unique_together=set([('name', 'region')]),
),
]
```

- authors: ["zan.anderle@gmail.com"] · author_id: zan.anderle@gmail.com

---

**LarkEaglin/pretty-codex · /venv/Lib/site-packages/tests/test_level4/test_host_context.py**

- blob_id: 6ab3c319e1d403630bcd3263a0f20be489df95a9 · content_id: 76bcdcb091632c44a80ab8a5cab342ad7a73dd8d · directory_id: 11fff64cfaee383c222fad281d4d8a7897c373d3
- snapshot_id: b40c91308909e8cdba25ddf9efb2d36f8fd086a3 · revision_id: d5caabbd4dbfa11665c884df8b2fc70ad1cdd82f · branch_name: refs/heads/master
- detected_licenses: ["MIT"] · license_type: permissive
- visit_date: 2022-10-25T11:02:41.928065 · revision_date: 2019-03-13T03:32:01 · committer_date: 2019-03-13T03:32:01
- github_id: 175,336,023 · star_events_count: 0 · fork_events_count: 1
- gha_license_id: MIT · gha_event_created_at: 2022-10-07T19:02:17 · gha_created_at: 2019-03-13T03:01:47 · gha_language: Python
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 657 · extension: py
- content:

```python
"""Test host context selectors."""
from __future__ import unicode_literals
from .. import util
class TestHostContext(util.TestCase):
"""Test host context selectors."""
def test_host_context(self):
"""Test host context (not supported)."""
markup = """<h1>header</h1><div><p>some text</p></div>"""
self.assert_selector(
markup,
":host-context(h1, h2)",
[],
flags=util.HTML
)
class TestHostContextQuirks(TestHostContext):
"""Test host context selectors with quirks."""
def setUp(self):
"""Setup."""
self.purge()
self.quirks = True
```

- authors: ["lrkdxn@gmail.com"] · author_id: lrkdxn@gmail.com

---

**wanimaru47/LinearProgramming · /Introduction_of_Mathematical_Programming/ch01/Multi-period_Planning_Problem.py**

- blob_id: ab9e8fded2f72aa7db2905169469aec2ac10aa34 · content_id: 78beac64a839769076fbba353017c7c72f1a88fc · directory_id: f03d06461b6bed40ae6cf4e7fff2696ad6fad96a
- snapshot_id: 6b491e831f259669b91b37f578c8f01cd5e73427 · revision_id: e0c403bb0a483062370a98ae549baaecf8e4a7e7 · branch_name: refs/heads/master
- detected_licenses: [] · license_type: no_license
- visit_date: 2021-01-01T04:30:57.996514 · revision_date: 2016-04-25T13:17:26 · committer_date: 2016-04-25T13:17:26
- github_id: 56,852,836 · star_events_count: 1 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 1,466 · extension: py
- content:

```python
# 1.1.2 Multi-period Planning Problem
import pulp
# N: product variety
N = 2
# T: month
T = 3
# the number of raw materials per product
A = [[2, 7],
[5, 3]]
# The shipment at each month
B = [[30, 20],
[60, 50],
[80, 90]]
# The number of available materials in each month
C = [[920, 790],
[750, 600],
[500, 400]]
# Production Cost and Inventory Cost
D = [[75, 50],
[8, 7]]
# arrange Inventory Plan
E1 = [[1 for t in range(T - 1)] + [0] for n in range(N)]
E2 = [[0] + [1 for t in range(T - 1)] for n in range(N)]
prog = pulp.LpProblem('Multi-period Planning Problem', pulp.LpMinimize);
x = pulp.LpVariable.dicts('X', (range(N), range(T)), 0, None, pulp.LpInteger)
y = pulp.LpVariable.dicts('Y', (range(N), range(T)), 0, None, pulp.LpInteger)
tmp_x = [[x[row][i] for i in range(T)] for row in range(N)]
tmp_y = [[y[row][i] for i in range(T)] for row in range(N)]
prog += pulp.lpDot(D[0], tmp_x) + pulp.lpDot(D[1], tmp_y)
for row_t in range(T):
for row_i in range(N):
prog += pulp.lpDot(A[row_i], [x[i][row_t] for i in range(N)]) <= C[row_t][row_i]
for row_t in range(T):
for row_i in range(N):
prog += x[row_i][row_t] + E2[row_i][row_t] * y[row_i][(T + row_t - 1) % T] - E1[row_i][row_t] * y[row_i][row_t] == B[row_t][row_i]
print(prog)
prog.solve()
for t in range(T):
for n in range(N):
print(x[n][t].varValue)
for t in range(T):
for n in range(N):
print(y[n][t].varValue)
```

- authors: ["wanimaru47@gmail.com"] · author_id: wanimaru47@gmail.com

---

**wooden-spoon/relational-ERM · /src/relational_erm/models/multilabel_node_classification_template.py**

- blob_id: 943379ec5d1bee9dff951f30490f631db5c43d9d · content_id: dae0d9823bca1c1a7d22814de71daf9d09be3563 · directory_id: f8841ef9797227ac93d8809e2e59aca0d7f3ab76
- snapshot_id: 1f2fa0b76442384f90851412d36c7cb3911577f3 · revision_id: 28b16da9fb64852a0302d143d4857728bd08e2eb · branch_name: refs/heads/master
- detected_licenses: ["MIT"] · license_type: permissive
- visit_date: 2021-06-22T19:11:03.721247 · revision_date: 2020-10-17T01:26:48 · committer_date: 2020-10-17T01:26:48
- github_id: 136,656,022 · star_events_count: 15 · fork_events_count: 2
- gha_license_id: MIT · gha_event_created_at: 2018-10-23T00:04:52 · gha_created_at: 2018-06-08T19:00:55 · gha_language: Python
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 15,965 · extension: py
- content:

```python
import tensorflow as tf
from . import metrics
def _make_metrics(labels, predictions, weights):
assert weights is not None
accuracy = tf.metrics.accuracy(
labels=labels,
predictions=predictions,
weights=tf.expand_dims(weights, -1))
precision = tf.metrics.precision(
labels=labels,
predictions=predictions,
weights=tf.expand_dims(weights, -1))
recall = tf.metrics.recall(
labels=labels,
predictions=predictions,
weights=tf.expand_dims(weights, -1))
macro_f1 = metrics.macro_f1(
labels=labels,
predictions=predictions,
weights=weights)
return {
'accuracy': accuracy,
'precision': precision,
'recall': recall,
'macro_f1': macro_f1
}
def _make_dataset_summaries(features, mode):
""" Make summaries for dataset (number of edges and vertices seen so far).
By default, we only update those during training (as they represent the number
of training samples seen).
Parameter
---------
features: the features passed into the estimator.
mode: the estimator mode
"""
if mode != tf.estimator.ModeKeys.TRAIN:
return
with tf.variable_scope(None, 'dataset_summaries'):
total_count_vertex = tf.get_variable('total_count_vertex', shape=[], dtype=tf.int64,
initializer=tf.zeros_initializer(), trainable=False)
total_count_edges = tf.get_variable('total_count_edges', shape=[], dtype=tf.int64,
initializer=tf.zeros_initializer(), trainable=False)
update_vertex_count = total_count_vertex.assign_add(
tf.shape(features['vertex_index'], out_type=tf.int64)[0])
update_edge_count = total_count_edges.assign_add(
tf.shape(features['edge_list'], out_type=tf.int64)[0])
with tf.control_dependencies([update_vertex_count, update_edge_count]):
tf.summary.scalar('total_edges', total_count_edges, family='dataset')
tf.summary.scalar('total_vertex', total_count_vertex, family='dataset')
def _make_label_prediction_summaries(present_labels, present_pred_labels, split):
""" Make summaries for label prediction task.
Parameter
---------
present_labels: the labels present in the graph.
present_pred_labels: the predicted labels present in the graph.
split: for present labels, whether they are censored for testing.
"""
# split == 1 indicates insample, whereas split == 0 indicates out of sample.
# split == -1 denotes fake padded values.
split_insample = tf.expand_dims(tf.to_float(tf.equal(split, 1)), -1)
split_outsample = tf.expand_dims(tf.to_float(tf.equal(split, 0)), -1)
accuracy_batch_insample = metrics.batch_accuracy(
present_labels, present_pred_labels, split_insample,
name='accuracy_insample_batch')
kappa_batch_insample = metrics.batch_kappa(
present_labels, present_pred_labels, split_insample,
name='kappa_insample_batch'
)
accuracy_batch_outsample = metrics.batch_accuracy(
present_labels, present_pred_labels, split_outsample,
name='accuracy_outsample_batch'
)
kappa_batch_outsample = metrics.batch_kappa(
present_labels, present_pred_labels, split_outsample,
name='kappa_outsample_batch'
)
tf.summary.scalar('accuracy_batch_in', accuracy_batch_insample)
tf.summary.scalar('accuracy_batch_out', accuracy_batch_outsample)
tf.summary.scalar('kappa_batch_in', kappa_batch_insample)
tf.summary.scalar('kappa_batch_out', kappa_batch_outsample)
def _get_value(value_or_fn):
if callable(value_or_fn):
return value_or_fn()
else:
return value_or_fn
def _default_embedding_optimizer():
# embedding optimization
# word2vec decays linearly to a min learning rate (default: 0.0001), decreasing each "epoch"
# however, node2vec and deepwalk run only 1 "epoch" each
# learning_rate = tf.train.polynomial_decay(
# 10.,
# global_step,
# 100000,
# end_learning_rate=0.0001,
# power=1.0,
# cycle=False,
# name="Word2Vec_decay"
# )
# gensim word2vec default learning rate is 0.025
return tf.train.GradientDescentOptimizer(learning_rate=0.025)
def _default_global_optimizer():
# return tf.train.RMSPropOptimizer(learning_rate=5e-4, momentum=0.9)
global_step = tf.train.get_or_create_global_step()
# learning_rate = tf.train.polynomial_decay(
# 10.,
# global_step,
# 1000000,
# end_learning_rate=0.01,
# power=1.0,
# cycle=False,
# name="global_linear_decay"
# )
learning_rate = 1.
return tf.train.GradientDescentOptimizer(learning_rate)
def _make_polyak_averaging(embeddings, features, label_logits, mode, polyak, make_label_logits, params):
batch_size = params['batch_size']
decay = 0.99
if batch_size is not None:
# Adjust decay for batch size to take into account the minibatching.
decay = decay ** batch_size
label_ema = tf.train.ExponentialMovingAverage(decay=decay)
if polyak:
# predict logits by replacing the model params by a moving average
def label_ema_getter(getter, name, *args, **kwargs):
var = getter(name, *args, **kwargs)
ema_var = label_ema.average(var)
return ema_var # if ema_var else var
# create the running average variable
label_ema_op = label_ema.apply(tf.global_variables("label_logits"))
with tf.control_dependencies([label_ema_op]):
with tf.variable_scope("label_logits", reuse=True, custom_getter=label_ema_getter):
label_logits_predict = make_label_logits(embeddings, features, mode, params)
else:
# no polyak averaging; default behaviour
label_logits_predict = label_logits
label_ema_op = tf.no_op(name='no_polyak_averaging')
return label_ema_op, label_logits_predict
def _make_embedding_variable(params):
embedding_variable_name = 'input_layer/vertex_index_embedding/embedding_weights'
all_embeddings = tf.get_variable(
embedding_variable_name,
shape=[params['num_vertices'], params['embedding_dim']],
dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=1 / params['embedding_dim']),
trainable=params.get('embedding_trainable', True))
if params.get('embedding_checkpoint', None) is not None:
tf.train.init_from_checkpoint(
params['embedding_checkpoint'],
{embedding_variable_name: all_embeddings})
return all_embeddings
def make_node_classifier(make_label_logits,
make_edge_logits,
make_label_pred_loss,
make_edge_pred_loss,
embedding_optimizer=None,
global_optimizer=None,
polyak=True,
pos_only_labels=True):
""" Creates a node classifier function from various parts.
Parameters
----------
make_label_logits: function (embeddings, features, mode, params) -> (logits),
which computes the label logits for each node.
make_edge_logits: function (embeddings, features, edge_list, edge_weights, params) -> (label_logits),
which computes the logits for each pair in edge_list.
make_label_pred_loss: function (label_logits, present_labels) -> (losses),
which computes the label prediction loss.
make_edge_pred_loss: function (embeddings, n_vert, el, w, params) -> (losses),
which computes the edge prediction loss.
embedding_optimizer: the optimizer (or a nullary function creating the optimizer) to use for the embedding variables.
global_optimizer: the optimizer (or a nullary function creating the optimizer) to use for the global variables.
polyak: bool, default True. If true, label predictions are made using an exponentially weighted moving average of
the global variables
pos_only_labels: bool, default False. If true, label predictions are trained using only vertices from the positive
sample
Returns
-------
node_classifier: function, to be passed as model_fn to a node classification tensorflow estimator
"""
if embedding_optimizer is None:
embedding_optimizer = _default_embedding_optimizer
if global_optimizer is None:
global_optimizer = _default_global_optimizer
def node_classifier(features, labels, mode, params):
""" The model function for the node classifier.
Parameters
----------
features: dictionary of graph attributes {edge list, weights, ids of sampled vertices},
and possibly additional vertex attributes
labels: dictionary of labels and friends. labels is tensor containing labels of the vertices in the sample
mode: the estimator mode in which this model function is invoked.
params: a dictionary of parameters.
Returns
-------
estimator_spec: the estimator spec for the given problem.
"""
vertex_index = features['vertex_index']
all_embeddings = _make_embedding_variable(params)
vertex_embedding_shape = tf.concat(
[tf.shape(vertex_index), [params['embedding_dim']]], axis=0,
name='vertex_embedding_shape')
# We flatten the vertex index prior to extracting embeddings
# to maintain compatibility with the input columns.
embeddings = tf.nn.embedding_lookup(all_embeddings, tf.reshape(vertex_index, [-1]))
embeddings = tf.reshape(embeddings, vertex_embedding_shape, name='vertex_embeddings_batch')
# Vertex Label Predictions
present_labels = labels['labels']
split = labels['split']
if pos_only_labels:
vert_is_positive = features['is_positive']
split = tf.where(tf.equal(vert_is_positive,1), split, -tf.ones_like(split))
with tf.variable_scope("label_logits"):
label_logits = make_label_logits(embeddings, features, mode, params)
# polyak averaging
label_ema_op, label_logits_predict = _make_polyak_averaging(
embeddings, features, label_logits, mode, polyak, make_label_logits, params)
predicted_labels = tf.cast(tf.greater(label_logits_predict, 0.), label_logits.dtype)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class_ids': predicted_labels,
'probabilities': tf.nn.sigmoid(label_logits_predict),
'label_logits': label_logits_predict,
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# label loss
with tf.name_scope('label_loss', values=[label_logits, present_labels, split]):
label_pred_loss = make_label_pred_loss(
label_logits, present_labels,
tf.maximum(split, 0)) # clip the split, as -1 represents padded values.
label_pred_size = tf.shape(label_logits)[-1]
label_pred_loss_normalized = tf.divide(label_pred_loss, tf.to_float(label_pred_size))
# label logits and DeepWalk style prediction
present_logits = label_logits_predict
present_pred_labels = metrics.oracle_predictions(present_labels, present_logits)
if mode == tf.estimator.ModeKeys.EVAL:
# Metrics
estimator_metrics = {}
with tf.variable_scope('metrics_insample'):
estimator_metrics.update({
k + '_insample': v
for k, v in _make_metrics(
present_labels,
present_pred_labels,
split).items()
})
with tf.variable_scope('metrics_outsample'):
estimator_metrics.update({
k + '_outsample': v
for k, v in _make_metrics(
present_labels,
present_pred_labels,
(1 - split)).items()
})
return tf.estimator.EstimatorSpec(
mode, loss=label_pred_loss, eval_metric_ops=estimator_metrics)
# subgraph structure
edge_list = features['edge_list']
weights = features['weights'] # should be {0., 1.}
if weights.shape[-1].value == 1:
weights = tf.squeeze(weights, axis=-1)
n_vert = tf.shape(features['vertex_index'])
# Edge predictions
edge_logits = make_edge_logits(embeddings, features, edge_list, weights, params)
# edge loss
with tf.name_scope('edge_loss', values=[edge_logits, edge_list, weights]):
edge_pred_loss = make_edge_pred_loss(edge_logits, n_vert, edge_list, weights, params)
edge_pred_size = tf.shape(edge_logits)[-1]
edge_pred_loss_normalized = tf.divide(edge_pred_loss, tf.to_float(edge_pred_size))
reg_loss = tf.losses.get_regularization_loss()
loss = label_pred_loss + edge_pred_loss + reg_loss
tf.summary.scalar('label_loss', label_pred_loss, family='loss')
tf.summary.scalar('label_loss_normalized', label_pred_loss_normalized, family='loss')
tf.summary.scalar('edge_loss', edge_pred_loss, family='loss')
tf.summary.scalar('edge_loss_normalized', edge_pred_loss_normalized, family='loss')
tf.summary.scalar('regularization_loss', reg_loss, family='loss')
# Summaries
_make_label_prediction_summaries(present_labels, present_pred_labels, split)
# edge prediction summaries
predicted_edges = tf.cast(tf.greater(edge_logits, 0.), edge_logits.dtype)
kappa_batch_edges = metrics.batch_kappa(
weights, predicted_edges,
tf.to_float(tf.not_equal(weights, -1)), # -1 weight indicates padded edges
name='kappa_edges_in_batch'
)
tf.summary.scalar('kappa_batch_edges', kappa_batch_edges)
# dataset summaries
_make_dataset_summaries(features, mode)
# gradient updates
if mode == tf.estimator.ModeKeys.TRAIN:
batch_size = params['batch_size'] if params['batch_size'] is not None else 1
embedding_vars = [v for v in tf.trainable_variables() if "embedding" in v.name]
global_vars = [v for v in tf.trainable_variables() if "embedding" not in v.name]
global_step = tf.train.get_or_create_global_step()
update_global_step = tf.assign_add(global_step, batch_size, name="global_step_update")
embedding_optimizer_value = _get_value(embedding_optimizer)
global_optimizer_value = _get_value(global_optimizer)
if len(embedding_vars) > 0:
embedding_update = embedding_optimizer_value.minimize(
loss, var_list=embedding_vars, global_step=None)
else:
embedding_update = tf.identity(0.) # meaningless
if len(global_vars) > 0:
global_update = global_optimizer_value.minimize(
loss, var_list=global_vars, global_step=None)
else:
global_update = tf.identity(0.)
with tf.control_dependencies([update_global_step]):
basic_train_op = tf.group(embedding_update, global_update)
if polyak:
# update moving average of parameters after each gradient step
label_ema_op._add_control_input(basic_train_op)
train_op = label_ema_op
else:
train_op = basic_train_op
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
return node_classifier
```

- authors: ["victorveitch@gmail.com"] · author_id: victorveitch@gmail.com

---

**smarthall/django-two-factor-auth · /two_factor/models.py**

- blob_id: 93e087b7f693c36ff5715732e59b91a7ec8fc247 · content_id: dcf753aac05320016c711e6751a659e944edfc16 · directory_id: fadef5f701c077644128d2006178c2d8a59c1eb8
- snapshot_id: 7080aab7a6e58158ba69d3859f53dfb32b10da08 · revision_id: 4d526fb31719637cae392718bb662081060f6187 · branch_name: refs/heads/master
- detected_licenses: ["MIT"] · license_type: permissive
- visit_date: 2021-01-15T10:58:39.286506 · revision_date: 2014-02-19T03:15:33 · committer_date: 2014-02-19T07:43:06
- github_id: null · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 2,328 · extension: py
- content:

```python
from binascii import unhexlify
import logging
from django.conf import settings
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_otp import Device
from django_otp.oath import totp
from django_otp.util import hex_validator, random_hex
from .gateways import make_call, send_sms
phone_number_validator = RegexValidator(
regex='^(\+|00)',
message=_('Please enter a valid phone number, including your country code '
'starting with + or 00.'),
)
PHONE_METHODS = (
('call', _('Phone Call')),
('sms', _('Text Message')),
)
def get_available_phone_methods():
methods = []
if getattr(settings, 'TWO_FACTOR_CALL_GATEWAY', None):
methods.append(('call', _('Phone Call')))
if getattr(settings, 'TWO_FACTOR_SMS_GATEWAY', None):
methods.append(('sms', _('Text Message')))
return methods
def get_available_methods():
methods = [('generator', _('Token generator'))]
methods.extend(get_available_phone_methods())
return methods
logger = logging.getLogger(__name__)
class PhoneDevice(Device):
"""
Model with phone number and token seed linked to a user.
"""
number = models.CharField(max_length=16,
validators=[phone_number_validator],
verbose_name=_('number'))
key = models.CharField(max_length=40,
validators=[hex_validator()],
default=lambda: random_hex(20),
help_text="Hex-encoded secret key")
method = models.CharField(max_length=4, choices=PHONE_METHODS,
verbose_name=_('method'))
@property
def bin_key(self):
return unhexlify(self.key.encode())
def verify_token(self, token):
for drift in range(-5, 1):
if totp(self.bin_key, drift=drift) == token:
return True
return False
def generate_challenge(self):
"""
Sends the current TOTP token to `self.number` using `self.method`.
"""
token = '%06d' % totp(self.bin_key)
if self.method == 'call':
make_call(device=self, token=token)
else:
send_sms(device=self, token=token)
```

- authors: ["bouke@webatoom.nl"] · author_id: bouke@webatoom.nl

---

**rey-rubio/leads-manager · /leadmanager/accounts/serializers.py**

- blob_id: 2ab03535f0d53671a19e46cc4309c1ad53884dcd · content_id: 02c2c28b14b4111b9efee316dde234b95598aaa1 · directory_id: bfae7dcb4cba2d9f034927c989e30b2cf89cc5c1
- snapshot_id: c67c19a1ab4e1afd4b62e02e8163e0e67b2557ec · revision_id: 41754f9990ab5c948e841a589a5ba7dc584a7cf9 · branch_name: refs/heads/master
- detected_licenses: [] · license_type: no_license
- visit_date: 2023-01-09T16:42:01.224234 · revision_date: 2020-01-03T02:45:19 · committer_date: 2020-01-03T02:45:19
- github_id: 230,825,516 · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: 2023-01-07T13:16:27 · gha_created_at: 2019-12-30T01:24:27 · gha_language: Python
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 1,054 · extension: py
- content:

```python
from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
# User Serializer
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email')
# Register Serializer
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = User.objects.create_user(
validated_data['username'], validated_data['email'], validated_data['password'])
return user
# Login Serializer
class LoginSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("Sorry, Incorrect Credentials")
```

- authors: ["reynerio.r.rubio@gmail.com"] · author_id: reynerio.r.rubio@gmail.com

---

**tzuravner/Argus_Python · /venv/Scripts/pip3-script.py**

- blob_id: d98e0aba480f0750ac88d41974c244ee064483e8 · content_id: 213f347c644f9c0d6604cdef79d49daaab8b6056 · directory_id: e0345c85abb33e59c0dd6cfa9274d9794d0db898
- snapshot_id: a6e79f2a037b9caa7e30a2f85e981a1539af23c1 · revision_id: 8510ee87b7b5a9a4c8073f0836668beea9acbf7b · branch_name: refs/heads/master
- detected_licenses: [] · license_type: no_license
- visit_date: 2020-03-20T18:25:45.735463 · revision_date: 2018-06-16T15:22:21 · committer_date: 2018-06-16T15:22:21
- github_id: 137,587,469 · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 407 · extension: py
- content:

```python
#!C:\Projects\PythonArgus\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
```

- authors: ["noreply@github.com"] · author_id: tzuravner.noreply@github.com

---

**andydude/droxtools · /droxi/drox/omcdbase/minmax1/models.py**

- blob_id: d3c6686d0f203afd68f797901a169ea00538d252 · content_id: ba01eea5cd713e5d342815491bce5c8fa99a8188 · directory_id: 25f7e68919e64e1371b81ba379066bcd01bf4375
- snapshot_id: 5fa0233c7fe47379b8da1bcd9d06aced9af19638 · revision_id: d608ceb715908fb00398c0d28eee74286fef3750 · branch_name: refs/heads/master
- detected_licenses: ["MIT"] · license_type: permissive
- visit_date: 2021-01-25T10:29:52.787506 · revision_date: 2020-07-11T15:23:57 · committer_date: 2020-07-11T15:23:57
- github_id: 14,605,757 · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 194 · extension: py
- content:

```python
'''
Created on Mar 31, 2014
@author: ajr
'''
from ..models import OMSym
@OMSym.called("minmax1", "max")
class Max(OMSym):
pass
@OMSym.called("minmax1", "min")
class Min(OMSym):
pass
```

- authors: ["and_j_rob@yahoo.com"] · author_id: and_j_rob@yahoo.com

---

**yaodehaung/councilor-voter-guide · /voter_guide/votes/views.py**

- blob_id: 28b269aa583908671f826df2d728bc45e6b956bb · content_id: 30e85aef990b7ab2870959531dce1a0ca40d2780 · directory_id: 09e0da91b3eda6f8bf802e0ec17e2efa8c2eb279
- snapshot_id: fc5f61b50670a8566346184acbd12522bb8375fd · revision_id: dc4cea97abc2e8c81c991df3eaa1983012bc4c7e · branch_name: refs/heads/master
- detected_licenses: ["CC0-1.0"] · license_type: permissive
- visit_date: 2021-01-01T06:37:30.827245 · revision_date: 2017-07-17T09:08:01 · committer_date: 2017-07-17T09:08:01
- github_id: null · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 3,897 · extension: py
- content:

```python
# -*- coding: utf-8 -*-
import operator
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.db.models import Q, F, Sum
from django.db import IntegrityError, transaction
from .models import Votes, Councilors_Votes
from councilors.models import CouncilorsDetail
from search.views import keyword_list, keyword_been_searched
from standpoints.models import Standpoints, User_Standpoint
from commontag.views import paginate
def select_county(request, index, county):
regions = [
{"region": "北部", "counties": ["臺北市", "新北市", "桃園市", "基隆市", "宜蘭縣", "新竹縣", "新竹市"]},
{"region": "中部", "counties": ["苗栗縣", "臺中市", "彰化縣", "雲林縣", "南投縣"]},
{"region": "南部", "counties": ["嘉義縣", "嘉義市", "臺南市", "高雄市", "屏東縣"]},
{"region": "東部", "counties": ["花蓮縣", "臺東縣"]},
{"region": "離島", "counties": ["澎湖縣", "金門縣", "連江縣"]}
]
return render(request, 'votes/select_county.html', {'index': index, 'regions': regions, 'category': 'votes'})
def votes(request, county, index='normal'):
result = None
qs = Q(sitting__county=county)
qs = qs & Q(conflict=True) if request.GET.get('conscience') else qs
if request.GET.get('tag'):
vote_ids = Standpoints.objects.filter(county=county, title=request.GET['tag']).values_list('vote', flat=True)
qs = qs & Q(uid__in=vote_ids)
keyword = request.GET.get('keyword', '')
if keyword:
votes = Votes.objects.filter(qs & reduce(operator.and_, (Q(content__icontains=x) for x in keyword.split()))).prefetch_related('standpoints').order_by('-date', 'vote_seq')
if votes:
keyword_been_searched(keyword, 'votes')
else:
votes = Votes.objects.filter(qs).prefetch_related('standpoints').order_by('-date', 'vote_seq')
votes = paginate(request, votes)
standpoints = Standpoints.objects.filter(county=county).values('title').annotate(pro_sum=Sum('pro')).order_by('-pro_sum').distinct()
return render(request,'votes/votes.html', {'county': county, 'votes': votes, 'index':index, 'keyword':keyword, 'result':result, 'hot_keyword': keyword_list('votes')[:5], 'hot_standpoints': standpoints[:5]})
def vote(request, vote_id):
vote = get_object_or_404(Votes.objects.select_related('sitting'), uid=vote_id)
if request.user.is_authenticated():
if request.POST:
with transaction.atomic():
if request.POST.get('keyword', '').strip():
standpoint_id = u'vote-%s-%s' % (vote_id, request.POST['keyword'].strip())
Standpoints.objects.get_or_create(uid=standpoint_id, county=vote.sitting.county, title=request.POST['keyword'].strip(), vote_id=vote_id)
elif request.POST.get('pro'):
User_Standpoint.objects.create(standpoint_id=request.POST['pro'], user=request.user)
Standpoints.objects.filter(uid=request.POST['pro']).update(pro=F('pro') + 1)
elif request.POST.get('against'):
User_Standpoint.objects.get(standpoint_id=request.POST['against'], user=request.user).delete()
Standpoints.objects.filter(uid=request.POST['against']).update(pro=F('pro') - 1)
standpoints_of_vote = Standpoints.objects.filter(vote_id=vote_id)\
.order_by('-pro')
if request.user.is_authenticated():
standpoints_of_vote = standpoints_of_vote.extra(select={
'have_voted': "SELECT true FROM standpoints_user_standpoint su WHERE su.standpoint_id = standpoints_standpoints.uid AND su.user_id = %s" % request.user.id,
},)
return render(request,'votes/vote.html', {'vote': vote, 'standpoints_of_vote': standpoints_of_vote})
```

- authors: ["twly.tw@gmail.com"] · author_id: twly.tw@gmail.com

---

**pontual/sistema-v1 · /registros/migrations/0003_empresa_contato.py**

- blob_id: 1939babf7c003032636663a04c198f0733a9159a · content_id: d634a7cd0f04f6a3d8b7d26502e589c3147defdd · directory_id: 1c0015b0517e525c30cd38c466302fe2cdf55a12
- snapshot_id: 3e8a3f71c30be18b8140e1d2cf897e542d118d29 · revision_id: 5f63d11083077035326787a56c3ae8b66ee2054c · branch_name: refs/heads/master
- detected_licenses: ["MIT"] · license_type: permissive
- visit_date: 2021-09-07T06:15:45.292127 · revision_date: 2018-02-18T17:57:05 · committer_date: 2018-02-18T17:57:05
- github_id: 119,609,237 · star_events_count: 1 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 400 · extension: py
- content:

```python
# Generated by Django 2.0.1 on 2018-02-02 17:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registros', '0002_auto_20180130_2339'),
]
operations = [
migrations.AddField(
model_name='empresa',
name='contato',
field=models.CharField(blank=True, max_length=63),
),
]
```

- authors: ["heitorchang@gmail.com"] · author_id: heitorchang@gmail.com

---

**jmcnamara/XlsxWriter · /xlsxwriter/test/comparison/test_array_formula04.py**

- blob_id: 3e572755412c327354d0ccbf0330d7f72d0a7536 · content_id: 6ec76e91afcc459fc8122abb355867f73b554e69 · directory_id: 45de7d905486934629730945619f49281ad19359
- snapshot_id: 599e1d225d698120ef931a776a9d93a6f60186ed · revision_id: ab13807a1be68652ffc512ae6f5791d113b94ee1 · branch_name: refs/heads/main
- detected_licenses: ["BSD-2-Clause"] · license_type: permissive
- visit_date: 2023-09-04T04:21:04.559742 · revision_date: 2023-08-31T19:30:52 · committer_date: 2023-08-31T19:30:52
- github_id: 7,433,211 · star_events_count: 3,251 · fork_events_count: 712
- gha_license_id: BSD-2-Clause · gha_event_created_at: 2023-08-28T18:52:14 · gha_created_at: 2013-01-04T01:07:06 · gha_language: Python
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 1,004 · extension: py
- content:

```python
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("array_formula04.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file(self):
"""Test the creation of an XlsxWriter file with an array formula."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_array_formula("A1:A3", "{=SUM(B1:C1*B2:C2)}", None, 0)
workbook.close()
self.assertExcelEqual()
```

- authors: ["jmcnamara@cpan.org"] · author_id: jmcnamara@cpan.org

---

**Curtis-Lau/Cailiduan-quantitative-investment-based-on-python · /chp30page456中国银行股价数据与均线分析.py**

- blob_id: 4cf0b5b194a65d99a8e0093ab549b9f8cbd88129 · content_id: dbe5db94c267cd2430c17d6ed43755a62e61da94 · directory_id: 6495e68a5a8999661c15eb4456168868ea9ace76
- snapshot_id: 7be06b9a8e762cc4313d6fcf7bdde60926b7508c · revision_id: 96d4781ade85021263dea02733ef4198f2f3f53d · branch_name: refs/heads/main
- detected_licenses: [] · license_type: no_license
- visit_date: 2023-06-06T00:59:08.095422 · revision_date: 2021-06-29T03:04:38 · committer_date: 2021-06-29T03:04:38
- github_id: 381,222,762 · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 947 · extension: py
- content:

```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import movingAverage as ma
ChinaBank = pd.read_csv("./datas/030/ChinaBank.csv")
ChinaBank.index = ChinaBank.iloc[:,1]
ChinaBank.index = pd.to_datetime(ChinaBank.index,format="%Y-%m-%d")
ChinaBank = ChinaBank.iloc[:,2:]
CBClose = ChinaBank["Close"]
Close15 = CBClose["2015"]
SMA10 = ma.SMAcal(Close15,10)
weight = np.array(range(1,11))/sum(range(1,11))
WMA10 = ma.WMAcal(Close15,weight)
expo = 2/(len(Close15)+1)
EMA10 = ma.EMAcal(Close15,10,expo)
plt.rcParams["font.sans-serif"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False
plt.plot(Close15[9:],label="Close",color="k")
plt.plot(SMA10[9:],label="SMA10",color="r",linestyle="dashed")
plt.plot(WMA10[9:],label="WMA10",color="b",linestyle=":")
plt.plot(EMA10[9:],label="EMA10",color="g",linestyle="-.")
plt.title("中国银行均线")
plt.ylim(3.5,5.5)
plt.legend()
plt.show()
```

- authors: ["noreply@github.com"] · author_id: Curtis-Lau.noreply@github.com

---

**amanmaldar/scripts · /t3.py**

- blob_id: d3f403ee24f18f0a7ec954eb15366e2f5dc5baeb · content_id: 74ea7886e3eea6e457c4dbc81a8879cfcf066de4 · directory_id: 84d6858bc654522a1cbdb828c84256383c867c21
- snapshot_id: 19fe656b9a2501aa25bf93cf7e489f6f0db23f80 · revision_id: 2b6c1f667e4c5ae1988901c6b2fe3b4508ce2cf9 · branch_name: refs/heads/master
- detected_licenses: [] · license_type: no_license
- visit_date: 2020-04-07T03:36:01.642339 · revision_date: 2018-11-26T23:03:24 · committer_date: 2018-11-26T23:03:24
- github_id: 158,022,112 · star_events_count: 0 · fork_events_count: 0
- gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 3,478 · extension: py
- content:

```python
import subprocess
bashCommand = {}
showCommand = " ndnping /ndn/metrics/show -i 1 -c 1 -n 1234567 -o 1"
zeroCommand = " ndnping /ndn/metrics/zero -i 1 -c 1 -n 1234567 -o 1"
resultDir = " >> /home/lenovo/Dropbox/Thesis/Logs/minindn4/clientLogs_1.txt"
# sample test pattern
# pingServer = d ndnpingserver /ndn/d-site/d/prefix4/prefix5/prefix6/prefix7/prefix8/prefix9/prefix10 -x 1000000 &> /home/lenovo/Dropbox/Thesis/Logs/minindn3/serverLogs.txt &
# a ndnping /ndn/d-site/d/prefix4/prefix5/prefix6/prefix7/prefix8/prefix9/prefix10 -i 1 -c 1 -n 777777
print "Starting the pings"
interestPrefix = "/ndn/d-site/d/prefix4/prefix5/prefix6/prefix7/prefix8/prefix9/prefix10"
bashCommand[0] = zeroCommand
bashCommand[1] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 1" # request 5000 packets and call show
bashCommand[2] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 1" # request same 5000 packets and call show
bashCommand[3] = showCommand
bashCommand[4] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 5001" # load 5000 more packets nCS = 10000
bashCommand[5] = zeroCommand
bashCommand[6] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 10001" # request 5000 packets and call show
bashCommand[7] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 10001" # request 5000 packets and call show
bashCommand[8] = showCommand
bashCommand[9] = " ndnping " + interestPrefix + " -i 1 -c 10000 -n 15001" # load 10000 more packets nCS = 25000
bashCommand[10] = zeroCommand
bashCommand[11] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 25001" # request 5000 packets and call show
bashCommand[12] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 25001" # request 5000 packets and call show
bashCommand[13] = showCommand
bashCommand[14] = " ndnping " + interestPrefix + " -i 1 -c 10000 -n 30001" # load 10000 more packets nCS = 40000
bashCommand[15] = zeroCommand
bashCommand[16] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 40001" # request 5000 packets and call show
bashCommand[17] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 40001" # request 5000 packets and call show
bashCommand[18] = showCommand
bashCommand[19] = " ndnping " + interestPrefix + " -i 1 -c 10000 -n 45001" # load 10000 more packets nCS = 55000
bashCommand[20] = zeroCommand
bashCommand[21] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 55001" # request 5000 packets and call show
bashCommand[22] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 55001" # request 5000 packets and call show
bashCommand[23] = showCommand
bashCommand[24] = " ndnping " + interestPrefix + " -i 1 -c 5536 -n 60001" # load 5535 more packets nCS = 65535
bashCommand[25] = zeroCommand
bashCommand[26] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 66001" # request 5000 packets and call show
bashCommand[27] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 66001" # request 5000 packets and call show
bashCommand[28] = showCommand
bashCommand[29] = zeroCommand
bashCommand[30] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 73000" # request 5000 packets and call show
bashCommand[31] = " ndnping " + interestPrefix + " -i 1 -c 5000 -n 73000" # request 5000 packets and call show
bashCommand[32] = showCommand
for i, v in bashCommand.items():
    output = subprocess.call(['bash', '-c', bashCommand[i] + resultDir])
    print("done", i + 1)
print("Get the results")
|
[
"amanmaldar@users.noreply.github.com"
] |
amanmaldar@users.noreply.github.com
|
7f7f27023e294a850db4affad162e048ad895b41
|
b8a6f36c474595f30f6143411010b90df51c2716
|
/mesh_tensorflow/auto_mtf/valid_layouts.py
|
cb9960ba817066f2e0af5ddf3ea47c0cc6c0b589
|
[
"Apache-2.0"
] |
permissive
|
VonRosenchild/mesh
|
2a56da5d0e2b086a2cca49185acff9adfb4fc376
|
d26470554e086ea02e64154194097fbf517232bd
|
refs/heads/master
| 2020-08-09T13:45:48.269905
| 2019-10-10T04:16:28
| 2019-10-10T04:16:50
| 214,100,095
| 1
| 1
|
Apache-2.0
| 2019-10-10T06:06:35
| 2019-10-10T06:06:35
| null |
UTF-8
|
Python
| false
| false
| 5,889
|
py
|
# coding=utf-8
# Copyright 2019 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check whether a layout is valid under Mesh TensorFlow.
Not all layouts can be used to lower a Mesh TensorFlow graph. Some Mesh
TensorFlow operations error when a certain Mesh TensorFlow dimension is assigned
to a mesh dimension (e.g. mtf.ConcatOperation with its concatenation dimension).
A Mesh TensorFlow dimension can only be assigned to a mesh dimension if the
former's size is evenly divisible by the latter's size. This module provides
methods to check these conditions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math  # math.gcd replaces the long-deprecated fractions.gcd
import re
class LayoutValidator(object):
"""Validates potential Mesh TensorFlow layouts.
Usage Example:
mtf_graph = mtf.Graph()
# Add operations to mtf_graph using Mesh TensorFlow.
mesh_shape = mtf.Shape([("m1", 4), ("m2", 2)])
layout_validator = valid_layouts.LayoutValidator(mtf_graph, mesh_shape)
print(layout_validator.splittable_mtf_dimension_names)
# Set of names of Mesh TensorFlow dimensions that may be assigned to mesh
# dimensions.
print(layout_validator.is_valid_assignment("batch", "m1"))
# Whether the 'batch' Mesh TensorFlow dimension may be assigned to the 'm1'
# mesh dimension. Unlike the previous method, this ensures that every
# occurrence of the 'batch' dimension has a size that is evenly divisible by
# the size of 'm1'.
Attributes:
splittable_mtf_dimension_names: a set(string) of the names of MTF dimensions
that may be assigned in a layout.
mesh_dimension_name_to_size: a {string: int}, mapping names of mesh
dimensions to their size.
"""
def __init__(self, mtf_graph, mesh_shape):
"""Initializer.
Args:
mtf_graph: an mtf.Graph, representing the Mesh TensorFlow computation of
interest.
mesh_shape: an mtf.Shape, representing the mesh of interest.
"""
self._splittable_mtf_dimension_names = self._initialize_splittable_dimensions(
mtf_graph)
self._mtf_dimension_name_to_size_gcd = (
self._initialize_mtf_dimension_name_to_size_gcd(mtf_graph))
self._mesh_dimension_name_to_size = self._initialize_mesh_dimension_name_to_size(
mesh_shape)
@property
def splittable_mtf_dimension_names(self):
return self._splittable_mtf_dimension_names
@property
def mesh_dimension_name_to_size(self):
return self._mesh_dimension_name_to_size
def is_valid_assignment(self, mtf_dimension_name, mesh_dimension_name):
"""Whether this MTF dimension may be assigned to this mesh dimension.
Args:
mtf_dimension_name: string, the name of a Mesh TensorFlow dimension.
mesh_dimension_name: string, the name of a mesh dimension.
Returns:
A boolean indicating whether the assignment is valid.
"""
return ((mtf_dimension_name in self._splittable_mtf_dimension_names) and
(self._mtf_dimension_name_to_size_gcd[mtf_dimension_name] %
self._mesh_dimension_name_to_size[mesh_dimension_name] == 0))
def _initialize_splittable_dimensions(self, mtf_graph):
"""Initializer for self._splittable_mtf_dimension_names.
Args:
mtf_graph: an mtf.Graph.
Returns:
A set(string) of the names of Mesh TensorFlow dimensions that may be
assigned in a layout.
"""
all_mtf_dimension_names = set() # set(string)
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
if not re.match(r"_anonymous_\d*", mtf_dimension.name):
all_mtf_dimension_names.add(mtf_dimension.name)
unsplittable_mtf_dimension_names = set() # set(string)
for mtf_operation in mtf_graph.operations:
unsplittable_mtf_dimension_names.update(mtf_operation.unsplittable_dims)
return all_mtf_dimension_names - unsplittable_mtf_dimension_names
def _initialize_mtf_dimension_name_to_size_gcd(self, mtf_graph):
"""Initializer for self._mtf_dimension_name_to_size_gcd.
Args:
mtf_graph: an mtf.Graph.
Returns:
A {string: int}, mapping the name of an MTF dimension to the greatest
common divisor of all the sizes it has. All these sizes being evenly
divisible by some x is equivalent to the GCD being divisible by x.
"""
mtf_dimension_name_to_size_gcd = {}
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
          mtf_dimension_name_to_size_gcd[mtf_dimension.name] = math.gcd(
              mtf_dimension_name_to_size_gcd.get(mtf_dimension.name,
                                                 mtf_dimension.size),
              mtf_dimension.size)
return mtf_dimension_name_to_size_gcd
def _initialize_mesh_dimension_name_to_size(self, mesh_shape):
"""Initializer for self._mesh_dimension_name_to_size.
Args:
mesh_shape: an mtf.Shape.
Returns:
A {string: int} mapping mesh dimension names to their sizes.
"""
mesh_dimension_name_to_size = {} # {string: int}
for mesh_dimension in mesh_shape.dims:
mesh_dimension_name_to_size[mesh_dimension.name] = mesh_dimension.size
return mesh_dimension_name_to_size
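# Hypothetical usage sketch (not part of the module): enumerate every valid
# (MTF dimension, mesh dimension) assignment for a given graph and mesh.
#
#   validator = LayoutValidator(mtf_graph, mesh_shape)
#   for dim_name in validator.splittable_mtf_dimension_names:
#     for mesh_dim in validator.mesh_dimension_name_to_size:
#       if validator.is_valid_assignment(dim_name, mesh_dim):
#         print(dim_name, "->", mesh_dim)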
|
[
"copybara-piper@google.com"
] |
copybara-piper@google.com
|
776726ee20c6fade53263387a84edc7556aaca3c
|
740ca7047790baddef29f7300f88b50c8435eeb0
|
/daycare_database/daycares/models.py
|
b6a4402087716152ef29edb5e189ba4df2098275
|
[] |
no_license
|
Demetricew20/Daycare_Capstone_Backend
|
f2fea0c88e0480e7ee9cb64ae5eaa8e7ebb25fff
|
91795a2d4a952a44a312825aa4d7d2e9d6bdc7b2
|
refs/heads/main
| 2023-05-14T15:01:54.269150
| 2021-06-05T01:48:04
| 2021-06-05T01:48:04
| 371,132,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,759
|
py
|
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
class AgeGroup(models.Model):
group_name = models.CharField(max_length=120, default=None)
class Daycare(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
name = models.CharField(max_length=120)
street_address = models.CharField(max_length=75, null=True)
city = models.CharField(max_length=50, null=True)
state = models.CharField(max_length=50, null=True)
zip_code = models.CharField(max_length=5, null=True)
images = models.ImageField(null=True, blank=True)
description = models.CharField(max_length=250)
avg_rating = models.IntegerField(default=0)
min_cost_infant = models.CharField(max_length=4, null=True, blank=True)
max_cost_infant = models.CharField(max_length=4, null=True, blank=True)
#Youth toddler
min_cost_youth_T = models.CharField(max_length=4, null=True, blank=True)
max_cost_youth_T = models.CharField(max_length=4, null=True, blank=True)
#Old toddler
min_cost_old_T = models.CharField(max_length=4, null=True, blank=True)
max_cost_old_T = models.CharField(max_length=4, null=True, blank=True)
#Preschooler
min_cost_preschool = models.CharField(max_length=4, null=True, blank=True)
max_cost_preschool = models.CharField(max_length=4, null=True, blank=True)
availability = models.BooleanField(default=True)
infant_group = models.BooleanField(default=False)
young_toddler_group = models.BooleanField(default=False)
older_toddler_group = models.BooleanField(default=False)
preschooler_group = models.BooleanField(default=False)
school_age_group = models.BooleanField(default=False)
age_groups = models.ManyToManyField(AgeGroup)
class Child(models.Model):
name = models.CharField(max_length=120, default=None)
age_group = models.ForeignKey(AgeGroup, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Parent(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
street_address = models.CharField(max_length=75, null=True)
city = models.CharField(max_length=50, null=True)
state = models.CharField(max_length=50, null=True)
zip_code = models.CharField(max_length=5, null=True)
selected_daycare = models.ManyToManyField(Daycare, blank=True)
child = models.ManyToManyField(Child)
class DaycareReview(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
daycare = models.ManyToManyField(Daycare, default=None)
review_text = models.CharField(max_length=120, null=True)
review_rating = models.IntegerField(default=0)
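# Illustrative ORM sketch ("parent" and "dc" are assumed pre-existing Parent
# and Daycare instances; requires migrations for these models to be applied):
#
#   infant = AgeGroup.objects.create(group_name="Infant")
#   kid = Child.objects.create(name="Sam", age_group=infant)
#   parent.child.add(kid)             # a parent can register several children
#   parent.selected_daycare.add(dc)   # and shortlist several daycares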
|
[
"demetrice_williams@hotmail.com"
] |
demetrice_williams@hotmail.com
|
2260c6c59eece6d7755ae73b1f775980e595dc17
|
2424b0c73e3edecc5a383edbfb94f429a2c9a1bd
|
/site-packages/statsmodels/tsa/varma_process.py
|
1469c12b6242e71e684b5826dcee5d1babbf88ca
|
[
"MIT"
] |
permissive
|
pulbrich/Pyto
|
74935a15dd569184a7b190096222bf6e9ad9d1dd
|
a5e39dd49fde3a4c9eb98da76468127821ec2c9e
|
refs/heads/master
| 2020-09-07T07:44:23.001629
| 2019-11-09T16:21:15
| 2019-11-09T16:21:15
| 220,707,906
| 0
| 0
|
MIT
| 2019-11-09T21:46:16
| 2019-11-09T21:46:15
| null |
UTF-8
|
Python
| false
| false
| 19,732
|
py
|
# -*- coding: utf-8 -*-
""" Helper and filter functions for VAR and VARMA, and basic VAR class
Created on Mon Jan 11 11:04:23 2010
Author: josef-pktd
License: BSD
This is a new version, I did not look at the old version again, but similar
ideas.
not copied/cleaned yet:
* fftn based filtering, creating samples with fft
* Tests: I ran examples but did not convert them to tests
examples look good for parameter estimate and forecast, and filter functions
main TODOs:
* result statistics
* see whether Bayesian dummy observation can be included without changing
the single call to linalg.lstsq
* impulse response function does not treat correlation, see Hamilton and jplv
Extensions
* constraints, Bayesian priors/penalization
* Error Correction Form and Cointegration
* Factor Models Stock-Watson, ???
see also VAR section in Notes.txt
"""
import numpy as np
from scipy import signal
from statsmodels.tsa.tsatools import lagmat
def varfilter(x, a):
'''apply an autoregressive filter to a series x
Warning: I just found out that convolve does not work as I
thought, this likely does not work correctly for
nvars>3
x can be 2d, a can be 1d, 2d, or 3d
Parameters
----------
x : array_like
data array, 1d or 2d, if 2d then observations in rows
a : array_like
autoregressive filter coefficients, ar lag polynomial
see Notes
Returns
-------
y : ndarray, 2d
filtered array, number of columns determined by x and a
Notes
-----
In general form this uses the linear filter ::
y = a(L)x
where
x : nobs, nvars
a : nlags, nvars, npoly
Depending on the shape and dimension of a this uses different
Lag polynomial arrays
case 1 : a is 1d or (nlags,1)
one lag polynomial is applied to all variables (columns of x)
case 2 : a is 2d, (nlags, nvars)
each series is independently filtered with its own
lag polynomial, uses loop over nvar
case 3 : a is 3d, (nlags, nvars, npoly)
the ith column of the output array is given by the linear filter
defined by the 2d array a[:,:,i], i.e. ::
y[:,i] = a(.,.,i)(L) * x
y[t,i] = sum_p sum_j a(p,j,i)*x(t-p,j)
for p = 0,...nlags-1, j = 0,...nvars-1,
for all t >= nlags
    Note: maybe convert to axis=1 (not done)
TODO: initial conditions
'''
x = np.asarray(x)
a = np.asarray(a)
if x.ndim == 1:
x = x[:,None]
if x.ndim > 2:
raise ValueError('x array has to be 1d or 2d')
nvar = x.shape[1]
nlags = a.shape[0]
ntrim = nlags//2
# for x is 2d with ncols >1
if a.ndim == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a[:,None], mode='valid')
# alternative:
#return signal.lfilter(a,[1],x.astype(float),axis=0)
elif a.ndim == 2:
if min(a.shape) == 1:
# case: identical ar filter (lag polynomial)
return signal.convolve(x, a, mode='valid')
# case: independent ar
#(a bit like recserar in gauss, but no x yet)
        #(no, recserar is an inverse filter)
result = np.zeros((x.shape[0]-nlags+1, nvar))
for i in range(nvar):
            # could also use np.convolve, but easier for switching to fft
result[:,i] = signal.convolve(x[:,i], a[:,i], mode='valid')
return result
elif a.ndim == 3:
# case: vector autoregressive with lag matrices
# Note: we must have shape[1] == shape[2] == nvar
yf = signal.convolve(x[:,:,None], a)
yvalid = yf[ntrim:-ntrim, yf.shape[1]//2,:]
return yvalid
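# Example (a sketch of case 2 above): two series, each filtered by its own
# AR lag polynomial; with x of shape (100, 2) and a of shape (2, 2) the
# result has shape (99, 2).
#
#   x = np.random.randn(100, 2)
#   a = np.array([[1.0, 1.0], [-0.5, -0.8]])  # one coefficient column per series
#   y = varfilter(x, a)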
def varinversefilter(ar, nobs, version=1):
'''creates inverse ar filter (MA representation) recursively
The VAR lag polynomial is defined by ::
ar(L) y_t = u_t or
y_t = -ar_{-1}(L) y_{t-1} + u_t
the returned lagpolynomial is arinv(L)=ar^{-1}(L) in ::
y_t = arinv(L) u_t
Parameters
----------
ar : array, (nlags,nvars,nvars)
matrix lagpolynomial, currently no exog
first row should be identity
Returns
-------
arinv : array, (nobs,nvars,nvars)
Notes
-----
'''
nlags, nvars, nvarsex = ar.shape
if nvars != nvarsex:
print('exogenous variables not implemented not tested')
arinv = np.zeros((nobs+1, nvarsex, nvars))
arinv[0,:,:] = ar[0]
arinv[1:nlags,:,:] = -ar[1:]
if version == 1:
for i in range(2,nobs+1):
tmp = np.zeros((nvars,nvars))
for p in range(1,nlags):
tmp += np.dot(-ar[p],arinv[i-p,:,:])
arinv[i,:,:] = tmp
if version == 0:
for i in range(nlags+1,nobs+1):
print(ar[1:].shape, arinv[i-1:i-nlags:-1,:,:].shape)
#arinv[i,:,:] = np.dot(-ar[1:],arinv[i-1:i-nlags:-1,:,:])
#print(np.tensordot(-ar[1:],arinv[i-1:i-nlags:-1,:,:],axes=([2],[1])).shape
#arinv[i,:,:] = np.tensordot(-ar[1:],arinv[i-1:i-nlags:-1,:,:],axes=([2],[1]))
raise NotImplementedError('waiting for generalized ufuncs or something')
return arinv
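# Example (sketch): for the scalar AR(1) y_t = 0.8*y_{t-1} + u_t, written as
# ar = np.array([[[1.0]], [[-0.8]]]), varinversefilter(ar, 3) returns the MA
# weights 1, 0.8, 0.64, 0.512 stacked with shape (nobs+1, 1, 1).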
def vargenerate(ar, u, initvalues=None):
'''generate an VAR process with errors u
similar to gauss
uses loop
Parameters
----------
ar : array (nlags,nvars,nvars)
matrix lagpolynomial
u : array (nobs,nvars)
exogenous variable, error term for VAR
Returns
-------
sar : array (1+nobs,nvars)
sample of var process, inverse filtered u
does not trim initial condition y_0 = 0
Examples
--------
# generate random sample of VAR
nobs, nvars = 10, 2
u = numpy.random.randn(nobs,nvars)
a21 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0., -0.6]]])
vargenerate(a21,u)
# Impulse Response to an initial shock to the first variable
imp = np.zeros((nobs, nvars))
imp[0,0] = 1
vargenerate(a21,imp)
'''
nlags, nvars, nvarsex = ar.shape
nlagsm1 = nlags - 1
nobs = u.shape[0]
if nvars != nvarsex:
print('exogenous variables not implemented not tested')
if u.shape[1] != nvars:
raise ValueError('u needs to have nvars columns')
if initvalues is None:
sar = np.zeros((nobs+nlagsm1, nvars))
start = nlagsm1
else:
start = max(nlagsm1, initvalues.shape[0])
sar = np.zeros((nobs+start, nvars))
sar[start-initvalues.shape[0]:start] = initvalues
#sar[nlagsm1:] = u
sar[start:] = u
#if version == 1:
for i in range(start,start+nobs):
for p in range(1,nlags):
sar[i] += np.dot(sar[i-p,:],-ar[p])
return sar
def padone(x, front=0, back=0, axis=0, fillvalue=0):
'''pad with zeros along one axis, currently only axis=0
can be used sequentially to pad several axis
Examples
--------
>>> padone(np.ones((2,3)),1,3,axis=1)
array([[ 0., 1., 1., 1., 0., 0., 0.],
[ 0., 1., 1., 1., 0., 0., 0.]])
>>> padone(np.ones((2,3)),1,1, fillvalue=np.nan)
array([[ NaN, NaN, NaN],
[ 1., 1., 1.],
[ 1., 1., 1.],
[ NaN, NaN, NaN]])
'''
#primitive version
shape = np.array(x.shape)
shape[axis] += (front + back)
shapearr = np.array(x.shape)
out = np.empty(shape)
out.fill(fillvalue)
    startind = np.zeros(x.ndim, dtype=int)  # slice bounds must be integers
startind[axis] = front
endind = startind + shapearr
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
#print(myslice
#print(out.shape
#print(out[tuple(myslice)].shape
out[tuple(myslice)] = x
return out
def trimone(x, front=0, back=0, axis=0):
'''trim number of array elements along one axis
Examples
--------
>>> xp = padone(np.ones((2,3)),1,3,axis=1)
>>> xp
array([[ 0., 1., 1., 1., 0., 0., 0.],
[ 0., 1., 1., 1., 0., 0., 0.]])
>>> trimone(xp,1,3,1)
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
'''
shape = np.array(x.shape)
shape[axis] -= (front + back)
#print(shape, front, back
shapearr = np.array(x.shape)
    startind = np.zeros(x.ndim, dtype=int)  # slice bounds must be integers
startind[axis] = front
endind = startind + shape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
#print(myslice
#print(shape, endind
#print(x[tuple(myslice)].shape
return x[tuple(myslice)]
def ar2full(ar):
'''make reduced lagpolynomial into a right side lagpoly array
'''
nlags, nvar,nvarex = ar.shape
return np.r_[np.eye(nvar,nvarex)[None,:,:],-ar]
def ar2lhs(ar):
'''convert full (rhs) lagpolynomial into a reduced, left side lagpoly array
this is mainly a reminder about the definition
'''
return -ar[1:]
class _Var(object):
'''obsolete VAR class, use tsa.VAR instead, for internal use only
Examples
--------
>>> v = Var(ar2s)
>>> v.fit(1)
>>> v.arhat
array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.77784898, 0.01726193],
[ 0.10733009, -0.78665335]]])
'''
def __init__(self, y):
self.y = y
self.nobs, self.nvars = y.shape
def fit(self, nlags):
'''estimate parameters using ols
Parameters
----------
nlags : int
number of lags to include in regression, same for all variables
Returns
-------
None, but attaches
arhat : array (nlags, nvar, nvar)
full lag polynomial array
arlhs : array (nlags-1, nvar, nvar)
reduced lag polynomial for left hand side
other statistics as returned by linalg.lstsq : need to be completed
This currently assumes all parameters are estimated without restrictions.
In this case SUR is identical to OLS
estimation results are attached to the class instance
'''
self.nlags = nlags # without current period
nvars = self.nvars
#TODO: ar2s looks like a module variable, bug?
#lmat = lagmat(ar2s, nlags, trim='both', original='in')
lmat = lagmat(self.y, nlags, trim='both', original='in')
self.yred = lmat[:,:nvars]
self.xred = lmat[:,nvars:]
res = np.linalg.lstsq(self.xred, self.yred, rcond=-1)
self.estresults = res
self.arlhs = res[0].reshape(nlags, nvars, nvars)
self.arhat = ar2full(self.arlhs)
self.rss = res[1]
self.xredrank = res[2]
def predict(self):
'''calculate estimated timeseries (yhat) for sample
'''
if not hasattr(self, 'yhat'):
self.yhat = varfilter(self.y, self.arhat)
return self.yhat
def covmat(self):
''' covariance matrix of estimate
# not sure it's correct, need to check orientation everywhere
# looks ok, display needs getting used to
>>> v.rss[None,None,:]*np.linalg.inv(np.dot(v.xred.T,v.xred))[:,:,None]
array([[[ 0.37247445, 0.32210609],
[ 0.1002642 , 0.08670584]],
[[ 0.1002642 , 0.08670584],
[ 0.45903637, 0.39696255]]])
>>>
>>> v.rss[0]*np.linalg.inv(np.dot(v.xred.T,v.xred))
array([[ 0.37247445, 0.1002642 ],
[ 0.1002642 , 0.45903637]])
>>> v.rss[1]*np.linalg.inv(np.dot(v.xred.T,v.xred))
array([[ 0.32210609, 0.08670584],
[ 0.08670584, 0.39696255]])
'''
#check if orientation is same as self.arhat
self.paramcov = (self.rss[None,None,:] *
np.linalg.inv(np.dot(self.xred.T, self.xred))[:,:,None])
def forecast(self, horiz=1, u=None):
        '''calculates forecast for horiz number of periods at end of sample
Parameters
----------
horiz : int (optional, default=1)
forecast horizon
u : array (horiz, nvars)
error term for forecast periods. If None, then u is zero.
Returns
-------
yforecast : array (nobs+horiz, nvars)
this includes the sample and the forecasts
'''
if u is None:
u = np.zeros((horiz, self.nvars))
return vargenerate(self.arhat, u, initvalues=self.y)
class VarmaPoly(object):
'''class to keep track of Varma polynomial format
Examples
--------
ar23 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.6, 0. ],
[ 0.2, -0.6]],
[[-0.1, 0. ],
[ 0.1, -0.1]]])
ma22 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[ 0.4, 0. ],
[ 0.2, 0.3]]])
'''
def __init__(self, ar, ma=None):
self.ar = ar
self.ma = ma
nlags, nvarall, nvars = ar.shape
self.nlags, self.nvarall, self.nvars = nlags, nvarall, nvars
self.isstructured = not (ar[0,:nvars] == np.eye(nvars)).all()
if self.ma is None:
self.ma = np.eye(nvars)[None,...]
self.isindependent = True
else:
self.isindependent = not (ma[0] == np.eye(nvars)).all()
self.malags = ar.shape[0]
self.hasexog = nvarall > nvars
self.arm1 = -ar[1:]
#@property
def vstack(self, a=None, name='ar'):
'''stack lagpolynomial vertically in 2d array
'''
if a is not None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
return a.reshape(-1, self.nvarall)
#@property
def hstack(self, a=None, name='ar'):
'''stack lagpolynomial horizontally in 2d array
'''
if a is not None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
return a.swapaxes(1,2).reshape(-1, self.nvarall).T
#@property
def stacksquare(self, a=None, name='ar', orientation='vertical'):
'''stack lagpolynomial vertically in 2d square array with eye
'''
if a is not None:
a = a
elif name == 'ar':
a = self.ar
elif name == 'ma':
a = self.ma
else:
raise ValueError('no array or name given')
astacked = a.reshape(-1, self.nvarall)
lenpk, nvars = astacked.shape #[0]
amat = np.eye(lenpk, k=nvars)
amat[:,:nvars] = astacked
return amat
#@property
def vstackarma_minus1(self):
'''stack ar and lagpolynomial vertically in 2d array
'''
a = np.concatenate((self.ar[1:], self.ma[1:]),0)
return a.reshape(-1, self.nvarall)
#@property
def hstackarma_minus1(self):
'''stack ar and lagpolynomial vertically in 2d array
this is the Kalman Filter representation, I think
'''
a = np.concatenate((self.ar[1:], self.ma[1:]),0)
return a.swapaxes(1,2).reshape(-1, self.nvarall)
def getisstationary(self, a=None):
'''check whether the auto-regressive lag-polynomial is stationary
Returns
-------
isstationary : bool
*attaches*
areigenvalues : complex array
eigenvalues sorted by absolute value
References
----------
formula taken from NAG manual
'''
if a is not None:
a = a
else:
if self.isstructured:
a = -self.reduceform(self.ar)[1:]
else:
a = -self.ar[1:]
amat = self.stacksquare(a)
ev = np.sort(np.linalg.eigvals(amat))[::-1]
self.areigenvalues = ev
return (np.abs(ev) < 1).all()
def getisinvertible(self, a=None):
'''check whether the auto-regressive lag-polynomial is stationary
Returns
-------
isinvertible : bool
*attaches*
maeigenvalues : complex array
eigenvalues sorted by absolute value
References
----------
formula taken from NAG manual
'''
if a is not None:
a = a
else:
if self.isindependent:
a = self.reduceform(self.ma)[1:]
else:
a = self.ma[1:]
if a.shape[0] == 0:
# no ma lags
            self.maeigenvalues = np.array([], dtype=complex)  # np.complex was removed from numpy
return True
amat = self.stacksquare(a)
ev = np.sort(np.linalg.eigvals(amat))[::-1]
self.maeigenvalues = ev
return (np.abs(ev) < 1).all()
def reduceform(self, apoly):
'''
this assumes no exog, todo
'''
if apoly.ndim != 3:
raise ValueError('apoly needs to be 3d')
nlags, nvarsex, nvars = apoly.shape
a = np.empty_like(apoly)
try:
            a0inv = np.linalg.inv(apoly[0, :nvars, :])  # invert the input's A_0, not the uninitialized output
except np.linalg.LinAlgError:
raise ValueError('matrix not invertible',
'ask for implementation of pinv')
for lag in range(nlags):
a[lag] = np.dot(a0inv, apoly[lag])
return a
if __name__ == "__main__":
# some example lag polynomials
a21 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0., -0.6]]])
a22 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0. ],
[ 0.1, -0.8]]])
a23 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.8, 0.2],
[ 0.1, -0.6]]])
a24 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.6, 0. ],
[ 0.2, -0.6]],
[[-0.1, 0. ],
[ 0.1, -0.1]]])
a31 = np.r_[np.eye(3)[None,:,:], 0.8*np.eye(3)[None,:,:]]
a32 = np.array([[[ 1. , 0. , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]],
[[ 0.8, 0. , 0. ],
[ 0.1, 0.6, 0. ],
[ 0. , 0. , 0.9]]])
########
ut = np.random.randn(1000,2)
ar2s = vargenerate(a22,ut)
#res = np.linalg.lstsq(lagmat(ar2s,1)[:,1:], ar2s)
res = np.linalg.lstsq(lagmat(ar2s,1), ar2s, rcond=-1)
bhat = res[0].reshape(1,2,2)
arhat = ar2full(bhat)
#print(maxabs(arhat - a22)
v = _Var(ar2s)
v.fit(1)
v.forecast()
v.forecast(25)[-30:]
ar23 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-0.6, 0. ],
[ 0.2, -0.6]],
[[-0.1, 0. ],
[ 0.1, -0.1]]])
ma22 = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[ 0.4, 0. ],
[ 0.2, 0.3]]])
ar23ns = np.array([[[ 1. , 0. ],
[ 0. , 1. ]],
[[-1.9, 0. ],
[ 0.4, -0.6]],
[[ 0.3, 0. ],
[ 0.1, -0.1]]])
vp = VarmaPoly(ar23, ma22)
print(vars(vp))
print(vp.vstack())
print(vp.vstack(a24))
print(vp.hstackarma_minus1())
print(vp.getisstationary())
print(vp.getisinvertible())
vp2 = VarmaPoly(ar23ns)
print(vp2.getisstationary())
print(vp2.getisinvertible()) # no ma lags
|
[
"adrilabbelol@gmail.com"
] |
adrilabbelol@gmail.com
|
f9ff706e439e985b42bf93cd193a097823aecdbb
|
a0fcf3baeeb28be7b34c54c2e157fdeb7ba2870d
|
/Raspebrry pi/block.py
|
ad2de6d9811a546476901bbba74bf9e0513511d5
|
[] |
no_license
|
banzsolt/Tetris
|
a1b14054ab69306f037dc96149b149cfc0cb4c69
|
93db69151688ac9a04afe35031bfa9e49bc552fc
|
refs/heads/master
| 2016-08-11T22:19:09.722516
| 2015-11-05T10:12:54
| 2015-11-05T10:12:54
| 45,603,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
import random
__author__ = 'Zsolti'
class Block:
__config_shapes = {
'1':[[1, 1, 0],
[0, 1, 1]],
'2':[[0, 0, 1],
[1, 1, 1]],
'3':[[0, 1, 1],
[1, 1, 0]],
'4':[[0, 1, 0],
[1, 1, 1]],
'5':[[1, 1],
[1, 1]],
'6':[[1, 1, 1, 1]]
}
def __init__(self, x, y):
self.x = x
self.y = y
self.shape = self.__config_shapes[str(random.randint(1, 6))]
    def rotate_right(self):
        # Rotate the shape 90 degrees clockwise. The original indexed into an
        # empty nested list (IndexError) and computed a transpose rather than
        # a rotation, which also broke rotate_left below.
        rows = len(self.shape)
        cols = len(self.shape[0])
        result = [[0] * rows for _ in range(cols)]
        for x in range(rows):
            for y in range(cols):
                result[y][rows - 1 - x] = self.shape[x][y]
        self.shape = result
def width(self):
return len(self.shape[0])
def height(self):
return len(self.shape)
def rotate_left(self):
self.rotate_right()
self.rotate_right()
self.rotate_right()
def move_left(self):
self.y -= 1
def move_right(self):
self.y += 1
def move_down(self):
self.x += 1
def set_x(self, newx):
self.x = newx
def set_y(self, newy):
self.y = newy
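# Quick behavioral sketch (assumes the corrected rotate_right above; the shape
# is forced so the example is deterministic):
#
#   b = Block(0, 3)
#   b.shape = [[1, 1, 1, 1]]
#   b.rotate_right()
#   assert b.width() == 1 and b.height() == 4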
|
[
"banzsolt94@gmail.com"
] |
banzsolt94@gmail.com
|
7aa76cec3eeee6bf813a07cd2cc381d6d1cd9170
|
622658871f1256fbb959338fb1bf7bca187a3a81
|
/pdr/tests/test_MSL.py
|
397851a07aa4048c605418c054cc72a4b59b8446
|
[
"BSD-3-Clause"
] |
permissive
|
alemran042/pdr
|
9f1f0a27986f5bd75137b94068b9a27d8d7c89f4
|
6c9fe5e79868e679fbca978ee83b11273b35d1ed
|
refs/heads/master
| 2023-07-13T19:56:07.898649
| 2021-08-19T14:12:48
| 2021-08-19T14:12:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,703
|
py
|
""" Test performance for MSL data. """
import unittest
import pdr
# Cameras
class TestMD(unittest.TestCase):
def setUp(self):
pass
def test_md_rdr_1(self):
url = "http://pds-imaging.jpl.nasa.gov/data/msl/MSLMRD_0002/DATA/RDR/SURFACE/0000/0000MD0000000000100027C00_DRCL.IMG"
data = pdr.open(pdr.get(url))
self.assertEqual(data.IMAGE.shape[0],1533)
self.assertEqual(data.IMAGE.shape[1],2108)
self.assertEqual(data.IMAGE.shape[2],3)
self.assertEqual(len(data.LABEL),84)
def test_md_edr_1(self): # MSSS compressed format
url = "http://pds-imaging.jpl.nasa.gov/data/msl/MSLMRD_0002/DATA/EDR/SURFACE/0000/0000MD0000000000100027C00_XXXX.DAT"
#data = pdr.open(pdr.get(url))
#self.assertEqual(data.IMAGE.shape[0],1533)
#self.assertEqual(data.IMAGE.shape[1],2108)
#self.assertEqual(data.IMAGE.shape[2],3)
#self.assertEqual(len(data.LABEL),84)
suite = unittest.TestLoader().loadTestsFromTestCase(TestMD)
unittest.TextTestRunner(verbosity=2).run(suite)
class TestMastcam(unittest.TestCase):
def setUp(self):
pass
def test_mastcam_rdr_1(self):
url = "http://pds-imaging.jpl.nasa.gov/data/msl/MSLMST_0002/DATA/RDR/SURFACE/0025/0025ML0001270000100807E01_DRCL.IMG"
data = pdr.open(pdr.get(url))
self.assertEqual(data.IMAGE.shape[0],1208)
self.assertEqual(data.IMAGE.shape[1],1208)
self.assertEqual(data.IMAGE.shape[2],3)
self.assertEqual(len(data.LABEL),84)
def test_mastcam_edr_1(self): # MSSS compressed format
url = "http://pds-imaging.jpl.nasa.gov/data/msl/MSLMST_0002/DATA/EDR/SURFACE/0025/0025ML0001270000100807E01_XXXX.DAT"
#data = pdr.open(pdr.get(url))
#self.assertEqual(data.IMAGE.shape[0],1533)
#self.assertEqual(data.IMAGE.shape[1],2108)
#self.assertEqual(data.IMAGE.shape[2],3)
#self.assertEqual(len(data.LABEL),84)
suite = unittest.TestLoader().loadTestsFromTestCase(TestMastcam)
unittest.TextTestRunner(verbosity=2).run(suite)
class TestMAHLI(unittest.TestCase):
def setUp(self):
pass
def test_mahli_rdr_1(self):
url = "http://pds-imaging.jpl.nasa.gov/data/msl/MSLMHL_0002/DATA/RDR/SURFACE/0047/0047MH0000110010100214C00_DRCL.IMG"
data = pdr.open(pdr.get(url))
self.assertEqual(data.IMAGE.shape[0],1198)
self.assertEqual(data.IMAGE.shape[1],1646)
self.assertEqual(data.IMAGE.shape[2],3)
self.assertEqual(len(data.LABEL),84)
def test_mahli_edr_1(self): # MSSS compressed format
url = "http://pds-imaging.jpl.nasa.gov/data/msl/MSLMHL_0002/DATA/EDR/SURFACE/0047/0047MH0000110010100214C00_XXXX.DAT"
#data = pdr.open(pdr.get(url))
#self.assertEqual(data.IMAGE.shape[0],1533)
#self.assertEqual(data.IMAGE.shape[1],2108)
#self.assertEqual(data.IMAGE.shape[2],3)
#self.assertEqual(len(data.LABEL),84)
suite = unittest.TestLoader().loadTestsFromTestCase(TestMAHLI)
unittest.TextTestRunner(verbosity=2).run(suite)
class TestHazcam(unittest.TestCase):
def setUp(self):
pass
def test_hazcam_edr_1(self):
url = "http://pds-imaging.jpl.nasa.gov/data/msl/MSLHAZ_0XXX/DATA/SOL00382/FLB_431397159EDR_F0141262FHAZ00323M1.IMG"
data = pdr.open(pdr.get(url))
self.assertEqual(data.IMAGE.shape[0],1024)
self.assertEqual(data.IMAGE.shape[1],1024)
self.assertEqual(len(data.LABEL),102)
self.assertEqual(len(data.IMAGE_HEADER),374)
suite = unittest.TestLoader().loadTestsFromTestCase(TestHazcam)
unittest.TextTestRunner(verbosity=2).run(suite)
class TestNavcam(unittest.TestCase):
def setUp(self):
pass
def test_navcam_ecs_1(self): # 1-pixel tall image???
url = "http://pds-imaging.jpl.nasa.gov/data/msl/MSLNAV_0XXX/DATA/SOL00002/NLA_397671934ECS_N0010008AUT_04096M1.IMG"
data = pdr.open(pdr.get(url))
self.assertEqual(data.IMAGE.shape[0],1)
self.assertEqual(data.IMAGE.shape[1],1024)
self.assertEqual(len(data.LABEL),101)
self.assertEqual(len(data.IMAGE_HEADER),357)
def test_navcam_edr_1(self):
url = "http://pds-imaging.jpl.nasa.gov/data/msl/MSLMOS_1XXX/DATA/SOL00012/N_A000_0012XEDR003CYPTUM0004XTOPMTM1.IMG"
data = pdr.open(pdr.get(url))
self.assertEqual(data.IMAGE.shape[0],3)
self.assertEqual(data.IMAGE.shape[1],3337)
self.assertEqual(data.IMAGE.shape[2],7824)
self.assertEqual(len(data.LABEL),29)
self.assertEqual(len(data.IMAGE_HEADER),126)
suite = unittest.TestLoader().loadTestsFromTestCase(TestNavcam)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
""
] | |
f89539b48d721cc747eb17f11ec68426d0c6cfe4
|
6d784a0f475b519a8f6955dd49c244f8b816870a
|
/Code/server/Server/myprojectenv/lib/python3.7/site-packages/pip/_internal/resolution/legacy/resolver.py
|
28793672026ce82a9dde1ca739c9473c8dd4b411
|
[] |
no_license
|
sebuaa2020/Team209
|
c0ffa26a712314ef275c8b994cfe1fd4c842c7b6
|
c64766cffb1741503a43b1c6a2a01eb6ee4401e1
|
refs/heads/master
| 2021-02-27T10:27:55.496718
| 2020-06-15T12:37:14
| 2020-06-15T12:37:14
| 245,599,132
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,046
|
py
|
"""Dependency Resolution
The dependency resolution in pip is performed as follows:
for top-level requirements:
a. only one spec allowed per project, regardless of conflicts or not.
otherwise a "double requirement" exception is raised
b. they override sub-dependency requirements.
for sub-dependencies
a. "first found, wins" (where the order is breadth first)
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
import logging
import sys
from collections import defaultdict
from itertools import chain
from pip._vendor.packaging import specifiers
from pip._internal.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound,
HashError,
HashErrors,
UnsupportedPythonVersion,
)
from pip._internal.req.req_set import RequirementSet
from pip._internal.resolution.base import BaseResolver
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import dist_in_usersite, normalize_version_info
from pip._internal.utils.packaging import (
check_requires_python,
get_requires_python,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import DefaultDict, List, Optional, Set, Tuple
from pip._vendor import pkg_resources
from pip._internal.cache import WheelCache
from pip._internal.distributions import AbstractDistribution
from pip._internal.index.package_finder import PackageFinder
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.req_install import InstallRequirement
from pip._internal.resolution.base import InstallRequirementProvider
DiscoveredDependencies = DefaultDict[str, List[InstallRequirement]]
logger = logging.getLogger(__name__)
def _check_dist_requires_python(
dist, # type: pkg_resources.Distribution
version_info, # type: Tuple[int, int, int]
ignore_requires_python=False, # type: bool
):
# type: (...) -> None
"""
Check whether the given Python version is compatible with a distribution's
"Requires-Python" value.
:param version_info: A 3-tuple of ints representing the Python
major-minor-micro version to check.
:param ignore_requires_python: Whether to ignore the "Requires-Python"
value if the given Python version isn't compatible.
:raises UnsupportedPythonVersion: When the given Python version isn't
compatible.
"""
requires_python = get_requires_python(dist)
try:
is_compatible = check_requires_python(
requires_python, version_info=version_info,
)
except specifiers.InvalidSpecifier as exc:
logger.warning(
"Package %r has an invalid Requires-Python: %s",
dist.project_name, exc,
)
return
if is_compatible:
return
version = '.'.join(map(str, version_info))
if ignore_requires_python:
logger.debug(
'Ignoring failed Requires-Python check for package %r: '
'%s not in %r',
dist.project_name, version, requires_python,
)
return
raise UnsupportedPythonVersion(
'Package {!r} requires a different Python: {} not in {!r}'.format(
dist.project_name, version, requires_python,
))
class Resolver(BaseResolver):
"""Resolves which packages need to be installed/uninstalled to perform \
the requested operation without breaking the requirements of any package.
"""
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(
self,
preparer, # type: RequirementPreparer
finder, # type: PackageFinder
wheel_cache, # type: Optional[WheelCache]
make_install_req, # type: InstallRequirementProvider
use_user_site, # type: bool
ignore_dependencies, # type: bool
ignore_installed, # type: bool
ignore_requires_python, # type: bool
force_reinstall, # type: bool
upgrade_strategy, # type: str
py_version_info=None, # type: Optional[Tuple[int, ...]]
):
# type: (...) -> None
super(Resolver, self).__init__()
assert upgrade_strategy in self._allowed_strategies
if py_version_info is None:
py_version_info = sys.version_info[:3]
else:
py_version_info = normalize_version_info(py_version_info)
self._py_version_info = py_version_info
self.preparer = preparer
self.finder = finder
self.wheel_cache = wheel_cache
self.upgrade_strategy = upgrade_strategy
self.force_reinstall = force_reinstall
self.ignore_dependencies = ignore_dependencies
self.ignore_installed = ignore_installed
self.ignore_requires_python = ignore_requires_python
self.use_user_site = use_user_site
self._make_install_req = make_install_req
self._discovered_dependencies = \
defaultdict(list) # type: DiscoveredDependencies
def resolve(self, root_reqs, check_supported_wheels):
# type: (List[InstallRequirement], bool) -> RequirementSet
"""Resolve what operations need to be done
As a side-effect of this method, the packages (and their dependencies)
are downloaded, unpacked and prepared for installation. This
preparation is done by ``pip.operations.prepare``.
Once PyPI has static dependency metadata available, it would be
possible to move the preparation to become a step separated from
dependency resolution.
"""
requirement_set = RequirementSet(
check_supported_wheels=check_supported_wheels
)
for req in root_reqs:
requirement_set.add_requirement(req)
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# _populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = [] # type: List[InstallRequirement]
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(self._resolve_one(requirement_set, req))
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors
return requirement_set
def _is_upgrade_allowed(self, req):
# type: (InstallRequirement) -> bool
if self.upgrade_strategy == "to-satisfy-only":
return False
elif self.upgrade_strategy == "eager":
return True
else:
assert self.upgrade_strategy == "only-if-needed"
return req.is_direct
def _set_req_to_reinstall(self, req):
# type: (InstallRequirement) -> None
"""
Set a requirement to be installed.
"""
# Don't uninstall the conflict if doing a user install and the
# conflict is not a user install.
if not self.use_user_site or dist_in_usersite(req.satisfied_by):
req.should_reinstall = True
req.satisfied_by = None
def _check_skip_installed(self, req_to_install):
# type: (InstallRequirement) -> Optional[str]
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
if self.ignore_installed:
return None
req_to_install.check_if_exists(self.use_user_site)
if not req_to_install.satisfied_by:
return None
if self.force_reinstall:
self._set_req_to_reinstall(req_to_install)
return None
if not self._is_upgrade_allowed(req_to_install):
if self.upgrade_strategy == "only-if-needed":
return 'already satisfied, skipping upgrade'
return 'already satisfied'
# Check for the possibility of an upgrade. For link-based
# requirements we have to pull the tree down and inspect to assess
# the version #, so it's handled way down.
if not req_to_install.link:
try:
self.finder.find_requirement(req_to_install, upgrade=True)
except BestVersionAlreadyInstalled:
# Then the best version is installed.
return 'already up-to-date'
except DistributionNotFound:
# No distribution found, so we squash the error. It will
# be raised later when we re-try later to do the install.
# Why don't we just raise here?
pass
self._set_req_to_reinstall(req_to_install)
return None
def _populate_link(self, req):
# type: (InstallRequirement) -> None
"""Ensure that if a link can be found for this, that it is found.
Note that req.link may still be None - if the requirement is already
installed and not needed to be upgraded based on the return value of
_is_upgrade_allowed().
If preparer.require_hashes is True, don't use the wheel cache, because
cached wheels, always built locally, have different hashes than the
files downloaded from the index server and thus throw false hash
        mismatches. Furthermore, cached wheels at present have nondeterministic
contents due to file modification times.
"""
upgrade = self._is_upgrade_allowed(req)
if req.link is None:
req.link = self.finder.find_requirement(req, upgrade)
if self.wheel_cache is None or self.preparer.require_hashes:
return
cache_entry = self.wheel_cache.get_cache_entry(
link=req.link,
package_name=req.name,
supported_tags=get_supported(),
)
if cache_entry is not None:
logger.debug('Using cached wheel link: %s', cache_entry.link)
if req.link is req.original_link and cache_entry.persistent:
req.original_link_is_in_wheel_cache = True
req.link = cache_entry.link
def _get_abstract_dist_for(self, req):
# type: (InstallRequirement) -> AbstractDistribution
"""Takes a InstallRequirement and returns a single AbstractDist \
representing a prepared variant of the same.
"""
if req.editable:
return self.preparer.prepare_editable_requirement(req)
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req.satisfied_by is None
skip_reason = self._check_skip_installed(req)
if req.satisfied_by:
return self.preparer.prepare_installed_requirement(
req, skip_reason
)
# We eagerly populate the link, since that's our "legacy" behavior.
self._populate_link(req)
abstract_dist = self.preparer.prepare_linked_requirement(req)
# NOTE
# The following portion is for determining if a certain package is
# going to be re-installed/upgraded or not and reporting to the user.
# This should probably get cleaned up in a future refactor.
# req.req is only avail after unpack for URL
# pkgs repeat check_if_exists to uninstall-on-upgrade
# (#14)
if not self.ignore_installed:
req.check_if_exists(self.use_user_site)
if req.satisfied_by:
should_modify = (
self.upgrade_strategy != "to-satisfy-only" or
self.force_reinstall or
self.ignore_installed or
req.link.scheme == 'file'
)
if should_modify:
self._set_req_to_reinstall(req)
else:
logger.info(
'Requirement already satisfied (use --upgrade to upgrade):'
' %s', req,
)
return abstract_dist
def _resolve_one(
self,
requirement_set, # type: RequirementSet
req_to_install, # type: InstallRequirement
):
# type: (...) -> List[InstallRequirement]
"""Prepare a single requirements file.
:return: A list of additional InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
abstract_dist = self._get_abstract_dist_for(req_to_install)
# Parse and return dependencies
dist = abstract_dist.get_pkg_resources_distribution()
# This will raise UnsupportedPythonVersion if the given Python
# version isn't compatible with the distribution's Requires-Python.
_check_dist_requires_python(
dist, version_info=self._py_version_info,
ignore_requires_python=self.ignore_requires_python,
)
more_reqs = [] # type: List[InstallRequirement]
def add_req(subreq, extras_requested):
sub_install_req = self._make_install_req(
str(subreq),
req_to_install,
)
parent_req_name = req_to_install.name
to_scan_again, add_to_parent = requirement_set.add_requirement(
sub_install_req,
parent_req_name=parent_req_name,
extras_requested=extras_requested,
)
if parent_req_name and add_to_parent:
self._discovered_dependencies[parent_req_name].append(
add_to_parent
)
more_reqs.extend(to_scan_again)
with indent_log():
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not requirement_set.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
# 'unnamed' requirements can only come from being directly
# provided by the user.
assert req_to_install.is_direct
requirement_set.add_requirement(
req_to_install, parent_req_name=None,
)
if not self.ignore_dependencies:
if req_to_install.extras:
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq, extras_requested=available_requested)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
req_to_install.successfully_downloaded = True
return more_reqs
def get_installation_order(self, req_set):
# type: (RequirementSet) -> List[InstallRequirement]
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set() # type: Set[InstallRequirement]
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._discovered_dependencies[req.name]:
schedule(dep)
order.append(req)
for install_req in req_set.requirements.values():
schedule(install_req)
return order
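# Standalone sketch of the post-order scheduling used by get_installation_order
# (illustrative only; the "seen" set is what breaks cycles at an arbitrary point):
#
#   def topo_order(roots, deps):
#       order, seen = [], set()
#       def visit(name):
#           if name in seen:
#               return
#           seen.add(name)
#           for dep in deps.get(name, []):
#               visit(dep)
#           order.append(name)
#       for name in roots:
#           visit(name)
#       return order
#
#   topo_order(["app"], {"app": ["lib"], "lib": ["base"]})  # -> ['base', 'lib', 'app']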
|
[
"598174480@qq.com"
] |
598174480@qq.com
|
c22d10574ff57db4fb4cb755e274a2bd46027012
|
1d9d196e4a88f8da6acbabf3e7ccab00a87ef172
|
/Edge & line Detection/task2.py
|
b4d94b84557695a035fe9cb40886f6b494cdb7ac
|
[] |
no_license
|
AhmedAdel21/Computer-Vision
|
15fcfb206e5451572ce03a0c0c04b1d5155eb609
|
8cf5ffde2dcdc53c854eb3db2686083cbb6835df
|
refs/heads/main
| 2023-07-11T19:55:46.434591
| 2021-08-14T18:09:05
| 2021-08-14T18:09:05
| 396,089,194
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,641
|
py
|
# from UI import *
from numpy.core.fromnumeric import shape
import pyqtgraph as pg
from PyQt5 import QtCore, QtGui,QtWidgets
from cv2 import cv2 as cv
from math import sqrt
import numpy as np
from PIL import Image
import matplotlib.pyplot  # ensure the pyplot submodule is loaded for plt.pyplot below
import matplotlib as plt
import random
from UI import Ui_MainWindow
from lines_hough import hough_lines
import snake as sn
import canny
# from collections import Counter # Replaced
class GUI(Ui_MainWindow):
def __init__(self,MainWindow):
super(GUI,self).setupUi(MainWindow)
self.images=[self.cannyInputImage,self.cannyOutputImage,
self.activeContoursInputImage,self.activeContoursOutputImage]
#removing unwanted options from the image display widget
for i in range(len(self.images)):
self.images[i].ui.histogram.hide()
self.images[i].ui.roiPlot.hide()
self.images[i].ui.roiBtn.hide()
self.images[i].ui.menuBtn.hide()
self.images[i].view.setContentsMargins(0,0,0,0)
self.images[i].view.setAspectLocked(False)
self.images[i].view.setRange(xRange=[0,100],yRange=[0,100], padding=0)
#retrieve the original image data
        hough_lines("linesInput.jpg")
# Active contour
self.snakeContour()
######################################################################################################
# DoLa
def snakeContour(self):
img = np.load('./img.npy')
t = np.arange(0, 2*np.pi, 0.1)
x = 120+50*np.cos(t)
y = 140+60*np.sin(t)
        alpha = 0.001   # elasticity (tension) weight of the snake
        beta = 0.4      # rigidity (curvature) weight
        gamma = 100     # step size of each gradient update
        iterations = 50
# fx and fy are callable functions
fx, fy = sn.create_external_edge_force_gradients_from_img( img )
snakes = sn.iterate_snake(
x = x,
y = y,
a = alpha,
b = beta,
fx = fx,
fy = fy,
gamma = gamma,
n_iters = iterations,
return_all = True
)
self.activeContoursInputImage.setImage(img,xvals=np.linspace(1., 3., img.shape[0]))
# self.activeContoursOutputImage.setImage(img,xvals=np.linspace(1., 3., img.shape[0]))
fig = plt.pyplot.figure()
ax = fig.add_subplot()
ax.imshow(img)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(0,img.shape[1])
ax.set_ylim(img.shape[0],0)
ax.plot(np.r_[x,x[0]], np.r_[y,y[0]], c=(0,1,0), lw=2)
for i, snake in enumerate(snakes):
if i % 10 == 0:
ax.plot(np.r_[snake[0], snake[0][0]], np.r_[snake[1], snake[1][0]], c=(0,0,1), lw=2)
# Plot the last one a different color.
ax.plot(np.r_[snakes[-1][0], snakes[-1][0][0]], np.r_[snakes[-1][1], snakes[-1][1][0]], c=(1,0,0), lw=2)
plt.pyplot.savefig('snake.jpg')
outImg = cv.imread('./snake.jpg')
self.activeContoursOutputImage.setImage(outImg)
cny_img_in = cv.imread('CannyInput.jpg')
self.cannyInputImage.setImage(cny_img_in.T)
cny_img_out = canny.canny_apply("CannyInput.jpg")
# print(type(np.asarray(cny_img_out)))
self.cannyOutputImage.setImage(np.asarray(cny_img_out).T)
######################################################################################################
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = GUI(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
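# The local "canny" module is not shown; the classic pipeline it presumably
# implements (an assumption) is Gaussian smoothing -> Sobel gradients ->
# non-maximum suppression -> hysteresis thresholding. An OpenCV equivalent:
#
#   gray = cv.imread("CannyInput.jpg", cv.IMREAD_GRAYSCALE)
#   edges = cv.Canny(gray, 100, 200)  # thresholds are illustrative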
|
[
"ahmed12365488@gmail.com"
] |
ahmed12365488@gmail.com
|
01fecacfcdbde9e122e2c0c0ddf10db7ae37ec01
|
67651f8324395e5ef00cdaba3fd4e3f3c9787f9a
|
/dt-core/packages/slim_parking/src/graph_node.py
|
fb3b82b2048cba3fa4d808126726f45422896709
|
[] |
no_license
|
chiaracaste/duckietown-gym_src
|
9fdb0d6906af4578daa231bacbd1728da031c5fa
|
538f620b7b95ea945a5efc6366df4403d20aba93
|
refs/heads/main
| 2022-12-27T21:54:14.432459
| 2020-10-06T21:23:25
| 2020-10-06T21:23:25
| 301,855,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,110
|
py
|
#!/usr/bin/env python
from duckietown_msgs.msg import LanePose, FSMState
import rospy
import matplotlib.pyplot as plt
import numpy as np
class GraphNode():
    def __init__(self):
        self.node_name = "graph_node"  # referenced by loginfo(); was never set
        self.active = False
self.d_lst = []
self.phi_lst = []
self.subMode = rospy.Subscriber("/default/fsm_node/mode",FSMState, self.updateState)
self.getError = rospy.Subscriber("/default/lane_filter_node/lane_pose", LanePose, self.updateErrorArray)
def updateErrorArray(self,msg):
if self.active:
self.d_lst.append(msg.d)
self.phi_lst.append(msg.phi)
def updateState(self,msg):
if msg.state == "EXITING_FROM_PARKING":
self.active = True
else:
if self.active:
self.printAndClose()
def printAndClose(self):
fileD = open('d_exit.txt','a')
for element in self.d_lst:
fileD.write(str(element))
fileD.write('\t')
fileD.write('\n')
fileD.close()
filePhi = open('angolo_exit.txt','a')
for element in self.phi_lst:
filePhi.write(str(element))
filePhi.write('\t')
filePhi.write('\n')
filePhi.close()
t = np.arange(0, len(self.d_lst), 1) # see also linspace
#nc = len(t)
#self.e = np.zeros(nc)
rospy.loginfo("DONEEEEE")
fig_1 = plt.figure(1)
        plt.plot(t, self.d_lst, label='Error')
        plt.title('Error', fontsize=12)
plt.xlabel('t')
plt.ylabel('e')
plt.grid(True)
plt.legend()
# plt.show()
plt.savefig('errore.png')
self.active = False
#self.lst.clear(self)
del self.d_lst[:]
del self.phi_lst[:]
def onShutdown(self):
rospy.loginfo("[GraphNode] Shutdown.")
def loginfo(self, s):
rospy.loginfo('[%s] %s' % (self.node_name, s))
if __name__ == '__main__':
rospy.init_node('graph_node', anonymous=False)
graph_node_class = GraphNode()
rospy.on_shutdown(graph_node_class.onShutdown)
rospy.spin()
|
[
"chiaracastellani@live.it"
] |
chiaracastellani@live.it
|
819779f414c01d888fd28cc064fc58a21a4b4dc8
|
d05233eaf44c62ca82a669980f91e6c16900e017
|
/tests/preprocessing_test.py
|
27176831f4f8c41ab2b2d2557a413920cc5a7a6a
|
[
"Apache-2.0"
] |
permissive
|
ayazskhan/vqf
|
36caf2f6d0d89d8e6a83d8a4ee2a8241545f4b15
|
d664437c8fbd9306ef154243edf2d376128eb044
|
refs/heads/master
| 2020-11-24T17:15:55.630208
| 2019-09-14T20:16:04
| 2019-09-14T20:16:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,434
|
py
|
import pytest
from vqf import preprocessing
from sympy import symbols
import pdb
def test_apply_z_rule_1():
## Given
known_expressions = {}
q, p, z = symbols('q p z')
clause = q + p - 1 - 2*z
## When
known_expressions = preprocessing.apply_z_rule_1(clause, known_expressions)
## Then
assert known_expressions[z] == 0
## Given
known_expressions = {}
q_0, q_1, p_0, p_1, z_0, z_1 = symbols('q_0 q_1 p_0 p_1 z_0 z_1')
clause = q_0 + q_1 + p_0 + p_1 - 2*z_0 - 4*z_1 - 1
## When
known_expressions = preprocessing.apply_z_rule_1(clause, known_expressions)
## Then
assert len(known_expressions) == 1
assert known_expressions[z_1] == 0
## Given
known_expressions = {}
q, p, z = symbols('q p z')
clause = q + p - 2*z
## When
known_expressions = preprocessing.apply_z_rule_1(clause, known_expressions)
## Then
assert len(known_expressions) == 0
## Given
known_expressions = {}
    q_0, q_1, p_0, z_0 = symbols('q_0 q_1 p_0 z_0')
clause = q_0 + 2*q_1 - p_0 - 2*z_0
## When
known_expressions = preprocessing.apply_z_rule_1(clause, known_expressions)
## Then
assert len(known_expressions) == 0
## Given
known_expressions = {}
    q_0, q_1, p_0, z_0 = symbols('q_0 q_1 p_0 z_0')
clause = q_0 + 2*q_1 - p_0 - 4*z_0
## When
known_expressions = preprocessing.apply_z_rule_1(clause, known_expressions)
## Then
assert len(known_expressions) == 1
assert known_expressions[z_0] == 0
def test_apply_z_rule_2():
## Given
known_expressions = {}
q, p, z = symbols('q p z')
clause = q + p - 2*z
## When
known_expressions = preprocessing.apply_z_rule_2(clause, known_expressions)
## Then
assert known_expressions[p] == q
assert known_expressions[z] == q
## Given
known_expressions = {}
q, p, z = symbols('q p z')
clause = q + 2*p - 2*z
## When
known_expressions = preprocessing.apply_z_rule_2(clause, known_expressions)
## Then
assert known_expressions[q] == 0
## Given
known_expressions = {}
q, z = symbols('q z')
clause = q + 1 - 2*z
## When
known_expressions = preprocessing.apply_z_rule_2(clause, known_expressions)
## Then
assert known_expressions[q] == 1
#TODO:
# assert known_expressions[z] == 1
## Given
known_expressions = {}
q_0, q_1, p_0, p_1, z_0, z_1 = symbols('q_0 q_1 p_0 p_1 z_0 z_1')
clause = q_0 + p_0 + 2*q_1 + 2*p_1 - 2*z_0 - 4*z_1
## When
known_expressions = preprocessing.apply_z_rule_2(clause, known_expressions)
## Then
assert known_expressions[p_0] == q_0
## Given
known_expressions = {}
q_0, q_1, p_0, z_0 = symbols('q_0 q_1 p_0 z_0')
clause = q_0 + p_0 + 2*q_1 - 2*z_0 - 1
## When
known_expressions = preprocessing.apply_z_rule_2(clause, known_expressions)
## Then
assert known_expressions[q_0*p_0] == 0
## Given
known_expressions = {}
q_0, p_0, z_0 = symbols('q_0 p_0 z_0')
clause = q_0 + p_0 - 2*z_0 + 2
## When
known_expressions = preprocessing.apply_z_rule_2(clause, known_expressions)
## Then
assert known_expressions[p_0] == q_0
assert len(known_expressions) == 1
## Given
known_expressions = {}
q_0, p_0, z_0 = symbols('q_0 p_0 z_0')
clause = q_0 - p_0 - 2*z_0 + 2
## When
known_expressions = preprocessing.apply_z_rule_2(clause, known_expressions)
## Then
assert known_expressions[p_0] == q_0
# This expression is currently not supported
# ## Given
# known_expressions = {}
# q_0, p_0, z_0 = symbols('q_0 p_0 z_0')
# clause = q_0 + p_0 + 2*z_0 - 2
# ## When
# known_expressions = preprocessing.apply_z_rule_2(clause, known_expressions)
# ## Then
# assert known_expressions[p_0] == q_0
# assert known_expressions[z_0] == 1 - q_0
def test_apply_rule_of_equality():
## Given
known_expressions = {}
q = symbols('q')
clause = q - 1
## When
known_expressions = preprocessing.apply_rule_of_equality(clause, known_expressions)
## Then
assert known_expressions[q] == 1
## Given
known_expressions = {}
q = symbols('q')
clause = q
## When
known_expressions = preprocessing.apply_rule_of_equality(clause, known_expressions)
## Then
assert known_expressions[q] == 0
## Given
known_expressions = {}
p, q = symbols('p q')
clause = p*q - 1
## When
known_expressions = preprocessing.apply_rule_of_equality(clause, known_expressions)
## Then
assert known_expressions[p*q] == 1
## Given
known_expressions = {}
p, q = symbols('p q')
clause = p*q
## When
known_expressions = preprocessing.apply_rule_of_equality(clause, known_expressions)
## Then
assert known_expressions[p*q] == 0
## Given
known_expressions = {}
p, q = symbols('p q')
clause = p - q
## When
known_expressions = preprocessing.apply_rule_of_equality(clause, known_expressions)
## Then
assert known_expressions[p] == q
def test_apply_rule_1():
## Given
known_expressions = {}
p, q = symbols('p q')
clause = p * q - 1
## When
known_expressions = preprocessing.apply_rule_1(clause, known_expressions)
## Then
assert known_expressions[p] == 1
assert known_expressions[q] == 1
def test_apply_rule_2():
## Given
known_expressions = {}
p, q = symbols('p q')
clause = p + q - 1
## When
known_expressions = preprocessing.apply_rule_2(clause, known_expressions)
## Then
assert known_expressions[p*q] == 0
assert known_expressions[p] == 1 - q
def test_apply_rule_3():
## Given
known_expressions = {}
q = symbols('q')
clause = 2 - 2*q
## When
known_expressions = preprocessing.apply_rule_3(clause, known_expressions)
## Then
assert known_expressions[q] == 1
def test_apply_rules_4_and_5():
## Given
known_expressions = {}
q_0, q_1, p_0, p_1 = symbols('q_0 q_1 p_0 p_1')
clause = q_0 + q_1 + p_0 + p_1
## When
known_expressions = preprocessing.apply_rules_4_and_5(clause, known_expressions)
## Then
assert known_expressions[q_0] == 0
assert known_expressions[q_1] == 0
assert known_expressions[p_0] == 0
assert known_expressions[p_1] == 0
## Given
known_expressions = {}
q_0, q_1, p_0, p_1 = symbols('q_0 q_1 p_0 p_1')
clause = q_0 + q_1 + p_0 + p_1 - 4
## When
known_expressions = preprocessing.apply_rules_4_and_5(clause, known_expressions)
## Then
assert known_expressions[q_0] == 1
assert known_expressions[q_1] == 1
assert known_expressions[p_0] == 1
assert known_expressions[p_1] == 1
## Given
known_expressions = {}
q = symbols('q')
clause = q - 1
## When
known_expressions = preprocessing.apply_rules_4_and_5(clause, known_expressions)
## Then
assert known_expressions[q] == 1
assert len(known_expressions) == 1
# This expression is currently not supported
# ## Given
# known_expressions = {}
# q_0, q_1, q_2 = symbols('q_0 q_1 q_2')
# clause = 2*q_0 + q_1 + q_2 - 4
# ## When
# known_expressions = preprocessing.apply_rules_4_and_5(clause, known_expressions)
# ## Then
# assert known_expressions[q_0] == 1
# assert known_expressions[q_1] == 1
# assert known_expressions[q_2] == 1
|
[
"michal.stechly@gmail.com"
] |
michal.stechly@gmail.com
|
4ecdec1c4b76af74187491d831d35ca6542cdf07
|
f5d67e3d011f59ba5ca172544b407bf20fbb0b2c
|
/django/apps/issues/admin.py
|
7758698f5dac5f4a7bf548f1b0d6b27778af96c6
|
[
"Apache-2.0"
] |
permissive
|
haakenlid/universitas.no
|
28f91b458c12b0fa89114960a619dcffa325a820
|
871e4fba706b671b2a5d8258ff21faa74a05cf3e
|
refs/heads/master
| 2020-04-23T07:46:53.880988
| 2019-02-16T15:02:21
| 2019-02-16T15:05:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,479
|
py
|
""" Admin for printissues app """
import logging
# from sorl.thumbnail.admin import AdminImageMixin
from django.contrib import admin, messages
from django.contrib.staticfiles.storage import staticfiles_storage
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from sorl.thumbnail import get_thumbnail
from utils.sorladmin import AdminImageMixin
from .models import Issue, PrintIssue
from .tasks import create_print_issue_pdf
logger = logging.getLogger(__name__)
def create_pdf(modeladmin, request, queryset):
messages.add_message(request, messages.INFO, 'started creating pdf')
create_print_issue_pdf.delay(expiration_days=6)
class ThumbAdmin:
exclude = ()
def thumbnail(self, instance, width=200, height=200):
""" Show thumbnail of pdf frontpage """
try:
thumb = instance.thumbnail()
url = thumb.url
except (AttributeError, FileNotFoundError) as e: # noqa
logger.exception('thumb error')
url = staticfiles_storage.url('/admin/img/icon-no.svg')
if instance.pdf:
html = '<a href="{pdf}"><img src="{thumb}"></a>'.format(
thumb=url,
pdf=instance.pdf.url,
)
else:
html = '<p>{}</p>'.format(_('PDF is not uploaded yet.'))
return mark_safe(html)
thumbnail.allow_tags = True # typing: disable
def large_thumbnail(self, instance):
return self.thumbnail(instance, width=800, height=800)
@admin.register(Issue)
class IssueAdmin(admin.ModelAdmin):
list_per_page = 40
date_hierarchy = 'publication_date'
list_display = [
'__str__',
'publication_date',
'issue_type',
'pdf_links',
]
list_editable = [
'publication_date',
'issue_type',
]
search_fields = [
'name',
]
def pdf_thumb(self, pdf, width=250, height=100):
try:
thumb = get_thumbnail(
pdf.get_cover_page(),
'%sx%s' % (width, height),
crop='top',
)
url = thumb.url
except FileNotFoundError: # noqa
url = '/static/admin/img/icon-no.svg'
return url
def pdf_links(self, instance):
html = ''
a_template = '<a href="{url}"><img src="{thumb}"><p>{filename}</p></a>'
for pdf in instance.pdfs.all():
html += a_template.format(
url=pdf.get_edit_url(),
filename=pdf.pdf.name,
thumb=self.pdf_thumb(pdf),
)
if not html:
html = "Nei"
return mark_safe(html)
@admin.register(PrintIssue)
class PrintIssueAdmin(AdminImageMixin, admin.ModelAdmin, ThumbAdmin):
actions = [create_pdf]
actions_on_top = True
actions_on_bottom = True
save_on_top = True
list_per_page = 40
list_display = [
'pages',
'pdf',
'thumbnail',
'extract',
'issue',
]
search_fields = [
'text',
'pdf',
]
readonly_fields = [
'large_thumbnail',
'text',
'extract',
'pages',
]
autocomplete_fields = [
'issue',
]
fieldsets = [[
'',
{
'fields': (
('issue', ),
('pdf', 'cover_page', 'pages'),
('large_thumbnail', 'extract'),
)
}
]]
|
[
"haakenlid@gmail.com"
] |
haakenlid@gmail.com
|
49e63171cad299cb32d8586726057f80669e4d05
|
b39670923ee518f2da16207a0d7e6093775f7b55
|
/prac_06/car_simulator.py
|
0f56e14275fd96482a453850ee8c1023e0fc49d1
|
[] |
no_license
|
SebastianFrizzo/CP1404_Practicals
|
02116acbd7730f18c9f4b0f93d9c6e3afe619746
|
f635cd701e99e81087587e1ae34972b51e1d1357
|
refs/heads/master
| 2020-07-12T21:07:48.623511
| 2019-11-11T06:11:22
| 2019-11-11T06:11:22
| 204,906,505
| 0
| 0
| null | 2019-09-18T02:27:24
| 2019-08-28T10:30:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
from car import Car
def main():
print("Let's drive!")
car_name = input("Enter your car name: ")
car = Car(car_name)
choice = "A"
while choice != "Q":
print(car)
choice = input("Menu \n (D)rive \n (R)efuel \n (Q)uit \nEnter your choice: ").upper()
if choice == "D":
validated = False
while not validated:
distance = input("How many km do you wish to drive?: ")
validated = validate_number_positive(distance)
distance = int(distance)
car.drive(distance)
elif choice == "R":
validated = False
while not validated:
fuel = input("How many units of fuel do you want to add to the car?: ")
validated = validate_number_positive(fuel)
fuel = int(fuel)
car.add_fuel(fuel)
print("Goodbye {}'s driver".format(car_name))
def validate_number_positive(number):
try:
number = int(number)
return True
except ValueError:
print("Invalid")
return False
main()
|
[
"sebastianfrizzolaloli@gmail.com"
] |
sebastianfrizzolaloli@gmail.com
|
f9d7e2e4ad826382d4a4c94fa202c744ad8cd266
|
e9abcb6021cc6fcc15ef2258f09812492b4e093d
|
/ironic/objects/base.py
|
84ac7ebc7f4207450cc1c367290557beaa9caf55
|
[
"Apache-2.0"
] |
permissive
|
ericxiett/ironic-customized
|
e6df6a62840ae34180b8004c98ac56790462408b
|
3a2ad13969e1497889a0c3be80f9f5f671ff4d1b
|
refs/heads/master
| 2020-07-16T08:29:03.447845
| 2019-09-02T01:31:58
| 2019-09-02T01:31:58
| 205,754,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,568
|
py
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic common internal object model"""
from oslo_utils import versionutils
from oslo_versionedobjects import base as object_base
from ironic import objects
from ironic.objects import fields as object_fields
class IronicObjectRegistry(object_base.VersionedObjectRegistry):
def registration_hook(self, cls, index):
# NOTE(jroll): blatantly stolen from nova
# NOTE(danms): This is called when an object is registered,
# and is responsible for maintaining ironic.objects.$OBJECT
# as the highest-versioned implementation of a given object.
version = versionutils.convert_version_to_tuple(cls.VERSION)
if not hasattr(objects, cls.obj_name()):
setattr(objects, cls.obj_name(), cls)
else:
cur_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
if version >= cur_version:
setattr(objects, cls.obj_name(), cls)
class IronicObject(object_base.VersionedObject):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
OBJ_SERIAL_NAMESPACE = 'ironic_object'
OBJ_PROJECT_NAMESPACE = 'ironic'
# TODO(lintan) Refactor these fields and create PersistentObject and
# TimeStampObject like Nova when it is necessary.
fields = {
'created_at': object_fields.DateTimeField(nullable=True),
'updated_at': object_fields.DateTimeField(nullable=True),
}
def as_dict(self):
return dict((k, getattr(self, k))
for k in self.fields
if hasattr(self, k))
def obj_refresh(self, loaded_object):
"""Applies updates for objects that inherit from base.IronicObject.
Checks for updated attributes in an object. Updates are applied from
the loaded object column by column in comparison with the current
object.
"""
for field in self.fields:
if (self.obj_attr_is_set(field) and
self[field] != loaded_object[field]):
self[field] = loaded_object[field]
@staticmethod
def _from_db_object(obj, db_object):
"""Converts a database entity to a formal object.
:param obj: An object of the class.
:param db_object: A DB model of the object
:return: The object of the class with the database entity added
"""
for field in obj.fields:
obj[field] = db_object[field]
obj.obj_reset_changes()
return obj
class IronicObjectSerializer(object_base.VersionedObjectSerializer):
# Base class to use for object hydration
OBJ_BASE_CLASS = IronicObject
|
[
"eric_xiett@163.com"
] |
eric_xiett@163.com
|
440e3de6c590108e44a0933ec7c799273aefaf59
|
05263538c3ad0f577cdbbdb9bac87dcf450230ce
|
/alexa/ask-sdk/ask_sdk_model/interfaces/audioplayer/stop_directive.py
|
3f06a7c163c1236132e3736f39e68b6261dca362
|
[] |
no_license
|
blairharper/ISS-GoogleMap-project
|
cea027324fc675a9a309b5277de99fc0265dcb80
|
3df119036b454a0bb219af2d703195f4154a2471
|
refs/heads/master
| 2020-03-21T16:47:21.046174
| 2018-10-24T08:05:57
| 2018-10-24T08:05:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,140
|
py
|
# coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.directive import Directive
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
class StopDirective(Directive):
"""
NOTE: This class is auto generated.
Do not edit the class manually.
"""
deserialized_types = {
'object_type': 'str'
}
attribute_map = {
'object_type': 'type'
}
def __init__(self): # noqa: E501
# type: () -> None
"""
"""
self.__discriminator_value = "AudioPlayer.Stop"
self.object_type = self.__discriminator_value
super(StopDirective, self).__init__(object_type=self.__discriminator_value) # noqa: E501
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, StopDirective):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
|
[
"blair.harper@gmail.com"
] |
blair.harper@gmail.com
|
0b4fe2acd6493fe680e1825645e893f245093f51
|
1b4a9b553209c467b872e754e325e7259a6e6d38
|
/test.py
|
499e9ac0132851e69fe1592bd12884a35c52df31
|
[] |
no_license
|
soumilshah1995/Data-cleaning-Tool-Python
|
541e73da0207dcc083edde43f64d7cb210f59ca1
|
4c248833176d207b9a900cc57a39932708c42a47
|
refs/heads/master
| 2020-07-26T03:11:33.226590
| 2019-09-14T23:35:45
| 2019-09-14T23:35:45
| 208,516,369
| 6
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
__Author__ = "soumil shah"
__Version__ = "0.0.1"
__Email__ = "soumil.shah@budderfly.com"
"""
Problem Statments: We need to Automate Data cleaning to removes Null Values
Solution: Python script to select file it will drop all null values and create a new csv File
Later more Functionality can be added
"""
try:
from tkinter import filedialog
from tkinter import ttk
from tkinter import *
import pandas as pd
except Exception as e:
print("Some Modules are Missing {}".format(e))
class Master(object):
def __init__(self):
self.root = Tk()
@property
def __open_dialog(self):
"""
This FUnction is Provate
Open Dialog Box
:return: None
"""
self.root.filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("CSV File ","*.csv"),("all files","*.*")))
self.filename = self.root.filename
print (self.filename)
return self.filename
def clean_data(self):
"""
Drops the Null values and 0
:return: New csv File
"""
self.filename = self.__open_dialog
df = pd.read_csv(self.filename, na_values=[0,"0"])
Data_CLeaned = df.dropna()
Data_CLeaned.to_csv("Cleaned_Data.csv")
self.__alert_popup(title="Complete", message="New Csv file has been created",path="Thanks for using Software ")
def __alert_popup(self, title="", message="", path=""):
"""Generate a pop-up window for special messages."""
self.root.title(title)
w = 400 # popup window width
h = 200 # popup window height
sw = self.root.winfo_screenwidth()
sh = self.root.winfo_screenheight()
x = (sw - w)/2
y = (sh - h)/2
self.root.geometry('%dx%d+%d+%d' % (w, h, x, y))
m = message
m += '\n'
m += path
w = Label(self.root, text=m, width=120, height=10)
w.pack()
b = Button(self.root, text="OK", command=self.root.destroy, width=10)
b.pack()
mainloop()
if __name__ == "__main__":
obj = Master()
obj.clean_data()
|
[
"soushah@my.bridgeport.edu"
] |
soushah@my.bridgeport.edu
|
5ca28f082b0ba7e3c682ea21bc0feba26a4dc8c5
|
6e7f48c3d2fb33494d4ec7ecfad3f016335747fb
|
/dump/processor.py
|
b32c7ee8d308265632fb2ee57af079e48256cc41
|
[
"Apache-2.0"
] |
permissive
|
mayankmetha/Y86-Simulator
|
e66ec65b2315bf6bf147ba598861ae110f6d5aa9
|
a68db7ce9526a7fff5d1e2242c81d0feb6f37ad6
|
refs/heads/master
| 2020-04-14T05:29:47.125565
| 2019-01-05T02:05:31
| 2019-01-05T02:05:31
| 163,662,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,574
|
py
|
#!/usr/bin/env python3
import sys
import os
# register names
regName = {
0x0: "%rax",
0x1: "%rcx",
0x2: "%rdx",
0x3: "%rbx",
0x4: "%rsp",
0x5: "%rbp",
0x6: "%rsi",
0x7: "%rdi",
0x8: "%r8",
0x9: "%r9",
0xA: "%r10",
0xB: "%r11",
0xC: "%r12",
0xD: "%r13",
0xE: "%r14"
}
# opCode names
intrName = {
"00": "halt",
"10": "nop",
"20": "rrmovq",
"21": "cmovle",
"22": "cmovl",
"23": "cmove",
"24": "cmovne",
"25": "cmovge",
"26": "cmovg",
"30": "irmovq",
"40": "rmmovq",
"50": "mrmovq",
"60": "addq",
"61": "subq",
"62": "andq",
"63": "xorq",
"70": "jmp",
"71": "jle",
"72": "jl",
"73": "je",
"74": "jne",
"75": "jge",
"76": "jg",
"80": "call",
"90": "ret",
"A0": "pushq",
"B0": "popq"
}
# register codes
R_RAX = 0x0
R_RCX = 0x1
R_RDX = 0x2
R_RBX = 0x3
R_RSP = 0x4
R_RBP = 0x5
R_RSI = 0x6
R_RDI = 0x7
R_R8 = 0x8
R_R9 = 0x9
R_R10 = 0xA
R_R11 = 0xB
R_R12 = 0xC
R_R13 = 0xD
R_R14 = 0xE
R_NONE = 0xF
# instruction codes
I_HALT = 0x0
I_NOP = 0x1
I_CMOV = 0x2
I_IRMOV = 0x3
I_RMMOV = 0x4
I_MRMOV = 0x5
I_OP = 0x6
I_J = 0x7
I_CALL = 0x8
I_RET = 0x9
I_PUSH = 0xA
I_POP = 0xB
# fetch none
F_NONE = 0x0
# alu op
A_ADD = 0x0
A_SUB = 0x1
A_AND = 0x2
A_XOR = 0x3
# jump op
J_JMP = 0x0
J_LE = 0x1
J_L = 0x2
J_E = 0x3
J_NE = 0x4
J_GE = 0x5
J_G = 0x6
# Pipeline F reg
F_predPC = 0
F_stat = 'BUB'
# Fetch intermediate values
f_icode = I_NOP
f_ifun = F_NONE
f_valC = 0x0
f_valP = 0x0
f_rA = R_NONE
f_rB = R_NONE
f_predPC = 0
f_stat = 'BUB'
# Pipeline D reg
D_stat = 'BUB'
D_icode = I_NOP
D_ifun = F_NONE
D_rA = R_NONE
D_rB = R_NONE
D_valP = 0x0
D_valC = 0x0
D_next_bub = False
# Decode intermediate values
d_srcA = R_NONE
d_srcB = R_NONE
d_dstE = R_NONE
d_dstM = R_NONE
d_valA = 0x0
d_valB = 0x0
# Pipeline E reg
E_stat = 'BUB'
E_icode = I_NOP
E_ifun = F_NONE
E_valC = 0x0
E_srcA = R_NONE
E_valA = 0x0
E_srcB = R_NONE
E_valB = 0x0
E_dstE = R_NONE
E_dstM = R_NONE
# Execute intermediate values
e_valE = 0x0
e_dstE = R_NONE
e_Cnd = False
e_setcc = False
# Pipeline M reg
M_stat = 'BUB'
M_icode = I_NOP
M_ifun = F_NONE
M_valA = 0x0
M_dstE = R_NONE
M_valE = 0x0
M_dstM = R_NONE
M_Cnd = False
# Memory intermediate values
m_valM = 0x0
m_stat = 'BUB'
mem_addr = 0x0
m_read = False
dmem_error = False
# Pipeline W reg
W_stat = 'BUB'
W_icode = I_NOP
W_ifun = F_NONE
W_dstE = R_NONE
W_valE = 0x0
W_dstM = R_NONE
W_valM = 0x0
# registers value
register = {
0x0: 0,
0x1: 0,
0x2: 0,
0x3: 0,
0x4: 0,
0x5: 0,
0x6: 0,
0x7: 0,
0x8: 0,
0x9: 0,
0xA: 0,
0xB: 0,
0xC: 0,
0xD: 0,
0xE: 0,
0xF: 0
}
# condition code flags
ccFlags = {
'ZF': 1,
'SF': 0,
'OF': 0
}
# memory
mem = {}
memRo = []
# variables
cycle = 0
cpustat = 'AOK'
yasBin = ''
binlen = 0
def myHex(x, m = 0):
if x < 0:
x = (~(-x) + 1) & 0xffffffff
if m == 0:
return "%x" % (x)
else:
return "%.*x" % (m, x)
def getInstrName(icode, ifun):
# intrName keys use uppercase hex digits, so normalise the lookup string
s = (myHex(icode) + myHex(ifun)).upper()
if s in intrName:
return intrName[s]
return 'INS'
def getRegName(x):
if x == R_NONE:
return '----'
else:
return regName[x]
def getCCStr():
return 'Z=%d S=%d O=%d' % \
(ccFlags['ZF'], ccFlags['SF'], ccFlags['OF'])
# display messages
def logger(str):
print(str)
# convert a little-endian hex string to a signed int
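# e.g. lEndianInt('78563412') == 0x12345678: the byte order is reversed
# and the digits are then parsed as hexadecimal.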
def lEndianInt(s):
x = int('%c%c%c%c%c%c%c%c' % (s[6], s[7], s[4], s[5], s[2], s[3], s[0], s[1]), 16)
if x > 0x7fffffff:
x = -((~x + 1) & 0xffffffff)
return x
# write to pipeline F reg
def writeF():
global F_predPC
global F_stat
if I_RET in (D_icode,E_icode,M_icode) or (E_icode in (I_MRMOV, I_POP) and E_dstM in (d_srcA, d_srcB)):
return
F_predPC = f_predPC
F_stat = f_stat
# next cycle pipeline F reg content
def nextF():
global f_icode
global f_ifun
global f_valC
global f_valP
global f_rA
global f_rB
global f_predPC
global f_stat
pc = F_predPC
if M_icode == I_J and not M_Cnd:
pc = M_valA
elif W_icode == I_RET:
pc = W_valM
oldPc = pc
imem_Error = False
if pc == binlen:
f_icode = I_HALT
f_ifun = F_NONE
f_rA = R_NONE
f_rB = R_NONE
f_valC = 0x0
f_valP = 0x0
f_stat = 'HLT'
return
elif pc > binlen or pc < 0:
imem_Error = True
else:
imem_icode = int(yasBin[pc], 16)
imem_ifun = int(yasBin[pc+1], 16)
f_icode = I_NOP if imem_Error else imem_icode
f_ifun = F_NONE if imem_Error else imem_ifun
instr_valid = f_icode in (I_NOP, I_HALT, I_CMOV, I_IRMOV, I_RMMOV, I_MRMOV, I_OP, I_J, I_CALL, I_RET, I_PUSH, I_POP)
if instr_valid:
try:
if f_icode in (I_CMOV, I_OP, I_PUSH, I_POP, I_IRMOV, I_RMMOV, I_MRMOV):
# the rA:rB specifier byte follows the icode:ifun byte
f_rA = int(yasBin[pc+2], 16)
f_rB = int(yasBin[pc+3], 16)
if f_rA == 0xf:
f_rA = R_NONE
if f_rB == 0xf:
f_rB = R_NONE
else:
f_rA = R_NONE
f_rB = R_NONE
if f_icode in (I_HALT, I_NOP, I_RET):
pc += 2
elif f_icode in (I_IRMOV, I_MRMOV, I_RMMOV):
pc += 20
elif f_icode in (I_J, I_CALL):
pc += 18
else:
pc += 4
if (f_rA not in regName.keys() and f_rA != R_NONE) or (f_rB not in regName.keys() and f_rB != R_NONE):
imem_Error = True
except:
imem_Error = True
if not imem_Error:
logger('\tFetch: f_pc = 0x%x, imem_instr = %s, f_instr = %s' % \
(oldPc, getInstrName(imem_icode, imem_ifun), getInstrName(f_icode, f_ifun)))
if not instr_valid:
logger('\tFetch: Instruction code 0x%s%s invalid' % (imem_icode, imem_ifun))
f_valP = pc
f_predPC = f_valC if f_icode in (I_J, I_CALL) else f_valP
f_stat = 'AOK'
if imem_Error:
f_stat = 'ADR'
if not instr_valid:
f_stat = 'INS'
if f_icode == I_HALT:
f_stat = 'HLT'
# write to pipeline D reg
def writeD():
global D_stat
global D_icode
global D_ifun
global D_rA
global D_rB
global D_valP
global D_valC
global D_next_bub
if E_icode in (I_MRMOV, I_POP) and E_dstM in (d_srcA, d_srcB):
return
if I_RET in (E_icode, M_icode, W_icode) or D_next_bub:
D_icode = I_NOP
D_ifun = F_NONE
D_rA = R_NONE
D_rB = R_NONE
D_valC = 0x0
D_valP = 0x0
D_stat = 'BUB'
if D_next_bub:
D_next_bub = False
return
if E_icode == I_J and not e_Cnd:
D_next_bub = True
D_stat = f_stat
D_icode = f_icode
D_ifun = f_ifun
D_rA = f_rA
D_rB = f_rB
D_valC = f_valC
D_valP = f_valP
# next cycle pipeline D reg content
def nextD():
global d_srcA
global d_srcB
global d_dstE
global d_dstM
global d_valA
global d_valB
print("Dicode=",D_icode)
d_srcA = R_NONE
if D_icode in (I_CMOV, I_RMMOV, I_OP, I_PUSH):
d_srcA = D_rA
elif D_icode in (I_POP, I_RET):
d_srcA = R_RSP
d_srcB = R_NONE
if D_icode in (I_OP, I_RMMOV, I_MRMOV):
d_srcB = D_rB
elif D_icode in (I_POP, I_PUSH, I_CALL, I_RET):
d_srcB = R_RSP
d_dstE = R_NONE
if D_icode in (I_CMOV, I_IRMOV, I_OP):
d_dstE = D_rB
elif D_icode in (I_POP, I_PUSH, I_CALL, I_RET):
d_dstE = R_RSP
d_dstM = D_rA if D_icode in (I_MRMOV, I_POP) else R_NONE
d_valA = register[d_srcA]
if D_icode in (I_CALL, I_J):
d_valA = D_valP
elif d_srcA == e_dstE:
d_valA = e_valE
elif d_srcA == M_dstM:
d_valA = m_valM
elif d_srcA == M_dstE:
d_valA = M_valE
elif d_srcA == W_dstM:
d_valA = W_valM
elif d_srcA == W_dstE:
d_valA = W_valE
d_valB = register[d_srcB]
if d_srcB == e_dstE:
d_valB = e_valE
elif d_srcB == M_dstM:
d_valB = m_valM
elif d_srcB == M_dstE:
d_valB = M_valE
elif d_srcB == W_dstM:
d_valB = W_valM
elif d_srcB == W_dstE:
d_valB = W_valE
print("\tDecode: dsrcA =",d_srcA," d_srcB = ",d_srcB," d_dstE = ",d_dstE," d_dstM = ",d_dstM," d_valA = ",d_valA," d_valB = ",d_valB)
# write to pipeline E reg
def writeE():
global E_stat
global E_icode
global E_ifun
global E_valC
global E_srcA
global E_valA
global E_srcB
global E_valB
global E_dstE
global E_dstM
if (E_icode == I_J and not e_Cnd) or E_icode in (I_MRMOV, I_POP) and E_dstM in (d_srcA, d_srcB):
E_icode = I_NOP
E_ifun = F_NONE
E_valC = 0x0
E_valA = 0x0
E_valB = 0x0
E_dstE = R_NONE
E_dstM = R_NONE
E_srcA = R_NONE
E_srcB = R_NONE
E_stat = 'BUB'
return
E_stat = D_stat
E_icode = D_icode
E_ifun = D_ifun
E_valC = D_valC
E_valA = d_valA
E_valB = d_valB
E_dstE = d_dstE
E_dstM = d_dstM
E_srcA = d_srcA
E_srcB = d_srcB
# next cycle pipeline E reg content
def nextE():
global ccFlags
global e_Cnd
global e_valE
global e_dstE
global e_setcc
aluA = 0
if E_icode in (I_CMOV, I_OP):
aluA = E_valA
elif E_icode in (I_IRMOV, I_RMMOV, I_MRMOV):
aluA = E_valC
elif E_icode in (I_CALL, I_PUSH):
aluA = -8
elif E_icode in (I_RET, I_POP):
aluA = 8
aluB = E_valB if E_icode in (I_RMMOV, I_MRMOV, I_OP, I_CALL, I_PUSH, I_RET, I_POP) else 0
alufun = E_ifun if E_icode == I_OP else A_ADD
alures = 0
aluchar = '+'
if alufun == A_ADD:
alures = aluB + aluA
aluchar = '+'
elif alufun == A_SUB:
alures = aluB - aluA
aluchar = '-'
elif alufun == A_AND:
alures = aluB & aluA
aluchar = '&'
elif alufun == A_XOR:
alures = aluB ^ aluA
aluchar = '^'
logger('\tExecute: ALU: 0x%s %c 0x%s = 0x%s' % (myHex(aluB), aluchar, myHex(aluA), myHex(alures)))
e_setcc = E_icode == I_OP and m_stat not in ('ADR', 'INS', 'HLT') and W_stat not in ('ADR', 'INS', 'HLT')
if e_setcc:
ccFlags['ZF'] = 1 if alures == 0 else 0
ccFlags['SF'] = 1 if alures < 0 else 0
ccFlags['OF'] = 0
# signed overflow: same-sign operands whose result flips sign
if (E_ifun == A_ADD) and \
((aluB > 0 and aluA > 0 and alures < 0) or \
(aluB < 0 and aluA < 0 and alures > 0)):
ccFlags['OF'] = 1
if (E_ifun == A_SUB) and \
((aluB > 0 and aluA < 0 and alures < 0) or \
(aluB < 0 and aluA > 0 and alures > 0)):
ccFlags['OF'] = 1
logger('\tExecute: New cc = %s' % (getCCStr()))
e_Cnd = False
if E_icode == I_J or E_icode == I_CMOV:
zf = ccFlags['ZF']
sf = ccFlags['SF']
of = ccFlags['OF']
if E_ifun == J_JMP:
e_Cnd = True
elif E_ifun == J_LE and (sf ^ of) | zf == 1:
e_Cnd = True
elif E_ifun == J_L and sf ^ of == 1:
e_Cnd = True
elif E_ifun == J_E and zf == 1:
e_Cnd = True
elif E_ifun == J_NE and zf == 0:
e_Cnd = True
elif E_ifun == J_GE and sf ^ of == 0:
e_Cnd = True
elif E_ifun == J_G and (sf ^ of) | zf == 0:
e_Cnd = True
logger('\tExecute: instr = %s, cc = %s, branch %staken' % (getInstrName(E_icode, E_ifun), 'Z=%d S=%d O=%d' % (zf, sf, of), '' if e_Cnd else 'not '))
e_valE = alures
e_dstE = E_dstE
if E_icode == I_CMOV and not e_Cnd:
e_dstE = R_NONE
# write to pipeline M reg
def writeM():
global M_stat
global M_icode
global M_ifun
global M_Cnd
global M_valE
global M_valA
global M_dstE
global M_dstM
if m_stat in ('ADR', 'INS', 'HLT') or W_stat in ('ADR', 'INS', 'HLT'):
M_stat = 'BUB'
M_icode = I_NOP
M_ifun = F_NONE
M_Cnd = False
M_valE = 0x0
M_valA = 0x0
M_dstE = R_NONE
M_dstM = R_NONE
return
M_stat = E_stat
M_icode = E_icode
M_ifun = E_ifun
M_Cnd = e_Cnd
M_valE = e_valE
M_valA = E_valA
M_dstE = e_dstE
M_dstM = E_dstM
# next cycle pipeline M reg content
def nextM():
global mem
global dmem_error
global m_stat
global m_valM
global m_read
global mem_addr
global memRo
m_valM = 0
mem_addr = 0
dmem_error = False
if M_icode in (I_RMMOV, I_PUSH, I_CALL, I_MRMOV):
mem_addr = M_valE
elif M_icode in (I_POP, I_RET):
mem_addr = M_valA
if M_icode in (I_MRMOV, I_POP, I_RET):
try:
if mem_addr not in mem:
# TODO: check yasbin index
mem[mem_addr] = lEndianInt(yasBin[mem_addr * 2:mem_addr * 2 + 16])
memRo.append(mem_addr)
m_valM = mem[mem_addr]
m_read = True
logger('\tMemory: Read 0x%s from 0x%x' % (myHex(m_valM), mem_addr))
except:
dmem_error = True
logger('\tMemory: Invalid address 0x%s' % (myHex(mem_addr)))
if M_icode in (I_RMMOV, I_PUSH, I_CALL):
try:
if mem_addr in memRo or mem_addr < 0:
raise Exception
mem[mem_addr] = M_valA
logger('\tWrote 0x%s to address 0x%x' % (myHex(M_valA), mem_addr))
except:
dmem_error = True
logger('\tCouldn\'t write to address 0x%s' % (myHex(mem_addr)))
m_stat = 'ADR' if dmem_error else M_stat
# write to pipeline W reg
def writeW():
global W_stat
global W_icode
global W_ifun
global W_dstE
global W_valE
global W_dstM
global W_valM
if W_stat in ('ADR', 'INS', 'HLT'):
return
W_stat = m_stat
W_icode = M_icode
W_ifun = M_ifun
W_valE = M_valE
W_valM = m_valM
W_dstE = M_dstE
W_dstM = M_dstM
# next cycle pipeline W reg content
def nextW():
global register
global cpustat
global cycle
if W_dstE != R_NONE:
register[W_dstE] = W_valE
logger('\tWriteback: Wrote 0x%s to register %s' % (myHex(W_valE), regName[W_dstE]))
if W_dstM != R_NONE:
register[W_dstM] = W_valM
logger('\tWriteback: Wrote 0x%s to register %s' % (myHex(W_valM), regName[W_dstM]))
cpustat = 'AOK' if W_stat == 'BUB' else W_stat
def main(file):
maxCycles = 65535
try:
fin = open(os.path.splitext(file)[0] + '.ybo', 'rb')
except:
print('Error: cannot open binary: %s' % file)
sys.exit(1)
global yasBin
global binlen
try:
yasBin = fin.read().hex()
except:
print('Error: cannot identify binary: %s' % (file))
sys.exit(1)
try:
fin.close()
except IOError:
pass
binlen = len(yasBin) // 2
logger('%d bytes of code read' % (binlen))
global cycle
global cpustat
try:
while True:
print("Cycle:%x" % cycle)
writeW()
nextW()
writeM()
nextM()
writeE()
nextE()
writeD()
nextD()
writeF()
nextF()
if maxCycles != 0 and cycle > maxCycles:
cpustat = 'HLT'
if cpustat != 'AOK' and cpustat != 'BUB':
break
cycle += 1
except:
print('Error: bad input binary file')
sys.exit(1)
print("Done")
|
[
"mayankmetha@gmail.com"
] |
mayankmetha@gmail.com
|
48b7817783d57776b9520dab09884fde23486320
|
7fe5bad373afcb9f1031d883057842dbcb83b4cf
|
/project4_navie_bayes_classification_logistic_regression/ml_p4.py
|
d279f1fbeb3bc50a9e6837b8eadb3b10f98d9bd6
|
[
"MIT"
] |
permissive
|
Leonlee190/ML-Practice-Projects
|
f24c18394ce2d038c43029ce7aff0a4f093c6eaa
|
375a442dce1bcfa049fc8139fe7c8dc812229037
|
refs/heads/master
| 2020-12-05T03:42:06.461351
| 2020-01-06T01:30:36
| 2020-01-06T01:30:36
| 232,000,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,134
|
py
|
import numpy as np
import pandas as pd
import math
# mean calculation
def mean_calc(arr, class_value):
# selecting the class valued 0 or 1 data
mean_arr = np.array(arr[np.where(arr[:, 57] == class_value)])
mean_arr = np.array(mean_arr[:, :57])
mean_size = len(mean_arr)
# summing then finding mean
calc_mean = np.array(mean_arr.sum(axis=0))
calc_mean = calc_mean/mean_size
return calc_mean
# standard deviation calculation
def stand_devi_calc(arr, mean_values, class_value):
# selecting the class valued 0 or 1 data
stand_arr = np.array(arr[np.where(arr[:, 57] == class_value)])
stand_arr = np.array(stand_arr[:, :57])
stand_size = len(stand_arr)
# subtract mean value of that feature then calculating
calc_stand = stand_arr - mean_values[None, :]
calc_stand = np.array(np.power(calc_stand, 2))
calc_stand = np.array(calc_stand.sum(axis=0))
calc_stand = calc_stand/stand_size
calc_stand = np.array(np.sqrt(calc_stand))
# adding epsilon
calc_stand = np.array(calc_stand + 0.0001)
return calc_stand
# gaussian naive bayes algorithm
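# Per-feature log-likelihood under the class-conditional Gaussian:
# log P(x | class) = log( 1 / (sqrt(2*pi) * sigma) * exp(-(x - mu)^2 / (2 * sigma^2)) )
# which is exactly what the arithmetic below computes before summing over features.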
def gauss_calc(x, mean_value, stand_value):
first = 1 / (math.sqrt(2*math.pi)*stand_value)
second = 0 - (math.pow((x - mean_value), 2) / (2 * math.pow(stand_value, 2)))
third = math.exp(second)
final = first * third
# if the density underflows to 0, return a large negative number so we never take log(0)
if final == 0:
return -999999
else:
return math.log(final)
# Reading data from the csv
data_file_name = "spambase.csv"
spam_data_set = np.array(pd.read_csv(data_file_name, header=None), dtype=float)
# Randomizing and splitting into test and train and target
np.random.shuffle(spam_data_set)
middle_point = int(len(spam_data_set)/2)
train_set = np.array(spam_data_set[0:middle_point, :])
test_set = np.array(spam_data_set[middle_point:, :])
train_data = np.array(train_set[:, :57])
test_data = np.array(test_set[:, :57])
train_target = np.array(train_set[:, 57])
test_target = np.array(test_set[:, 57])
# Getting the class probability
train_pos = 0
for i in range(len(train_target)):
if train_target[i] == 1:
train_pos += 1
train_pos = train_pos / len(train_target)
train_neg = 1 - train_pos
# retrieving mean value of each feature for both positive and negative
train_mean_pos = mean_calc(train_set, 1)
train_mean_neg = mean_calc(train_set, 0)
# retrieving standard deviation value of each feature for both positive and negative
train_stand_pos = stand_devi_calc(train_set, train_mean_pos, 1)
train_stand_neg = stand_devi_calc(train_set, train_mean_neg, 0)
# result after argmax
test_result = np.zeros(len(test_set))
# go through all the test set
for i in range(len(test_set)):
# calculate all the feature possibilities
test_pos = map(gauss_calc, test_data[i, :], train_mean_pos, train_stand_pos)
test_neg = map(gauss_calc, test_data[i, :], train_mean_neg, train_stand_neg)
pos_arr = np.fromiter(test_pos, dtype=float)
neg_arr = np.fromiter(test_neg, dtype=float)
# get the sum of all possibilities
pos = math.log(train_pos) + pos_arr.sum(dtype=float)
neg = math.log(train_neg) + neg_arr.sum(dtype=float)
# if positive is bigger than 1 else 0
if pos > neg:
test_result[i] = 1
else:
test_result[i] = 0
# confusion matrix
confusion = np.zeros((2, 2))
# Getting the accuracy
correct = 0
for i in range(len(test_target)):
actual = int(test_target[i])
predict = int(test_result[i])
confusion[actual, predict] += 1
if test_result[i] == test_target[i]:
correct += 1
recall = confusion[1, 1] / (confusion[1, 1] + confusion[1, 0])
precision = confusion[1, 1] / (confusion[1, 1] + confusion[0, 1])
accuracy = correct/len(test_target) * 100
# Print out accuracy, recall, precision, and confusion matrix
print("Accuracy: ", accuracy)
print("Recall: ", recall)
print("Precision: ", precision)
print("Confusion Matrix: ")
print(confusion)
|
[
"noreply@github.com"
] |
Leonlee190.noreply@github.com
|
1299c87a549de5082526fe4ff863ccec37328b32
|
a6f838a28e8401243c702de21c80127a37599cc9
|
/flask_bouncer/bouncer/forms.py
|
8e14175d6f841675590bfde8e61959537c50d38d
|
[
"MIT"
] |
permissive
|
miguelgrinberg/Flask-Bouncer
|
a01ce6321a5def491ce2942e2df91ea76bbbef78
|
fac0b761a40dc3a9ba7d5b596b158a149f4ca5dd
|
refs/heads/master
| 2023-08-19T04:41:18.667096
| 2013-08-13T07:29:14
| 2013-09-15T18:08:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,456
|
py
|
from flask import current_app
from flask.ext.wtf import Form
from wtforms import TextField, PasswordField, BooleanField, HiddenField
from wtforms.validators import ValidationError, Required, Email, EqualTo
class BaseRegisterForm(Form):
email = TextField('Email', validators = [ Required(), Email() ])
username = TextField('Username', validators = [ Required() ])
def validate_email(form, field):
if current_app.bouncer.user_class.find(email = field.data):
raise ValidationError('Email already registered')
def validate_username(form, field):
user_class = current_app.bouncer.user_class
error = user_class.validate_username(field.data)
if error:
raise ValidationError(error)
if user_class.find(username = field.data):
raise ValidationError('Username already in use')
class RegisterForm(BaseRegisterForm):
password = PasswordField('Password', validators = [
Required(), EqualTo('password2', message = 'Passwords must match') ])
password2 = PasswordField('Confirm password', validators = [ Required() ])
class LoginForm(Form):
email = TextField('Email', validators = [ Email() ])
password = PasswordField('Password', validators = [ Required() ])
remember_me = BooleanField('Keep me logged in')
class ResetRequestForm(Form):
email = TextField('Email', validators = [ Email() ])
class ResetForm(Form):
token = HiddenField('Token')
email = TextField('Email', validators = [ Email() ])
password = PasswordField('Password', validators = [
Required(), EqualTo('password2', message = 'Passwords must match') ])
password2 = PasswordField('Confirm password', validators = [ Required() ])
class ChangeEmailForm(Form):
email = TextField('New Email', validators = [ Email() ])
password = PasswordField('Password', validators = [ Required() ])
class ChangePasswordForm(Form):
old_password = PasswordField('Old password', validators = [ Required() ])
password = PasswordField('New password', validators = [
Required(), EqualTo('password2', message = 'Passwords must match') ])
password2 = PasswordField('Confirm new password', validators = [ Required() ])
class RefreshForm(Form):
email = TextField('Email', validators = [ Email() ])
password = PasswordField('Password', validators = [ Required() ])
|
[
"miguelgrinberg50@gmail.com"
] |
miguelgrinberg50@gmail.com
|
ab15497dd6bca3ca4ce369d4eeca2a26249c4d26
|
94da738a5ae494b4434517bb80a650d48866fde7
|
/load_automata.py
|
4310936a2e569e1617961b7e7b2c46390411d0f4
|
[] |
no_license
|
Par-zy/PushdownAutomata
|
f58d9a2dc4b3f4c79e80c702663f201b19e71d27
|
fc16cb47708b720d1cda4e1941f96716f6aea223
|
refs/heads/main
| 2023-04-25T23:12:22.993650
| 2021-06-10T16:51:27
| 2021-06-10T16:51:27
| 375,527,485
| 0
| 0
| null | 2021-06-10T00:48:38
| 2021-06-10T00:48:37
| null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
# Regex library.
import re
# Reads the pushdown automaton definition from a file.
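# A hypothetical first line of file.txt that this parser accepts
# (sets in braces, components separated by ", "):
# {a, b}, {q0, q1, q2, q3}, D, q0, {q3}, {A, B}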
def load_file():
# Open the file to read the automaton components.
file = open('file.txt', 'r').readline()
# Regex to strip extraneous symbols.
regex = re.compile(r'[^a-zA-Z0-9]+')
# Loop that groups the brace-delimited sets into tuples.
arr = []
sub = []  # accumulator for the current brace-delimited group
for i in file.split(','):
if '{' in i and not '}' in i:
sub = []
sub.append(regex.sub('', i))
elif '}' in i and not '{' in i:
sub.append(regex.sub('', i))
sub = tuple(sub)
arr.append(sub)
sub = []
elif len(sub) != 0:
sub.append(regex.sub('', i))
else:
arr.append(regex.sub('', i))
# File error handling.
if file.count(',') != file.count(' '):
raise Exception("Error splitting the components!")
elif len(arr) != 6:
raise Exception("Error in the automaton components!")
elif 'D' not in arr:
raise Exception("Error in the production-rule set letter!")
else:
print(tuple(arr))
# Open the file again to read the transition functions.
file = open('file.txt', 'r').read().splitlines()
# Loop that splits the transition functions into tuples.
arr = []
for i in file[1:]:
arr.append(tuple([c.strip() for c in i.split(',')]))
print(tuple(arr))
print(load_file())
# Output => [('a', 'b'), ('q0', 'q1', 'q2', 'q3'), 'D', 'q0', ('q3'), ('A', 'B')]
|
[
"monorease@gmail.com"
] |
monorease@gmail.com
|
52a1c7694389cfee52850692433b0993a76ad696
|
6bb9a36e35670d2ba4ee5dad1027f8a234bd8ec9
|
/extract.py
|
ded932aaf0d820c57d725868527d737f999718b6
|
[] |
no_license
|
NQNStudios/powerschool
|
1a1139427c0a23f4c65e2d80d56e382efb5e8327
|
7097ef39e9264aa5d0d0adf5eb3d2be70e097cdd
|
refs/heads/master
| 2021-05-29T15:24:38.024715
| 2015-01-14T23:16:06
| 2015-01-14T23:16:06
| 29,164,125
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,783
|
py
|
#! /usr/bin/env python3
from splinter import Browser
from bs4 import BeautifulSoup
import re
search_term = 2
def visit_home(browser):
browser.visit('https://powerschool.slcschools.org/guardian/home.html')
def log_in(browser):
browser.fill('account', 'NN501621')
browser.fill('pw', 'roy501621111')
browser.find_by_id('btn-enter').click()
def find_content_table(soup):
"""Finds the main content table for class info from the homepage"""
container = soup.body.find(id='container')
content_main = container.find(id='content-main')
quick_lookup = container.find(id='quickLookup')
def has_class(tag):
return tag.has_attr('class')
grid = quick_lookup.find(has_class)
table_body = grid.tbody
return table_body
def find_class_rows(content_table):
"""Finds the class rows from the homepage main content table"""
rows = content_table.find_all('tr')
return rows[3:-1] # retrieve only rows containing class info
def find_grade_columns(class_row):
"""Finds the grade columns from a class info table row"""
columns = class_row.find_all('td')
return columns[12:-2] # return only columns containing grade links
def find_term_grades(home_html, term_num):
"""Finds grade boxes from the desired school term"""
soup = BeautifulSoup(home_html)
table = find_content_table(soup)
class_rows = find_class_rows(table)
term_grades = [ ]
for class_row in class_rows:
grade_columns = find_grade_columns(class_row)
term_grades.append(grade_columns[term_num - 1])
return term_grades
def is_grade_A(term_grade):
"""Determines if a term grade is an A"""
A_regex = '^A[0-9]*$' # the letter A, optionally followed by digits (e.g. "A" or "A95")
return re.search(A_regex, term_grade.text)
def is_grade_null(term_grade):
"""Determines if a term grade is null (--)"""
null_regex = '^--$'
return re.search(null_regex, term_grade.text)
def class_details_html(browser, term_grade):
"""Returns the HTML content of the class details page for the given term grade"""
visit_home(browser)
browser.find_link_by_href(term_grade.a['href']).click()
return browser.html
def important_class_details(term_num):
"""Returns a list containing the HTML content of every class details page
for class that does not have an "A" grade"""
important_details = [ ]
with Browser() as browser:
visit_home(browser)
log_in(browser)
home_html = browser.html
for term_grade in find_term_grades(home_html, term_num):
if not (is_grade_A(term_grade) or is_grade_null(term_grade)):
important_details.append(class_details_html(browser, term_grade))
return important_details
|
[
"nelson.nleroy@gmail.com"
] |
nelson.nleroy@gmail.com
|
cb264d78848f774d6e76c539461a9e4ed37b3236
|
95e4b29d364fb5f1afcb1666edff1fc09b71ea67
|
/session2/07_lists.py
|
9c40b4caf4ff27555db66173fa012dbaf9653e08
|
[] |
no_license
|
ynonp/basic-python-non-devs
|
88b64ac8f5c6ee2d22ef94bd879a09bc93449ab0
|
c28941be79671d1e197f006f0732d890229bc313
|
refs/heads/main
| 2023-02-23T23:02:13.017742
| 2021-01-13T11:19:01
| 2021-01-13T11:19:01
| 323,832,770
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
fruits = ['apple', 'banana', 'orange']
print(fruits)
print(len(fruits))
fruits.append("watermelon")
fruits += ['avocado', 'plum']
print(fruits)
if 'avocado' in fruits:
print("Yay it's winter")
for fruit in fruits:
print(f"I like to eat {fruit}")
for index, fruit in enumerate(fruits):
print(f"{index} likes to eat {fruit}")
print(fruits[0])
|
[
"ynonperek@gmail.com"
] |
ynonperek@gmail.com
|
8119ce5c1f61f3b8b2ae67403b159e21aca8aa31
|
5a5cc5bc76ca0dff51ad49eceaa0924db69b194b
|
/bridgebot/test/test_determine_trick_winner.py
|
78e4898f6ecbf1a44aec93fe11d95946e9122463
|
[] |
no_license
|
evanakm/bridge-bot
|
bbbe8b47359c8f44cd60800f620d0c329d7cd381
|
2b484585efbe35df10214315d50d8880982481e7
|
refs/heads/master
| 2021-07-12T00:38:21.720445
| 2019-11-25T04:31:45
| 2019-11-25T04:31:45
| 194,469,648
| 2
| 0
| null | 2019-11-25T04:31:46
| 2019-06-30T03:11:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,503
|
py
|
import pytest
import sys
import os
# sys.path.insert(0, os.path.abspath('../bridgebot'))
sys.path.insert(0, os.path.abspath('..'))
from game import cardplay
from game.enums import Strains, Suits, Ranks
from game.bridgehand import Card
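# Each case below lists the four cards in play order, the trump strain, and
# the expected index of the winning card within that order.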
@pytest.mark.parametrize('played_cards, trump_strain, expected', [
(
[
Card(Suits.SPADES, Ranks.FOUR),
Card(Suits.HEARTS, Ranks.ACE),
Card(Suits.CLUBS, Ranks.ACE),
Card(Suits.DIAMONDS, Ranks.ACE)
],
Strains.SPADES,
0
),
(
[
Card(Suits.HEARTS, Ranks.ACE),
Card(Suits.SPADES, Ranks.FOUR),
Card(Suits.CLUBS, Ranks.ACE),
Card(Suits.DIAMONDS, Ranks.ACE)
],
Strains.SPADES,
1
),
(
[
Card(Suits.HEARTS, Ranks.ACE),
Card(Suits.CLUBS, Ranks.ACE),
Card(Suits.DIAMONDS, Ranks.ACE),
Card(Suits.SPADES, Ranks.FOUR)
],
Strains.SPADES,
3
),
(
[
Card(Suits.SPADES, Ranks.FOUR),
Card(Suits.HEARTS, Ranks.FIVE),
Card(Suits.CLUBS, Ranks.ACE),
Card(Suits.DIAMONDS, Ranks.ACE)
],
Strains.HEARTS,
1
),
(
[
Card(Suits.SPADES, Ranks.FOUR),
Card(Suits.HEARTS, Ranks.FIVE),
Card(Suits.CLUBS, Ranks.ACE),
Card(Suits.DIAMONDS, Ranks.ACE)
],
Strains.DIAMONDS,
3
),
(
[
Card(Suits.SPADES, Ranks.FOUR),
Card(Suits.DIAMONDS, Ranks.FIVE),
Card(Suits.CLUBS, Ranks.ACE),
Card(Suits.DIAMONDS, Ranks.ACE)
],
Strains.DIAMONDS,
3
),
(
[
Card(Suits.DIAMONDS, Ranks.ACE),
Card(Suits.SPADES, Ranks.FOUR),
Card(Suits.DIAMONDS, Ranks.FIVE),
Card(Suits.CLUBS, Ranks.ACE),
],
Strains.DIAMONDS,
0
),
(
[
Card(Suits.SPADES, Ranks.FOUR),
Card(Suits.DIAMONDS, Ranks.ACE),
Card(Suits.DIAMONDS, Ranks.FIVE),
Card(Suits.CLUBS, Ranks.ACE),
],
Strains.DIAMONDS,
1
),
(
[
Card(Suits.SPADES, Ranks.FOUR),
Card(Suits.DIAMONDS, Ranks.FIVE),
Card(Suits.DIAMONDS, Ranks.ACE),
Card(Suits.CLUBS, Ranks.ACE),
],
Strains.DIAMONDS,
2
),
(
[
Card(Suits.CLUBS, Ranks.ACE),
Card(Suits.SPADES, Ranks.FOUR),
Card(Suits.DIAMONDS, Ranks.FIVE),
Card(Suits.DIAMONDS, Ranks.ACE),
],
Strains.DIAMONDS,
3
),
(
[
Card(Suits.CLUBS, Ranks.ACE),
Card(Suits.SPADES, Ranks.FOUR),
Card(Suits.DIAMONDS, Ranks.ACE),
Card(Suits.DIAMONDS, Ranks.FIVE),
],
Strains.DIAMONDS,
2
)
])
def test_determine_trick_winner(played_cards, trump_strain, expected):
assert cardplay.determine_trick_winner(played_cards, trump_strain) == expected
|
[
"brothers.mason@gmail.com"
] |
brothers.mason@gmail.com
|
dbc4f5c785908e133aceade47c97854aadabeeca
|
a70c29d384933040d318a1baf952965621b68490
|
/examples/multi_sparse_processing_example.py
|
744c5b1bbd6512101fb39c696cf6034807d653f9
|
[
"MIT"
] |
permissive
|
romadm/LibRecommender
|
f4980dcd117997284f96f7b042cf3fbbc8c0f99e
|
46bb892453e88d8411e671bd72e7a8c6e8ef1575
|
refs/heads/master
| 2023-08-27T11:51:21.842980
| 2021-11-07T01:42:35
| 2021-11-07T01:42:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,371
|
py
|
import time
import numpy as np
import pandas as pd
from libreco.data import split_by_ratio_chrono, split_multi_value, DatasetFeat
from libreco.algorithms import DeepFM
# remove unnecessary tensorflow logging
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["KMP_WARNINGS"] = "FALSE"
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
pd.set_option('display.max_columns', 20)
if __name__ == "__main__":
# choose data named "sample_movielens_genre.csv"
data = pd.read_csv("sample_data/sample_movielens_genre.csv", header=0)
print("=" * 30, "original data", "=" * 30)
print(data.head(), "\n")
sparse_col = ["sex", "occupation"]
dense_col = ["age"]
multi_value_col = ["genre"] # specify multi-value feature
user_col = ["sex", "age", "occupation"]
item_col = ["genre"]
# The "max_len" parameter means max category a sample can have.
# If it is set to None, will use max category length a sample can
# have across the whole data.
# Note if it is not None, it should also be a list,
# because there are possibly many multi_value features.
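# For instance, with max_len=[3] and pad_val="missing" (as used below),
# a sample whose genre string is "Action|Comedy" would be split into
# ["Action", "Comedy", "missing"] -- a hypothetical illustration of the
# padding behaviour, not output captured from the library.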
multi_sparse_col, multi_user_col, multi_item_col = split_multi_value(
data, multi_value_col, sep="|", max_len=[3], pad_val="missing",
user_col=user_col, item_col=item_col
)
print("multi_sparse_col: ", multi_sparse_col)
print("multi_user_col: ", multi_user_col)
print("multi_item_col: ", multi_item_col)
# the multi-value feature may belong to user or item, so we add them together.
user_col += multi_user_col
item_col += multi_item_col
# we do not need the original genre feature any more
item_col.remove("genre")
print("final user col: ", user_col)
print("final item col: ", item_col, "\n")
print("="*30, "transformed data", "=" * 30)
print(data.head(), "\n")
train_data, eval_data = split_by_ratio_chrono(data, test_size=0.2)
train_data, data_info = DatasetFeat.build_trainset(
train_data=train_data,
user_col=user_col,
item_col=item_col,
sparse_col=sparse_col,
dense_col=dense_col,
multi_sparse_col=multi_sparse_col,
pad_val=["missing"] # specify padding value
)
eval_data = DatasetFeat.build_testset(eval_data)
print(data_info)
# do negative sampling, assume the data only contains positive feedback
train_data.build_negative_samples(data_info, item_gen_mode="random",
num_neg=1, seed=2020)
eval_data.build_negative_samples(data_info, item_gen_mode="random",
num_neg=1, seed=2222)
deepfm = DeepFM("ranking", data_info, embed_size=16, n_epochs=2,
lr=1e-4, lr_decay=False, reg=None, batch_size=2048,
num_neg=1, use_bn=False, dropout_rate=None,
hidden_units="128,64,32", tf_sess_config=None,
multi_sparse_combiner="normal") # specify multi_sparse combiner
deepfm.fit(train_data, verbose=2, shuffle=True, eval_data=eval_data,
metrics=["loss", "balanced_accuracy", "roc_auc", "pr_auc",
"precision", "recall", "map", "ndcg"])
print("prediction: ", deepfm.predict(user=1, item=2333))
print("recommendation: ", deepfm.recommend_user(user=1, n_rec=7))
|
[
"wdmjjxg@163.com"
] |
wdmjjxg@163.com
|
dceff655267947d8d771a62427ae550a61ec0316
|
942ee5e8d54e8ebe9c5c841fbfdd1da652946944
|
/1001-1500/1008.Construct Binary Search Tree from Preorder Traversal.py
|
a127ff231de83bb7e0efe33832eb0fb3e3596dc1
|
[] |
no_license
|
kaiwensun/leetcode
|
0129c174457f32887fbca078fb448adce46dd89d
|
6b607f4aae3a4603e61f2e2b7480fdfba1d9b947
|
refs/heads/master
| 2023-08-31T07:30:50.459062
| 2023-08-27T07:59:16
| 2023-08-27T07:59:16
| 57,526,914
| 69
| 9
| null | 2023-08-20T06:34:41
| 2016-05-01T05:37:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def bstFromPreorder(self, preorder):
"""
:type preorder: List[int]
:rtype: TreeNode
"""
dummy = TreeNode(float('inf'))
path = [dummy]
pointer = dummy
for val in preorder:
if val < pointer.val:
pointer.left = TreeNode(val)
path.append(pointer)
pointer = pointer.left
else:
while val >= path[-1].val:
pointer = path.pop()
if val < pointer.val:
pointer.left = TreeNode(val)
path.append(pointer)
pointer = pointer.left
else:
pointer.right = TreeNode(val)
pointer = pointer.right
return dummy.left
|
[
"noreply@github.com"
] |
kaiwensun.noreply@github.com
|
361c574679a174d0f9e08fbf59b14c55bf4dbdf8
|
3c1225978cddb35d92969f8b5c2cb12d238d345a
|
/examples/transfer_function_with_constant_permittivity.py
|
001faf79d5388fae4b41673e5a4032b1cdb65da4
|
[
"MIT"
] |
permissive
|
janbrumm/layermodel_lib
|
4f08e0b1e81169ee9cc707b5d7fac9457cc74bc2
|
0d5e0c9ac77d302910823ebc757a4ec99541f3ff
|
refs/heads/master
| 2020-06-03T22:32:14.505315
| 2020-02-18T16:06:46
| 2020-02-18T16:06:46
| 191,758,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,544
|
py
|
# This file is part of LayerModel_lib
#
# A tool to compute the transmission behaviour of plane electromagnetic waves
# through human tissue.
#
# Copyright (C) 2018 Jan-Christoph Brumm
#
# Licensed under MIT license.
#
"""
This examples shows how to use the LayerModel class to calculate the propagation behaviour
of a plane wave through an arbitrary multi-layered dielectric with constant permittivity and conductivity.
"""
import numpy as np
from typing import Union
from LayerModel_lib import LayerModel, DielectricProperties
class SimpleDielectric(DielectricProperties):
def complex_permittivity(self, dielectric_index: Union[np.ndarray, float],
f: Union[np.ndarray, float]) -> np.ndarray:
# Here the calculation of epsilon takes place
epsilon = self.epsilon0 * self.values['eps_r'][dielectric_index]
return epsilon
# create an object for the dielectric properties
d = SimpleDielectric()
d.add_new_dielectric('Air', new_values={'eps_r': 1}) # index 0 = this should always be Air
d.add_new_dielectric('Solid1', new_values={'eps_r': 3}) # index 1
d.add_new_dielectric('Solid2', new_values={'eps_r': 5+2j}) # index 2
# create a layer model using these dielectric properties
lm = LayerModel.create_from_dict({'Air': None, 'TX': None, 'Solid1': 10, 'Solid2': 20, 'RX': None},
tissue_properties=d)
lm.print_info()
# calculate the transfer function at 1e9 Hz
(transfer_function, frequency) = lm.S21(f_start=1e9, f_end=1e9, n_samples=1)
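# With n_samples=1, transfer_function and frequency should each hold a single
# entry, so transfer_function[0] is the complex S21 value at frequency[0] == 1e9 Hz
# (assuming S21 returns the (values, frequencies) pair unpacked above).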
|
[
"jan.brumm@tuhh.de"
] |
jan.brumm@tuhh.de
|
209949a3f93379832877549ce65be315beff6f67
|
404ad77945e7ff8a57dac8753b00cb7187bd7f4e
|
/104/5.py
|
76e31b10029475ca8981323951a5d8097b835680
|
[] |
no_license
|
dennisliuu/Coding-365
|
5d773493fbf69bce03de20e4a03c5fdf108612f6
|
8e1cab65837ebe2cb36fa0e4b74fb07d0ee6b081
|
refs/heads/master
| 2020-03-24T22:33:58.299791
| 2018-08-07T05:06:21
| 2018-08-07T05:06:21
| 143,091,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
ans = []
class university(object):
def __init__(self, name, props):
self.name = name
self.props = props
unis = []
n = int(input())
if n > 10:
exit()
for i in range(n):
inp = input()
inp = inp.split(' ')
unis.append(university(inp[0], inp[1:]))
m = int(input())
if m > 10:
exit()
for i in range(m):
inp = input().replace(" ", "").split('+')
for j in inp:
for k in unis:
if len(j) == 2 and j in k.props:#AABB
ans.append(k.name)
else:
sub_prop = [j[l:l+2] for l in range(0,len(j),2)] #sub_prop=[AA,BB]
if set(sub_prop).issubset(k.props):  # True when every required property is offered
ans.append(k.name)
ans.append('\n')
final_ans = []
for i in ans:
if i != '\n':
final_ans.append(i)
else:
final_ans = sorted(list(set(final_ans)))
print(*final_ans)
final_ans = []  # clear the answers accumulated before the '\n' marker
|
[
"dennisliuu@gmail.com"
] |
dennisliuu@gmail.com
|
074d2cf258f2969f7ed6c15dfab45f394e951518
|
3e2607dc50fe20674f5249f58f3fbc5066e7b506
|
/0x10-python-network_0/6-main.py
|
e324d588f1079621a41aa98200a74b781faf56f5
|
[] |
no_license
|
francisco0522/holbertonschool-higher_level_programming
|
685b300e93afba51699d065e73419e95fabd37d2
|
789d737a738a150b32f3b2dc9d1b106ce423eb60
|
refs/heads/master
| 2020-09-29T04:11:27.813003
| 2020-07-20T18:52:03
| 2020-07-20T18:52:03
| 226,946,158
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
#!/usr/bin/python3
""" Test function find_peak """
find_peak = __import__('6-peak').find_peak
print(find_peak([1, 2, 4, 6, 3]))
print(find_peak([4, 2, 1, 2, 3, 1]))
print(find_peak([2, 2, 2]))
print(find_peak([]))
print(find_peak([-2, -4, 2, 1]))
print(find_peak([4, 2, 1, 2, 3, 1]))
|
[
"pacho0522@gmail.com"
] |
pacho0522@gmail.com
|
052a777155acdff9389ecf23affab4296d2595e6
|
911db8b49bbb6de7b1ade39065d49f9c6936ee41
|
/tests/visitors/test_html.py
|
9062410941b8dd6b4660a5a88401461a64053e00
|
[
"MIT"
] |
permissive
|
mzulqarnain1/django-codemod
|
3e7314ccde49bf61ab0a06d4eddc1e4613f88d0d
|
d0ab541321255cccf0753ee292558272d60350ae
|
refs/heads/main
| 2023-08-04T13:03:07.782903
| 2021-09-16T14:15:05
| 2021-09-16T14:15:05
| 407,186,364
| 0
| 0
|
MIT
| 2021-09-16T13:59:40
| 2021-09-16T13:59:39
| null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
from django_codemod.visitors import UnescapeEntitiesTransformer
from tests.visitors.base import BaseVisitorTest
class TestUnescapeEntitiesTransformer(BaseVisitorTest):
transformer = UnescapeEntitiesTransformer
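# django.utils.text.unescape_entities was deprecated and later removed from
# Django, with html.unescape from the standard library as the replacement --
# which is exactly the rewrite this codemod (and the test below) exercises.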
def test_simple_substitution(self) -> None:
before = """
from django.utils.text import unescape_entities
result = unescape_entities(content)
"""
after = """
from html import unescape
result = unescape(content)
"""
self.assertCodemod(before, after)
|
[
"alla.brunoo@gmail.com"
] |
alla.brunoo@gmail.com
|
266a0e3ebc350fa28dd35cb7001511ce7d46448e
|
94bfd592bcbe134657ce8b2ed9b3fba73b9cc12d
|
/mango/contextbuilder.py
|
e39ed0a3cd594ea4e773079a2aedd4fb6254d16c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
arniwesth/mango-explorer
|
fc53109023f5d8cf2b80faa3341b671851771ae4
|
8257902003fa2f72cc89a20d1cebaa0c8cc2ad68
|
refs/heads/main
| 2023-08-25T15:19:17.012249
| 2021-10-08T17:15:40
| 2021-10-08T17:15:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,308
|
py
|
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import argparse
import datetime
import copy
import logging
import os
import typing
from decimal import Decimal
from solana.publickey import PublicKey
from .client import BetterClient
from .constants import MangoConstants
from .context import Context
from .idsjsonmarketlookup import IdsJsonMarketLookup
from .idsjsontokenlookup import IdsJsonTokenLookup
from .marketlookup import CompoundMarketLookup, MarketLookup
from .serummarketlookup import SerumMarketLookup
from .spltokenlookup import SplTokenLookup
from .tokenlookup import TokenLookup, CompoundTokenLookup
# # 🥭 ContextBuilder
#
# ## Environment Variables
#
# It's possible to override the values in the `Context` variables provided. This can be easier than creating
# the `Context` in code or introducing dependencies and configuration.
#
# The following environment variables are read:
# * NAME
# * CLUSTER
# * CLUSTER_URL
# * GROUP_NAME
# * GROUP_ADDRESS
# * MANGO_PROGRAM_ADDRESS
# * SERUM_PROGRAM_ADDRESS
# # 🥭 ContextBuilder class
#
# A `ContextBuilder` class to allow building `Context` objects without introducing circular dependencies.
#
class ContextBuilder:
# Configuring a `Context` is a common operation for command-line programs and can involve a
# lot of duplicate code.
#
# This function centralises some of it to ensure consistency and readability.
#
@staticmethod
def add_command_line_parameters(parser: argparse.ArgumentParser) -> None:
parser.add_argument("--name", type=str, default="Mango Explorer",
help="Name of the program (used in reports and alerts)")
parser.add_argument("--cluster-name", type=str, default=None, help="Solana RPC cluster name")
parser.add_argument("--cluster-url", type=str, default=None, help="Solana RPC cluster URL")
parser.add_argument("--group-name", type=str, default=None, help="Mango group name")
parser.add_argument("--group-address", type=PublicKey, default=None, help="Mango group address")
parser.add_argument("--mango-program-address", type=PublicKey, default=None, help="Mango program address")
parser.add_argument("--serum-program-address", type=PublicKey, default=None, help="Serum program address")
parser.add_argument("--skip-preflight", default=False, action="store_true", help="Skip pre-flight checks")
parser.add_argument("--commitment", type=str, default=None,
help="Commitment to use when sending transactions (can be 'finalized', 'confirmed' or 'processed')")
parser.add_argument("--blockhash-commitment", type=str, default=None,
help="Commitment to use specifically when fetching recent blockhash (can be 'finalized', 'confirmed' or 'processed')")
parser.add_argument("--encoding", type=str, default=None,
help="Encoding to request when receiving data from Solana (options are 'base58' (slow), 'base64', 'base64+zstd', or 'jsonParsed')")
parser.add_argument("--blockhash-cache-duration", type=int, help="How long to cache 'recent' blockhashes")
parser.add_argument("--gma-chunk-size", type=Decimal, default=None,
help="Maximum number of addresses to send in a single call to getMultipleAccounts()")
parser.add_argument("--gma-chunk-pause", type=Decimal, default=None,
help="number of seconds to pause between successive getMultipleAccounts() calls to avoid rate limiting")
parser.add_argument("--token-data-file", type=str, default=SplTokenLookup.DefaultDataFilepath,
help="data file that contains token symbols, names, mints and decimals (format is same as https://raw.githubusercontent.com/solana-labs/token-list/main/src/tokens/solana.tokenlist.json)")
# This function is the converse of `add_command_line_parameters()` - it takes
# an argument of parsed command-line parameters and expects to see the ones it added
# to that collection in the `add_command_line_parameters()` call.
#
# It then uses those parameters to create a properly-configured `Context` object.
#
@staticmethod
def from_command_line_parameters(args: argparse.Namespace) -> Context:
name: typing.Optional[str] = args.name
cluster_name: typing.Optional[str] = args.cluster_name
cluster_url: typing.Optional[str] = args.cluster_url
group_name: typing.Optional[str] = args.group_name
group_address: typing.Optional[PublicKey] = args.group_address
mango_program_address: typing.Optional[PublicKey] = args.mango_program_address
serum_program_address: typing.Optional[PublicKey] = args.serum_program_address
skip_preflight: bool = bool(args.skip_preflight)
commitment: typing.Optional[str] = args.commitment
blockhash_commitment: typing.Optional[str] = args.blockhash_commitment
encoding: typing.Optional[str] = args.encoding
blockhash_cache_duration: typing.Optional[datetime.timedelta] = datetime.timedelta(
seconds=args.blockhash_cache_duration) if args.blockhash_cache_duration is not None else None
gma_chunk_size: typing.Optional[Decimal] = args.gma_chunk_size
gma_chunk_pause: typing.Optional[Decimal] = args.gma_chunk_pause
token_filename: str = args.token_data_file
context: Context = ContextBuilder._build(name, cluster_name, cluster_url, skip_preflight, commitment, blockhash_commitment, encoding, blockhash_cache_duration,
group_name, group_address, mango_program_address, serum_program_address, gma_chunk_size, gma_chunk_pause, token_filename)
logging.debug(f"{context}")
return context
@staticmethod
def default():
return ContextBuilder._build(None, None, None, False, None, None, None, None, None, None, None, None, None, None, SplTokenLookup.DefaultDataFilepath)
@staticmethod
def from_group_name(context: Context, group_name: str) -> Context:
return ContextBuilder._build(context.name, context.client.cluster_name, context.client.cluster_url,
context.client.skip_preflight, context.client.commitment,
context.client.blockhash_commitment, context.client.encoding,
context.client.compatible_client.blockhash_cache_duration,
group_name, None, None, None, context.gma_chunk_size, context.gma_chunk_pause,
SplTokenLookup.DefaultDataFilepath)
@staticmethod
def forced_to_devnet(context: Context) -> Context:
cluster_name: str = "devnet"
cluster_url: str = MangoConstants["cluster_urls"][cluster_name]
fresh_context = copy.copy(context)
fresh_context.client = BetterClient.from_configuration(context.name,
cluster_name,
cluster_url,
context.client.commitment,
context.client.blockhash_commitment,
context.client.skip_preflight,
context.client.encoding,
context.client.compatible_client.blockhash_cache_duration,
context.client.instruction_reporter)
return fresh_context
@staticmethod
def forced_to_mainnet_beta(context: Context) -> Context:
cluster_name: str = "mainnet"
cluster_url: str = MangoConstants["cluster_urls"][cluster_name]
fresh_context = copy.copy(context)
fresh_context.client = BetterClient.from_configuration(context.name,
cluster_name,
cluster_url,
context.client.commitment,
context.client.blockhash_commitment,
context.client.skip_preflight,
context.client.encoding,
context.client.compatible_client.blockhash_cache_duration,
context.client.instruction_reporter)
return fresh_context
    # This static method does the actual building. Unlike
    # `from_command_line_parameters()` it takes the individual configuration
    # values directly, fills in environment-variable and ids.json defaults,
    # and returns a properly-configured `Context` object.
    #
#
@staticmethod
def _build(name: typing.Optional[str], cluster_name: typing.Optional[str], cluster_url: typing.Optional[str],
skip_preflight: bool, commitment: typing.Optional[str],
blockhash_commitment: typing.Optional[str], encoding: typing.Optional[str],
blockhash_cache_duration: typing.Optional[datetime.timedelta],
group_name: typing.Optional[str], group_address: typing.Optional[PublicKey],
program_address: typing.Optional[PublicKey], serum_program_address: typing.Optional[PublicKey],
gma_chunk_size: typing.Optional[Decimal], gma_chunk_pause: typing.Optional[Decimal],
token_filename: str) -> "Context":
def public_key_or_none(address: typing.Optional[str]) -> typing.Optional[PublicKey]:
if address is not None and address != "":
return PublicKey(address)
return None
# The first group is only used to determine the default cluster if it is not otherwise specified.
first_group_data = MangoConstants["groups"][0]
actual_name: str = name or os.environ.get("NAME") or "Mango Explorer"
actual_cluster: str = cluster_name or os.environ.get("CLUSTER_NAME") or first_group_data["cluster"]
# Now that we have the actual cluster name, taking environment variables and defaults into account,
# we can decide what we want as the default group.
for group_data in MangoConstants["groups"]:
if group_data["cluster"] == actual_cluster:
default_group_data = group_data
break
actual_commitment: str = commitment or "processed"
actual_blockhash_commitment: str = blockhash_commitment or commitment or "processed"
actual_encoding: str = encoding or "base64"
actual_blockhash_cache_duration: datetime.timedelta = blockhash_cache_duration or datetime.timedelta(seconds=0)
actual_cluster_url: str = cluster_url or os.environ.get(
"CLUSTER_URL") or MangoConstants["cluster_urls"][actual_cluster]
actual_skip_preflight: bool = skip_preflight
actual_group_name: str = group_name or os.environ.get("GROUP_NAME") or default_group_data["name"]
found_group_data: typing.Any = None
for group in MangoConstants["groups"]:
if group["cluster"] == actual_cluster and group["name"].upper() == actual_group_name.upper():
found_group_data = group
if found_group_data is None:
raise Exception(f"Could not find group named '{actual_group_name}' in cluster '{actual_cluster}'.")
actual_group_address: PublicKey = group_address or public_key_or_none(os.environ.get(
"GROUP_ADDRESS")) or PublicKey(found_group_data["publicKey"])
actual_program_address: PublicKey = program_address or public_key_or_none(os.environ.get(
"MANGO_PROGRAM_ADDRESS")) or PublicKey(found_group_data["mangoProgramId"])
actual_serum_program_address: PublicKey = serum_program_address or public_key_or_none(os.environ.get(
"SERUM_PROGRAM_ADDRESS")) or PublicKey(found_group_data["serumProgramId"])
actual_gma_chunk_size: Decimal = gma_chunk_size or Decimal(100)
actual_gma_chunk_pause: Decimal = gma_chunk_pause or Decimal(0)
ids_json_token_lookup: TokenLookup = IdsJsonTokenLookup(actual_cluster, actual_group_name)
all_token_lookup = ids_json_token_lookup
if actual_cluster == "mainnet":
mainnet_spl_token_lookup: TokenLookup = SplTokenLookup.load(token_filename)
all_token_lookup = CompoundTokenLookup([ids_json_token_lookup, mainnet_spl_token_lookup])
elif actual_cluster == "devnet":
devnet_token_filename = token_filename.rsplit('.', 1)[0] + ".devnet.json"
devnet_spl_token_lookup: TokenLookup = SplTokenLookup.load(devnet_token_filename)
all_token_lookup = CompoundTokenLookup([ids_json_token_lookup, devnet_spl_token_lookup])
token_lookup: TokenLookup = all_token_lookup
ids_json_market_lookup: MarketLookup = IdsJsonMarketLookup(actual_cluster)
all_market_lookup = ids_json_market_lookup
if actual_cluster == "mainnet":
mainnet_serum_market_lookup: SerumMarketLookup = SerumMarketLookup.load(
actual_serum_program_address, token_filename)
all_market_lookup = CompoundMarketLookup([ids_json_market_lookup, mainnet_serum_market_lookup])
elif actual_cluster == "devnet":
devnet_token_filename = token_filename.rsplit('.', 1)[0] + ".devnet.json"
devnet_serum_market_lookup: SerumMarketLookup = SerumMarketLookup.load(
actual_serum_program_address, devnet_token_filename)
all_market_lookup = CompoundMarketLookup([ids_json_market_lookup, devnet_serum_market_lookup])
market_lookup: MarketLookup = all_market_lookup
return Context(actual_name, actual_cluster, actual_cluster_url, actual_skip_preflight, actual_commitment, actual_blockhash_commitment, actual_encoding, actual_blockhash_cache_duration, actual_program_address, actual_serum_program_address, actual_group_name, actual_group_address, actual_gma_chunk_size, actual_gma_chunk_pause, token_lookup, market_lookup)
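# A minimal usage sketch (the "--cluster-name devnet" flag value is an
# illustrative assumption, not a project default):
if __name__ == "__main__":
    example_parser = argparse.ArgumentParser(description="ContextBuilder example")
    ContextBuilder.add_command_line_parameters(example_parser)
    example_args = example_parser.parse_args(["--cluster-name", "devnet"])
    example_context = ContextBuilder.from_command_line_parameters(example_args)
    print(example_context)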
|
[
"geoff@knife.opgeek.lan"
] |
geoff@knife.opgeek.lan
|
e78455caad6c1ed3a500146a4db24ce64c061ada
|
bf5abcc6ae91df5f7b113462023c2a2837d7e554
|
/025.py
|
fb32c0bfb3a303170429468bbc8d07d8f4fc747c
|
[] |
no_license
|
nimanp/Project-Euler
|
4ad08bfe4a7cc89ecdd16b641228a4dc20480a5c
|
c43ec2a15fe17af9c48bdc6e0f40635ee71e950d
|
refs/heads/master
| 2020-05-29T16:17:22.636971
| 2013-02-27T10:27:02
| 2013-02-27T10:27:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
def fibThousand():
next = 0
cur = 1
prev = 1
for i in range(0, 20000):
next = cur + prev
prev = cur
cur = next
if len(str(next)) >= 1000:
return i+3
answer = fibThousand()
print(answer)
|
[
"nimanp@flip2.engr.oregonstate.edu"
] |
nimanp@flip2.engr.oregonstate.edu
|
819e726d86ef5adfadc4b5fce43a983d848529c2
|
15b5b907f44fe6030803cdf1142d41d54ed2e6c8
|
/tsp_solver_test.py
|
0fd97a93a829450942a15ac2796d711eb485cd9e
|
[] |
no_license
|
greenmonn/tsp-solver
|
9c8338b38c0207f002b542c863158e4f147b64fc
|
8a3e3b250c7e4ea79105d6130ceacd63ca7ca10f
|
refs/heads/master
| 2020-08-01T03:06:37.995880
| 2019-10-03T13:10:04
| 2019-10-03T13:10:04
| 210,839,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,543
|
py
|
import pytest
from tsp_solver import TSP, solve_exhaustive, solve_dp
# Problem source: https://people.sc.fsu.edu/~jburkardt/datasets/tsp/tsp.html
# Problem source 2: http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/index.html
def test_read_file():
problem = TSP()
problem.from_file('problems/bier127.tsp')
assert len(problem.nodes) == 127
assert len(problem.distance_matrix.matrix) == 127
def test_tsp_solve():
    # Prepare the lower-triangular symmetric distance matrix for 4 nodes:
    # A to B is 1.0, A to C is 3.0, A to D is 5.0,
    # B to C is 2.0, B to D is 4.0, C to D is 1.0
problem = TSP()
problem.from_array([
[],
[1.0],
[3.0, 2.0],
[5.0, 4.0, 1.0]
])
path, length = solve_exhaustive(problem)
assert list(map(lambda node: node.id, path)) == [1, 2, 3, 4]
assert length == 9.0
def test_city_5():
problem = TSP()
problem.from_array([
[],
[3.0],
[4.0, 4.0],
[2.0, 6.0, 5.0],
[7.0, 3.0, 8.0, 6.0]
])
path, length = solve_dp(problem)
assert list(map(lambda node: node.id, path)) == [1, 3, 2, 5, 4]
assert length == 19.0
def test_burma_14():
problem = TSP()
problem.from_file('problems/burma14.tsp', 'geo')
assert len(problem.nodes) == 14
path, length = solve_dp(problem)
assert list(map(lambda node: node.id, path)) == [
1, 2, 14, 3, 4, 5, 6, 12, 7, 13, 8, 11, 9, 10]
assert int(length) == 3346
def test_city_15():
problem = TSP()
problem.from_array([[],
[29],
[82, 55],
[46, 46, 68],
[68, 42, 46, 82],
[52, 43, 55, 15, 74],
[72, 43, 23, 72, 23, 61],
[42, 23, 43, 31, 52, 23, 42],
[51, 23, 41, 62, 21, 55, 23, 33],
[55, 31, 29, 42, 46, 31, 31, 15, 29],
[29, 41, 79, 21, 82, 33, 77, 37, 62, 51],
[74, 51, 21, 51, 58, 37, 37, 33, 46, 21, 65],
[23, 11, 64, 51, 46, 51, 51, 33, 29, 41, 42, 61],
[72, 52, 31, 43, 65, 29, 46, 31, 51, 23, 59, 11, 62],
[46, 21, 51, 64, 23, 59, 33, 37, 11, 37, 61, 55, 23, 59]])
path, length = solve_dp(problem)
ans = [13, 2, 15, 9, 5, 7, 3, 12, 14, 10, 8, 6, 4, 11, 1]
ans.reverse()
assert list(map(lambda node: node.id, path)) == ans
assert length == 291
|
[
"greenmon@kaist.ac.kr"
] |
greenmon@kaist.ac.kr
|
8be1ea418f424267a26e9079f2bc10539f8e6725
|
69623fac9b3e55ed7a72f9ef50d2bef7b5ba3f06
|
/docs/conf.py
|
52aea13fb8c1cf3d41d68fa332b44f2c8b84ac15
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] |
permissive
|
constanthatz/python-nameless
|
4b997e2e8f1aba5bce4a198e8388f06584691575
|
4a3dbfbe80eec9f65e6fe3910737ba36f092b879
|
refs/heads/master
| 2016-09-13T05:39:33.145674
| 2016-05-06T01:03:38
| 2016-05-06T01:03:38
| 58,170,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = 'Nameless'
year = '2016'
author = 'CH'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.1.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/constanthatz/python-nameless/issues/%s', '#'),
'pr': ('https://github.com/constanthatz/python-nameless/pull/%s', 'PR #'),
}
import sphinx_py3doc_enhanced_theme
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
html_theme_options = {
'githuburl': 'https://github.com/constanthatz/python-nameless/'
}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
|
[
"constantine.hatzis@gmail.com"
] |
constantine.hatzis@gmail.com
|
ed50cb65195f00e73e4b9f20573f2d725da88713
|
2085ed042e115bf64584ee7c6385bcbc700dc8a1
|
/d_data_mining/config.py
|
3b98b18a7cbf69b05cd0d0262abcebea2ca45ee7
|
[
"Apache-2.0"
] |
permissive
|
nicolasessisbreton/pyzehe
|
1591c9460dd0fa56427147bb93a44bf6cd8afa27
|
7497a0095d974ac912ce9826a27e21fd9d513942
|
refs/heads/master
| 2020-03-19T00:58:22.393522
| 2018-06-02T04:54:56
| 2018-06-02T04:54:56
| 135,513,352
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.set_option('display.expand_frame_repr', False)
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
db_path = 'db.sqlite3'
db = sqlite3.connect(db_path)
client = pd.read_sql('select * from client', db)
address = pd.read_sql('select * from address', db)
purchase = pd.read_sql('select * from purchase', db)
guarantee_cost = pd.read_sql('select * from guarantee_cost', db)
fund_price = pd.read_sql('select * from fund_price', db)
client.date_of_birth = pd.to_datetime(client.date_of_birth)
address.date = pd.to_datetime(address.date)
purchase.date = pd.to_datetime(purchase.date)
guarantee_cost.date = pd.to_datetime(guarantee_cost.date)
fund_price.date = pd.to_datetime(fund_price.date)
client['age'] = pd.to_datetime('2018-01-01') - client.date_of_birth
client.age = client.age.dt.days/365.25
def printn(df, head=20, title=None):
if isinstance(head, str):
title = head
head = 20
head = int(head)
if isinstance(df, pd.DataFrame):
x = df.head(head)
elif isinstance(df, str):
title = df
x = None
else:
x = df
if title:
print('# ' + title)
if x is not None:
print(x, '\n')
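# A small sketch of printn on an illustrative frame (not data from db.sqlite3):
if __name__ == '__main__':
    demo = pd.DataFrame({'age': [34.2, 51.7], 'province': ['QC', 'ON']})
    printn(demo, 'demo clients')   # titled preview, default head of 20 rows
    printn('notes only')           # a bare string prints just the '# notes only' title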
|
[
"nicolas.essis-breton@intact.net"
] |
nicolas.essis-breton@intact.net
|
cb388adf365759507f01b0a4e5a9bde8d941ed2c
|
5df29be2b040c3fc2d1618d5fe22cc3b94c3a364
|
/api/TheWitnessAPI/venv/lib/python3.6/site-packages/ffmpeg_streaming/_input.py
|
0c40261d2f1f7da2470cfd84f207bdb9e9cdac79
|
[] |
no_license
|
TrellixVulnTeam/Backup_BSK5
|
dff1bad570eca4ee2bdd456082d0fa22fec63b9c
|
382cfc9c53e9746a79e5a8506862c84cb4b6e757
|
refs/heads/master
| 2023-03-22T06:49:11.502186
| 2021-02-14T11:49:19
| 2021-02-14T11:49:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,608
|
py
|
"""
ffmpeg_streaming.media
~~~~~~~~~~~~
Input options
:copyright: (c) 2020 by Amin Yazdanpanah.
:website: https://www.aminyazdanpanah.com
:email: contact@aminyazdanpanah.com
:license: MIT, see LICENSE for more details.
"""
from ffmpeg_streaming._media import Media
from ffmpeg_streaming._utiles import get_os, cnv_options_to_args
from ffmpeg_streaming._clouds import Clouds
class Capture(object):
def __init__(self, video, options):
"""
@TODO: add documentation
"""
self.options = options
self.video = video
def _linux(self):
is_screen = self.options.pop('screen', False)
if is_screen:
cap = 'x11grab'
else:
cap = 'v4l2'
return {
'f': cap,
'i': self.video
}
def _windows(self):
self.video = 'video=' + str(self.video)
windows_audio = self.options.pop('windows_audio', None)
if windows_audio is not None:
self.video = self.video + ':audio=' + str(windows_audio)
return {
'f': 'dshow',
'i': self.video
}
def _os_x(self):
return {
'f': 'avfoundation',
'i': self.video
}
@staticmethod
def _unknown():
raise OSError("Unreported OS!")
def __iter__(self):
yield from getattr(self, '_' + get_os())().items()
def get_from_cloud(cloud: Clouds, options: dict):
"""
@TODO: add documentation
"""
save_to = options.pop('save_to', None)
return {
'i': cloud.download(save_to, **options),
'is_tmp': True if save_to is None else False
}
class InputOption(object):
def __init__(self, _input, **options):
"""
@TODO: add documentation
"""
self.input_ = _input
self.options = options
def __str__(self):
return " ".join(cnv_options_to_args(self._create()))
def __iter__(self):
yield from self._create().items()
def _create(self):
options = self.options.pop('pre_opts', {'y': None})
is_cap = self.options.pop('capture', False)
if isinstance(self.input_, Clouds):
options.update(get_from_cloud(self.input_, self.options))
elif is_cap:
options.update(Capture(self.input_, self.options))
elif isinstance(self.input_, (str, int)):
i_options = {'i': str(self.input_)}
i_options.update(self.options)
options.update(i_options)
else:
raise ValueError("Unknown input!")
return options
def input(_input, **options) -> Media:
"""Input options (ffmpeg pre_option ``-i`` input options)
You can also pass a cloud object as an input to the method. the file will be downloaded and will pass it to ffmpeg
if you want to open a resource from a pipe, set input "pipe:"
if you want to open a resource from a capture device, pass a device name as filename and set the capture keyword
to True. To list the supported, connected capture devices, see https://trac.ffmpeg.org/wiki/Capture/Webcam
and https://trac.ffmpeg.org/wiki/Capture/Desktop. See https://ffmpeg.org/ffmpeg.html#Main-options and
https://ffmpeg.org/ffmpeg-protocols.html for more information about input option and supported resources
such as http, ftp, and so on.
"""
return Media(InputOption(_input, **options))
__all__ = [
'input',
]
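# Hedged examples of input() per the docstring above (the paths and device name
# are illustrative; each call returns a Media object ready for further chaining):
if __name__ == '__main__':
    from_file = input('/tmp/example.mp4')            # a local file
    from_pipe = input('pipe:')                       # read the resource from a pipe
    from_cam = input('/dev/video0', capture=True)    # a Linux capture device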
|
[
"toncyz@gmail.com"
] |
toncyz@gmail.com
|
2d90bd131c3f2674579557e64d00f3ad2e7d1f85
|
e8d6fe918ddfb22ff6ac4936923de78295b35eef
|
/data.py
|
65bd38b4f455453e0092c49b2901283d6600c88e
|
[] |
no_license
|
dldudwo0805/waymo_dataset
|
822d09f29314596bded8c16517c37da9c9742951
|
08f640a2e083d12f69f29bf61d902bd5b8a97632
|
refs/heads/master
| 2020-07-21T23:09:36.185665
| 2019-09-07T17:02:27
| 2019-09-07T17:02:27
| 206,996,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,706
|
py
|
import numpy as np
import cv2
def parse_calibration_data(data, sensor_type):
intrinsic = np.eye(3, dtype=np.float32)
extrinsic = np.eye(4, dtype=np.float32)
distortion = np.zeros((1,5), dtype=np.float32)
sensor_name = None
if sensor_type == "CAM" or sensor_type == "CAMERA":
if data.name == 0:
sensor_name = "Unknown\n"
elif data.name == 1:
sensor_name = "Front\n"
elif data.name == 2:
sensor_name = "Front Left\n"
elif data.name == 3:
sensor_name = "Front Right\n"
elif data.name == 4:
sensor_name = "Side Left\n"
elif data.name == 5:
sensor_name = "Side Right\n"
else:
sensor_name = "Unknown\n"
intrinsic[0, 0] = data.intrinsic[0]
intrinsic[1, 1] = data.intrinsic[1]
intrinsic[0, 2] = data.intrinsic[2]
intrinsic[1, 2] = data.intrinsic[3]
distortion[0, 0] = data.intrinsic[4]
distortion[0, 1] = data.intrinsic[5]
distortion[0, 2] = data.intrinsic[6]
distortion[0, 3] = data.intrinsic[7]
distortion[0, 4] = data.intrinsic[8]
cnt = 0
for val in data.extrinsic.transform:
extrinsic[int(cnt/4), int(cnt % 4)] = val
cnt += 1
return sensor_name, intrinsic, extrinsic, distortion
def save_calibration_data(file, sensor_name, intrinsic, extrinsic, distortion=None):
file.write(sensor_name)
_intrinsic = "%f, %f, %f\n%f, %f, %f\n%f, %f, %f\n"\
%(intrinsic[0, 0], intrinsic[0, 1], intrinsic[0,2],
intrinsic[1, 0], intrinsic[1, 1], intrinsic[1,2],
                  intrinsic[2, 0], intrinsic[2, 1], intrinsic[2, 2])
file.write(_intrinsic)
_extrinsic = "%f, %f, %f, %f\n%f, %f, %f, %f\n%f, %f, %f, %f\n%f, %f, %f, %f\n"\
%(extrinsic[0, 0], extrinsic[0, 1], extrinsic[0, 2], extrinsic[0, 3],
extrinsic[1, 0], extrinsic[1, 1], extrinsic[1, 2], extrinsic[1, 3],
extrinsic[2, 0], extrinsic[2, 1], extrinsic[2, 2], extrinsic[2, 3],
extrinsic[3, 0], extrinsic[3, 1], extrinsic[3, 2], extrinsic[3, 3])
file.write(_extrinsic)
if distortion is not None:
_distortion = "%f, %f, %f, %f, %f\n"\
%(distortion[0, 0], distortion[0, 1], distortion[0, 2], distortion[0, 3], distortion[0, 4])
file.write(_distortion)
file.write('\n')
def save_image(save_root, sensor_position, index, image):
filename = "%s/%s_%04d.png"%(save_root, sensor_position, index)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(filename, image)
cv2.waitKey(1)
|
[
"dldudwo0805@gmail.com"
] |
dldudwo0805@gmail.com
|
8960d44ca61634dd452e823c42f66e6330c2c176
|
b53c3fc57aa3e8abe94064ebda201b98911eb25b
|
/src/mails/mailsvc/MailsService.py
|
5028f8e967edbf7a2a072fc4be6b64c0314623c1
|
[] |
no_license
|
dreamcatcher2015/email_sender
|
55e280032e0f04a2188d22d7342bf405e4b01ef7
|
8a68d9b86310bbafc660ce46e5218749afd53093
|
refs/heads/master
| 2021-01-20T06:57:02.601899
| 2015-08-22T13:42:27
| 2015-08-22T13:42:27
| 41,206,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 15,017
|
py
|
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def send_mails(self, mails):
"""
Parameters:
- mails
"""
pass
def send_mails2(self, sendtos, subject, content, attach_files, priority):
"""
Parameters:
- sendtos
- subject
- content
- attach_files
- priority
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def send_mails(self, mails):
"""
Parameters:
- mails
"""
self.send_send_mails(mails)
return self.recv_send_mails()
def send_send_mails(self, mails):
self._oprot.writeMessageBegin('send_mails', TMessageType.CALL, self._seqid)
args = send_mails_args()
args.mails = mails
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_send_mails(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = send_mails_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "send_mails failed: unknown result");
def send_mails2(self, sendtos, subject, content, attach_files, priority):
"""
Parameters:
- sendtos
- subject
- content
- attach_files
- priority
"""
self.send_send_mails2(sendtos, subject, content, attach_files, priority)
return self.recv_send_mails2()
def send_send_mails2(self, sendtos, subject, content, attach_files, priority):
self._oprot.writeMessageBegin('send_mails2', TMessageType.CALL, self._seqid)
args = send_mails2_args()
args.sendtos = sendtos
args.subject = subject
args.content = content
args.attach_files = attach_files
args.priority = priority
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_send_mails2(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = send_mails2_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "send_mails2 failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["send_mails"] = Processor.process_send_mails
self._processMap["send_mails2"] = Processor.process_send_mails2
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_send_mails(self, seqid, iprot, oprot):
args = send_mails_args()
args.read(iprot)
iprot.readMessageEnd()
result = send_mails_result()
result.success = self._handler.send_mails(args.mails)
oprot.writeMessageBegin("send_mails", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_send_mails2(self, seqid, iprot, oprot):
args = send_mails2_args()
args.read(iprot)
iprot.readMessageEnd()
result = send_mails2_result()
result.success = self._handler.send_mails2(args.sendtos, args.subject, args.content, args.attach_files, args.priority)
oprot.writeMessageBegin("send_mails2", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class send_mails_args:
"""
Attributes:
- mails
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'mails', (TType.STRUCT,(MailObject, MailObject.thrift_spec)), None, ), # 1
)
def __init__(self, mails=None,):
self.mails = mails
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.mails = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in xrange(_size7):
_elem12 = MailObject()
_elem12.read(iprot)
self.mails.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('send_mails_args')
if self.mails is not None:
oprot.writeFieldBegin('mails', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.mails))
for iter13 in self.mails:
iter13.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.mails)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class send_mails_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('send_mails_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class send_mails2_args:
"""
Attributes:
- sendtos
- subject
- content
- attach_files
- priority
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'sendtos', (TType.STRING,None), None, ), # 1
(2, TType.STRING, 'subject', None, None, ), # 2
(3, TType.STRING, 'content', None, None, ), # 3
(4, TType.LIST, 'attach_files', (TType.STRING,None), None, ), # 4
(5, TType.I32, 'priority', None, None, ), # 5
)
def __init__(self, sendtos=None, subject=None, content=None, attach_files=None, priority=None,):
self.sendtos = sendtos
self.subject = subject
self.content = content
self.attach_files = attach_files
self.priority = priority
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.sendtos = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in xrange(_size14):
_elem19 = iprot.readString();
self.sendtos.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.subject = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.content = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.attach_files = []
(_etype23, _size20) = iprot.readListBegin()
for _i24 in xrange(_size20):
_elem25 = iprot.readString();
self.attach_files.append(_elem25)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.priority = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('send_mails2_args')
if self.sendtos is not None:
oprot.writeFieldBegin('sendtos', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.sendtos))
for iter26 in self.sendtos:
oprot.writeString(iter26)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.subject is not None:
oprot.writeFieldBegin('subject', TType.STRING, 2)
oprot.writeString(self.subject)
oprot.writeFieldEnd()
if self.content is not None:
oprot.writeFieldBegin('content', TType.STRING, 3)
oprot.writeString(self.content)
oprot.writeFieldEnd()
if self.attach_files is not None:
oprot.writeFieldBegin('attach_files', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.attach_files))
for iter27 in self.attach_files:
oprot.writeString(iter27)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.priority is not None:
oprot.writeFieldBegin('priority', TType.I32, 5)
oprot.writeI32(self.priority)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.sendtos)
value = (value * 31) ^ hash(self.subject)
value = (value * 31) ^ hash(self.content)
value = (value * 31) ^ hash(self.attach_files)
value = (value * 31) ^ hash(self.priority)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class send_mails2_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('send_mails2_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
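# Hedged client-side sketch for the generated service above (host and port are
# illustrative; only standard Thrift transport/protocol classes are used):
if __name__ == '__main__':
    from thrift.transport import TSocket
    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9090))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Client(protocol)
    transport.open()
    status = client.send_mails2(['a@example.com'], 'subject', 'body', [], 1)
    transport.close()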
|
[
"wfyang2015@163.com"
] |
wfyang2015@163.com
|
6fea8635dd9c49b1ded6592084cffdc533f2c8cb
|
817a906a83db42604c724202553a79ffa5437981
|
/tests/integration/test_print.py
|
9a9e836346456335d61ab94d5214706e68c38a1b
|
[
"MIT"
] |
permissive
|
rafacastillol/ledgeroni
|
44df0ff478da11f0748ce360482a6341443c43c8
|
4a3df2e838a604481dd8b5472cdaba35ec9a4fb6
|
refs/heads/master
| 2021-06-22T17:24:03.202974
| 2019-11-21T23:39:01
| 2019-11-21T23:39:01
| 215,432,759
| 0
| 0
|
MIT
| 2021-04-20T18:40:34
| 2019-10-16T01:50:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
from click.testing import CliRunner
from ledgeroni.cli import cli
def test_printing():
"Tests print command output without extra options"
runner = CliRunner()
result = runner.invoke(cli, [
'-f', 'tests/sample_data/index.ledger', '--price-db',
'tests/sample_data/prices_db', 'print'])
assert result.exit_code == 0
assert "Sold some bitcoins" in result.output
assert "I owe Joe for a favor" in result.output
def test_filtering():
"Tests print command output with a filter specified"
runner = CliRunner()
result = runner.invoke(cli, [
'-f', 'tests/sample_data/index.ledger', '--price-db',
'tests/sample_data/prices_db', 'print', 'Expense'])
assert result.exit_code == 0
assert 'Sold some bitcoins' not in result.output
assert 'Purchased reddit gold for the year' in result.output
assert 'I owe Joe for a favor' in result.output
def test_without_ledger():
"Throws an error when no ledger file is specified"
runner = CliRunner()
result = runner.invoke(cli, [
'--price-db', 'tests/sample_data/prices_db', 'balance'])
assert result.exit_code == 2
|
[
"rcastillo@nearsoft.com"
] |
rcastillo@nearsoft.com
|
dc04368f8187e194465e7ed0081eeb50b1530cfa
|
455669e844e3cc72e9406ee5d35a2b90dc420afc
|
/DigitsMultiplication.py
|
ab00ce8d3d2794af4842e520484812ffc260e124
|
[] |
no_license
|
victordsantoss/elementary-island-by-checkio
|
6e29cb0002308030fc1495527180fb82cd3eeb73
|
730f5e152cf103ef5248c8ceafa9cc17b7921a37
|
refs/heads/master
| 2023-02-17T10:24:13.215427
| 2021-01-07T23:00:13
| 2021-01-07T23:00:13
| 277,915,986
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
def checkio(number: int) -> int:
    result = 1
    while number > 0:
        digit = number % 10
        if digit != 0:          # skip zero digits so they don't zero out the product
            result *= digit
        number //= 10
    return result
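# Quick sanity checks (expected products worked out by hand):
if __name__ == '__main__':
    assert checkio(123405) == 120   # 1*2*3*4*5; the zero digit is skipped
    assert checkio(999) == 729      # 9*9*9
    assert checkio(1000) == 1       # only the leading 1 contributes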
|
[
"victor.samuelsantoss@gmail.com"
] |
victor.samuelsantoss@gmail.com
|
f7bdc8e1bbb77efac08fa7b839d14c78651a2987
|
fd59d27e462844b0d6e79b4434555f7ed4eb3a40
|
/main.py
|
8cf847d80c2a1ed06f13e7cac1c5179f6e0baba4
|
[] |
no_license
|
alepiaz/MyScannerBot
|
cc6b51e498e0a726f6c748bc2de2bd37e01df639
|
3ae50381375318c048aa7dc9e2185241d1e1d38e
|
refs/heads/main
| 2023-07-01T18:33:02.092696
| 2021-07-26T21:13:46
| 2021-07-26T21:13:46
| 389,769,456
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
# -*- coding: utf-8 -*-
from functions import *
TOKEN = "827961133:AAE66epsHDf8Yr3xeofp3KRyvP8qiigrrqk"
bot = telegram.Bot(TOKEN)
def main():
updater = Updater(TOKEN, request_kwargs={'read_timeout': 20, 'connect_timeout': 20})
dp = updater.dispatcher
j = updater.job_queue
dp.add_handler(CommandHandler('start', helpcmd))
dp.add_handler(CommandHandler('help', helpcmd))
dp.add_handler(CommandHandler('download', downloadcmd))
dp.add_handler(CommandHandler('delete', deletecmd))
dp.add_handler(MessageHandler(Filters.photo, check_photo))
dp.add_handler(MessageHandler(Filters.document, check_file ))
dp.add_handler(CallbackQueryHandler(next_handler, pattern='next[0-9].*'))
dp.add_handler(CallbackQueryHandler(prev_handler, pattern='prev[0-9].*'))
dp.add_handler(CallbackQueryHandler(crop_handler, pattern='crop[0-9].*'))
dp.add_handler(CallbackQueryHandler(adapt_handler, pattern='adapt.*'))
dp.add_handler(CallbackQueryHandler(height_handler, pattern='a4.*'))
dp.add_handler(CallbackQueryHandler(width_handler, pattern='card.*'))
dp.add_handler(CallbackQueryHandler(bw_handler, pattern='bw.*'))
dp.add_handler(CallbackQueryHandler(orig_handler, pattern='orig.*'))
dp.add_handler(CallbackQueryHandler(colork_handler, pattern='colork.*'))
dp.add_handler(CallbackQueryHandler(grayk_handler, pattern='grayk.*'))
dp.add_handler(CallbackQueryHandler(pdf_handler, pattern='pdf.*'))
dp.add_handler(CallbackQueryHandler(dl_handler, pattern='dl.*'))
dp.add_handler(CallbackQueryHandler(back_handler, pattern='back.*'))
dp.add_error_handler(error_callback)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
alepiaz.noreply@github.com
|
ec0746871b2e7af2980112ac77780008503fb772
|
c3127b52c94cbf3ad2e3464ade6c2e66e0aa6e62
|
/modules/ResourceFaceRecognition/utils.py
|
418143e1f259c1caf1dd71adac2ca24598dac134
|
[] |
no_license
|
Skydddoogg/npr_ai_modules
|
132d1517e1184ddb9b813b97335fc28c37c41ad9
|
bf0d4fe7273b5fb44e42b9e5b84ebedbe6f7933f
|
refs/heads/master
| 2022-12-05T00:32:56.302228
| 2020-08-16T05:48:40
| 2020-08-16T05:48:40
| 273,669,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
import os
from modules.ResourceFaceRecognition import config
import face_recognition
import cv2
import numpy as np
from PIL import ImageFont, ImageDraw, Image
def get_all_image_path_from_db():
if not os.path.isdir(config.db_path):
os.mkdir(config.db_path)
all_image_path = [f for f in os.listdir(config.db_path) if os.path.isfile(os.path.join(config.db_path, f)) and '.jpg' in f]
return all_image_path
def encode_images(list_image_path):
known_face_encodings = []
known_face_names = []
# Encode the fetched images
for image_name in list_image_path:
image = face_recognition.load_image_file(os.path.join(config.db_path, image_name))
face_encoding = face_recognition.face_encodings(image)[0]
known_face_encodings.append(face_encoding)
known_face_names.append(image_name.split('.')[0])
return known_face_encodings, known_face_names
def display_bbox_in_image(image, face_locations, face_names):
font_size = 150
# font = ImageFont.truetype("THSarabunNew.ttf", font_size)
font = ImageFont.load_default()
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
img_pil = Image.fromarray(image)
draw = ImageDraw.Draw(img_pil)
# Draw a label with a name below the face
draw.rectangle(((left, bottom - 30), (right, bottom)), fill=(0, 0, 255))
draw.text((left + 6, bottom - 30), name, font = font)
image = np.array(img_pil)
# Draw a box around the face
cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 255), 2)
# Display the resulting image
cv2.imshow('Video', image)
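# A hedged end-to-end sketch of the helpers above (assumes a webcam at index 0
# and '<name>.jpg' images in config.db_path; the 1/4 downscale matches the x4
# rescaling that display_bbox_in_image applies):
if __name__ == '__main__':
    known_encodings, known_names = encode_images(get_all_image_path_from_db())
    frame = cv2.VideoCapture(0).read()[1]
    small = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    locations = face_recognition.face_locations(small)
    names = []
    for encoding in face_recognition.face_encodings(small, locations):
        matches = face_recognition.compare_faces(known_encodings, encoding)
        names.append(known_names[matches.index(True)] if True in matches else 'Unknown')
    display_bbox_in_image(frame, locations, names)
    cv2.waitKey(0)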
|
[
"59070071@it.kmitl.ac.th"
] |
59070071@it.kmitl.ac.th
|
d2a94dfbf30dff2f1a579ee6fac2ffc57d957736
|
5cb9f3d7752ec48ac031228117f1517a4474ebb0
|
/slidingWindowMax.py
|
f97df2c62a16914a20d55c90bcf55c2aef8b2846
|
[] |
no_license
|
MLSaj/PythonDive
|
2f126ff50030cb25b8db32fb5ddd62140b2858ad
|
eb378e4bd9a1ec6396b65e62f4c22d9a7fc2a720
|
refs/heads/master
| 2020-12-23T03:30:52.605164
| 2020-03-11T00:45:09
| 2020-03-11T00:45:09
| 237,019,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
import collections
from typing import List
class Solution:
    def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
        if nums is None or len(nums) == 0:
            return []
Qi = collections.deque()
n = len(nums)
output = []
for i in range(k):
while(Qi and nums[i] >= nums[Qi[-1]]):
Qi.pop()
Qi.append(i)
for i in range(k,n):
output.append(nums[Qi[0]])
while Qi and Qi[0] <= i - k :
Qi.popleft()
while Qi and nums[i] >= nums[Qi[-1]]:
Qi.pop()
Qi.append(i)
output.append(nums[Qi[0]])
return output
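# Quick check against the classic LeetCode 239 example (expected output worked
# out by hand):
if __name__ == '__main__':
    print(Solution().maxSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3))
    # -> [3, 3, 5, 5, 6, 7]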
|
[
"noreply@github.com"
] |
MLSaj.noreply@github.com
|
560b69cfbd0fd321091c869652d6b50072aa91fd
|
27c27208a167f089bb8ce4027dedb3fcc72e8e8a
|
/Athena/2010/Bonus2c.py
|
4630a85e8d69332309fd1999e71b1f65374b610e
|
[] |
no_license
|
stankiewiczm/contests
|
fd4347e7b84c8c7ec41ba9746723036d86e2373c
|
85ed40f91bd3eef16e02e8fd45fe1c9b2df2887e
|
refs/heads/master
| 2021-05-10T16:46:41.993515
| 2018-02-16T09:04:15
| 2018-02-16T09:04:15
| 118,587,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
from numpy.random import random  # replaces the long-gone Numeric-era RandomArray module
def C(n, r, P3):
    C = 1
    k = 1
    while k < r:
        C = (C * (n + 1 - k)) // k % P3  # '//' keeps Python 2's integer division
        k += 1
        if k % 10**6 == 0:
            print(k)
    return C
Tot = 0
Sum = 0
for line in open("Bonus2.txt"):
    p = int(line)
    # print(p, C(p, p // 2 + 1, p**3))
    Tot = int(p * random())
    # print(p, Tot)
    Sum += Tot * p * p + 2 * p
print(Sum)
# 7514470 45086079
|
[
"mstankiewicz@gmail.com"
] |
mstankiewicz@gmail.com
|
35acab6b770cd094c823756a3cefcbd6789f0305
|
71b6c423d1095eb8badddf5728097f37b1a23ce5
|
/5/1.py
|
d908dc5ff99d530cdc88604899b1c6279803d026
|
[] |
no_license
|
Zigolox/AOC2020
|
2f12c9cfa16a185226f1d29265efae00ceb090e8
|
95f6ba8b710c3afa74956ad4505d1432e2ceca3c
|
refs/heads/main
| 2023-02-01T02:19:47.251842
| 2020-12-17T23:37:51
| 2020-12-17T23:37:51
| 317,674,853
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
with open("input.txt", "r") as boardingpass:
max = 0
for boarding in boardingpass:
bin = boarding.replace('F','0').replace('B','1').replace('L','0').replace('R','1')
if (n := int(bin[:7],2) * 8 + int(bin[7:],2)) > max:
max = n
print(max)
|
[
"41542666+Zigolox@users.noreply.github.com"
] |
41542666+Zigolox@users.noreply.github.com
|
7da405979dd423ccfdf111cd0e73d50315de9c65
|
33bcbc643350eba190df238145ab87c50c7f4496
|
/distributed/master.py
|
6d4deb1fdde02e36576e52c632d607ef51fabc2b
|
[] |
no_license
|
wufan0920/simple-spider
|
fcc5d34376704fb632d61e9a3d2cafcbcf7f259f
|
54e94db5f5c571afad59d30c350a9dd488fb3d89
|
refs/heads/master
| 2021-06-04T03:15:16.632707
| 2020-06-22T06:15:55
| 2020-06-22T06:15:55
| 20,523,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import SocketServer
import thread,time
from SimpleXMLRPCServer import SimpleXMLRPCServer
class MultiThreadRPCServer(SocketServer.ThreadingMixIn,SimpleXMLRPCServer):
pass
global url_set
global url_pool
global server
global pool_lock
global set_lock
def add_url(url):
pool_lock.acquire()
set_lock.acquire()
if not(url in url_set) and url.find('html')!=-1 and url.find('ustc')!=-1:
url_pool.append(url)
url_set.add(url)
set_lock.release()
pool_lock.release()
return 0
def get_url():
pool_lock.acquire()
if len(url_pool)!=0:
url=url_pool.pop()
else:
url=0
pool_lock.release()
return url
def stop_parse():
server.shutdown()
return 0
if __name__=='__main__':
initial_url = 'http://staff.ustc.edu.cn/~bjhua/courses/security/2013/index.html'
url_set = set()
url_pool = []
#server = MultiThreadRPCServer(("localhost",8000))
server = MultiThreadRPCServer(("192.168.1.100",8000))
pool_lock=thread.allocate_lock()
set_lock=thread.allocate_lock()
url_set.add(initial_url)
url_pool.append(initial_url)
server.register_function(add_url)
server.register_function(get_url)
server.register_function(stop_parse)
server.serve_forever()
print url_set
|
[
"wufan0920@163.com"
] |
wufan0920@163.com
|
3cf95526b48a3c3212a3a24d24a7fafb35255959
|
a7985ae0b4b521abe36d84386079c97b8f3665d4
|
/MODULO_4_CIENCIA_DA_COMPUTACAO/BLOCO_35/dia_3/exercicios/exercise_01.py
|
16f291a8bfd70a67fd07c31055d79ce35f3d981d
|
[] |
no_license
|
herculesgabriel/trybe-exercises
|
0ceb74f7058440c1b14d1301a46826d48a11f503
|
e67611c8d9fc1f97b5de7aa1d20e0d940252cffb
|
refs/heads/master
| 2023-07-08T21:00:39.577965
| 2021-08-14T10:58:28
| 2021-08-14T10:58:28
| 289,084,136
| 1
| 0
| null | 2021-08-14T10:58:28
| 2020-08-20T18:48:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 724
|
py
|
class Soldier:
def __init__(self, level):
self.level = level
def attack(self):
return self.level * 1
class Jedi:
def __init__(self, level):
self.level = level
def attackWithSaber(self):
return self.level * 100
class JediCharacterAdapter:
def __init__(self, jedi):
self.jedi = jedi
def attack(self):
return self.jedi.attackWithSaber()
class StarWarsGame:
def __init__(self, character):
self.character = character
def fight_enemy(self):
print(f"You caused {self.character.attack()} of damage to the enemy")
jedi = JediCharacterAdapter(Jedi(20))
StarWarsGame(Soldier(5)).fight_enemy()
StarWarsGame(jedi).fight_enemy()
|
[
"herculesgabriel00@gmail.com"
] |
herculesgabriel00@gmail.com
|
484f557a8ccacec04be5837eefaa4b6211d9c672
|
660ccb10a08c418bdf489cfa5e56cf0242c9cda6
|
/7.27/support.py
|
30a6c68467943242a149ae36454d521cdd1ea838
|
[] |
no_license
|
huseph/learn_python
|
f3886d75a1ed0dbbbe4dd944d0c25ff8fa4cbdff
|
96c65ccd1c6420e2bd3e0c370d551455789f1bf8
|
refs/heads/master
| 2020-08-06T11:30:05.253573
| 2019-10-16T08:54:00
| 2019-10-16T08:54:00
| 212,960,682
| 0
| 0
| null | 2019-10-16T08:54:02
| 2019-10-05T07:25:39
|
Python
|
UTF-8
|
Python
| false
| false
| 278
|
py
|
def hanoi(n, fro, ass, tar):
global summ
if n == 1:
print('%c --> %c' %(fro, tar))
summ += 1
else:
hanoi(n-1, fro, tar, ass)
print('%c --> %c' %(fro, tar))
summ += 1
hanoi(n-1, ass, fro, tar)
return summ
summ = 0
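# Minimal usage sketch: moving 3 disks from peg A to peg C (B assists) prints
# the 7 moves and returns the running move count.
if __name__ == '__main__':
    print(hanoi(3, 'A', 'B', 'C'))   # -> 7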
|
[
"920993863@qq.com"
] |
920993863@qq.com
|
c3c594e8a75b39bcba481a10f6fb61d63a17535b
|
9c98b3bb0dd3f14e22962c817cb8cfdd4036556d
|
/puppies/settings.py
|
5e45fe10cdcd89a8da1c51ff1f1694c86691c6e7
|
[] |
no_license
|
yifanwangsh/puppies_test
|
f67869c58fe71a566b60667b048a397f0771b312
|
172afe32a7a91dec05ab93ca56a54cc3b77b6924
|
refs/heads/master
| 2020-05-21T09:03:44.282446
| 2019-05-08T13:44:29
| 2019-05-08T13:44:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,201
|
py
|
"""
Django settings for puppies project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4z0&a#ry$&!xr%&u&k$ipz&ti#aqmjgc&$5yvv)q=#&p(5%ypt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'puppies',
'post',
'rest_framework',
# 'post.apps.PostConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'puppies.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'puppies.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"michelle.zhou@Zipians-MacBook-Pro.local"
] |
michelle.zhou@Zipians-MacBook-Pro.local
|
c8910827478d7be0bf3ac3ea2057cd36fcb10e9e
|
0c6d97a73dd587f8c27d0be21751cd3c68465486
|
/engine/classifiers.py
|
affbd683d2258e4cd5fa827f61acac1bf85b767d
|
[] |
no_license
|
MachineResearchGroup/Research2021
|
5517500439ded33133a279cfcbcd79b703eb66b5
|
b783f7bc24f72d073747b371b3141ababd862b96
|
refs/heads/main
| 2023-07-08T23:33:26.602468
| 2021-06-25T00:08:10
| 2021-06-25T00:08:10
| 379,478,028
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,028
|
py
|
from collections import OrderedDict
import collections
import joblib
import pandas as pd
from sklearn.svm import SVC as SVM
from sklearn.linear_model import SGDClassifier as SGD
from sklearn.naive_bayes import MultinomialNB as MNB
from sklearn.ensemble import ExtraTreesClassifier as ET
from sklearn.neural_network import MLPClassifier as MLP
from sklearn.linear_model import LogisticRegression as LR
from sklearn.linear_model import PassiveAggressiveClassifier as PA
clf_Name = {ET: 'ET', LR: 'LR', MLP: 'MLP', MNB: 'MNB', PA: 'PA', SGD: 'SGD', SVM: 'SVM'}
#clf_Prt = {ET: [], LR: [], MLP: [], MNB: [], PA: [], SGD: [], SVM: []}
def getClf(interaction, resampling):
clf_Prt = {ET: [], LR: [], MLP: [], MNB: [], PA: [], SGD: [], SVM: []}
for clf in clf_Name:
params = get_params(interaction, resampling, clf_Name[clf])
instanciamento(clf_Prt, clf, params)
return clf_Prt
def get_params(interaction, resampling, algorithm):
params = pd.read_csv('../results/hyperparametrization/data_'+str(interaction)+'/'+resampling+'/hypeResultsBayesSearchCV(' + algorithm + ').csv')
return params['Params']
# def getClf(interaction, fold, resampling):
# for clf in clf_Prt:
# classifier = get_model(interaction, fold, resampling, clf)
# clf_Prt[clf].append(classifier)
# return clf_Prt
#
#
# def get_params(index_data, resampling, algorithm):
# params = pd.read_csv('../results/hyperparametrization/data_'+str(index_data)+'/'+resampling+'/hypeResultsBayesSearchCV(' + algorithm + ').csv')
# return params['Params']
def instanciamento(clf_Prt, _class, params):
    # "instanciamento" (Portuguese for instantiation): build one classifier
    # instance per stored hyper-parameter set and collect them by class.
    for param in params:
        param = dict(eval(param))
        _classifier = _class(**param)
        clf_Prt[_class].append(_classifier)
def getClf_Name(classifier):
return clf_Name[classifier]
def get_model(interaction, fold, resampling, clf):
return joblib.load('../results/hyperparametrization/models/data_'+str(interaction)+'/'+resampling+'/'+clf.__name__+'('+str(fold)+').joblib.pkl')
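if __name__ == '__main__':
    # Usage sketch (hypothetical arguments, not part of the original module):
    # assumes the hyperparametrization CSVs referenced above exist and hold
    # dict literals in their 'Params' column.
    clfs = getClf(interaction=1, resampling='smote')
    for clf_class, instances in clfs.items():
        print(getClf_Name(clf_class), len(instances))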
|
[
"geovanemiguel2@gmail.com"
] |
geovanemiguel2@gmail.com
|
c2815948beaae0c87f6d0119ed66e6ed60c020c9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03146/s953135523.py
|
2afc8fe333f0af60c818fd13f4a0935e52d56d7d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
s = int(input())
a = s
lis = list()
while True:
if (s % 2) == 0:
s = s / 2
if s in lis:
break
lis.append(int(s))
else:
s = 3*s + 1
if s in lis:
break
lis.append(int(s))
if (a ==1) or (a == 2) or (a == 4):
print(4)
else:
print(len(lis)+2)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
dec2bb69a9b2c91b17a99c892e5801ff632c0e57
|
b4c93bad8ccc9007a7d3e7e1d1d4eb8388f6e988
|
/farmercoupon/migrations/0046_auto_20210321_1515.py
|
17c88b10e8f353d76777e54e7a6f7170a539cd7a
|
[] |
no_license
|
flashdreiv/fis
|
39b60c010d0d989a34c01b39ea88f7fc3be0a87d
|
b93277785d6ad113a90a011f7c43b1e3e9209ec5
|
refs/heads/main
| 2023-04-02T12:46:32.249800
| 2021-03-31T00:27:29
| 2021-03-31T00:27:29
| 343,431,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Generated by Django 3.1.7 on 2021-03-21 07:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('farmercoupon', '0045_auto_20210321_1453'),
]
operations = [
migrations.AlterField(
model_name='farmer',
name='mobile_number',
field=models.CharField(blank=True, max_length=13, null=True, unique=True),
),
]
|
[
"dreivan.orprecio@gmail.com"
] |
dreivan.orprecio@gmail.com
|
c8ee81ed22b18265970fc5f7220c1da05afc76be
|
4eba2b7b10863244894f1318cff60ed616c96e7c
|
/section14_OOP/CurrencyConverter-0.16.1/CurrencyConverter-0.16.1/lecture113.py
|
1196b43334c483faca1f938fd4445865a534088d
|
[
"Apache-2.0"
] |
permissive
|
Aritiaya50217/CompletePython3Programming
|
1bf2b89b1b3793807671c80635010f1b383940b9
|
f3d132226ec56a8d3edd6690c578486a1adcc409
|
refs/heads/main
| 2023-06-12T12:02:20.950605
| 2021-06-13T12:48:57
| 2021-06-13T12:48:57
| 374,885,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
from currency_converter import CurrencyConverter
from datetime import date
''' The fallback method can be configured with the fallback_on_missing_rate_method parameter, which currently supports "linear_interpolation" and "last_known" values. '''
c = CurrencyConverter(fallback_on_missing_rate=True,fallback_on_wrong_date=True)
# If the requested date has no BGN rate an error would normally be raised, so fallback_on_missing_rate=True is used so that a real BGN rate is still returned
print(c.convert(100, 'BGN', date=date(2010, 11, 21)))
# If the given date is not in the data an error would be raised, so fallback_on_wrong_date=True is passed to fall back to the rate of the closest available day
print(c.convert(100, 'EUR', 'USD', date=date(1986, 2, 2)))
print(c._get_rate)
|
[
"artitaya2466@gmail.com"
] |
artitaya2466@gmail.com
|
98d914f4d637189e6d785027efa9c91d88294f60
|
2536b3524e8eed4524009502a5151f5e16fc68fe
|
/douban/views.py
|
454eae0ead0f0adec7ab0b88ac21b31a02134a55
|
[] |
no_license
|
sakishum/livehouse
|
360d44c6fd315f8a5d47b6626f27154b9e6b8515
|
e600415def0830e026dc20235696012a94a7e0a0
|
refs/heads/master
| 2020-12-28T20:43:42.126179
| 2013-08-23T17:23:32
| 2013-08-23T17:23:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
# -*- coding: utf-8 -*-
from django.http import Http404, HttpResponse
from django.shortcuts import render_to_response
from django.core.paginator import Paginator, InvalidPage, EmptyPage
import utils
import json
def fans_rank(request):
results, update_time = utils.fans_rank()
page_num = 10
before_page_num = 4
after_page_num = 4
paginator = Paginator(results, page_num)
try:
page = int(request.GET.get('page', '1'))
if page < 1:
page = 1
except ValueError:
page = 1
if page >= after_page_num:
page_range = paginator.page_range[page - after_page_num : page + before_page_num]
else:
page_range = paginator.page_range[:page + before_page_num]
try:
page_results = paginator.page(page)
    except (EmptyPage, InvalidPage) as e:
        page_results = paginator.page(paginator.num_pages)
        print(e)
return render_to_response('douban_fans_rank.html', {'title': '增粉排行榜', 'fans': page_results, 'page_range': page_range, 'data': json.dumps(page_results.object_list), 'update_time': update_time.strftime('%Y/%m/%d')})
|
[
"pantaovay@gmail.com"
] |
pantaovay@gmail.com
|
9c729cdc83e3d3bc15c0648008e69b005a80660f
|
6a57f556827d789c37c7a0ff721157f3c06a4131
|
/run.py
|
2be3bfb4a1d4eadd75d6f0fbff5bf2fad5f7fbfd
|
[] |
no_license
|
Spanarchian/tet_heroku
|
4761f81cea0ca1971869692fac4d65df44e7170d
|
90a16de81b6128820a8de68e8e24d3b91a1973e0
|
refs/heads/master
| 2020-03-31T05:54:32.657550
| 2018-10-07T17:11:56
| 2018-10-07T17:11:56
| 151,961,995
| 0
| 0
| null | 2018-10-07T16:48:26
| 2018-10-07T16:37:39
|
Python
|
UTF-8
|
Python
| false
| false
| 198
|
py
|
#!/usr/bin/python
from web import app
import connexion
# app = connexion.App(__name__, specification_dir='web/swagger/')
# app.add_api('my_api.yaml')
app.run(debug=True, host='0.0.0.0', port=8999)
|
[
"spanarchian@gmail.com"
] |
spanarchian@gmail.com
|
55982913ed3bbfc7dc134197425faad596895b4b
|
65c8a6a7af2ee8cdf3866d012ea814887bd68a26
|
/TestInterface/Common/Excel.py
|
83e109ea75a2d27526d19a72668adcf0da22baac
|
[] |
no_license
|
1282270620/automation_test
|
9b3c595c3f7a139ded0a638ae4bcf31e0b7f9686
|
3faf86f0d641089eaf27eba906d22157dd2c1f5d
|
refs/heads/master
| 2020-04-01T06:35:33.873989
| 2018-10-21T03:05:17
| 2018-10-21T03:05:17
| 152,954,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
from openpyxl import load_workbook
def read_test_case_data(file_path):
param_list = []
total_list = []
data_dict = {}
workbook = load_workbook(file_path)
worksheet = workbook.active
    columns = worksheet.max_column  # openpyxl exposes max_column (singular), not max_columns
for i in range(0,columns):
title = worksheet[1][i].value
param_list.append(title)
    rows = worksheet.max_row  # the row count lives on the worksheet, not the workbook
for row in range(2,rows+1):
data_list = []
for col in range(0,columns):
cell_value = worksheet[row][col].value
if cell_value is None:
cell_value = " "
            data_list.append(cell_value)  # append() is a call, not a subscript
total_list.append(data_list)
data_dict["param_list"] = param_list
data_dict["total_list"] = total_list
return data_dict
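if __name__ == "__main__":
    # Usage sketch (hypothetical workbook path, not part of the original module):
    # the first row is treated as the header, every later row becomes one list.
    data = read_test_case_data("test_cases.xlsx")
    print(data["param_list"])   # column titles
    print(data["total_list"])   # one list of cell values per data row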
|
[
"1282270620@qq.com"
] |
1282270620@qq.com
|
5b7d24df541998ed7e81ff93c7d68a8d44408fe5
|
192e7c0a7291c12aaf45b4981867809ef16447af
|
/test_board.py
|
7bddfaad35ad408a843522e6dc1158627a2b8fdc
|
[] |
no_license
|
rimbi/python-boggle
|
faab4c3d1f6cb2702b1437a407919fcfc0781d9c
|
30616c5b16231d64d53f61c1d758012d7f4b09e9
|
refs/heads/master
| 2020-03-25T05:25:53.056292
| 2018-08-08T11:36:46
| 2018-08-08T11:36:46
| 143,446,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,904
|
py
|
#!/usr/bin/env python
"""test_board.py: Tests for Board class."""
from expects import expect, be, equal
from board import Board, Cell
def test_board_should_say_no_when_the_word_is_not_in_board():
# given
board = Board(['ADHG', 'PDFF', 'EKJU', 'FTGT'])
# when
res = board.contains('CAT')
# then
expect(res).to(be(False))
#
# def test_board_should_recognize_verticle_words():
# # given
# board = Board(['CDHG', 'ADFF', 'TKJU', 'FTGT'])
# # when
# res = board.contains('CAT')
# # then
# expect(res).to(be(True))
def test_given_a_char_board_should_return_corresponding_cells():
# given
board = Board(['CDHG', 'ADFF', 'TKJU', 'FTGT'])
# when
res = board._get_cells_of_char('F')
# then
expect(list(res)).to(equal([Cell('F', 1, 2), Cell('F', 1, 3), Cell('F', 3, 0)]))
def test_given_a_corner_cell_it_should_return_coordinates_of_correct_neighbours():
# given
cell = Cell('C', 0, 0)
# when
res = cell.get_coordinates_of_neighbours()
# then
expect(set(res)).to(equal(set([(1, 0), (1, 1), (0, 1)])))
def test_given_a_edge_cell_it_should_return_coordinates_of_correct_neighbours():
# given
cell = Cell('C', 1, 0)
# when
res = cell.get_coordinates_of_neighbours()
# then
expect(set(res)).to(equal(set([(0, 0), (0, 1), (1, 1), (2, 0), (2, 1)])))
def test_cells_with_same_values_should_be_equal():
# given
cell1 = Cell('C', 1, 0)
cell2 = Cell('C', 1, 0)
# when
res = cell1 == cell2
# then
expect(res).to(be(True))
def test_given_a_corner_cell_board_should_return_neighbour_cells():
# given
board = Board(['CDHG', 'ADFF', 'TKJU', 'FTGT'])
cell = Cell('C', 0, 0)
# when
res = board._get_neighbour_cells(cell)
# then
expect(set(res)).to(equal(set([Cell('A', 1, 0), Cell('D', 1, 1), Cell('D', 0, 1)])))
def test_board_should_verify_single_char_words():
# given
board = Board(['CDHG', 'ADFF', 'TKJU', 'FTGT'])
# when
res = board.contains('F')
# then
expect(res).to(be(True))
def test_board_should_verify_vertical_two_chars_words():
# given
board = Board(['CDHG', 'ADFF', 'TKJU', 'FTGT'])
# when
res = board.contains('FU')
# then
expect(res).to(be(True))
def test_board_should_verify_horizontal_two_chars_words():
# given
board = Board(['CDHG', 'ADFF', 'TKJU', 'FTGT'])
# when
res = board.contains('AD')
# then
expect(res).to(be(True))
def test_board_should_verify_diagonal_two_chars_words():
# given
board = Board(['CDHG', 'ADFF', 'TKJU', 'FTGT'])
# when
res = board.contains('AT')
# then
expect(res).to(be(True))
def test_board_should_verify_vertical_plus_horizontal_words():
# given
board = Board(['CDHG', 'ADFF', 'TKJU', 'FSET'])
# when
res = board.contains('FUTES')
# then
expect(res).to(be(True))
|
[
"cemeliguzel@gmail.com"
] |
cemeliguzel@gmail.com
|
1481e8acc81c942c14c9c3693947baf07e18f0d6
|
752441ecb984c9527bfe38ee74f02d2eaaee217e
|
/salesrep/models.py
|
80624c6304a7daaf3586dbd4bf29268200578316
|
[] |
no_license
|
AtufaShireen/order-management-system
|
88a7c5a29dc8d5e7bb852bc7384e37997c9822ad
|
c16753f55847ecfba3318dc356ffa1f974161595
|
refs/heads/master
| 2023-04-12T12:02:45.698363
| 2021-05-11T20:39:29
| 2021-05-11T20:39:29
| 366,150,241
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,277
|
py
|
from django.db import models
from django.db.models import Avg, Count, Min, Sum
from datetime import datetime,timedelta
from django.utils import timezone
from django.db.models.signals import post_delete
from django.utils.translation import ugettext_lazy as _
from django.dispatch import receiver
import pytz
timeZ_Ny = pytz.timezone('Asia/Kolkata')
from collections import deque
team_q = deque(['Team A','Team B'])
teams=(
("A","Team A"),
("B","Team B"),
)
statuses=(
("Pending","Delivery Pending"),
("WithDrawm","Rejected by Company"),
("Rejected","Rejected by customer"),
("Delivered","Delivery completed"),
)
counter=1 # server needs to be running for creating unique order_num
def check_today():
try:
vx=OrderIntake.objects.latest().order_time.date()
date_today=datetime.today().date()
except OrderIntake.DoesNotExist:
return False
else:
if date_today == vx:
return True
else:
return False
def get_ord_date():
global counter
if check_today() == True:
counter+=1
else:
counter=1
return counter
def get_team():
t = team_q.pop()
team_q.appendleft(t)
return t
class RangeField(models.FloatField): # write on gfg
description = _("Integer field with range")
def __init__(self,min_val,max_val,*args,**kwargs):
self.min_val=min_val
self.max_val=max_val
super().__init__(*args, **kwargs)
def formfield(self, **kwargs):
min_value=self.min_val
max_value=self.max_val
return super().formfield(**{
'min_value': min_value,
'max_value': max_value,
**kwargs,
})
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs['min_val']=self.min_val
kwargs['max_val']=self.max_val
return name, path, args, kwargs
def rel_db_type(self, connection):
if connection.features.related_fields_match_type:
return self.db_type(connection)
else:
return models.FloatField().db_type(connection=connection)
# Create your models here.
class OrderIntake(models.Model):
order_num=models.CharField(default='OrderNumber',max_length=100,editable=False)
order_id=models.IntegerField(default=0,null=True)
cust_name=models.CharField(default='',max_length=60)
cust_add=models.CharField(default='',max_length=60)
distance=RangeField(min_val=0.1,max_val=10.1,default=0.1)
order_time=models.DateTimeField(default=timezone.now,editable=True)
estimated_time=models.DateTimeField(default=timezone.now,editable=True)
# return_time=models.DateTimeField(default=timezone.now,editable=True)
    team=models.CharField(choices=teams,default=get_team,max_length=60)  # pass the callable, not the string 'get_team'
status=models.CharField(choices=statuses,default="Pending",max_length=60)
total_price=models.FloatField(default=0.0)
def save(self,*args,**kwargs):
if self.id is None: # if its a new add
self.team=get_team()
self.order_id=get_ord_date()
self.order_num=f"{datetime.today().strftime('%d_%m_%Y')}_{self.order_id}"
super().save(*args,**kwargs)
def __str__(self):
return f'{self.order_num}'
class Meta:
get_latest_by = 'order_time'
@receiver(post_delete, sender=OrderIntake)
def my_handler(sender,instance, **kwargs):
global counter
if check_today()==True:
counter-=1
class ItemsIntake(models.Model):
item=models.CharField(default='',max_length=60)
quantity=models.IntegerField(default=1)
price=models.FloatField(default=0.0) # total price
order=models.ForeignKey(OrderIntake,related_name="order_items",on_delete=models.CASCADE)
def __str__(self):
return f'{self.item} in cart..'
categories=(
("Television","Television"),
("Refrigerator","Refrigerator"),
)
class Inventory(models.Model):
category=models.CharField(choices=categories,blank=True,max_length=60)
model_num=models.CharField(default='',max_length=60)
avail=models.IntegerField(default=0)
price=models.FloatField(default=0.0)
def __str__(self):
return f'{self.model_num} In..'
|
[
"atufashireen@gmail.com"
] |
atufashireen@gmail.com
|
104749dfcb25a977c7e9d35eee693760e72b92c5
|
4a6a34164f19e2e149ac0aa6382ae76fb1ec34ad
|
/tweets.py
|
53eb21a1086cc79449c38bb5bcc11ec9161ba29e
|
[] |
no_license
|
deepikaganesan/twitter
|
f57f57b17841a9da6281d9c98f6af3a42fc8e4a9
|
d497089a373e2e99009d6439cb3f5d018e97d8c3
|
refs/heads/master
| 2020-05-05T06:20:00.239236
| 2019-04-06T03:38:48
| 2019-04-06T03:38:48
| 179,784,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,503
|
py
|
from flask import Flask, render_template, request
import tweepy
consumer_key = "klc9lTZuJfxAalGGOIXFjTbhr"
consumer_secret = "gPhGZE1j6egZSXTkyw5p3mZdem2VhNb8aHxfCae7PtPggJKF8q"
access_token = "1112587571622637568-BR4xHHlqA7L0e58zp0bKB9U6I5AFfj"
access_token_secret = "GrcusxoUGerHchMsbmdTFRw6ZHbnCcI7NTa85GL2LjDLu"
app=Flask(__name__)
@app.route('/')
def index():
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
search=request.args.get('query')
public_tweets = api.user_timeline(search,count=10)
return render_template('ho.html', tweets=public_tweets)
if __name__=='__main__':
app.run(debug=True)
<!DOCTYPE html>
<html>
<head>
<title>Flask Tutorial</title>
<link rel="stylesheet" type="text/css" href="https://stackpath.bootstrapcdn.com/bootstrap/4.2.1/css/bootstrap.min.css">
</head>
<body>
<div class="container">
<div class="row justify-content-center">
<div class="col-md-6">
<div class="p-5">
<form class="form-inline" method="GET" action="{{ url_for('index') }}">
<input class="form-control" type="text" name="query">
<button class="btn btn-primary" type="submit">search</button>
</form>
</div>
{% for tweet in tweets %}
<div class="card mt-2">
<div class="card-body">
<div class="card-text">
{{ tweet._json.text }}
</div>
</div>
</div>
{% endfor %}
</div>
</div>
</div>
</body>
</html>
|
[
"ganesandeepika97@gmail.com"
] |
ganesandeepika97@gmail.com
|
62412d91b02cf2e4a9741be69033658baa67a980
|
a7064a51f9096db2d71e2a55560e968572b2f053
|
/L2/lesson2_4_step8.py
|
c06eb58a53639e5246c741d79a037f3861ed16dd
|
[] |
no_license
|
podushka/stepik-autotest-course
|
6ddfc2dd221c6267056095b1ea983ed29ca423dc
|
7db09e174ce1c10d4e643a7bd215ce24e23b12f5
|
refs/heads/main
| 2023-06-22T11:43:05.565157
| 2021-07-26T09:20:23
| 2021-07-26T09:20:23
| 389,284,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
import os, time, math
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
try:
link = "http://suninjuly.github.io/explicit_wait2.html"
browser = webdriver.Chrome()
browser.get(link)
price = WebDriverWait(browser, 12).until(EC.text_to_be_present_in_element((By.ID, 'price'), '100'))
button = browser.find_element_by_id('book')
button.click()
x = int(browser.find_element_by_id('input_value').text)
answer = browser.find_element_by_id('answer')
answer.send_keys(calc(x))
buttonq = browser.find_element_by_id('solve')
buttonq.click()
finally:
    # pause so the outcome of the script can be checked visually
time.sleep(10)
    # close the browser once everything is done
browser.quit()
print(os.path.abspath(__file__))
print(os.path.abspath(os.path.dirname(__file__)))
|
[
"r.tolokolnikov@gmail.com"
] |
r.tolokolnikov@gmail.com
|
6d9e2b0b712a499fb3a0db83a17c6896874bbc46
|
2a0a372a8d8d1e0cd27e827cb1310fe454ce621d
|
/golden_section_method.py
|
782da4a20df6810cd76c400f4b1d0cebbb2c3c0e
|
[] |
no_license
|
lovelyscientist/func-optimization-methods
|
7fa1ece6cf6f5af4bb5f571a0ddfe627a770f4f8
|
34da3268621a1757b48d5fe5fc7d1b63e4f09f99
|
refs/heads/master
| 2022-04-22T12:14:41.409264
| 2020-04-25T06:28:34
| 2020-04-25T06:28:34
| 107,968,244
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
PHI = 1.6180339887499
REVERSED_PHI = 1/PHI
def calculate(goal_function, a, b, epsilon):
x = 0
while (b - a) > epsilon:
lamda = b - (b - a)*REVERSED_PHI
mu = a + (b - a)*REVERSED_PHI
if goal_function(lamda) <= goal_function(mu):
b = mu
x = lamda
else:
a = lamda
x = mu
return x
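if __name__ == '__main__':
    # Usage sketch (hypothetical goal function, not part of the original file):
    # the minimum of (x - 2)**2 on [0, 5] should converge to x = 2 within epsilon.
    x_min = calculate(lambda x: (x - 2) ** 2, 0, 5, 1e-6)
    print(round(x_min, 4))  # approximately 2.0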
|
[
"tischenko.vlada@gmail.com"
] |
tischenko.vlada@gmail.com
|
e2d1ddc68461f5ae6c95b3a24f33b7dc98868114
|
7ef6adf2dd9dd300c3f145a030b6cc7466246182
|
/users/migrations/0002_customuser_random.py
|
4e3bc49bdcdfdf8dbe18bc32aeea66f04626cf54
|
[] |
no_license
|
grubberr/milo_django_task
|
f5f933723d7953ad2c91cd7810be4d43ea08d7c2
|
1f271b88abb26b3fb7fed1b3ae0d01b5c47443f8
|
refs/heads/master
| 2020-05-29T14:40:53.018260
| 2016-05-31T12:48:29
| 2016-05-31T12:48:29
| 60,007,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-30 12:06
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='random',
field=models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(100)]),
),
]
|
[
"grubberr@gmail.com"
] |
grubberr@gmail.com
|
11e6637c8cdf5481418ae3d932ccf8eb8de5f5ad
|
a8de4bf4f78c0c74b822292f100452a7a6a62d90
|
/SDM/apps/PullingController.py
|
d9ed23d6022f416b368ca89801e1ee642a3029ac
|
[
"Apache-2.0"
] |
permissive
|
jalilm/SDN-Monitoring
|
d7f78ccfdcf3b6552d58ab5a5dc108570686629e
|
4ba8dd0f0ed5e44c0e803713d6c82ee2c815c7e4
|
refs/heads/master
| 2021-05-03T11:44:24.295957
| 2016-10-06T07:58:26
| 2016-10-06T07:58:26
| 31,901,311
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
from datetime import datetime
from ryu.lib import hub
from SDM.apps.BaseController import BaseController
class PullingController(BaseController):
def __init__(self, *args, **kwargs):
super(PullingController, self).__init__(*args, **kwargs)
self.monitor_threads = {}
def after_datapaths_construction(self):
for dp in self.datapaths:
datapath = self.datapaths[dp]
self.monitor_threads[datapath] = hub.spawn(self.monitor, datapath)
def monitor(self, datapath):
time_step_number = 0
while True:
hub.sleep(self.parameters['RunParameters']['timeStep'])
time_step_number += 1
self.info('')
self.info('Time step #%d - ' + datetime.now().strftime('%H:%M:%S.%f'), time_step_number)
self.info('Sending stats request: %016x', datapath.id)
datapath.request_stats()
|
[
"jalilm@cs.technion.ac.il"
] |
jalilm@cs.technion.ac.il
|
e3ad770d194974649d0e233d158c3dcfd664d5c1
|
357fefa288745c9ab3bc276a7ef0bc815f3fec2a
|
/src/core/map.py
|
e51a6df05cc9041838d6ebc03ab9fe58bb0adde9
|
[
"MIT"
] |
permissive
|
jdvelasq/techminer
|
61da47f44719e462732627edcc1094fab6c173f1
|
7a34a9fd684ce56cfbab583fa1bb71c1669035f9
|
refs/heads/main
| 2023-03-15T23:26:22.876051
| 2023-03-13T21:47:24
| 2023-03-13T21:47:24
| 204,352,276
| 0
| 1
|
MIT
| 2019-12-09T02:37:11
| 2019-08-25T21:34:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
import pandas as pd
# from techminer.core.params import MULTIVALUED_COLS
def map_(x, column, f):
x = x.copy()
if x[column].dtype != "int64" and column != "Abstract" and column != "Title":
z = x[column].map(lambda w: w.split(";") if not pd.isna(w) else w)
z = z.map(lambda w: [f(z.strip()) for z in w] if isinstance(w, list) else w)
z = z.map(
lambda w: [z for z in w if not pd.isna(z)] if isinstance(w, list) else w
)
z = z.map(lambda w: ";".join(w) if isinstance(w, list) else w)
return z
# if column in [
# "Abstract_Phrase_Keywords",
# "Abstract_Phrase_Keywords_CL",
# "Abstract_Phrase_Author_Keywords",
# "Abstract_Phrase_Author_Keywords_CL",
# "Abstract_Phrase_Index_Keywords",
# "Abstract_Phrase_Index_Keywords_CL",
# ]:
# z = x[column].map(lambda w: w.split("//"), na_action="ignore")
# z = z.map(lambda w: [z.split(";") for z in w], na_action="ignore")
# z = z.map(lambda w: [[f(y.strip()) for y in z] for z in w], na_action="ignore")
# z = z.map(lambda w: [";".join(z) for z in w], na_action="ignore")
# z = z.map(lambda w: "//".join(w), na_action="ignore")
# return z
return x[column].map(lambda w: f(w))
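if __name__ == "__main__":
    # Usage sketch (hypothetical data, not part of the original module): apply
    # str.upper to every ';'-separated token of a text column; NaN passes through.
    df = pd.DataFrame({"Authors": ["smith; jones", None]})
    print(map_(df, "Authors", str.upper))  # row 0 -> "SMITH;JONES", row 1 -> NaN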
|
[
"jdvelasq@unal.edu.co"
] |
jdvelasq@unal.edu.co
|
9a74945ca5d6ee81c1216e99168488e60bf90245
|
1fc10c4ab99efa9207e638c4282f4912bd095cd9
|
/bot/models/credit.py
|
14b9d9595cc2aab1fe05829aee29fb7a37ec5b2b
|
[
"Apache-2.0"
] |
permissive
|
naderAbolfazli/boom
|
ef750545573eec032a6572eb0a35df91b4672675
|
f58cc002ad71206c031c3eabf8166a287ff42839
|
refs/heads/master
| 2020-04-17T00:27:31.070037
| 2019-04-16T11:59:45
| 2019-04-16T11:59:45
| 166,050,642
| 1
| 0
|
Apache-2.0
| 2019-01-18T05:05:02
| 2019-01-16T14:07:36
|
Python
|
UTF-8
|
Python
| false
| false
| 524
|
py
|
import datetime
from sqlalchemy import Column, Integer, Float, DateTime
from bot.models.base import Base
class Credit(Base):
__tablename__ = "credit"
id = Column(Integer, primary_key=True)
from_user = Column(Integer)
to_user = Column(Integer)
balance = Column(Float)
date_time = Column(DateTime)
def __init__(self, from_user, to_user, balance):
self.from_user = from_user
self.to_user = to_user
self.balance = balance
self.date_time = datetime.datetime.now()
|
[
"abolfazli.nader@gmail.com"
] |
abolfazli.nader@gmail.com
|
410fce7233c32bf39c91a2b8a33081646bc4200f
|
ed2755470d22451657f88267d21f1d3aa92be2f8
|
/exercise_3/free_space.py
|
92074ec81765075ba95999c1c9da5220d064cf4f
|
[] |
no_license
|
enrico-kaack/RoboticGames
|
ba6fe843995a6572d1b3abeffb4fa85642c1b022
|
b968d0e272ad989cc203cea125dd2ab3a5695474
|
refs/heads/master
| 2020-08-23T17:05:02.333340
| 2020-03-31T17:26:00
| 2020-03-31T17:26:00
| 216,669,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,330
|
py
|
#!/usr/bin/env python
import numpy as np
import rospy
from sensor_msgs.msg import PointCloud
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
'''
The entire collision avoidance is wrapped in a class so that the current velocity data
can be pulled out of the velocity subscriber's callback and reused when computing the
new target velocity.
Global variables could have been used here instead, but this approach is generally
considered more elegant in the community.
'''
class FreeSpace:
def __init__(self):
self.current_vel_x = 0.0
self.current_ang_z = 0.0
        '''
        Using a force-based approach means that the current velocity gets modified.
        For that it first has to be known.
        The robot in the simulation already provides it through a so-called subscriber.
        A tutorial can be found at:
        http://wiki.ros.org/ROS/Tutorials/WritingPublisherSubscriber%28python%29
        '''
rospy.Subscriber("/p3dx/p3dx_velocity_controller/odom", Odometry, self.velocity_callback)
rospy.Subscriber("/robotic_games/sonar", PointCloud, self.sonar_callback)
        '''
        The result of the computation is handed back to the robot as a target velocity.
        This happens through a so-called publisher
        (again see http://wiki.ros.org/ROS/Tutorials/WritingPublisherSubscriber%28python%29 );
        the name of the topic is dictated by the simulation.
        '''
self.col_avoid_publisher = rospy.Publisher("/p3dx/p3dx_velocity_controller/cmd_vel", Twist, queue_size=10)
rospy.spin()
def velocity_callback(self, current_odometry):
self.current_vel_x = current_odometry.twist.twist.linear.x
self.current_ang_z = current_odometry.twist.twist.angular.z
def sonar_callback(self, current_sonar_scan):
adjustment = Twist()
        # The robot's sonar readings are stored in the following array
sonar_points = current_sonar_scan.points
        # The orientation of the individual sensors is as follows:
sonar_angles = np.array([-90.0, -50.0, -30.0, -10.0, 10.0, 30.0, 50.0, 90.0])
sonar_angles = sonar_angles / 360.0 * 2 * np.pi
        # compute the distance of each reading
sonar_ranges = np.zeros(len(sonar_angles))
for i in range(0, len(sonar_angles)):
sonar_ranges[i] = np.sqrt(sonar_points[i].x**2 + sonar_points[i].y**2)
biggest_distance = np.amax(sonar_ranges)
        # check which sensor angle this biggest range comes from
indices_biggest_ranges = np.argmax(sonar_ranges)
id_biggest_range = indices_biggest_ranges if not isinstance(indices_biggest_ranges, list) else indices_biggest_ranges[0]
rospy.loginfo(id_biggest_range)
if id_biggest_range > 3:
adjustment.angular.z = -2
else:
adjustment.angular.z = 2
adjustment.linear.x = 0.5
self.col_avoid_publisher.publish(adjustment)
if __name__ == '__main__':
rospy.init_node("FreeSpace")
try:
node = FreeSpace()
except rospy.ROSInterruptException:
pass
|
[
"e.kaack@live.de"
] |
e.kaack@live.de
|
b092a8391b17115a173a7c6b1ce85f0c5ae84038
|
39589b58251f1f973268fb120596c43d6f7ba607
|
/main.py
|
0cc065709187d79b52aaba48f66d8e9265848005
|
[] |
no_license
|
koty/window-controller
|
04c6e76c59a5fe73ae3168b499e12f936dc87018
|
9493f3baf7c5f858d3e514de23a97d4d29610992
|
refs/heads/master
| 2023-05-31T09:45:03.452743
| 2020-07-02T21:22:47
| 2020-07-02T21:22:47
| 271,969,974
| 0
| 0
| null | 2023-05-22T23:31:03
| 2020-06-13T08:28:04
|
Python
|
UTF-8
|
Python
| false
| false
| 350
|
py
|
from data_sender import send
from thermo import getData
from window_opener import open_or_close_window
def entry_point():
    # read the current temperature
    data = getData()
    # send it to the spreadsheet
    result_json = send(data)
    # open or close the window depending on the result
    open_or_close_window(result_json['rows'])
if __name__ == '__main__':
entry_point()
|
[
"kouichi.nishizawa+bitbucket@gmail.com"
] |
kouichi.nishizawa+bitbucket@gmail.com
|
51d136a030c6b856b55533e03a606203b9769cdf
|
3536e1b19fc412c5d7258b294e60587397609500
|
/webBP/models/user.py
|
ad816cd965d028bb07d64079d038e8569965ab35
|
[] |
no_license
|
jozefzivcic/bp
|
0bfb395876e62eccbedc5806d0365d30db60632d
|
634bb0bad3d69bdf21435812adfd663a2e2efaa0
|
refs/heads/master
| 2021-03-27T10:29:21.853168
| 2017-05-06T19:54:07
| 2017-05-06T19:54:07
| 46,180,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
class User:
def __init__(self, name='', passwd=''):
"""
Initializes object User().
"""
self.id = 0
self.name = name
self.password = passwd
|
[
"jozefzivcic@gmail.com"
] |
jozefzivcic@gmail.com
|
67a0718b921c973eb8de3545032a6aa9a7e71156
|
2ba68010eedaf1f9f5841a4291a2633428a2ed31
|
/practical11/createtable.py
|
bf752285caada443999c0e6a061c99aea5879269
|
[] |
no_license
|
karnikashukla/python
|
b28ad08bf27620e997ae6f3357ea854d45b0442d
|
2525437cfbcf6d516589899dfb7e42984beb32ec
|
refs/heads/master
| 2020-07-13T15:29:09.556998
| 2019-09-26T07:18:33
| 2019-09-26T07:18:33
| 205,106,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
import mysql.connector
from mysql.connector import Error

try:
    con = mysql.connector.connect(host="localhost", database="python", user="root", password="mcalab")
    print("Connected..!!")
    querycreatetable = "create table Student (name varchar(20) not null ,"
    querycreatetable = querycreatetable + "birthdate date ,gender char(1),"
    querycreatetable = querycreatetable + "semester int(1),python_marks decimal,"
    querycreatetable = querycreatetable + "java_marks decimal,php_marks decimal,"
    querycreatetable = querycreatetable + "total_marks decimal,percentage decimal,"
    querycreatetable = querycreatetable + "grade char(1))"
    print(querycreatetable)
    cursor = con.cursor()
    cursor.execute(querycreatetable)
    print("Table created successfully..!")
except Error as e:
    print("Error : ", e)
|
[
"noreply@github.com"
] |
karnikashukla.noreply@github.com
|
5099f5e4125e35576d50804c57b745af27a90ce6
|
d31bcf4849f0c049b5d492f0bfa2ebc204393535
|
/resource/apps.py
|
53b227ef839f5b9543714ad4bfeda28dda96b6ce
|
[] |
no_license
|
gqxie/kangni
|
2e1ad7455a36aee2a9f7cd3abe5f517450f7f400
|
32b63b84712700759c00254288c2d5b2a5bbf2b4
|
refs/heads/master
| 2022-03-17T19:19:59.179976
| 2019-11-01T07:39:17
| 2019-11-01T07:39:17
| 190,677,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 125
|
py
|
from django.apps import AppConfig
class ResourceConfig(AppConfig):
name = 'resource'
verbose_name = '综合管理'
|
[
"xieguoqiang@chezhibao.com"
] |
xieguoqiang@chezhibao.com
|
4b2c832732ca282c4091ea254b3e8ede8639ed30
|
49ecc6b8af0eacf3e2e38ce16ba374bc210e538a
|
/src/controllers/order_controller.py
|
e72de367450658aa779191b7cb8e73842262fd24
|
[] |
no_license
|
HarryTranAU/shopify_feature
|
a65e68448f45cda4b5856a02502489f89778843d
|
1d65a0d91288427926e4efa6116f3545856dfaab
|
refs/heads/master
| 2023-02-10T15:05:42.244055
| 2021-01-04T07:55:51
| 2021-01-04T07:55:51
| 321,240,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,570
|
py
|
from models.Order import Order
from models.Store import Store
from models.Customer import Customer
from models.Product import Product
from main import db
from schemas.OrderSchema import order_schema, orders_schema
from flask import Blueprint, request, jsonify, abort, Response
from services.auth_service import verify_user
from flask_jwt_extended import jwt_required
order = Blueprint("orders",
__name__,
url_prefix="/<int:storeId>/order")
@order.route("/", methods=["GET"])
def order_index(storeId):
orders = Order.query.join(Customer)\
.join(Store)\
.filter(Customer.store_id == storeId).all()
return jsonify(orders_schema.dump(orders))
@order.route("/<int:customerID>", methods=["POST"])
def order_create(storeId, customerID):
order_fields = order_schema.load(request.json)
new_order = Order()
new_order.order_placed = order_fields["order_placed"]
cart = order_fields["cart"]
for item in cart:
item_query = Product.query.filter_by(id=item).first()
new_order.orders_products.append(item_query)
db.session.commit()
new_order.customer_id = customerID
customer = Customer.query.filter_by(id=customerID).first()
if not customer:
return abort(400, description="Incorrect customer")
customer.order.append(new_order)
db.session.commit()
return jsonify(order_schema.dump(new_order))
@order.route("/delete/<int:orderID>", methods=["DELETE"])
@jwt_required
@verify_user
def order_delete(user, storeId, orderID):
store = Store.query.filter_by(id=storeId, user_id=user.id).first()
if not store:
return abort(400, description="Incorrect storeID in URL")
order = Order.query.filter_by(id=orderID).first()
if not order:
return abort(400, description="orderID does not exist")
db.session.delete(order)
db.session.commit()
return abort(Response("Order deleted successfully"))
@order.route("/checkout/<int:orderID>", methods=["PUT", "PATCH"])
@jwt_required
@verify_user
def order_checkout(user, storeId, orderID):
order_fields = order_schema.load(request.json)
store = Store.query.filter_by(id=storeId, user_id=user.id).first()
if not store:
return abort(400, description="Incorrect storeID in URL")
order = Order.query.filter_by(id=orderID)
if not order:
return abort(400, description="orderID does not exist")
order.update(order_fields)
db.session.commit()
return jsonify(order_schema.dump(order[0]))
@order.route("/sum/<int:orderID>", methods=["GET"])
@jwt_required
@verify_user
def order_sum(user, storeId, orderID):
store = Store.query.filter_by(id=storeId, user_id=user.id).first()
if not store:
return abort(400, description="Incorrect storeID in URL")
order = db.session.query(Order).filter_by(id=orderID).one()
sum = 0
for item in order.orders_products:
sum += item.price
return jsonify({"Order Total": int(sum)})
@order.route("/abandoned", methods=["GET"])
@jwt_required
@verify_user
def order_abandoned(user, storeId):
store = Store.query.filter_by(id=storeId, user_id=user.id).first()
if not store:
return abort(400, description="Incorrect storeID in URL")
orders = Order.query.filter_by(order_placed=False)\
.join(Customer)\
.join(Store)\
.filter(Customer.store_id == storeId)\
.all()
return jsonify(orders_schema.dump(orders))
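# Example requests against this blueprint (hypothetical store/customer/order IDs,
# sketch only; the url_prefix supplies the leading store id):
#   GET    /1/order/             -> list every order for store 1
#   POST   /1/order/5            -> create an order for customer 5 from a cart
#   PUT    /1/order/checkout/9   -> mark order 9 as placed (JWT required)
#   DELETE /1/order/delete/9     -> delete order 9 (JWT required)
#   GET    /1/order/abandoned    -> orders never checked out (JWT required)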
|
[
"css012013@coderacademy.edu.au"
] |
css012013@coderacademy.edu.au
|
776e4aec6458a9eaeb6af74da807c15f9e38d6c4
|
2324dea2cb3003c8ab7e8fd80588d44973eb8c77
|
/Euler_4_204.py
|
9e1eee008b0582451d080d8893bf8bf651eef46d
|
[] |
no_license
|
MikeOcc/MyProjectEulerFiles
|
5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56
|
4d066d52380aade215636953589bf56d6b88f745
|
refs/heads/master
| 2021-01-16T18:45:44.133229
| 2015-05-27T18:28:43
| 2015-05-27T18:28:43
| 5,876,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
#
# Euler 204
#
from Functions import IsPrime
from itertools import combinations

factors = []
for i in range(2, 100):
    if IsPrime(i):
        factors.append(i)

print(factors)

x = combinations(factors, len(factors) // 2)
ctr = 0
for z in x:
    print(z)
    mut = 1
    for y in z:
        mut *= y
    # test the product against the bound, not each individual factor
    if mut <= 10**9:
        ctr += 1
print(ctr)
|
[
"mike.occhipinti@mlsassistant.com"
] |
mike.occhipinti@mlsassistant.com
|
47ce89932269ec0784f092efbc334955db53900f
|
b8a31aefcd6d9d4c5a71059f20313f25c6d15567
|
/dataset.py
|
00451c17056fc7fbd3bcd8f6ff641e562e54f3ee
|
[] |
no_license
|
jjrico/ANN-Perceptron-Character-Recognition
|
fab32e01bc3d13fc9756a0cd90d8ba1d3ebbafb9
|
48a3defae851a893e816380cc6fc953c42bda1d7
|
refs/heads/master
| 2021-01-15T00:44:21.824669
| 2020-03-11T23:22:56
| 2020-03-11T23:22:56
| 242,817,507
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,146
|
py
|
# dataset.py
#
# 5x7 dot-matrix fonts
#
# Converted from hexadecimal and rotated counter-clockwise.
#
# Training data from https://github.com/noopkat/oled-font-5x7
# Test data from https://geoffg.net/Downloads/GLCD_Driver/glcd_library_1_0.h
#
TRAINING_DATA = [
[
'.###.',
'#...#',
'#...#',
'#...#',
'#####',
'#...#',
'#...#',
],
[
'####.',
'#...#',
'#...#',
'####.',
'#...#',
'#...#',
'####.',
],
[
'.###.',
'#...#',
'#....',
'#....',
'#....',
'#...#',
'.###.',
],
[
'###..',
'#..#.',
'#...#',
'#...#',
'#...#',
'#..#.',
'###..',
],
[
'#####',
'#....',
'#....',
'####.',
'#....',
'#....',
'#####',
],
[
'#####',
'#....',
'#....',
'###..',
'#....',
'#....',
'#....',
],
[
'.###.',
'#...#',
'#....',
'#....',
'#..##',
'#...#',
'.###.',
],
[
'#...#',
'#...#',
'#...#',
'#####',
'#...#',
'#...#',
'#...#',
],
[
'.###.',
'..#..',
'..#..',
'..#..',
'..#..',
'..#..',
'.###.',
],
[
'..###',
'...#.',
'...#.',
'...#.',
'...#.',
'#..#.',
'.##..',
],
[
'#...#',
'#..#.',
'#.#..',
'##...',
'#.#..',
'#..#.',
'#...#',
],
[
'#....',
'#....',
'#....',
'#....',
'#....',
'#....',
'#####',
],
[
'#...#',
'##.##',
'#.#.#',
'#...#',
'#...#',
'#...#',
'#...#',
],
[
'#...#',
'#...#',
'##..#',
'#.#.#',
'#..##',
'#...#',
'#...#',
],
[
'.###.',
'#...#',
'#...#',
'#...#',
'#...#',
'#...#',
'.###.',
],
[
'####.',
'#...#',
'#...#',
'####.',
'#....',
'#....',
'#....',
],
[
'.###.',
'#...#',
'#...#',
'#...#',
'#.#.#',
'#..#.',
'.##.#',
],
[
'####.',
'#...#',
'#...#',
'####.',
'#.#..',
'#..#.',
'#...#',
],
[
'.####',
'#....',
'#....',
'.###.',
'....#',
'....#',
'####.',
],
[
'#####',
'..#..',
'..#..',
'..#..',
'..#..',
'..#..',
'..#..',
],
[
'#...#',
'#...#',
'#...#',
'#...#',
'#...#',
'#...#',
'.###.',
],
[
'#...#',
'#...#',
'#...#',
'#...#',
'#...#',
'.#.#.',
'..#..',
],
[
'#...#',
'#...#',
'#...#',
'#.#.#',
'#.#.#',
'##.##',
'#...#',
],
[
'#...#',
'#...#',
'.#.#.',
'..#..',
'.#.#.',
'#...#',
'#...#',
],
[
'#...#',
'#...#',
'.#.#.',
'..#..',
'..#..',
'..#..',
'..#..',
],
[
'#####',
'....#',
'...#.',
'..#..',
'.#...',
'#....',
'#####',
],
]
TEST_DATA = [
[
'..#..',
'.#.#.',
'#...#',
'#...#',
'#####',
'#...#',
'#...#',
],
[
'.###.',
'#...#',
'#...#',
'####.',
'#...#',
'#...#',
'####.',
],
[
'.###.',
'#...#',
'#....',
'#....',
'#....',
'#...#',
'.###.',
],
[
'####.',
'#...#',
'#...#',
'#...#',
'#...#',
'#...#',
'####.',
],
[
'#####',
'#....',
'#....',
'###..',
'#....',
'#....',
'#####',
],
[
'#####',
'#....',
'#....',
'####.',
'#....',
'#....',
'#....',
],
[
'.###.',
'#...#',
'#....',
'#..##',
'#...#',
'#...#',
'.###.',
],
[
'#...#',
'#...#',
'#...#',
'#####',
'#...#',
'#...#',
'#...#',
],
[
'.###.',
'..#..',
'..#..',
'..#..',
'..#..',
'..#..',
'.###.',
],
[
'....#',
'....#',
'....#',
'....#',
'#...#',
'#...#',
'.###.',
],
[
'#...#',
'#..#.',
'#.#..',
'##...',
'#.#..',
'#..#.',
'#...#',
],
[
'#....',
'#....',
'#....',
'#....',
'#....',
'#....',
'#####',
],
[
'#...#',
'##.##',
'#.#.#',
'#.#.#',
'#...#',
'#...#',
'#...#',
],
[
'#...#',
'##..#',
'#.#.#',
'#..##',
'#...#',
'#...#',
'#...#',
],
[
'.###.',
'#...#',
'#...#',
'#...#',
'#...#',
'#...#',
'.###.',
],
[
'####.',
'#...#',
'#...#',
'####.',
'#....',
'#....',
'#....',
],
[
'.###.',
'#...#',
'#...#',
'#...#',
'#...#',
'.###.',
'....#',
],
[
'####.',
'#...#',
'#...#',
'####.',
'#...#',
'#...#',
'#...#',
],
[
'.###.',
'#...#',
'#....',
'.###.',
'....#',
'#...#',
'.###.',
],
[
'#####',
'..#..',
'..#..',
'..#..',
'..#..',
'..#..',
'..#..',
],
[
'#...#',
'#...#',
'#...#',
'#...#',
'#...#',
'#...#',
'.###.',
],
[
'#...#',
'#...#',
'#...#',
'#...#',
'#...#',
'.#.#.',
'..#..',
],
[
'#...#',
'#...#',
'#...#',
'#...#',
'#.#.#',
'##.##',
'#...#',
],
[
'#...#',
'.#.#.',
'..#..',
'..#..',
'..#..',
'.#.#.',
'#...#',
],
[
'#...#',
'#...#',
'#...#',
'.#.#.',
'..#..',
'..#..',
'..#..',
],
[
'#####',
'....#',
'...#.',
'..#..',
'.#...',
'#....',
'#####',
],
]
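# Helper sketch (not part of the original font data): a perceptron typically
# consumes each 5x7 glyph as a flat 0/1 vector, which this builds.
def _to_vector(glyph):
    # '#' pixels become 1, '.' pixels become 0, rows concatenated top to bottom
    return [1 if ch == '#' else 0 for row in glyph for ch in row]

if __name__ == '__main__':
    # every glyph flattens to 7 rows * 5 columns = 35 inputs
    assert len(_to_vector(TRAINING_DATA[0])) == 35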
|
[
"noreply@github.com"
] |
jjrico.noreply@github.com
|
1e8ba8241496ea970912a7d157671ac23574026d
|
0a91c1c44e253b634a3baed57a7ac69ca71a9c0e
|
/conditionals/penguinConditionalWith_for.py
|
166fa83b7be9d6e7f35a3bf2fc98f7f7d7ee2ad8
|
[
"CC0-1.0"
] |
permissive
|
cmulliss/turtles-doing-things
|
fb515ab19e61566c8acd7293d46ec66908b0f0b8
|
70c8241bcf6d3b37104a59e92b5cf5a002fcb0bf
|
refs/heads/master
| 2021-09-24T00:44:08.919979
| 2021-09-19T16:17:14
| 2021-09-19T16:17:14
| 58,326,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
#!/usr/bin/python
# declare a penguins list and give it members, then loop through it, assigning
# each member to favPenguins and printing it
penguins = ['rockhopper', 'emperor', 'gentoo', 'king']
for favPenguins in penguins:
print(favPenguins)
#can enumerate too
for index, favPenguins in enumerate(penguins):
print (index, favPenguins)
penguins = 'Penguins'
for i in penguins:
print(i)
# Using range for looping through numbers; range() has the signature
# range(start, stop, step), and the stop value itself is never included
for i in range(2, 12, 3):
print (i)
|
[
"cmulliss@gmail.com"
] |
cmulliss@gmail.com
|
6112ddd8f29a30bbb8770d8447e579f9e1145ec2
|
ccf99b6cb57ea045ba3efa0bf3b814520817e378
|
/test/test_po2_to_so2.py
|
c0a14d9cd8630fe5b6c6f39b9d1ea988a0849207
|
[
"MIT"
] |
permissive
|
bakenzua/po2so2
|
cbe1daec0686d6bf615d044d16e52e1dc1957fd3
|
3aafe19d223c7d9c48a9b0bab99ca75beb779782
|
refs/heads/master
| 2021-05-13T23:13:35.660484
| 2018-01-29T12:24:21
| 2018-01-29T12:24:21
| 116,508,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
from unittest import TestCase
import numpy as np
from po2so2 import thomas_po2_to_so2
class TestPo2ToSo2(TestCase):
def test_scalar_calculation(self):
po2 = 5.653266
self.assertEqual(thomas_po2_to_so2(po2, kpa=False), 0.010272119260622511)
po2 = 125.0
self.assertEqual(thomas_po2_to_so2(po2, kpa=False), 0.98428832449680714)
def test_numpy_array_calculation(self):
po2s = np.array([1.884422, 2.512563, 3.140704, 3.768844])
self.assertEqual(thomas_po2_to_so2(po2s).size, 4)
|
[
"barnaby.sanderson@gstt.nhs.uk"
] |
barnaby.sanderson@gstt.nhs.uk
|
61118e2908aec8f5d9dc4ed73e8afa25f3cfce6d
|
60657084df602869030a80f6a151ab642be086da
|
/demoshop/settings/components/apps.py
|
ee5d2e8a3cfab05e3510632194f73a6a4929305a
|
[] |
no_license
|
aldarund/djoscarshop
|
41fc7778551476bfccd33a0406b6344af8dff5ab
|
4fd5e2437592d174f1bc5b4721ffab93aa524124
|
refs/heads/master
| 2020-12-24T14:10:34.069700
| 2015-02-19T14:38:08
| 2015-02-19T14:38:08
| 29,343,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
# Application definition
import oscar

INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
'django.contrib.sitemaps',
'compressor',
) + tuple(oscar.get_core_apps([
'demoshop.basket',
'demoshop.promotions',
'demoshop.catalogue',
'demoshop.dashboard',
'demoshop.dashboard.promotions',
]))
|
[
"imdagger@yandex.ru"
] |
imdagger@yandex.ru
|
c6a662fa0401545386201f2ee40694da29db8886
|
220459182037b6c33dea601dd049e2961a7e0503
|
/basic_line.py
|
d914f6d4671827ebda57a9e22ec2a1567826d79f
|
[] |
no_license
|
hillws/data_visualization
|
03883b6761d00db0643bf52a4f150ede1a977ccc
|
1b54bd01ca038ae443551eec768ce2398cad6111
|
refs/heads/master
| 2021-01-10T10:43:16.909246
| 2016-03-03T07:51:49
| 2016-03-03T07:51:49
| 53,015,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4], [5, 7, 9, 4])
plt.show()
|
[
"hill.ws@gmail.com"
] |
hill.ws@gmail.com
|
882286783d98b167e15a8312231ea6104bf67007
|
475295bee54c659b4de9588a62c3ea50fe9999af
|
/migrations/versions/b65fb71ed283_.py
|
8e6805ea2870babf85eb6a25cd6905d6fd5b1877
|
[] |
no_license
|
anyric/Flask_blog
|
ed9b29334c01d5210196ba12d4d0ef6da669e49c
|
8bc118c9f3baeec09cee060b5554abdd154e71e0
|
refs/heads/master
| 2020-03-17T06:44:07.797328
| 2018-06-04T17:06:53
| 2018-06-04T17:06:53
| 133,367,408
| 0
| 0
| null | 2018-06-04T17:06:54
| 2018-05-14T13:47:16
|
Python
|
UTF-8
|
Python
| false
| false
| 781
|
py
|
"""empty message
Revision ID: b65fb71ed283
Revises: c46db5818600
Create Date: 2018-05-19 23:00:12.915023
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b65fb71ed283'
down_revision = 'c46db5818600'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('about_me', sa.String(length=140), nullable=True))
op.add_column('user', sa.Column('last_seen', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'last_seen')
op.drop_column('user', 'about_me')
# ### end Alembic commands ###
|
[
"anyamaronyango@gmail.com"
] |
anyamaronyango@gmail.com
|
6f924c5bd1c9ff710845a6e9d62a91c262343429
|
8281a209e636af52f2de230a22dc6ff739baff5f
|
/lstm.py
|
b0f35219183416c73715cfd6d869982356af6760
|
[] |
no_license
|
shwinshaker/CS253-PA4
|
e7b3b40bdce0989a177b9aaf2e2bcce7b8e34f31
|
8dad0248140df37cf8fbd4ba560ac79fb4b71bfb
|
refs/heads/master
| 2020-04-24T18:36:30.822498
| 2019-04-05T01:47:50
| 2019-04-05T01:47:50
| 172,185,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,244
|
py
|
import torch
import torch.nn as nn
from music_dataloader import createLoaders
import numpy as np
import time
import shutil
class Evaluation():
def __init__(self):
self.epoch = 1
self.loss = .0
self.count_data = 0
self.count_save = 0
self.count_chunk = 0
self.history = {}
def reset(self, epoch):
self.epoch = epoch
self.loss = .0
self.count_data = 0
self.count_save = 0
self.count_chunk = 0
self.history[epoch] = []
def __call__(self, loss, outputs):
loss_ = loss.cpu().detach().numpy()
outputs_ = outputs.cpu().detach().numpy().squeeze()
chunk_size = outputs_.shape[0]
self.loss += loss_ * chunk_size
self.count_data += chunk_size
self.count_chunk += 1
def avg_loss(self):
return self.loss / self.count_data
def save(self, train_loss, val_loss):
self.count_save += 1
self.history[self.epoch].append((train_loss, val_loss))
# lstm model
class Composer(nn.Module):
def __init__(self, dim=93, hidden_dim=100, device=None):
super(Composer, self).__init__()
self.dim = dim
self.hidden_dim = hidden_dim
self.lstm = nn.LSTM(input_size=dim, hidden_size=hidden_dim,
batch_first=True)
self.linear = nn.Linear(hidden_dim, dim)
self.hidden = self._init_hidden(device)
def _init_hidden(self, device):
return [torch.zeros([1, 1, self.hidden_dim]).to(device),
torch.zeros([1, 1, self.hidden_dim]).to(device)]
def forward(self, chunk):
assert(chunk.shape[0]==1)
# assert(chunk.shape[1]==100)
assert(chunk.shape[2]==93)
self.hidden = [h.detach() for h in self.hidden]
output, self.hidden = self.lstm(chunk, self.hidden)
opt_chunk = self.linear(output.view(chunk.shape[1], -1))
return opt_chunk # output
def preprocessing(chunk_size=100):
# load data
loaders, encoder = createLoaders(extras=extras, chunk_size=chunk_size)
dataloaders = dict(zip(['train', 'val', 'test'], loaders))
print('------- Info ---------')
for phase in dataloaders:
print('- %s size: %i' % (phase, len(dataloaders[phase])))
print('----------------------')
return dataloaders, encoder
def build_model(input_dim=93, hidden_dim=100, learning_rate=0.1, device=None):
model = Composer(dim=input_dim, hidden_dim=hidden_dim, device=device)
# run on the gpu or cpu
model = model.to(device)
criterion = nn.CrossEntropyLoss()
# optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
return model, criterion, optimizer
def train_model(model, criterion, optimizer, dataloaders,
num_epochs=1, best_loss=10, chunk_size=100,
evaluate=Evaluation(), device=None, istest=False):
# init timer
since = time.time()
start_epoch = evaluate.epoch
step = 500 * 100 // chunk_size
if istest: step = 10
for epoch in range(start_epoch, num_epochs+1):
print('\nEpoch {}/{}'.format(epoch, num_epochs))
print('-' * 10)
## reset evaluator in a new epoch
evaluate.reset(epoch)
for i, (inputs, targets) in enumerate(dataloaders['train']):
# Put the minibatch data in CUDA Tensors and run on the GPU if supported
inputs, targets = inputs.to(device), targets.to(device)
model.zero_grad()
# regular stuff
outputs = model(inputs)
# squeeze the unnecessary batchsize dim
loss = criterion(outputs, targets.squeeze())
loss.backward()
optimizer.step()
# evaluation
evaluate(loss, outputs)
# validate every n chunks
if i % step == 0:
train_loss = evaluate.avg_loss()
# validate first
val_loss = validate_model(model, criterion,
dataloaders['val'],
istest=istest,
device=device)
# update best loss
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
# verbose
print('[%i] '
'train-loss: %.4f '
'val-loss: %.4f '
'' % (evaluate.count_save,
train_loss,
val_loss))
# save for plot
evaluate.save(train_loss, val_loss)
save_checkpoint({'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'best_loss': best_loss,
'history': evaluate}, is_best)
if istest:
if i == 100: break
time_elapsed = time.time() - since
print('\nTraining complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
# could also be use to test
def validate_model(model, criterion, loader, device=None, verbose=False, istest=False):
model.eval() # Set model to evaluate mode
evaluate = Evaluation()
step = 50
if istest: step = 1
with torch.no_grad():
for j, (inputs, targets) in enumerate(loader):
# Put the minibatch data in CUDA Tensors and run on the GPU if supported
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets.squeeze())
evaluate(loss, outputs)
if verbose:
if j % step == 0:
print('[%i] val-loss: %.4f' % (j, evaluate.avg_loss()))
if istest:
if j == 2: break
model.train() # Set model to training mode
return evaluate.avg_loss()
def save_checkpoint(state, is_best):
filename='checkpoint'+str(model_num)+'.pth.tar'
bestname='model_best'+str(model_num)+'.pth.tar'
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, bestname)
def check_cuda():
# Check if your system supports CUDA
use_cuda = torch.cuda.is_available()
# Setup GPU optimization if CUDA is supported
if use_cuda:
device = torch.device("cuda")
extras = {"num_workers": 1, "pin_memory": True}
else: # Otherwise, train on the CPU
device = torch.device("cpu")
extras = False
return use_cuda, device, extras
def main(learning_rate=0.01, hidden_size=100, chunk_size=100, device=None):
# hyperparameters
num_epochs = 50
# learning_rate = 0.1
# hidden_size = 100
# chunk_size = 100
print('------- Hypers --------\n'
'- epochs: %i\n'
'- learning rate: %g\n'
'- hidden size: %i\n'
'- chunk size: %i\n'
'----------------'
'' % (num_epochs, learning_rate, hidden_size, chunk_size))
dataloaders, encoder = preprocessing(chunk_size=chunk_size)
# save loader and encoder for later use
torch.save({'loaders': dataloaders,
'encoder': encoder,
'hidden_size': hidden_size},
'init'+str(model_num)+'.pth.tar')
model, criterion, optimizer = build_model(input_dim=encoder.length,
hidden_dim=hidden_size,
learning_rate=learning_rate,
device=device)
if resume:
print('---> loading checkpoint')
path = 'checkpoint'+str(model_num)+'.pth.tar'
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
evaluate = checkpoint['history']
best_loss = checkpoint['best_loss']
else:
best_loss = 10 # anything as long as sufficiently large
evaluate = Evaluation()
train_model(model, criterion, optimizer, dataloaders,
num_epochs=num_epochs, evaluate=evaluate, chunk_size=chunk_size,
best_loss=best_loss, istest=debug, device=device)
if __name__ == "__main__":
# global parameters
torch.manual_seed(7)
model_num = 0
debug = False # debug mode
resume = False # requires former checkpoint file
use_cuda, device, extras = check_cuda()
print('\n------- Globals --------\n'
'- resume training: %s\n'
'- debug mode: %s\n'
'- # model: %i\n'
'- cuda supported: %s\n'
'------------------------'
'' % ('yes' if resume else 'no',
'on' if debug else 'off',
model_num,
'yes' if use_cuda else 'no'))
main(device=device)
|
[
"740992427@qq.com"
] |
740992427@qq.com
|
099f3655b36e05bb4ae5f8df6067c36b9cf5b5aa
|
312fe5a7489a9ec9f51626dc8f29abbd05f4a2f0
|
/distutils/msvccompiler.py
|
0c9e1b8f70579e1d29498d79b1444ae9c6e7a459
|
[] |
no_license
|
eskrano/3coursework
|
940d3bf768d0b60a5aac543086280da39506a5e5
|
55fd26475ae4885f396d14374b876c2b6643ee99
|
refs/heads/master
| 2016-08-11T07:04:39.416844
| 2015-12-20T22:59:45
| 2015-12-20T22:59:45
| 44,703,992
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,119
|
py
|
"""distutils.msvccompiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
import sys, os
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils import log
_can_read_reg = False
try:
import winreg
_can_read_reg = True
hkey_mod = winreg
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegError = winreg.error
except ImportError:
try:
import win32api
import win32con
_can_read_reg = True
hkey_mod = win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegEnumKey = win32api.RegEnumKey
RegEnumValue = win32api.RegEnumValue
RegError = win32api.error
except ImportError:
log.info("Warning: Can't read registry to find the "
"necessary compiler setting\n"
"Make sure that Python modules winreg, "
"win32api or win32con are installed.")
pass
if _can_read_reg:
HKEYS = (hkey_mod.HKEY_USERS,
hkey_mod.HKEY_CURRENT_USER,
hkey_mod.HKEY_LOCAL_MACHINE,
hkey_mod.HKEY_CLASSES_ROOT)
def read_keys(base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
def read_values(base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[convert_mbcs(name)] = convert_mbcs(value)
i += 1
return d
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.load_macros(version)
def set_macro(self, macro, path, key):
for base in HKEYS:
d = read_values(base, path)
if d:
self.macros["$(%s)" % macro] = d[key]
break
def load_macros(self, version):
vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
net = r"Software\Microsoft\.NETFramework"
self.set_macro("FrameworkDir", net, "installroot")
try:
if version > 7.0:
self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
else:
self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
except KeyError as exc: #
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2003;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2003 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = read_values(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
def get_build_architecture():
"""Return the processor architecture.
Possible results are "Intel", "Itanium", or "AMD64".
"""
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return "Intel"
j = sys.version.find(")", i)
return sys.version[i+len(prefix):j]
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
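# Example (Windows path semantics): ['C:\\VC\\bin', 'C:\\VC\\bin\\', 'C:\\SDK']
# reduces to ['C:\\VC\\bin', 'C:\\SDK']; normpath collapses the trailing
# backslash and the first occurrence of each normalized path is kept.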
class MSVCCompiler(CCompiler):
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = get_build_version()
self.__arch = get_build_architecture()
if self.__arch == "Intel":
# x86
if self.__version >= 7:
self.__root = r"Software\Microsoft\VisualStudio"
self.__macros = MacroExpander(self.__version)
else:
self.__root = r"Software\Microsoft\Devstudio"
self.__product = "Visual Studio version %s" % self.__version
else:
# Win64. Assume this was built with the platform SDK
self.__product = "Microsoft SDK compiler %s" % (self.__version + 6)
self.initialized = False
def initialize(self):
self.__paths = []
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
self.__paths = self.get_msvc_paths("path")
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
self.set_path_env_var('lib')
self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "Intel":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
]
else:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
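    # After initialize() returns, self.cc/self.linker/self.lib/self.rc/self.mc
    # name the build tools (absolute paths where find_exe located them, bare
    # executable names otherwise), and in the registry-driven branch
    # os.environ['path'] has been extended with the MSVC tool directories.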
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
        if output_dir is None:
            output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
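    # Example mapping with strip_dir=0 and output_dir='build' (os.sep is
    # '\\' on Windows): 'foo.c' -> 'build\\foo.obj', 'bar.rc' -> 'build\\bar.res',
    # 'app.mc' -> 'build\\app.res'.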
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
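    # Hypothetical invocation: compile(['spam.c'], output_dir='build') runs
    # roughly cl.exe /c /nologo /Ox /MD /W3 /GX /DNDEBUG /Tcspam.c
    # /Fobuild\spam.obj (flags shown are the x86 release options above).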
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
os.path.dirname(objects[0]),
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function, in
    # ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
        # Oops, didn't find it in *any* of 'dirs'
        return None
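    # Example: find_library_file(dirs, 'spam', debug=1) checks each directory
    # for 'spam_d.lib' before falling back to 'spam.lib'.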
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
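    # Example (hypothetical install location): find_exe('cl.exe') might return
    # r'C:\Program Files\Microsoft Visual Studio .NET 2003\Vc7\bin\cl.exe',
    # or just 'cl.exe' when the tool is on neither search path.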
def get_msvc_paths(self, path, platform='x86'):
"""Get a list of devstudio directories (include, lib or path).
        Return a list of strings. The list will be empty if the registry
        cannot be accessed or the appropriate registry keys cannot be found.
"""
if not _can_read_reg:
return []
path = path + " dirs"
if self.__version >= 7:
key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
% (self.__root, self.__version))
else:
key = (r"%s\6.0\Build System\Components\Platforms"
r"\Win32 (%s)\Directories" % (self.__root, platform))
for base in HKEYS:
d = read_values(base, key)
if d:
if self.__version >= 7:
return self.__macros.sub(d[path]).split(";")
else:
return d[path].split(";")
# MSVC 6 seems to create the registry entries we need only when
# the GUI is run.
if self.__version == 6:
for base in HKEYS:
if read_values(base, r"%s\6.0" % self.__root) is not None:
self.warn("It seems you have Visual Studio 6 installed, "
"but the expected registry settings are not present.\n"
"You must at least run the Visual Studio GUI once "
"so that these entries are created.")
break
return []
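    # Example: get_msvc_paths('include') looks up the "include dirs" registry
    # value for the detected MSVC version and returns it split on ';', e.g.
    # [r'C:\VS\Vc7\include', r'C:\VS\Vc7\PlatformSDK\include'] (hypothetical;
    # empty without registry access).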
def set_path_env_var(self, name):
"""Set environment variable 'name' to an MSVC path type value.
This is equivalent to a SET command prior to execution of spawned
commands.
"""
if name == "lib":
p = self.get_msvc_paths("library")
else:
p = self.get_msvc_paths(name)
if p:
os.environ[name] = ';'.join(p)
if get_build_version() >= 8.0:
log.debug("Importing new compiler from distutils.msvc9compiler")
OldMSVCCompiler = MSVCCompiler
from distutils.msvc9compiler import MSVCCompiler
    # get_build_architecture is not really relevant now that we support
    # cross-compilation
from distutils.msvc9compiler import MacroExpander
|
[
"alex.priadko2014@yandex.ru"
] |
alex.priadko2014@yandex.ru
|
337617852658f8fb4eb3d93033ad048125b600a3
|
d2f903b8f67295808bbfffe9b672028adcba7c6f
|
/pruebas/pruebas.py
|
1b8cd75a44d16f3c30a8b6433396cf1038a4971a
|
[] |
no_license
|
buresgit/programas
|
a6da3a9066af50bdfcee0f9401a3af4aafd00cce
|
4528cf8ad86a9c72bd0a7353b9155b88c37fe78c
|
refs/heads/master
| 2023-05-31T22:54:32.591497
| 2021-06-20T18:46:40
| 2021-06-20T18:46:40
| 378,703,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20
|
py
|
print("ola mundo")
|
[
"mburesa@gmail.com"
] |
mburesa@gmail.com
|
b6c1709dcf6475107a4d41740e7e6aa550ea5693
|
459ee3e8fc6cbd8656e3a376aa7236b322bcc3fa
|
/datasets/utils.py
|
3fc8495b611d134dfc5cd100cb3d98343d2f0366
|
[
"MIT"
] |
permissive
|
CodeforRuhrgebiet/simple-od-portal
|
744e3da603b16c2649604894307eae62e71b3454
|
31d333dc4b7b0a58c06fe71191bd05e375ad2ac6
|
refs/heads/master
| 2021-01-17T08:22:51.239067
| 2018-03-20T21:39:10
| 2018-03-20T21:39:10
| 83,888,680
| 5
| 0
|
MIT
| 2020-02-12T10:02:15
| 2017-03-04T11:29:52
|
Python
|
UTF-8
|
Python
| false
| false
| 97
|
py
|
from slugify import slugify_de
def slugify(value):
return slugify_de(value, to_lower=True)
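# Illustrative behavior (depends on the installed awesome-slugify version,
# which provides slugify_de): slugify('Straße für Fußgänger') would yield
# something like 'strasse-fuer-fussgaenger'.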
|
[
"simon.woerpel@medienrevolte.de"
] |
simon.woerpel@medienrevolte.de
|
145e87588c0f4561e659ede1c2319b1fe2cf3d88
|
da8ee509f1578c8880383b2b9cccbfb9f3ffa4fe
|
/oraclebmc/core/models/update_security_list_details.py
|
a0e782762660aff0da999e3b099a02e02c569f16
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"UPL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
kingleoric2010/bmcs-python-sdk
|
99d7f7a3d5db03ae8b5ddd25385cef06ee2fe166
|
e6f0152f8ecfc6dbcb6ff677d845e36dbc170a56
|
refs/heads/master
| 2021-01-24T18:39:54.506155
| 2017-02-28T19:55:38
| 2017-03-01T06:12:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,324
|
py
|
# coding: utf-8
# Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
from ...util import formatted_flat_dict
class UpdateSecurityListDetails(object):
def __init__(self):
self.swagger_types = {
'display_name': 'str',
'egress_security_rules': 'list[EgressSecurityRule]',
'ingress_security_rules': 'list[IngressSecurityRule]'
}
self.attribute_map = {
'display_name': 'displayName',
'egress_security_rules': 'egressSecurityRules',
'ingress_security_rules': 'ingressSecurityRules'
}
self._display_name = None
self._egress_security_rules = None
self._ingress_security_rules = None
@property
def display_name(self):
"""
Gets the display_name of this UpdateSecurityListDetails.
A user-friendly name. Does not have to be unique, and it's changeable.
:return: The display_name of this UpdateSecurityListDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this UpdateSecurityListDetails.
A user-friendly name. Does not have to be unique, and it's changeable.
:param display_name: The display_name of this UpdateSecurityListDetails.
:type: str
"""
self._display_name = display_name
@property
def egress_security_rules(self):
"""
Gets the egress_security_rules of this UpdateSecurityListDetails.
Rules for allowing egress IP packets.
:return: The egress_security_rules of this UpdateSecurityListDetails.
:rtype: list[EgressSecurityRule]
"""
return self._egress_security_rules
@egress_security_rules.setter
def egress_security_rules(self, egress_security_rules):
"""
Sets the egress_security_rules of this UpdateSecurityListDetails.
Rules for allowing egress IP packets.
:param egress_security_rules: The egress_security_rules of this UpdateSecurityListDetails.
:type: list[EgressSecurityRule]
"""
self._egress_security_rules = egress_security_rules
@property
def ingress_security_rules(self):
"""
Gets the ingress_security_rules of this UpdateSecurityListDetails.
Rules for allowing ingress IP packets.
:return: The ingress_security_rules of this UpdateSecurityListDetails.
:rtype: list[IngressSecurityRule]
"""
return self._ingress_security_rules
@ingress_security_rules.setter
def ingress_security_rules(self, ingress_security_rules):
"""
Sets the ingress_security_rules of this UpdateSecurityListDetails.
Rules for allowing ingress IP packets.
:param ingress_security_rules: The ingress_security_rules of this UpdateSecurityListDetails.
:type: list[IngressSecurityRule]
"""
self._ingress_security_rules = ingress_security_rules
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
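# Minimal usage sketch (EgressSecurityRule and IngressSecurityRule are the
# sibling models this class expects; the values below are illustrative):
#   details = UpdateSecurityListDetails()
#   details.display_name = 'web-tier-rules'
#   details.egress_security_rules = []
#   details.ingress_security_rules = []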
|
[
"joe.levy@oracle.com"
] |
joe.levy@oracle.com
|