Dataset schema (column, dtype, observed range or class count):

blob_id                 stringlengths    40 .. 40
directory_id            stringlengths    40 .. 40
path                    stringlengths    2 .. 616
content_id              stringlengths    40 .. 40
detected_licenses       listlengths      0 .. 69
license_type            stringclasses    2 values
repo_name               stringlengths    5 .. 118
snapshot_id             stringlengths    40 .. 40
revision_id             stringlengths    40 .. 40
branch_name             stringlengths    4 .. 63
visit_date              timestamp[us]
revision_date           timestamp[us]
committer_date          timestamp[us]
github_id               int64            2.91k .. 686M
star_events_count       int64            0 .. 209k
fork_events_count       int64            0 .. 110k
gha_license_id          stringclasses    23 values
gha_event_created_at    timestamp[us]
gha_created_at          timestamp[us]
gha_language            stringclasses    213 values
src_encoding            stringclasses    30 values
language                stringclasses    1 value
is_vendor               bool             2 classes
is_generated            bool             2 classes
length_bytes            int64            2 .. 10.3M
extension               stringclasses    246 values
content                 stringlengths    2 .. 10.3M
authors                 listlengths      1 .. 1
author_id               stringlengths    0 .. 212
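The records that follow are instances of this schema, one value per field in the order listed above, with each source file's contents inline. As a hedged illustration only (the dump's storage format is not stated here, and "train-00000.parquet" is a hypothetical placeholder name), rows with this schema could be scanned like so:

import pandas as pd

# Hypothetical file name -- this dump does not name its storage files.
df = pd.read_parquet("train-00000.parquet")
# Each record pairs repo/commit metadata with one Python source file.
for _, row in df.head(3).iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"])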
dc6654c8b29354bae2c8124eca17c8df897b3151
4169f7c1917f6c5176bbe4102ec913c221b03221
/cases/squirrel.py
cbaf0b42768b6995148ea0ca63e2e9db5226c39e
[]
no_license
kolun4ik/hight_sch_prog
e68a92797274a1779493d550965e6a988495ca40
8f7b8281767b06a328d2bbde479f80f8d61f9af5
refs/heads/master
2021-01-05T08:43:22.959298
2020-02-27T17:23:28
2020-02-27T17:23:28
240,959,108
0
0
null
null
null
null
UTF-8
Python
false
false
553
py
#!/usr/bin/env python3


def squirrel(N):
    """Return the first digit of factorial(N)."""
    def factorial(num):
        """Recursively calculate the factorial of num."""
        if type(num) == int and num >= 0:
            result = lambda num: result(num - 1) * num if num > 0 else 1
            return result(num)
        else:
            raise TypeError(
                "The given value must be a positive integer, not %s" % (
                    type(num) if type(num) != int else "negative"))
    return int(str(factorial(N))[0])
[ "11_ka@rambler.ru" ]
11_ka@rambler.ru
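A quick worked check of squirrel() above (my example, not part of the source file): 10! = 3628800, so the first digit is 3.

# Appended usage example (mine, not in the original file):
assert squirrel(10) == 3  # 10! = 3628800 -> first digit is 3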
b767ca2840715076e2f253752baa84e527c57070
48644eee6f07514617d539d4c029988f183bccd4
/source/DB/DB.py
e8e093b5b952d0b144d238dd2aefae948c97d5f3
[]
no_license
maksiplus19/db_coursework_client
ad2895fb5b59d05be0005676c248189b47727582
dd6e1a5e04ff16a70d438ea0cc4eb6382cbd457e
refs/heads/master
2020-09-05T11:21:02.235094
2019-11-10T11:52:58
2019-11-10T11:52:58
220,088,693
0
0
null
null
null
null
UTF-8
Python
false
false
4,366
py
import hashlib
from typing import List

from PyQt5.QtSql import QSqlDatabase, QSqlQuery, QSqlTableModel
from PyQt5.QtWidgets import QMessageBox

from source.Show import Show, Episode


def get_hash(password: str):
    return hashlib.new('sha256', bytes(password, 'utf8')).hexdigest()


class DB:
    def __init__(self):
        self.__db = QSqlDatabase('QODBC3')
        self.__db.setDatabaseName('DRIVER={SQL Server};SERVER=localhost;DATABASE=SerialTracker;Port=1433')
        self.__id = 0
        self.__only_watching = False
        if not self.__db.open():
            # DB is not a QWidget, so it cannot serve as the message box parent; pass None
            QMessageBox.critical(None, 'Ошибка', 'Не удалось подключиться к базе данных')
            exit(-2)
        self.__query = QSqlQuery(self.__db)

    def authorise(self, login: str, password: str) -> bool:
        self.__query.exec(f"exec Authorise '{login}', '{get_hash(password)}'")
        self.__query.next()
        if self.__query.value(0):
            self.__id = self.__query.value(0)
        return self.__id > 0

    def get_shows(self) -> List[Show]:
        shows = []
        if not self.__only_watching:
            self.__query.exec('exec GetAllShows')
        else:
            self.__query.exec(f'exec GetWatchingShow {self.__id}')
        while self.__query.next():
            shows.append(Show(show_id=self.__query.value(0), name=self.__query.value(1),
                              year=self.__query.value(2), timing=self.__query.value(3),
                              description=self.__query.value(4), score=self.__query.value(5)))
        for show in shows:
            show.my_score = self.get_score(show)
        return shows

    def get_episodes(self, show_id: int) -> List[Episode]:
        episodes = []
        self.__query.exec(f'exec GetEpisodes {show_id}')
        while self.__query.next():
            episodes.append(Episode(
                episode_id=self.__query.value(0),
                season=self.__query.value(1),
                series=self.__query.value(2),
                score=self.__query.value(3),
                name=self.__query.value(4),
                description=self.__query.value(5)
            ))
        for episode in episodes:
            episode.my_score = self.get_score(episode)
        return episodes

    def set_only_watching(self, status: bool):
        self.__only_watching = status

    def episode_score_update(self, data: Episode, value: int):
        if value is None:
            self.__query.exec(f'exec ReviewEpisode {self.__id}, {data.id}')
        else:
            self.__query.exec(f'exec ReviewEpisode {self.__id}, {data.id}, {value}')

    def show_score_update(self, data: Show, value: int):
        if value is None:
            self.__query.exec(f'exec ReviewShow {self.__id}, {data.show_id}')
        else:
            self.__query.exec(f'exec ReviewShow {self.__id}, {data.show_id}, {value}')

    def get_score(self, data):
        if type(data) is Episode:
            self.__query.exec(f'exec GetWatcherEpisodeScore {self.__id}, {data.id}')
        else:
            self.__query.exec(f'exec GetWatcherShowScore {self.__id}, {data.show_id}')
        self.__query.next()
        return self.__query.value(0) if self.__query.value(0) else None

    def mark_status_show(self, data: Show):
        self.__query.exec(f'exec MarkShow {self.__id}, {data.show_id}')

    def get_watching_show(self):
        watching_show_id = []
        self.__query.exec(f'exec GetWatchingShow {self.__id}')
        while self.__query.next():
            watching_show_id.append(self.__query.value(0))
        return watching_show_id

    def get_watched_episode(self):
        watched_episode_id = []
        self.__query.exec(f'exec GetWatchedEpisode {self.__id}')
        while self.__query.next():
            watched_episode_id.append(self.__query.value(1))
        return watched_episode_id

    def get_stat_model(self):
        model = QSqlTableModel(None, self.__db)
        model.setTable('Stat')
        model.select()
        return model

    def registration(self, login: str, password: str):
        self.__query.exec(f"exec Registration '{login}', '{get_hash(password)}'")
        self.__query.next()
        return self.__query.value(0) == 1

    def mark_status_episode(self, data):
        self.__query.exec(f'exec MarkEpisode {self.__id}, {data.id}')

    def is_logged(self):
        return self.__id > 0
[ "Niklvitoz1" ]
Niklvitoz1
cad61e5b6b55ee980d989c5409aed0a212af4692
2e45b85f471c2e934b66ac7a6e15706c18c67dd0
/main/urls.py
d587b41cbdde1f22e80c17e6098f0ac20bc0bbf5
[]
no_license
atmaniali/RestFrameWorkTuto
6d6ad59f0894edb9052d2bd0eb8688431373f1ea
3489fa1f7d173da5018926ba3311eb2d7fd9fad3
refs/heads/master
2023-07-18T06:59:34.987217
2021-08-27T13:24:47
2021-08-27T13:24:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
51
py
from django.urls import path

urlpatterns = [

]
[ "atmaniali97@gmail.com" ]
atmaniali97@gmail.com
5282140cec29c877becb1961b4e358217fbf6af4
c18b8b690a97ccc43072a890a28ba0011aa82a52
/algorithm/剑指Offer/Offer06-从尾到头打印链表.py
ee8166ff12c28c11bd6e0c5e05ec21cccfadeb9b
[]
no_license
sudorm1023/job-interview-task
8a6ad070384721e47c1b9737dbf29c4658fadc73
a3f5ab4bb1dc54ebee193978bf9be45c3aef6742
refs/heads/main
2023-08-11T07:18:57.704588
2021-09-12T03:32:54
2021-09-12T03:32:54
404,328,723
0
0
null
null
null
null
UTF-8
Python
false
false
431
py
#!/usr/bin/env python3
from typing import List


# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def reversePrint(self, head: ListNode) -> List[int]:
        ans = []
        if not head:
            return ans
        while head:
            ans.insert(0, head.val)
            head = head.next
        return ans
[ "80558239+hackerAlice@users.noreply.github.com" ]
80558239+hackerAlice@users.noreply.github.com
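A note on the solution above: ans.insert(0, ...) shifts the whole list on every step, making reversePrint O(n^2). A hedged alternative sketch (mine, not from the repo) appends in O(1) and reverses once:

# O(n) variant: append each value, then reverse once at the end.
def reverse_print(head):
    ans = []
    while head:
        ans.append(head.val)
        head = head.next
    return ans[::-1]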
b6f2c338259b8cd9f5e1b6e090f07dd7beb40b72
49c4844fc19c65e29e56cb8d66c97edcd5482988
/python/basics/km_to_miles.py
9962245006b48eb3cd2e268a71f1f492a31e0f42
[]
no_license
Rufito-ruf/training
cac7b4a2f3f9667fd8eea7aa3d2fa2e016c160a8
0b6c5f7909ed719b987b23c6f8f73e5aba8b86fb
refs/heads/main
2023-04-29T11:54:40.280508
2021-05-18T10:55:54
2021-05-18T10:55:54
367,260,567
0
0
null
null
null
null
UTF-8
Python
false
false
116
py
km = float(input("Kilometers: "))
miles = km * 0.62137
print("{0} kilometers equal {1} miles".format(km, miles))
[ "Rufito.ruf@gmail.com" ]
Rufito.ruf@gmail.com
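For context on the constant above: 1 mile is defined as 1.609344 km, so the conversion factor is 1/1.609344 ≈ 0.621371; the script's 0.62137 is that value rounded to five decimals. For example, 5 km × 0.62137 = 3.10685 miles.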
238bfddaa63aa0992234f5a8de11ff5951340c3c
0b8932d66a5143102afa9e19012829a972e2995a
/hrwros/hrwros_week1/scripts/week1_assignment2.py
79ff15343bafa4e734305d248c0248a33551fabe
[]
no_license
ROSDevoloper/DelftX_ROS1x_hrwros
90deb7008f4712e03eb3ab62e532105b2c3e929c
e3423187cc90a121acebedd67a40f5653096804f
refs/heads/master
2023-04-26T01:56:37.273250
2021-05-16T07:10:22
2021-05-16T07:10:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,548
py
#! /usr/bin/env python

# Assignment 2 for Week1: In this assignment you will subscribe to the topic that
# publishes information on the box height in metres and use the metres_to_feet
# service to convert this height in metres to height in feet.

import rospy
from hrwros_msgs.msg import BoxHeightInformation
from hrwros_msgs.srv import ConvertMetresToFeet, ConvertMetresToFeetRequest, ConvertMetresToFeetResponse


def box_height_info_callback(data):
    try:
        # Create a proxy for the service to convert metres to feet.
        metres_to_feet = rospy.ServiceProxy('metres_to_feet', ConvertMetresToFeet)

        # Call the service here.
        service_response = metres_to_feet(data.box_height)

        # Write a log message here to print the height of this box in feet.
        rospy.loginfo("Box height in meter: %f, converted to feet: %f",
                      data.box_height, service_response.distance_feet)

    except rospy.ServiceException, e:
        print "Service call failed: %s" % e


if __name__ == '__main__':
    # Initialize the ROS node here.
    rospy.init_node('box_height_in_feet', anonymous = False)

    # First wait for the service to become available.
    rospy.loginfo("Waiting for service...")
    rospy.wait_for_service('metres_to_feet')
    rospy.loginfo("Service %s is now available", 'metres_to_feet')

    # Create a subscriber to the box height topic.
    rospy.Subscriber('box_height_info', BoxHeightInformation, box_height_info_callback)

    # Prevent this ROS node from terminating until Ctrl+C is pressed.
    rospy.spin()
[ "jamie.chen@aeolusbot.com" ]
jamie.chen@aeolusbot.com
348c0a3cf29d7a0c94ef69ca24c2aa07df6e742e
38db17d0795115fae6df48d96a983a606157c5e4
/reagent/workflow/model_managers/model_based/cross_entropy_method.py
6a64c6d17eaf10d77dd20503225aa3cbff10db59
[ "BSD-3-Clause" ]
permissive
OregonWebSells/ReAgent
d1bbfb5716bb6824695ea7eca29259cfde98df5e
866f91785ca86db32fb67744aa063fe77791ff21
refs/heads/master
2022-11-15T06:32:53.355397
2020-07-07T20:08:57
2020-07-07T20:10:30
278,593,186
1
0
BSD-3-Clause
2020-07-10T09:26:16
2020-07-10T09:26:15
null
UTF-8
Python
false
false
5,465
py
#!/usr/bin/env python3

import logging

import numpy as np
import reagent.types as rlt
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.gym.policies.policy import Policy
from reagent.models.cem_planner import CEMPlannerNetwork
from reagent.parameters import CEMTrainerParameters, param_hash
from reagent.preprocessing.identify_types import CONTINUOUS_ACTION
from reagent.preprocessing.normalization import get_num_output_features
from reagent.training.cem_trainer import CEMTrainer
from reagent.workflow.model_managers.model_based.world_model import WorldModel
from reagent.workflow.model_managers.world_model_base import WorldModelBase


logger = logging.getLogger(__name__)


class CEMPolicy(Policy):
    def __init__(self, cem_planner_network: CEMPlannerNetwork, discrete_action: bool):
        self.cem_planner_network = cem_planner_network
        self.discrete_action = discrete_action

    def act(self, obs: rlt.FeatureData) -> rlt.ActorOutput:
        greedy = self.cem_planner_network(obs)
        if self.discrete_action:
            _, onehot = greedy
            return rlt.ActorOutput(
                action=onehot.unsqueeze(0), log_prob=torch.tensor(0.0)
            )
        else:
            return rlt.ActorOutput(
                action=greedy.unsqueeze(0), log_prob=torch.tensor(0.0)
            )


@dataclass
class CrossEntropyMethod(WorldModelBase):
    __hash__ = param_hash

    trainer_param: CEMTrainerParameters = field(default_factory=CEMTrainerParameters)

    def __post_init_post_parse__(self):
        super().__post_init_post_parse__()

    # TODO: should this be in base class?
    def create_policy(self, serving: bool = False) -> Policy:
        return CEMPolicy(self.cem_planner_network, self.discrete_action)

    def build_trainer(self) -> CEMTrainer:
        world_model_manager: WorldModel = WorldModel(
            trainer_param=self.trainer_param.mdnrnn
        )
        world_model_manager.initialize_trainer(
            self.use_gpu,
            self.reward_options,
            # pyre-fixme[6]: Expected `Dict[str,
            #  reagent.parameters.NormalizationData]` for 3rd param but got
            #  `Optional[typing.Dict[str, reagent.parameters.NormalizationData]]`.
            self._normalization_data_map,
        )
        world_model_trainers = [
            world_model_manager.build_trainer()
            for _ in range(self.trainer_param.num_world_models)
        ]
        world_model_nets = [trainer.memory_network for trainer in world_model_trainers]
        terminal_effective = self.trainer_param.mdnrnn.not_terminal_loss_weight > 0

        action_normalization_parameters = (
            self.action_normalization_data.dense_normalization_parameters
        )
        sorted_action_norm_vals = list(action_normalization_parameters.values())
        discrete_action = sorted_action_norm_vals[0].feature_type != CONTINUOUS_ACTION
        action_upper_bounds, action_lower_bounds = None, None
        if not discrete_action:
            action_upper_bounds = np.array(
                [v.max_value for v in sorted_action_norm_vals]
            )
            action_lower_bounds = np.array(
                [v.min_value for v in sorted_action_norm_vals]
            )

        cem_planner_network = CEMPlannerNetwork(
            mem_net_list=world_model_nets,
            cem_num_iterations=self.trainer_param.cem_num_iterations,
            cem_population_size=self.trainer_param.cem_population_size,
            ensemble_population_size=self.trainer_param.ensemble_population_size,
            num_elites=self.trainer_param.num_elites,
            plan_horizon_length=self.trainer_param.plan_horizon_length,
            state_dim=get_num_output_features(
                self.state_normalization_data.dense_normalization_parameters
            ),
            action_dim=get_num_output_features(
                self.action_normalization_data.dense_normalization_parameters
            ),
            discrete_action=discrete_action,
            terminal_effective=terminal_effective,
            gamma=self.trainer_param.rl.gamma,
            alpha=self.trainer_param.alpha,
            epsilon=self.trainer_param.epsilon,
            action_upper_bounds=action_upper_bounds,
            action_lower_bounds=action_lower_bounds,
        )

        # store for building policy
        # pyre-fixme[16]: `CrossEntropyMethod` has no attribute `discrete_action`.
        self.discrete_action = discrete_action
        # pyre-fixme[16]: `CrossEntropyMethod` has no attribute `cem_planner_network`.
        self.cem_planner_network = cem_planner_network
        logger.info(
            f"Built CEM network with discrete action = {discrete_action}, "
            f"action_upper_bound={action_upper_bounds}, "
            f"action_lower_bounds={action_lower_bounds}"
        )
        return CEMTrainer(
            cem_planner_network=cem_planner_network,
            world_model_trainers=world_model_trainers,
            parameters=self.trainer_param,
            use_gpu=self.use_gpu,
        )

    def build_serving_module(self) -> torch.nn.Module:
        """
        Returns a TorchScript predictor module
        """
        raise NotImplementedError()
[ "facebook-github-bot@users.noreply.github.com" ]
facebook-github-bot@users.noreply.github.com
f6579424ab22a4963654f518a1dc73dd8a3f66cb
3862752149b2d83ff329941f8e4a1a9fb758b94f
/python/fundamentals/insertion_sort.py
be5a04001e05b3d3b873c62110c2751ece6406b9
[]
no_license
g-chance/python_stack
847f4b62208d8f3094d01518d12d8dea9805e0ae
b3bd9c0beae2cedfbd0b4052ec6ae7e2b5c8c7ef
refs/heads/main
2022-12-26T04:09:09.671484
2020-10-15T19:56:47
2020-10-15T19:56:47
304,432,982
0
0
null
null
null
null
UTF-8
Python
false
false
389
py
def insertionSort(myList):
    for i in range(0, len(myList)):
        min = myList[i]
        newminindex = -1
        for j in range(i, -1, -1):
            if myList[j] > min:
                newminindex = j
        if newminindex != -1:
            temp = myList[i]
            myList.pop(i)
            myList.insert(newminindex, temp)
    return myList


print(insertionSort([2, 3, 4, 5, 5]))
[ "ge.chance@gmail.com" ]
ge.chance@gmail.com
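How the variant above behaves: for each index i it finds the leftmost earlier element greater than myList[i] and reinserts the current element just before it. Tracing insertionSort([3, 1, 2]): i=1 moves 1 before 3, giving [1, 3, 2]; i=2 moves 2 before 3, giving [1, 2, 3]. Like classic insertion sort it is O(n^2) in the worst case.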
64061a6517b985e5687fd1c9a8a8932facf5b93b
b32c6fb15b01afb9dae3ae6be9d5bf50d7f427ed
/Meta-RL/test.py
e6103d67a74fb15d8b6318d3964443e8ee3ffa40
[]
no_license
harshp96/Project_RL
73b4416684d2b6b067f806552834e756cbc54d05
23adf5918968ca04a3333b01a2180d84eefa7138
refs/heads/master
2022-07-05T01:00:48.259854
2020-05-15T20:42:43
2020-05-15T20:42:43
261,724,748
0
0
null
2020-05-06T10:23:57
2020-05-06T10:23:56
null
UTF-8
Python
false
false
835
py
import numpy as np
import os
import time
import gym
from collections import deque
import tensorflow as tf
import tensorflow.keras as K
from Agent import *
import sys

ENV_NAME = sys.argv[1]
TRAIN_ITERATIONS = 5000
MAX_EPISODE_LENGTH = 1000
TRAJECTORY_BUFFER_SIZE = 32
BATCH_SIZE = 16
RENDER_EVERY = 1
WEIGHT_FILE = sys.argv[2]

env = gym.make(ENV_NAME)
agent = Agent(env.action_space.n, env.observation_space.shape, BATCH_SIZE)
samples_filled = 0

agent.actor_network.load_weights(WEIGHT_FILE)
for i in range(15):
    episode_reward = 0
    state = env.reset()
    while True:
        action = agent.choose_action(state)
        env.render()
        state, reward, done, _ = env.step(action)
        episode_reward += reward  # accumulate before the break so the final reward is counted
        if done:
            break
    print('Episodes:', i, 'Episodic_Reward:', episode_reward)
env.close()
[ "vinay.rp.36@gmail.com" ]
vinay.rp.36@gmail.com
f71899c4f0598fbd6fc8c252ba0db5ffe919aea3
fed7697f8be852f836240215ae732fdeb85bde6f
/model/PhoneticModel.py
79065e1f8a97339cff21cb5d07cc160dc6412e06
[]
no_license
luckyBoy93/bar_poem
1243c45f4448e94746f5d14717ae10a37d94dfa5
0dc533383a1c45771a6016d2235c9afc387be37a
refs/heads/master
2020-01-23T21:18:03.661921
2016-11-23T10:50:54
2016-11-23T10:50:54
74,567,478
0
0
null
null
null
null
UTF-8
Python
false
false
1,923
py
# -*- coding: utf-8 -*-

class PhoneticModel(object):
    vowels_sound = [u'и', u'ы', u'у', u'э', u'о', u'а']
    vowels_special = [u"я", u"е", u"ё", u"ю"]
    vowels = [u'и', u'ы', u'у', u'э', u'о', u'а', u"я", u"е", u"ё", u"ю"]
    consonants_sound = [u'б', u'в', u'г', u'д', u'з', u'к', u'л', u'м', u'н', u'п',
                        u'р', u'с', u'т', u'ф', u'х', u'ж', u'ш', u'ц', u'ч', u'й']
    # voiced_paired = ['б', 'в', 'г', 'д', 'ж', 'з']
    voiced_paired = [u'б', u'в', u'г', u'д', u'ж', u'з']
    # clunk_paired = ['п', 'ф', 'к', 'т', 'ш', 'с']
    clunk_paired = [u'п', u'ф', u'к', u'т', u'ш', u'с']

    # original_word
    # new_ending
    def __init__(self, word, accent=255):
        self.original_word = word

    def get_ending(self):
        # only last two chars
        last = self.original_word[-1:]
        # print(self.original_word, 'я')
        penultimate = self.original_word[-2:-1]
        # print(penultimate, last)
        # penultimate char
        if penultimate in self.vowels_special:
            if penultimate == u'я':
                penultimate = u'а'
            elif penultimate == u'е':
                penultimate = u'э'
            elif penultimate == u'ё':
                penultimate = u'о'
            elif penultimate == u'ю':
                penultimate = u'у'
        if last in self.voiced_paired:
            ind = self.voiced_paired.index(last)
            last = self.clunk_paired[ind]
        self.new_ending = penultimate + last
        return self.new_ending

    # check single vowel and vowels absence
    @staticmethod
    def check_single_accent(w):
        number_of_vowels = 0
        for el in PhoneticModel.vowels:
            if el in w:
                number_of_vowels += 1
        if number_of_vowels == 0:
            return -1
        if number_of_vowels == 1:
            return 0
        return 255
[ "matveysodboev@gmal.com" ]
matveysodboev@gmal.com
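Example usage of the model above (my illustration, not from the repo), showing the final-consonant devoicing and vowel normalisation it implements:

print(PhoneticModel(u'поезд').get_ending())       # 'зт' -- final 'д' devoiced to 'т'
print(PhoneticModel.check_single_accent(u'кот'))  # 0 -- exactly one vowel letter

Note that check_single_accent counts distinct vowel letters, not vowel occurrences.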
37f730abf76b5898a30f9f8b016882fccd6ab098
6dc543432268082fb86b27f1911530db2e1e9961
/lesson9_step2.py
ed101d6fa553de5fd3990a09da83488a05d9a691
[]
no_license
VorobevaViktoria/stepik--auto-tests-course
c56d277088b26d055c0924fcd5d5814051f06217
f0cdc5fd1b855da0c3131a83431c17b3e8e35796
refs/heads/main
2023-05-28T07:42:36.841079
2021-06-21T16:42:33
2021-06-21T16:42:33
378,983,425
0
0
null
null
null
null
UTF-8
Python
false
false
878
py
from selenium import webdriver
import time
import math

try:
    link = "http://suninjuly.github.io/redirect_accept.html"
    browser = webdriver.Chrome()
    browser.get(link)

    button = browser.find_element_by_css_selector("button")
    button.click()
    browser.switch_to_window(browser.window_handles[1])

    x = browser.find_element_by_id('input_value').text
    print(x)
    reg = str(math.log(abs(12 * math.sin(int(x)))))
    ans = browser.find_element_by_id('answer').send_keys(reg)

    button = browser.find_element_by_css_selector("button")
    button.click()

finally:
    # wait so the result of the script run can be checked visually
    time.sleep(15)
    # close the browser after all the manipulations
    browser.quit()
[ "vivoro.696@gmail.com" ]
vivoro.696@gmail.com
0c29784495c76d2827c28b6d5c88023bfda7ec39
facb8b9155a569b09ba66aefc22564a5bf9cd319
/wp2/merra_scripts/02_preprocessing/merraLagScripts/575-tideGauge.py
b6c542ea053416d9f5768343d1851f8784ac2689
[]
no_license
moinabyssinia/modeling-global-storm-surges
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
6e385b2a5f0867df8ceabd155e17ba876779c1bd
refs/heads/master
2023-06-09T00:40:39.319465
2021-06-25T21:00:44
2021-06-25T21:00:44
229,080,191
0
0
null
null
null
null
UTF-8
Python
false
false
3,772
py
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:12:23 2020

****************************************************
Load predictors & predictands + predictor importance
****************************************************

@author: Michael Tadesse
"""

# import packages
import os
import pandas as pd
import datetime as dt  # used for timedelta
from datetime import datetime

# define directories
# dir_name = 'F:\\01_erainterim\\03_eraint_lagged_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraAllCombined"
dir_out = "/lustre/fs0/home/mtadesse/merraAllLagged"


def lag():
    os.chdir(dir_in)

    # get names
    tg_list_name = sorted(os.listdir())

    x = 575
    y = 576

    for tg in range(x, y):
        os.chdir(dir_in)
        tg_name = tg_list_name[tg]
        print(tg_name, '\n')
        pred = pd.read_csv(tg_name)

        # create a daily time series - date_range
        # get only the ymd of the start and end times
        start_time = pred['date'][0].split(' ')[0]
        end_time = pred['date'].iloc[-1].split(' ')[0]
        print(start_time, ' - ', end_time, '\n')

        date_range = pd.date_range(start_time, end_time, freq='D')

        # defining time changing lambda functions
        time_str = lambda x: str(x)
        time_converted_str = pd.DataFrame(map(time_str, date_range), columns=['date'])
        time_converted_stamp = pd.DataFrame(date_range, columns=['timestamp'])

        """
        first prepare the six time lagging dataframes
        then use the merge function to merge the original
        predictor with the lagging dataframes
        """

        # prepare lagged time series for time only
        # note here that since MERRA has 3hrly data
        # the lag_hrs is increased from 6(eraint) to 31(MERRA)
        time_lagged = pd.DataFrame()
        lag_hrs = list(range(0, 31))
        for lag in lag_hrs:
            lag_name = 'lag' + str(lag)
            lam_delta = lambda x: str(x - dt.timedelta(hours=lag))
            lag_new = pd.DataFrame(map(lam_delta, time_converted_stamp['timestamp']),
                                   columns=[lag_name])
            time_lagged = pd.concat([time_lagged, lag_new], axis=1)

        # dataframe that contains all lagged time series (just time)
        time_all = pd.concat([time_converted_str, time_lagged], axis=1)

        pred_lagged = pd.DataFrame()
        for ii in range(1, time_all.shape[1]):  # to loop through the lagged time series
            print(time_all.columns[ii])
            # extracting corresponding lagged time series
            lag_ts = pd.DataFrame(time_all.iloc[:, ii])
            lag_ts.columns = ['date']
            # merge the selected lagged time with the predictor on "date"
            pred_new = pd.merge(pred, lag_ts, on=['date'], how='right')
            pred_new.drop('Unnamed: 0', axis=1, inplace=True)

            # sometimes nan values go to the bottom of the dataframe
            # sort df by date -> reset the index -> remove old index
            pred_new.sort_values(by='date', inplace=True)
            pred_new.reset_index(inplace=True)
            pred_new.drop('index', axis=1, inplace=True)

            # concatenate lagged dataframe
            if ii == 1:
                pred_lagged = pred_new
            else:
                pred_lagged = pd.concat([pred_lagged, pred_new.iloc[:, 1:]], axis=1)

        # cd to saving directory
        os.chdir(dir_out)
        pred_lagged.to_csv(tg_name)
        os.chdir(dir_in)


# run script
lag()
[ "michaelg.tadesse@gmail.com" ]
michaelg.tadesse@gmail.com
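A toy illustration of the right-merge trick used above (hypothetical data, mine): shifting the timestamps back by h hours and right-merging the predictor onto the shifted keys leaves output row i holding the value observed h hours before stamps[i].

import pandas as pd
import datetime as dt

stamps = pd.date_range("2020-01-01", periods=3, freq="3H")
pred = pd.DataFrame({"date": stamps.astype(str), "v": [10, 20, 30]})
lag3 = pd.DataFrame({"date": (stamps - dt.timedelta(hours=3)).astype(str)})
print(pd.merge(pred, lag3, on="date", how="right"))  # row i holds the value from stamps[i] - 3h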
e1de3d2c58fc1b2abff7cc9a52e569f48ba4e842
d90d6cc85a4dad85bb2ba0e2637ee6acc0289d8a
/collab/migrations/0001_initial.py
5c8dd4f30d7f4980564cd615a74a2357a8565faf
[]
no_license
bo-boka/tech-collab
181607c0a64a50b406e17691b76a5660b7f93a60
b06dde37ca24f60050bee1f48a02ac8e5bf2b3c0
refs/heads/master
2023-08-18T10:34:24.753240
2021-10-22T04:18:35
2021-10-22T04:18:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,205
py
# Generated by Django 3.2.5 on 2021-10-12 20:43

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import taggit.managers


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('taggit', '0003_taggeditem_add_unique_index'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Collab',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Match',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rank', models.PositiveIntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=500)),
                ('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Date')),
                ('description', models.TextField(blank=True, max_length=1000)),
                ('city', models.CharField(max_length=50)),
                ('archived', models.BooleanField(default=False, verbose_name='Archive')),
                ('collaborators', models.ManyToManyField(related_name='collaborators', through='collab.Collab', to=settings.AUTH_USER_MODEL)),
                ('founder', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('matches', models.ManyToManyField(related_name='matches', through='collab.Match', to=settings.AUTH_USER_MODEL)),
                ('skills_needed', taggit.managers.TaggableManager(help_text='A comma-separated list of skills', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Skills Needed')),
            ],
        ),
        migrations.CreateModel(
            name='Technology',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='SocialProj',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('platform', models.CharField(choices=[('ps', 'fa fa-globe fa-3x'), ('gh', 'fa fa-github fa-3x'), ('li', 'fa fa-linkedin fa-3x'), ('tw', 'fa fa-twitter fa-3x'), ('tl', 'fa fa-trello fa-3x'), ('fb', 'fa fa-facebook-square fa-3x'), ('ig', 'fa fa-instagram fa-3x'), ('yt', 'fa fa-youtube fa-3x')], default='gh', max_length=2)),
                ('url', models.URLField(blank=True, null=True)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='collab.project')),
            ],
        ),
        migrations.AddField(
            model_name='project',
            name='technologies',
            field=models.ManyToManyField(to='collab.Technology'),
        ),
        migrations.AddField(
            model_name='match',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='collab.project'),
        ),
        migrations.AddField(
            model_name='match',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='collab',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='collab.project'),
        ),
        migrations.AddField(
            model_name='collab',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
[ "boka.sarah@gmail.com" ]
boka.sarah@gmail.com
b6605148601741c129622e449e20884a81e6f769
ef7a5e1445706482a0e20d2632f6cd3d0e279031
/amy/extrequests/migrations/0009_auto_20190811_0611.py
433fd03ac13af69f58a2cd1789b62ec40bdc378f
[ "MIT" ]
permissive
pbanaszkiewicz/amy
7bf054463f4ecfa217cc9e52a7927d22d32bcd84
f97631b2f3dd8e8f502e90bdb04dd72f048d4837
refs/heads/develop
2022-11-17T18:56:18.975192
2022-11-03T23:19:41
2022-11-03T23:19:41
28,005,098
0
3
MIT
2018-03-20T18:48:55
2014-12-14T19:25:22
Python
UTF-8
Python
false
false
918
py
# Generated by Django 2.1.7 on 2019-08-11 06:11

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('extrequests', '0008_auto_20190809_1004'),
    ]

    operations = [
        migrations.AlterField(
            model_name='selforganizedsubmission',
            name='public_event',
            field=models.CharField(choices=[('invite', 'This event is open to learners by invitation only.'), ('closed', 'This event is open to learners inside of my institution.'), ('public', 'This event is open to learners outside of my institution.'), ('other', 'Other:')], default='', help_text='Many of our workshops restrict registration to learners from the hosting institution. If your workshop will be open to registrants outside of your institution please let us know below.', max_length=20, verbose_name='Is this workshop open to the public?'),
        ),
    ]
[ "piotr@banaszkiewicz.org" ]
piotr@banaszkiewicz.org
b0cccf46779814b4c6e76577118d9a1e6946bfbb
e2cef159954890d47f121b3a55a39f9f419db29b
/backend/datatypes/exceptions/data.py
3a63ac113f46670da7aa948a2edc3ff380f395e7
[]
no_license
lzw1012/Quantative-Trading-Strategy-Platform
02b6ef565b6adde5a8e7772af62559bc8b4a4304
b1feedb2636abdecb12840c6c048b21ed104dd20
refs/heads/master
2022-12-07T16:13:09.409933
2020-09-06T01:31:14
2020-09-06T01:31:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
225
py
class DataRetrievalException(Exception):
    def __init__(self, message: str, error: Exception):
        super().__init__(message)
        self.raw_errors = error

    def get_raw_error(self):
        return self.raw_errors
[ "bennyhwanggggg@users.noreply.github.com" ]
bennyhwanggggg@users.noreply.github.com
05127bab5e7cd5aa5e5c401e742b3599156bec9d
f07a42f652f46106dee4749277d41c302e2b7406
/Data Set/bug-fixing-2/c5d3454cb2cfc1eec30d29e2caa92e9c4e0409a5-<_create_new_token>-fix.py
3c6067738e66f81caaf8a9285d6cae4afb3dee19
[]
no_license
wsgan001/PyFPattern
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
cc347e32745f99c0cd95e79a18ddacc4574d7faa
refs/heads/main
2023-08-25T23:48:26.112133
2021-10-23T14:11:22
2021-10-23T14:11:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
241
py
def _create_new_token(self):
    token = ApiToken.objects.create(user=self.user,
                                    application=self.application,
                                    scope_list=self.sentry_app.scope_list,
                                    expires_at=token_expiration())
    self.install.update(api_token=token)
    return token
[ "dg1732004@smail.nju.edu.cn" ]
dg1732004@smail.nju.edu.cn
99601b2bd2f842e0cef8328fd3aff516c46ff2f5
6569bebdb6ff518bf93f925f23801606ff9082a2
/oreilly/python2/examples/chapter13/minmax.py
36973c0c7f1b09a9d67f50f650928b28431d0853
[]
no_license
qiupq/LearnPython
bede6346f46d6669907867f13b8ce25c86254cde
f7f072d5f92d8135f1318a7a20fcffe3caf730ec
refs/heads/master
2020-06-12T07:59:52.815020
2019-07-24T09:29:56
2019-07-24T09:29:56
194,238,183
0
0
null
null
null
null
UTF-8
Python
false
false
317
py
def minmax(test, *args):
    res = args[0]
    for arg in args[1:]:
        if test(arg, res):
            res = arg
    return res

def lessthan(x, y): return x < y     # see also: lambda
def grtrthan(x, y): return x > y

print minmax(lessthan, 4, 2, 1, 5, 6, 3)
print minmax(grtrthan, 4, 2, 1, 5, 6, 3)
[ "qq@163.com" ]
qq@163.com
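The two calls above print 1 (the minimum under lessthan) and 6 (the maximum under grtrthan). The file comes from a Python 2 examples directory, hence the statement form of print; under Python 3 only those two lines would change:

# Python 3 form of the same two calls (the functions are otherwise version-neutral):
print(minmax(lessthan, 4, 2, 1, 5, 6, 3))  # 1
print(minmax(grtrthan, 4, 2, 1, 5, 6, 3))  # 6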
a7a2c63f1cc5696f3d2e59c1b82e802b6dc26aa4
8f4663bde3e89ed7a08c82018306d8005c3ed2dc
/Moduller/Soru_1/modulCiro.py
d76d1b4e727cc9d4c8c799cf3d15055479c2d4cb
[]
no_license
hukumrancelik/Proglamlama
73016890ddac94fe8bb316b58a11e2212c4bb997
bdcda07ebb32f22ddb1b75ffcf82b550ed604f63
refs/heads/master
2021-01-25T14:26:37.340072
2018-04-26T18:32:14
2018-04-26T18:32:14
123,696,854
0
0
null
null
null
null
UTF-8
Python
false
false
381
py
def kar(gelir, gider):
    global isletmeKar
    isletmeKar = gelir - gider
    return isletmeKar

def ciro(tc, tcs):
    global adamCiro
    adamCiro = tc / tcs
    return adamCiro

gelir = int(input("Geliri Giriniz:"))
gider = int(input("Gideri Giriniz:"))
tc = int(input("Toplam Ciro:"))
tcs = int(input("Toplam Çalışan Sayısı:"))
kar(gelir, gider)
ciro(tc, tcs)
[ "noreply@github.com" ]
hukumrancelik.noreply@github.com
8d00a5d3590acea8d2d79962351af52512841d1a
69cb4546efba1f081a949821d62b97a0b3c158f9
/Traveling-Salesman-Problem/Python_regulating_81.py
7730ce7417f3c4711b0bf1feed0bc5b513fa2716
[]
no_license
bikashkundu-99/projects
4311878b7fa516c60d59eb457066ce0010b09a4b
67e377e5409872ca7e12d3f98e0ffb55f331c743
refs/heads/main
2023-01-06T06:55:17.931293
2020-11-05T11:54:34
2020-11-05T11:54:34
310,283,550
1
0
null
null
null
null
UTF-8
Python
false
false
2,505
py
# Importing libraries
import pandas as pd
import numpy as np
import scipy as sci
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
import os, sys

print(os.getcwd())  # current working directory

output_file = pd.read_csv('OutputCities.csv', sep=';', header=0)
distance = pd.read_csv("TR81_KGM_csv.csv", sep=';')
distance.index = distance['IL ADI']
del distance['IL NO']
del distance['IL ADI']
del distance['IL ADI.1']
del distance['IL NO.1']

output_file['starting'] = output_file['A'].map(lambda x: str(x).split(',')[1])
output_file['going'] = output_file['starting'].map(lambda x: int(str(x)[:-1]))

cities_list = list(distance.columns.values)
cities_series = pd.Series(cities_list)
cities_number = list(range(82))
del cities_number[0]

output_file['leaving'] = output_file['A'].map(lambda x: int(str(x).split(',')[0][2:]))
output_file.index = output_file.index + 1

route = []
starting_destination = 51
route.append(starting_destination)
for i in range(80):
    route.append(output_file['going'][starting_destination])
    starting_destination = output_file['going'][starting_destination]

city_centers = pd.read_csv('city_centers.csv', sep=';', encoding='cp857')
city_centers = city_centers[['A', 'B', 'C']]
city_centers['A'] = cities_series
labels = cities_list
data = city_centers[['B', 'C']].values
city_centers['A'] = pd.Series(cities_number)
city_centers.index = city_centers.index + 1
city_centers['B'] = city_centers['B'].map(lambda x: round(x, 3))
city_centers['C'] = city_centers['C'].map(lambda x: round(x, 3))

latitude = []
longitude = []
for i in route:
    latitude.append(city_centers['B'][i])
    longitude.append(city_centers['C'][i])

# Adding starting point to the end for convenience
latitude.append(latitude[0])
longitude.append(longitude[0])

dict1 = {'lat': latitude, 'lon': longitude}
df1 = pd.DataFrame(dict1)

plt.figure(figsize=(23, 10))
plt.plot(list(df1['lon']), list(df1['lat']), 'o-', color='red', label=labels)
plt.ylabel("Latitude", size=20)
plt.xlabel("Longitude", size=20)
plt.title('Starting from Nigde ending at Nevsehir')
for label, x, y in zip(labels, data[:, 1], data[:, 0]):
    plt.annotate(
        label,
        xy=(x, y), xytext=(-20, 20),
        textcoords='offset points', ha='right', va='bottom',
        bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
        arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
plt.show()
[ "noreply@github.com" ]
bikashkundu-99.noreply@github.com
99b4413a055447c8b32e78f741812715f4083232
9385dc6ce2dfcef6ea2a87afa2251df7f930f1ab
/src/disparity.py
3c8b41226220e20fb7595518d504b6eee7e86440
[ "MIT" ]
permissive
andyfangdz/turtlebot_tracking
1a9635f200c47eec49500af8652013b97d8c2227
eb1a4411262f2d7fc2da3f4a5a32cc2067141b99
refs/heads/master
2020-12-31T04:42:46.972423
2016-04-27T21:01:57
2016-04-27T21:01:57
57,240,200
0
0
null
null
null
null
UTF-8
Python
false
false
2,194
py
#!/usr/bin/env python
import numpy as np
import cv2
from matplotlib import pyplot as plt
import video

# imgL = cv2.imread('tsukuba_l.png', 0)
# imgR = cv2.imread('tsukuba_r.png', 0)

ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''

def write_ply(fn, verts, colors):
    verts = verts.reshape(-1, 3)
    colors = colors.reshape(-1, 3)
    verts = np.hstack([verts, colors])
    with open(fn, 'w') as f:
        f.write(ply_header % dict(vert_num=len(verts)))
        np.savetxt(f, verts, '%f %f %f %d %d %d')

if __name__ == '__main__':
    print 'loading images...'
    camL = video.create_capture(1)
    camR = video.create_capture(0)
    _, imgL = camL.read()
    _, imgR = camR.read()

    # disparity range is tuned for 'aloe' image pair
    window_size = 3
    min_disp = 16
    num_disp = 112 - min_disp
    stereo = cv2.StereoSGBM(minDisparity = min_disp,
                            numDisparities = num_disp,
                            SADWindowSize = window_size,
                            uniquenessRatio = 10,
                            speckleWindowSize = 100,
                            speckleRange = 32,
                            disp12MaxDiff = 1,
                            P1 = 8*3*window_size**2,
                            P2 = 32*3*window_size**2,
                            fullDP = False)

    print 'computing disparity...'
    disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0

    print 'generating 3d point cloud...',
    h, w = imgL.shape[:2]
    f = 0.8 * w                         # guess for focal length
    Q = np.float32([[1, 0, 0, -0.5*w],
                    [0,-1, 0,  0.5*h],  # turn points 180 deg around x-axis,
                    [0, 0, 0,     -f],  # so that y-axis looks up
                    [0, 0, 1,      0]])
    points = cv2.reprojectImageTo3D(disp, Q)
    colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB)
    mask = disp > disp.min()
    out_points = points[mask]
    out_colors = colors[mask]
    out_fn = 'out.ply'
    write_ply('out.ply', out_points, out_colors)
    print '%s saved' % 'out.ply'

    cv2.imshow('left', imgL)
    cv2.imshow('disparity', (disp - min_disp) / num_disp)
    cv2.waitKey()
    cv2.destroyAllWindows()
[ "turtlebot@turtlebot.(none)" ]
turtlebot@turtlebot.(none)
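The script above targets the OpenCV 2.4-era Python API (the cv2.StereoSGBM constructor, Python 2 print statements). In OpenCV 3 and later the matcher is built through a factory function and some parameters were renamed; a hedged sketch of the equivalent call:

# OpenCV 3+ equivalent (parameter names changed, e.g. SADWindowSize -> blockSize):
stereo = cv2.StereoSGBM_create(minDisparity=16, numDisparities=96, blockSize=3)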
fa3e6c8bef1e11080fd9322682808e0380829d0e
a3fbd9e063ef8491d0432caefdd9da9d810350e0
/LightHouse_Simulation_2.py
2d0a1424de12eda690bfb3c1bb1ffe21be8e6e21
[]
no_license
unJASON/EKFLocolization
0505970a61756621abb7118720dcba54a65cf2d3
fb1e6a45efe95aa81f850973ad50e3d99e389258
refs/heads/master
2022-09-05T20:04:28.760407
2020-06-01T11:51:34
2020-06-01T11:51:34
261,731,735
0
0
null
2020-05-06T10:58:49
2020-05-06T10:58:48
null
UTF-8
Python
false
false
5,970
py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import mpl_toolkits.mplot3d.axes3d as p3
from light_house_create import dataCreate
from SwarmEKF2 import swarmEKF

show_animation = True
np.random.seed(16)  # seed the random number generator for reproducibility
border = {"xmin": -8, "xmax": 8, "ymin": -8, "ymax": 8, "zmin": 0, "zmax": 5}
numRob = 10  # number of robots
lighthouse_Idx = [0, 1]
dimension = 2
dt = 0.01  # time interval [s]
simTime = 70.0  # simulation time [s]
maxVel = 2  # maximum velocity [m/s]
devInput = np.array([[0.25, 0.25, 0.01]]).T  # input deviation in simulation, Vx[m/s], Vy[m/s], yawRate[rad/s]
devObser = 0.1  # observation deviation of distance[m]
ekfStride = 1  # update interval of EKF is simStride*0.01[s]

# initialization (translated from the original Chinese comment)
xTrue = np.random.uniform(-5, 5, (dimension, numRob))  # random initial groundTruth of state [x, y, yaw]' of numRob robots
relativeState = np.zeros((3, numRob, numRob))  # [x_ij, y_ij, yaw_ij]' of the second robot in the first robot's view
data = dataCreate(numRob, border, maxVel, dt, devInput, devObser, dimension=dimension)  # Create input data such as velocities, yaw rates, distances...
estiEKF = swarmEKF(10, 0.1, 0.25, 0.4, 0.1, numRob, dt=dt, dimension=dimension, lighthouse_Idx=lighthouse_Idx)
xTrue[:, 0] = [0, 0]
xTrue[:, 1] = [1, 1]
xEsti = np.random.uniform(-5, 5, (dimension * 2, numRob))
# xEsti[dimension:, :] = 0
# xEsti[:dimension, :] = xTrue


def goRec(u, step):
    cycle = 1500
    vel = -1
    if step % cycle < cycle * 0.25:
        u[:, 1] = [vel, 0]
    elif step % cycle < cycle * 0.5:
        u[:, 1] = [0, vel]
    elif step % cycle < cycle * 0.75:
        u[:, 1] = [-vel, 0]
    else:
        u[:, 1] = [0, -vel]
    u[:, 0] = -u[:, 1]
    # u[:, 0] = 0
    # u[:, 1] = 0
    # u[:, 2] = 0
    u[:, 2:] = 0
    return u


def animate(step):
    global xTrue, relativeState, xEsti, estiEKF, Pmatrix
    u = data.calcInput_FlyIn1m(step)
    # u = goRec(u, step)
    xTrue, zNois, uNois = data.update(xTrue, u)
    if step % ekfStride == 0:
        # generate a random permutation, then append it to the reference list
        # (translated from the original Chinese comment)
        # permu = np.random.permutation(numRob).tolist()
        # for ele in lighthouse_Idx:
        #     permu.remove(ele)
        # permu = lighthouse_Idx + permu
        reference_list = []
        for i in range(numRob):
            xEsti = estiEKF.CovEKF(uNois, zNois, xEsti, xTrue, ekfStride, i)
            # xEsti = estiEKF.CovEKF2(uNois, zNois, xEsti, xTrue, ekfStride, i, reference_list)
            # reference_list.append(i)
            # print(i, estiEKF.Pmatrix[i, 0, 0], estiEKF.Pmatrix[i, 1, 1], estiEKF.Pmatrix[i, 2, 2], estiEKF.Pmatrix[i, 3, 3])
            # print(i, "true:", xTrue[:, i], "est:", xEsti[0:2, i], " ", xEsti[0 + dimension:2 + dimension, i])
    pointsTrue.set_data(xTrue[0, :], xTrue[1, :])  # plot groundTruth points
    pointsEsti.set_data(xEsti[0, :] + xEsti[0 + dimension, :], xEsti[1, :] + xEsti[1 + dimension, :])  # plot estimated points
    circle.center = (xTrue[0, 0], xTrue[1, 0])
    circle.radius = zNois[0, 1]  # plot a circle to show the distance between robot 0 and robot 1
    time_text.set_text("t={:.2f}s".format(step * dt))
    return pointsTrue, pointsEsti, circle, pointsTrueHead, pointsEstiHead, time_text


def animate3D(step):
    global xTrue, relativeState, xEsti
    u = data.calcInput_FlyIn1m(step)
    xTrue, zNois, uNois = data.update(xTrue, u)
    if step % ekfStride == 0:
        for i in range(numRob):
            # xEsti = estiEKF.EKF(uNois, zNois, xEsti, xTrue, ekfStride, i)
            xEsti = estiEKF.CovEKF(uNois, zNois, xEsti, xTrue, ekfStride, i)
            # xEsti = estiEKF.anchorEKF(uNois, zNois, xEsti, xTrue, ekfStride, i)
    pointsTrue = ax.scatter(xTrue[0, :], xTrue[1, :], xTrue[2, :], c="b")  # plot groundTruth points
    pointsEsti = ax.scatter(xEsti[0, :], xEsti[1, :], xEsti[2, :], c="r")  # plot estimated points
    return pointsTrue, pointsEsti


if show_animation:
    if dimension == 2:
        # Set up an animation
        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        ax.set(xlim=(border["xmin"], border["xmax"]), ylim=(border["ymin"], border["ymax"]))
        ax.set_xlabel('X (m)')
        ax.set_ylabel('Y (m)')
        title = ax.set_title('Simulated swarm')
        pointsTrue, = ax.plot([], [], linestyle="", marker="o", color="b", label="GroundTruth")
        pointsEsti, = ax.plot([], [], linestyle="", marker="o", color="r", label="Relative EKF")
        pointsTrueHead, = ax.plot([], [], linestyle="", marker=".", color="g")
        pointsEstiHead, = ax.plot([], [], linestyle="", marker=".", color="g")
        ax.legend()
        circle = plt.Circle((0, 0), 0.2, color='black', fill=False)
        ax.add_patch(circle)
        rectangle = plt.Rectangle((-5, -5), width=10, color='pink', height=8, fill=False)
        ax.add_patch(rectangle)
        time_text = ax.text(0.01, 0.97, '', transform=ax.transAxes)
        time_text.set_text('')
        # ani.save('particle_box.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
        ani = animation.FuncAnimation(fig, animate, init_func=None, frames=None, interval=1, blit=True)
        plt.show()
    elif dimension == 3:
        fig = plt.figure()
        ax = p3.Axes3D(fig)
        ax.set_xlim3d([border["xmin"], border["xmax"]])
        ax.set_xlabel('X')
        ax.set_ylim3d([border["ymin"], border["ymax"]])
        ax.set_ylabel('Y')
        ax.set_zlim3d([border["zmin"], border["zmax"]])
        ax.set_zlabel('Z')
        ax.set_title('Simulated swarm')
        # pointsTrueHead, = ax.plot([], [], linestyle="", marker=".", color="g")
        # pointsEstiHead, = ax.plot([], [], linestyle="", marker=".", color="g")
        ax.legend()
        # time_text = ax.text(0.01, 0.97, '', transform=ax.transAxes)
        # time_text.set_text('')
        ani = animation.FuncAnimation(fig, animate3D, frames=None, interval=100, blit=True)
        plt.show()
    else:
        pass
else:
    pass
[ "2693017973@qq.com" ]
2693017973@qq.com
fbe692e5e7973fbeafde0b6f60f935a1d502ce4f
e204428f6f7dad0ff48eee1192f99db6650c5b75
/desafios/ex050.py
d65316e6f23f307518a6b98a8dc382bbb1b3378c
[]
no_license
jhonatarios/curso-em-video-python
9375054db0bdd798fb1e36377f9bda9c294f0724
2057114bef8522dbddca602e091df1e6333d29f9
refs/heads/main
2023-03-21T19:03:46.897730
2021-03-15T22:05:33
2021-03-15T22:05:33
322,444,229
1
0
null
null
null
null
UTF-8
Python
false
false
195
py
s = 0
ct = 0
for c in range(1, 7):
    num = int(input("Digite um numero: "))
    if num % 2 == 0:
        s += num
        ct += 1
print(f"A soma dos {ct} numeros pares sao de {s}")
[ "noreply@github.com" ]
jhonatarios.noreply@github.com
622bd96bfeec39eee2c82f831a0cca205e65261e
7d84f1af14f8c30eeaa07d5f36542d93e03a4097
/module2/shelve_test.py
5906662ed5b18a66a6a55655be4574666f9f92ec
[]
no_license
crazy-heng/study
5ab50a73c4ce51593e493e8ecee2e591d2200a9e
034f7a5d63f278a4ac4678ed18ee63413a102757
refs/heads/master
2020-04-01T01:26:07.564735
2018-09-17T16:01:47
2018-09-17T16:01:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
298
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import shelve

f = shelve.open("shelve_test")  # open a file to store several pickle-serialized objects

names = ["alex", "rain", "test"]
info = {'name': 'alex', 'age': 22}

f["names"] = names  # persist the list
f['info_dic'] = info

print(f["names"])
f.close()
[ "fanhk@126.com" ]
fanhk@126.com
5337e58e084f5e16d09483a58c895265af1990b4
de658b734a22f7ea4294fab96706f3631193d6d4
/week4/tower_of_hanoi.py
b0e179ef11d3955074445317d4802bc86784249e
[]
no_license
chenchienlin/Algorithmic-Toolbox
ed89f638ddfb31c6848ab1329d1e945efb66db15
6a534f2e03691239b654112413f47cfa04228ad5
refs/heads/main
2023-08-17T13:24:36.316338
2021-10-12T14:03:45
2021-10-12T14:03:45
376,751,097
0
0
null
null
null
null
UTF-8
Python
false
false
340
py
import logging

logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger()


def tower_of_hanoi(n, src, dist, temp):
    if n > 0:
        tower_of_hanoi(n-1, src, temp, dist)
        LOGGER.info(f'Moved disk {n} from {src} to {dist}')
        tower_of_hanoi(n-1, temp, dist, src)


tower_of_hanoi(n=3, src='A', dist='C', temp='B')
[ "x91003502@gmail.com" ]
x91003502@gmail.com
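The recursion above satisfies T(n) = 2T(n-1) + 1 with T(0) = 0, which solves to T(n) = 2^n - 1 moves, so the n=3 call logs exactly 7 moves.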
6cbffd5154a78f534ce188a06d305f6500d1f83c
c9e95974e3f3320f2da36ba23403d46e00ac884d
/projects/mars/model_classes/StarshipUnfilteredWaterStorage.py
522b7245bb1ae90e4fac5ff9605087164a02c68d
[ "MIT" ]
permissive
ModelFlow/modelflow
877ff8d80ab2987b0572bebcf3753ae0942a5ae2
c2b720b2da8bb17462baff5c00bbe942644474b0
refs/heads/master
2023-07-12T17:22:49.540043
2021-08-26T03:51:26
2021-08-26T03:51:26
280,748,869
8
0
MIT
2021-08-18T19:48:57
2020-07-18T22:17:52
Python
UTF-8
Python
false
false
529
py
class StarshipUnfilteredWaterStorage:
    name = "StarshipUnfilteredWaterStorage"
    params = []
    states = [
        {
            "key": "unfiltered_water",
            "label": "",
            "units": "kg",
            "private": False,
            "value": 0,
            "confidence": 0,
            "notes": "",
            "source": ""
        }
    ]

    @staticmethod
    def run_step(states, params, utils):
        if states.unfiltered_water < 0:
            utils.terminate_sim_with_error("unfiltered_water < 0")
[ "1890491+adamraudonis@users.noreply.github.com" ]
1890491+adamraudonis@users.noreply.github.com
54b8b389d5f08e4c1a30532f1f7ececf6b4eb42c
20e7b2997f4dd90ec4cea01124d70ee5bcf9b58f
/server/prodeagle/counter.py
111cf99e0bfdfb5150c404631c55a58361f41a50
[]
no_license
baden/gps-maps27
77f87738984d3c711697bdbb380eeb039a1096eb
defe2925e6b35da5df6b546fd15e645d10d1a5b4
refs/heads/master
2021-03-12T23:52:22.735079
2014-01-29T12:31:20
2014-01-29T12:31:20
32,263,820
0
0
null
null
null
null
UTF-8
Python
false
false
2,232
py
#!/usr/bin/env python
#
# Copyright 2011 MiuMeet AG.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from google.appengine.api import memcache
from prodeagle import counter_names

import logging

SAVE_PRODEAGLE_STATS = True

def incr(name, delta=1, save_stats=SAVE_PRODEAGLE_STATS):
  if delta:
    incrBatch({ name : delta }, save_stats)

class Batch():
  def __init__(self):
    self.pending = {}

  def incr(self, name, delta=1):
    if delta:
      self.pending[name] = self.pending.get(name, 0) + delta

  def commit(self, save_stats=SAVE_PRODEAGLE_STATS):
    if self.pending:
      incrBatch(self.pending, save_stats)
      self.pending = {}

def incrBatch(counters, save_stats=SAVE_PRODEAGLE_STATS):
  try:
    cnm = counter_names.getDefaultCounterNamesManager()
    slot = counter_names.getEpochRounded()
    existing = memcache.offset_multi(counters, namespace=cnm.namespace,
                                     key_prefix=str(slot), initial_value=0)
    new_counter_names = []
    for name in counters:
      if (counters[name] == existing[name]):
        new_counter_names += [name]
    (data_store_access, n_added_names) = cnm.addIfNew(new_counter_names)
    if save_stats and (data_store_access or n_added_names):
      counters = Batch()
      if data_store_access:
        counters.incr("ProdEagle.Datastore.ReadAccess")
      if n_added_names:
        counters.incr("ProdEagle.NewNames", n_added_names)
        counters.incr("ProdEagle.Datastore.WriteAccess")
      counters.commit(save_stats=False)
  except:
    logging.warning("Couldn't increase the following counters: %s"
                    % ", ".join(counters.keys()))
[ "baden.i.ua@gmail.com@cd201f0b-5521-6f96-0748-8efd02dae0ad" ]
baden.i.ua@gmail.com@cd201f0b-5521-6f96-0748-8efd02dae0ad
9eb7947fa8d2ab6df2fb73bb055c5d33355a2efd
54f41c4020ce0f3159dd044618b57843310fe070
/drivers/plot_astrometric_excess.py
cfe353d09871004823d322b6cd6a4dd5286f4248
[ "MIT" ]
permissive
lgbouma/billy
f8205ce64667c3495ecbf3032629acc7395798a8
aa70e55c61085464945a49c806a2f96911f35706
refs/heads/master
2023-08-06T16:04:46.733163
2021-09-17T23:15:13
2021-09-17T23:15:13
247,311,434
0
1
null
2020-05-19T23:04:02
2020-03-14T16:22:55
TeX
UTF-8
Python
false
false
337
py
import os
import billy.plotting as bp
from billy import __path__

RESULTSDIR = os.path.join(os.path.dirname(__path__[0]), 'results')
PLOTDIR = os.path.join(RESULTSDIR, 'cluster_membership')

bp.plot_astrometric_excess(PLOTDIR, ruwe=0)
bp.plot_astrometric_excess(PLOTDIR, ruwe=1)
bp.plot_astrometric_excess(PLOTDIR, ruwe=1, talklabels=1)
[ "bouma.luke@gmail.com" ]
bouma.luke@gmail.com
6499cbe189f6bc6f6c2beb875d3039bb2e298254
e83429984b5139df15d3930f3c4a00a168c662e8
/decision_tree/decision_tree_train.py
93f707c535779fd0de50af1623c0d78747ecc8e1
[]
no_license
matrharr/machine-learning-p1
39ac2590ea73d1a1126ee66706f569e1c4c5a5ba
2dece5beafc077c56b55cac793bc2e4e0639d851
refs/heads/master
2023-03-08T18:07:11.241338
2021-02-26T17:48:26
2021-02-26T17:48:26
341,056,294
0
0
null
null
null
null
UTF-8
Python
false
false
3,744
py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from sklearn.model_selection import GridSearchCV

'''
pruning
gini vs info gain
'''

PARAMS = {
    'criterion': ['gini', 'entropy'],
    'splitter': ['best', 'random'],
    'max_depth': [10, 5, 2, None],
    'min_samples_split': [2],
    'min_samples_leaf': [1],
    'min_weight_fraction_leaf': [0.0],
    'max_features': [None],
    'random_state': [None],
    'max_leaf_nodes': [None],
    'min_impurity_decrease': [0.0],
    'class_weight': [None],
    'ccp_alpha': [0.0]
}


class DecisionTree:

    def __init__(self):
        self.clfs = []

    def get_classifer(self, x, y):
        ccp_alphas = self.get_ccp_alphas(x, y)
        # print('num of ccp alphas ', len(ccp_alphas))
        dtrees = []
        ccp_alphas = [0.003]
        for ccp_alpha in ccp_alphas:
            clf = DecisionTreeClassifier(
                criterion='gini',
                splitter='best',
                max_depth=5,
                min_samples_split=2,
                min_samples_leaf=1,
                min_weight_fraction_leaf=0.0,
                max_features=None,
                random_state=None,
                max_leaf_nodes=None,
                min_impurity_decrease=0.0,
                class_weight=None,
                ccp_alpha=ccp_alpha  # was hard-coded to 0.0, which ignored the loop variable
            )
            dtrees.append((
                clf,
                f'Decision Tree CCP Alpha {ccp_alpha}',
                f'decision_tree_model'
            ))
        return dtrees

    def save_figures(self, clf):
        self.clfs.append(clf)
        r = tree.export_text(clf)
        print(r)
        fig = plt.figure(figsize=(25, 20))
        tree.plot_tree(clf)
        fig.savefig('figures/decision_tree_figure.png')
        plt.show()

    def get_ccp_alphas(self, x, y):
        clf_dummy = DecisionTreeClassifier(random_state=0)
        path = clf_dummy.cost_complexity_pruning_path(x, y)
        ccp_alphas, impurities = path.ccp_alphas, path.impurities
        self.ccp_alphas = ccp_alphas
        # self.plot_impurity_alpha(ccp_alphas, impurities)
        return ccp_alphas

    @staticmethod
    def plot_impurity_alpha(ccp_alphas, impurities):
        fig, ax = plt.subplots()
        ax.plot(ccp_alphas[:-1], impurities[:-1], marker='o', drawstyle="steps-post")
        ax.set_xlabel("effective alpha")
        ax.set_ylabel("total impurity of leaves")
        ax.set_title("Total Impurity vs effective alpha for training set")
        fig.savefig('figures/decision_tree_impurity_vs_alpha.png')
        plt.show()

    def plot_alpha_accuracy(self, x_train, y_train, x_test, y_test):
        x_train, y_train, x_test, y_test = np.array(x_train), np.array(y_train), np.array(x_test), np.array(y_test)
        train_scores = np.array([clf.score(x_train, y_train) for clf in self.clfs])
        test_scores = np.array([clf.score(x_test, y_test) for clf in self.clfs])
        fig, ax = plt.subplots()
        ax.set_xlabel("alpha")
        ax.set_ylabel("accuracy")
        ax.set_title("Accuracy vs alpha for training and testing sets")
        ax.plot(
            self.ccp_alphas, train_scores, marker='o', label="train",
            drawstyle="steps-post"
        )
        ax.plot(self.ccp_alphas, test_scores, marker='o', label="test", drawstyle="steps-post")
        fig.savefig('figures/decision_tree_acc_vs_alpha.png')
        ax.legend()
        plt.show()

# clf = GridSearchCV(DecisionTreeClassifier(), PARAMS, cv=3)
# clf.fit(x, y)
# print(clf.best_score_)
# print(clf.best_params_)
# df = pd.DataFrame(clf.cv_results_)
# print(df)
[ "mattharris@Matts-MBP.attlocal.net" ]
mattharris@Matts-MBP.attlocal.net
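For reference on the pruning-path call used in get_ccp_alphas above, a minimal self-contained sketch (my example uses load_iris, not the assignment's dataset):

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(X, y)
print(path.ccp_alphas)  # candidate alphas; a larger alpha prunes to a smaller tree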
1529e0f00fa252921c2bbb2fe6186fb75e878075
b6a7a849593c654868c9683578d907b0d73c6ef0
/firstpython.py
435e303da45fa50a85a8877aeb566d0f433f0da5
[]
no_license
WahomeMaina/datasciencetestrepo
75047954d993d1622ebb6f7bb2b0860cc93206c2
c4a756521120f01d8c0d1bedec5570f0d18b46e8
refs/heads/main
2023-07-02T22:28:53.604012
2021-07-31T05:59:54
2021-07-31T05:59:54
391,266,376
0
0
null
2021-07-31T05:59:55
2021-07-31T05:43:48
Python
UTF-8
Python
false
false
57
py
# Display the output
print("I'll make it no matter what")
[ "noreply@github.com" ]
WahomeMaina.noreply@github.com
2c2719323ff0fc13537815774525f1408082b83b
ef881eab7a9948ca21a9275dd257602bb7344ee4
/rkenv/lib/python3.6/tokenize.py
3a5c7e5d320adcdabcc1490c40950b4c956e4f94
[]
no_license
acanto95/Catgram
220edb8ffe9c65cdc68ba2f5f1e18678a058dcf4
f3ba541d0890ec95082c5beabf27b8c924a937de
refs/heads/master
2022-08-08T09:22:33.088782
2019-06-01T22:54:22
2019-06-01T22:54:22
189,781,259
1
1
null
2022-06-22T07:12:41
2019-06-01T22:01:54
Python
UTF-8
Python
false
false
47
py
/home/canto/anaconda3/lib/python3.6/tokenize.py
[ "acanto95@bitbucket.org" ]
acanto95@bitbucket.org
8b4a96a1fa7674f5d06160f3b06ac5d582ce22c2
f304605383eb6b367ff9c44df827fa28068aeea1
/Project 1/trilocate.py
bca7726b3f76bb7e2582838f8695a25e9f3dd814
[]
no_license
cbadami/CSC-842
5c1ca7ed1de347e448955bd0e2103a563d2996be
3790c6bdbde6e1657ae834e143b924d511a15648
refs/heads/master
2020-05-27T03:02:21.448474
2019-07-21T02:10:45
2019-07-21T02:10:45
188,459,663
0
0
null
null
null
null
UTF-8
Python
false
false
2,642
py
# Author: Charles Badami
# Date: 5/23/19
# Program Name: trilocate.py

'''
Description/Purpose: This program reads in a file of comma-separated records,
representing triangulation data from three cellular base stations (longitude,
latitude, radius to target device based on signal strength) measured at a
certain point in time. The program then calculates and displays the approximate
latitude and longitude of the target device, as well as an approximate address.
'''

from geopy.geocoders import Nominatim
import math

# Convert geographical coordinates to flat cartesian
def convertToXandY(lon, lat):
    x = lon * (math.pi / 180) * 6378137
    y = lat * (math.pi / 180) * 6378137
    return (x, y)

# Compute the intersection of three base station signals
def circIntersect(x1, y1, r1, x2, y2, r2, x3, y3, r3):
    x1, y1 = convertToXandY(x1, y1)
    x2, y2 = convertToXandY(x2, y2)
    x3, y3 = convertToXandY(x3, y3)
    yA = (x2-x3) * ((x2*x2 - x1*x1) + (y2*y2 - y1*y1) + (r1*r1 - r2*r2))
    yB = (x1-x2) * ((x3*x3 - x2*x2) + (y3*y3 - y2*y2) + (r2*r2 - r3*r3))
    yC = 2 * ((y1-y2)*(x2-x3) - (y2-y3)*(x1-x2))
    y = -1 * ((yA-yB) / yC)
    xA = (y2-y3) * ((y2*y2 - y1*y1) + (x2*x2 - x1*x1) + (r1*r1 - r2*r2))
    xB = (y1-y2) * ((y3*y3 - y2*y2) + (x3*x3 - x2*x2) + (r2*r2 - r3*r3))
    xC = 2 * ((x1-x2)*(y2-y3) - (x2-x3)*(y1-y2))
    x = -1 * ((xA-xB) / xC)
    lat = y / ((math.pi / 180) * 6378137)
    lon = x / ((math.pi / 180) * 6378137)
    return (lat, lon)

# Begin main program
# Default data file path
try:
    dataFile = open("bsdata.txt", "r")
except FileNotFoundError:
    print("There is a problem finding your data file. Please check and try again.")
    quit()

# Loop through records in file, give user option to quit
for line in dataFile:
    next = input("\nTriangulate next record? ")
    # Exit loop if user does not want to continue
    if (next.lower() != 'y'):
        dataFile.close()
        break
    # Convert data from strings to floats
    data = line.split(',')
    data = [float(i) for i in data]
    latResult, lonResult = circIntersect(data[0], data[1], data[2], data[3], data[4],
                                         data[5], data[6], data[7], data[8])
    print("target latitude=" + str(latResult) + " target longitude=" + str(lonResult))
    if (not(-90 < latResult < 90) or not(-180 < lonResult < 180)):
        print("Sorry, one or both coordinates out of range, check your data.")
        break
    # GeoPy usage to translate coordinates
    geolocator = Nominatim(user_agent="trilocate.py")
    location = geolocator.reverse(str(latResult) + ", " + str(lonResult))
    print(location.address)

print("\nAll finished, goodbye!")
[ "cbadami@nwmissouri.edu" ]
cbadami@nwmissouri.edu
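The closed form in circIntersect comes from subtracting the three circle equations (x - x_i)^2 + (y - y_i)^2 = r_i^2 pairwise: the quadratic terms x^2 + y^2 cancel, leaving two linear equations in x and y that the function solves directly (a standard trilateration derivation).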
9b1527fb43a9d9e90cd494a22c11a2404f600900
dbf885d7b65f9af2c5b1f5b9e4c51823de853ce6
/pdf_decanter/alpha.py
ba2822c34d7a34f76b949d01e28631e067241b88
[ "Apache-2.0" ]
permissive
hmeine/pdfdecanter
b3d10cf7c7e05bb87dab2acbacc21df5123f3054
d33f8c577b3afcc6152f8d6af44b4bb746b6bd06
refs/heads/master
2021-01-18T15:22:01.030028
2019-03-13T10:52:49
2019-03-13T10:52:49
4,405,884
0
0
null
null
null
null
UTF-8
Python
false
false
2,250
py
# Copyright 2012-2014 Hans Meine <hans_meine@gmx.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy
from .dynqt import QtGui, qimage2ndarray


def unblend_alpha_1d(rgb, bg, c):
    """Given an Nx3 `rgb` array, a compatible background `bg`
    (e.g. Nx3 or just a 3-tuple with a fixed bg color), and a
    foreground color, return alpha array."""
    rgb = numpy.require(rgb, dtype = numpy.int32) * 256
    bg = numpy.require(bg, dtype = numpy.int32) * 256
    c = numpy.require(c, dtype = numpy.int32) * 256

    alpha = (numpy.sum(numpy.abs(rgb + 128 - bg), 1) * 255 /
             numpy.sum(numpy.abs(c - bg), 1))
    alpha = alpha.clip(0, 255)
    return alpha


def unblend_alpha(rgb, bg, c):
    rgb, bg = numpy.broadcast_arrays(rgb, bg)

    diff = rgb - bg
    diff *= (c - bg)
    if not diff.any():
        return None

    changed_y, changed_x = numpy.nonzero(diff.any(-1))
    rgb_i = rgb[changed_y, changed_x]
    bg_i = bg[changed_y, changed_x]
    alpha_i = unblend_alpha_1d(rgb_i, bg_i, c)

    alpha = numpy.zeros(bg.shape[:2], numpy.uint8)
    alpha[changed_y, changed_x] = alpha_i
    return alpha


def blend_images(bg, alpha, c):
    result = numpy.zeros(alpha.shape + (3, ), numpy.uint8)
    bg = numpy.require(bg, dtype = numpy.uint16) * (255 - alpha)[...,None]
    fg = numpy.require(c, dtype = numpy.uint16) * alpha[...,None]
    return ((bg + fg) / 255).astype(numpy.uint8)


def verified_unblend(rgb, bg, c, maxAbsDiff = 1):
    alpha = unblend_alpha(rgb, bg, c)
    if alpha is None:
        return None
    composed = blend_images(bg, alpha, c)
    diff = (composed - rgb).view(numpy.int8)
    if numpy.all(numpy.abs(diff) <= maxAbsDiff):
        return alpha
    return None
[ "hans_meine@gmx.net" ]
hans_meine@gmx.net
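A minimal round-trip sketch for the helpers above, assuming the module is importable under the path shown (pdf_decanter.alpha); the input values are synthetic and only the numpy-based functions are exercised:

import numpy
from pdf_decanter.alpha import blend_images, unblend_alpha

bg = numpy.full((4, 4, 3), 255, numpy.uint8)   # white background
c = numpy.array([0, 0, 0])                     # black foreground color
alpha = numpy.zeros((4, 4), numpy.uint8)
alpha[1:3, 1:3] = 128                          # semi-transparent square
blended = blend_images(bg, alpha, c)           # compose fg over bg
recovered = unblend_alpha(blended, bg, c)      # approximately == alpha
assert abs(int(recovered[1, 1]) - 128) <= 1    # off-by-one from rounding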
b0fd4265dbd1053b842f096f183b9d5a8d9253c2
5c3d99e7abe8b846d8c01b42af6127d68bfca71e
/cifar10.py
4a5ce4075350c136d7211c1f23d9508bafe7b34a
[]
no_license
suta0001/tbi-mice-detection
0ce6ac1e12ecafee931746fdf8b4f357e0e5da23
a5d5a31f5ddd46cbca5a2f5ec3b13d020e4e82e5
refs/heads/master
2022-07-04T03:27:38.490365
2020-10-11T21:27:55
2020-10-11T21:27:55
180,941,851
1
0
null
2022-06-16T23:30:19
2019-04-12T06:10:40
Python
UTF-8
Python
false
false
14,724
py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Builds the CIFAR-10 network. Summary of available functions: # Compute input images and labels for training. If you would like to run # evaluations, use inputs() instead. inputs, labels = distorted_inputs() # Compute inference on the model inputs to make a prediction. predictions = inference(inputs) # Compute the total loss of the prediction with respect to the labels. loss = loss(predictions, labels) # Create a graph to run one step of training with respect to the loss. train_op = train(loss, global_step) """ # pylint: disable=missing-docstring from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import re import sys import tarfile from six.moves import urllib import tensorflow as tf import cifar10_input FLAGS = tf.app.flags.FLAGS # Basic model parameters. tf.app.flags.DEFINE_integer('batch_size', 128, """Number of images to process in a batch.""") tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data', """Path to the CIFAR-10 data directory.""") tf.app.flags.DEFINE_boolean('use_fp16', False, """Train the model using fp16.""") # Global constants describing the CIFAR-10 data set. IMAGE_SIZE = cifar10_input.IMAGE_SIZE NUM_CLASSES = cifar10_input.NUM_CLASSES NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL # Constants describing the training process. MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average. NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays. LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor. INITIAL_LEARNING_RATE = 0.1 # Initial learning rate. # If a model is trained with multiple GPUs, prefix all Op names with tower_name # to differentiate the operations. Note that this prefix is removed from the # names of the summaries when visualizing a model. TOWER_NAME = 'tower' DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz' def _activation_summary(x): """Helper to create summaries for activations. Creates a summary that provides a histogram of activations. Creates a summary that measures the sparsity of activations. Args: x: Tensor Returns: nothing """ # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training # session. This helps the clarity of presentation on tensorboard. tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name) tf.summary.histogram(tensor_name + '/activations', x) tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) def _variable_on_cpu(name, shape, initializer): """Helper to create a Variable stored on CPU memory. 
Args: name: name of the variable shape: list of ints initializer: initializer for Variable Returns: Variable Tensor """ with tf.device('/cpu:0'): dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype) return var def _variable_with_weight_decay(name, shape, stddev, wd): """Helper to create an initialized Variable with weight decay. Note that the Variable is initialized with a truncated normal distribution. A weight decay is added only if one is specified. Args: name: name of the variable shape: list of ints stddev: standard deviation of a truncated Gaussian wd: add L2Loss weight decay multiplied by this float. If None, weight decay is not added for this Variable. Returns: Variable Tensor """ dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 var = _variable_on_cpu( name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)) if wd is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') tf.add_to_collection('losses', weight_decay) return var def distorted_inputs(): """Construct distorted input for CIFAR training using the Reader ops. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. Raises: ValueError: If no data_dir """ if not FLAGS.data_dir: raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') images, labels = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return images, labels def inputs(eval_data): """Construct input for CIFAR evaluation using the Reader ops. Args: eval_data: bool, indicating if one should use the train or eval data set. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. Raises: ValueError: If no data_dir """ if not FLAGS.data_dir: raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') images, labels = cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return images, labels def inference(images): """Build the CIFAR-10 model. Args: images: Images returned from distorted_inputs() or inputs(). Returns: Logits. """ # We instantiate all variables using tf.get_variable() instead of # tf.Variable() in order to share variables across multiple GPU training runs. # If we only ran this model on a single GPU, we could simplify this function # by replacing all instances of tf.get_variable() with tf.Variable(). 
# # conv1 with tf.variable_scope('conv1') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=None) conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0)) pre_activation = tf.nn.bias_add(conv, biases) conv1 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv1) # pool1 pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1') # norm1 norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1') # conv2 with tf.variable_scope('conv2') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=None) conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1)) pre_activation = tf.nn.bias_add(conv, biases) conv2 = tf.nn.relu(pre_activation, name=scope.name) _activation_summary(conv2) # norm2 norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2') # pool2 pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2') # local3 with tf.variable_scope('local3') as scope: # Move everything into depth so we can perform a single matrix multiply. reshape = tf.reshape(pool2, [images.get_shape().as_list()[0], -1]) dim = reshape.get_shape()[1].value weights = _variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1)) local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name) _activation_summary(local3) # local4 with tf.variable_scope('local4') as scope: weights = _variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1)) local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name) _activation_summary(local4) # linear layer(WX + b), # We don't apply softmax here because # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits # and performs the softmax internally for efficiency. with tf.variable_scope('softmax_linear') as scope: weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=1/192.0, wd=None) biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0)) softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name) _activation_summary(softmax_linear) return softmax_linear def loss(logits, labels): """Add L2Loss to all the trainable variables. Add summary for "Loss" and "Loss/avg". Args: logits: Logits from inference(). labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size] Returns: Loss tensor of type float. """ # Calculate the average cross entropy loss across the batch. labels = tf.cast(labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits, name='cross_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) # The total loss is defined as the cross entropy loss plus all of the weight # decay terms (L2 loss). return tf.add_n(tf.get_collection('losses'), name='total_loss') def _add_loss_summaries(total_loss): """Add summaries for losses in CIFAR-10 model. 
Generates moving average for all losses and associated summaries for visualizing the performance of the network. Args: total_loss: Total loss from loss(). Returns: loss_averages_op: op for generating moving averages of losses. """ # Compute the moving average of all individual losses and the total loss. loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') losses = tf.get_collection('losses') loss_averages_op = loss_averages.apply(losses + [total_loss]) # Attach a scalar summary to all individual losses and the total loss; do the # same for the averaged version of the losses. for l in losses + [total_loss]: # Name each loss as '(raw)' and name the moving average version of the loss # as the original loss name. tf.summary.scalar(l.op.name + ' (raw)', l) tf.summary.scalar(l.op.name, loss_averages.average(l)) return loss_averages_op def train(total_loss, global_step): """Train CIFAR-10 model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training. """ # Variables that affect learning rate. num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY) # Decay the learning rate exponentially based on the number of steps. lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.summary.scalar('learning_rate', lr) # Generate moving averages of all losses and associated summaries. loss_averages_op = _add_loss_summaries(total_loss) # Compute gradients. with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) # Apply gradients. apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # Add histograms for trainable variables. for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) # Add histograms for gradients. for grad, var in grads: if grad is not None: tf.summary.histogram(var.op.name + '/gradients', grad) # Track the moving averages of all trainable variables. variable_averages = tf.train.ExponentialMovingAverage( MOVING_AVERAGE_DECAY, global_step) with tf.control_dependencies([apply_gradient_op]): variables_averages_op = variable_averages.apply(tf.trainable_variables()) return variables_averages_op def maybe_download_and_extract(): """Download and extract the tarball from Alex's website.""" dest_directory = FLAGS.data_dir if not os.path.exists(dest_directory): os.makedirs(dest_directory) filename = DATA_URL.split('/')[-1] filepath = os.path.join(dest_directory, filename) if not os.path.exists(filepath): def _progress(count, block_size, total_size): sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin') if not os.path.exists(extracted_dir_path): tarfile.open(filepath, 'r:gz').extractall(dest_directory)
[ "sutandia@tigger-mbp1.local" ]
sutandia@tigger-mbp1.local
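For context, the functions above are meant to be composed into a single TF1-style training graph; a sketch in the spirit of the tutorial's companion training script (`cifar10` here is this module):

import tensorflow as tf
import cifar10

cifar10.maybe_download_and_extract()
with tf.Graph().as_default():
    global_step = tf.train.get_or_create_global_step()
    images, labels = cifar10.distorted_inputs()     # training inputs
    logits = cifar10.inference(images)              # forward pass
    total_loss = cifar10.loss(logits, labels)       # cross entropy + L2
    train_op = cifar10.train(total_loss, global_step)
    with tf.train.MonitoredTrainingSession() as sess:
        while not sess.should_stop():
            sess.run(train_op)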
8e0afcf06732e2ab66c09d4b9601d665cc031875
6f1034b17b49f373a41ecf3a5a8923fb4948992b
/docs/user_guide/operation/scripts/examples/argus/extraction/jan/CO2Analysis.py
994a75deb969550f3862563df6298f042c9c22f2
[ "Apache-2.0" ]
permissive
NMGRL/pychron
a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f
8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6
refs/heads/main
2023-08-30T07:00:34.121528
2023-06-12T17:43:25
2023-06-12T17:43:25
14,438,041
38
28
Apache-2.0
2023-08-09T22:47:17
2013-11-15T23:46:10
Python
UTF-8
Python
false
false
2,167
py
""" """ def main(do_cleanup=True, degas=False): set_motor('beam',beam_diameter) accum = 0 if analysis_type=='blank': info('is blank. not heating') ''' sleep cumulative time to account for blank during a multiple position analysis ''' close(description='Microbone to Turbo') numPositions=len(position) sleep(duration*max(1,numPositions)) else: ''' this is the most generic what to move and fire the laser position is always a list even if only one hole is specified ''' info('enable laser') enable() # make sure light is on before moving with video_recording('{}/{}'.format(load_identifier,run_identifier)): for i,pi in enumerate(position): ''' position the laser at pi, pi can be a holenumber or (x,y) ''' with lighting(50): sleep(2) accum+=2 move_to_position(pi, autocenter=True) sleep(2) accum+=2 if i==0: close(description='Microbone to Turbo') sleep(1) accum+=1 if degas: do_extraction() else: with grain_polygon(): do_extraction() if disable_between_positions: end_extract() end_extract() disable() if do_cleanup: sleep(max(0,cleanup-accum)) else: sleep(accum) def do_extraction(): info('begin interval') begin_interval(duration) if ramp_duration>0: info('ramping to {} at {} {}/s'.format(extract_value, ramp_rate, extract_units)) ramp(setpoint=extract_value, duration=ramp_duration, period=0.5) else: info('set extract to {}, {}'.format(extract_value, extract_units)) extract(extract_value, extract_units) #sleep(2) if pattern: info('executing pattern {}'.format(pattern)) execute_pattern(pattern) complete_interval()
[ "jirhiker@gmail.com" ]
jirhiker@gmail.com
aa14adc12aeaa5034c2d7cd473bd877685d7fde1
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
/ml-flask/Lib/site-packages/sklearn/cross_decomposition/tests/test_pls.py
b0544ed25038f3e63e4e89c778518e7c2f2b3b61
[ "MIT" ]
permissive
YaminiHP/SimilitudeApp
8cbde52caec3c19d5fa73508fc005f38f79b8418
005c59894d8788c97be16ec420c0a43aaec99b80
refs/heads/master
2023-06-27T00:03:00.404080
2021-07-25T17:51:27
2021-07-25T17:51:27
389,390,951
0
0
null
null
null
null
UTF-8
Python
false
false
130
py
version https://git-lfs.github.com/spec/v1 oid sha256:3d406cccd42fe36d7d2cdbf50adf5dac408ba8d41546fac7b174b4383836bf2d size 20584
[ "yamprakash130@gmail.com" ]
yamprakash130@gmail.com
4ad1ceee00203feb6c0899b508840cd4964e4d01
0e8c2cb2822166f5175f2f811e2275d348abc73c
/create_real_to_fake_dataset_mixed.py
a9af663af84b6f1855dbabab84737cf29157d289
[]
no_license
kittyyinhui/detect_deep_fake
0904d4df74110075b6adaaae8e25718a58d45f13
9a8c14cd38038fbe46057866249e011bcb03b840
refs/heads/master
2022-02-25T18:45:25.215111
2019-10-15T21:16:16
2019-10-15T21:16:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,219
py
import os import glob import shutil from sklearn.model_selection import train_test_split IMAGES_PER_CLASS = 10000 subsets = ['train', 'val', 'test'] subsets_prop = [0.05, 0.2, 0.75] ds1_path_train = '/media/jcneves/DATASETS/100K_FAKE/byimg_alignedlib_0.3/' ds1_path_test = '/media/jcneves/DATASETS/NVIDIA_FakeFace/byimg_alignedlib_0.3/' ds2_path_train = '/media/jcneves/DATASETS/CASIA-WebFace/byid_alignedlib_0.3/' ds2_path_test = '/media/jcneves/DATASETS/VGG_FACE_2/byid_alignedlib_0.3_train/' out_path = '/media/jcneves/DATASETS/real2fake_mixed/' ds1_files_train = [f for f in glob.glob(ds1_path_train + "*", recursive=True)] ds1_files_test = [f for f in glob.glob(ds1_path_test + "*", recursive=True)] ds2_files_train = [f for f in glob.glob(ds2_path_train + "*", recursive=True)] ds2_files_test = [f for f in glob.glob(ds2_path_test + "*", recursive=True)] print("Found " + str(len(ds1_files_train)) + " in dataset 1 (train)") print("Found " + str(len(ds1_files_test)) + " in dataset 1 (test)") print("Found " + str(len(ds2_files_train)) + " in dataset 2 (train)") print("Found " + str(len(ds2_files_test)) + " in dataset 2 (test)") # DIVIDE INTO SUBSETS BY IDENTITY ds1_files_subsets = dict() ds1_files_subsets[subsets[0]] = ds1_files_train ds1_files_subsets[subsets[1]], ds1_files_subsets[subsets[2]] = \ train_test_split(ds1_files_test, test_size=subsets_prop[2], shuffle=True, random_state=42) ds2_files_subsets = dict() ds2_files_subsets[subsets[0]] = ds2_files_train ds2_files_subsets[subsets[1]], ds2_files_subsets[subsets[2]] = \ train_test_split(ds2_files_test, test_size=subsets_prop[2], shuffle=True, random_state=42) n_img = dict() n_img[subsets[0]] = IMAGES_PER_CLASS*subsets_prop[0] n_img[subsets[1]] = IMAGES_PER_CLASS*subsets_prop[1] n_img[subsets[2]] = IMAGES_PER_CLASS*subsets_prop[2] # CLASS 0 (dataset 1 is organized by image) for s in subsets: try: os.makedirs(out_path + s + '/0') except OSError: print("Creation of the directory %s failed" % out_path) else: print("Successfully created the directory %s " % out_path) idx = 0 for i in range(len(ds1_files_subsets[s])): img_file = ds1_files_subsets[s][i] img_name = 'img_{:06d}.jpg'.format(idx) shutil.copyfile(img_file, out_path + s + '/0/' + img_name) idx = idx + 1 if idx >= n_img[s]: break # CLASS 1 (dataset 2 is organized by identity folder) for s in subsets: try: os.makedirs(out_path + s + '/1') except OSError: print("Creation of the directory %s failed" % out_path) else: print("Successfully created the directory %s " % out_path) idx = 0 for i in range(len(ds2_files_subsets[s])): id_path = ds2_files_subsets[s][i] img_files = [f for f in glob.glob(id_path + "/*.jpg")] #img_files = img_files[:imgs_per_id] for f in img_files: img_name = 'img_{:06d}.jpg'.format(idx) shutil.copyfile(f, out_path + s + '/1/' + img_name) idx = idx + 1 if idx >= n_img[s]: break if idx >= n_img[s]: break
[ "joao_crn@hotmail.com" ]
joao_crn@hotmail.com
3847b9c40874b7611323d85de6b794c53ac36b9a
d37984382cbeb4e41b979ae1831235a0858bf483
/XBlock Integration Files/xdjangobb/xblock/bin/pbr
8f4ce226b2cea235ec1cf64baba10813047cf38e
[ "MIT" ]
permissive
DANCEcollaborative/forum-xblock
e62f4a612c070c6d1ac6060f1661de37a9dd1c8c
fe8a062b4e45966a5faa5282d85799be479ec28a
refs/heads/master
2021-05-04T09:47:25.126546
2020-01-27T16:31:38
2020-01-27T16:31:38
47,434,223
7
0
null
null
null
null
UTF-8
Python
false
false
261
#!/home/akashb/dev-stuff/XBlock/xblock-djangobb/xblock/bin/python # -*- coding: utf-8 -*- import re import sys from pbr.cmd.main import main if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) sys.exit(main())
[ "sreecharan93@gmail.com" ]
sreecharan93@gmail.com
181d3cd17e58e13f5559de14189f02d9aac71b2f
7974d2761b73ea9ceb47d7036418cea93e85f56c
/src/raspberry/Webserver/externals/notification/test_notificationsender.py
81acd7d7a3385ca7b7bc28185acbf606b1e15321
[]
no_license
fongie/FlowerPower
8ed933f46fc5a7f7e69c009f7ff129da00fe44fc
69243cfc4ad7269470231f57ff050382d890b8e0
refs/heads/master
2021-04-12T06:00:15.645671
2018-05-30T07:12:49
2018-05-30T07:12:49
null
0
0
null
null
null
null
UTF-8
Python
false
false
349
py
from .notificationsender import NotificationSender def test_sendNotification(): n = NotificationSender(1, "flowerpowerkth@gmail.com") assert n.sendDrynessNotification(533) def test_sendWateringNotification(): n = NotificationSender(1, "flowerpowerkth@gmail.com") assert n.sendWateringNotification()
[ "mkorlinge@hotmail.com" ]
mkorlinge@hotmail.com
ada46cbaf56a84d5767d7967feeffc133d1a64f6
a136a55c87565d564f15838073762efca09e90c2
/1x5condition_pivot/process_omop.py
8cfeb34ea97ef6fd7c4d6f309cf8451e0d99f8a9
[]
no_license
yy6linda/synthetic_EHR_data
c9c327aedf46c482d0b501ef6836c0fc7242857b
c72e25b82ef6d648b63233043751bb0398e7b4c0
refs/heads/master
2020-05-24T04:05:06.173064
2020-03-29T18:45:55
2020-03-29T18:45:55
187,085,747
0
0
null
null
null
null
UTF-8
Python
false
false
3,982
py
import sys import numpy as np import csv '''This script takes in two condition_occurrence.csv files, one for alive and one for deceased patients, and outputs one matrix per cohort. The output matrices have neither headers nor a column index.''' if __name__ == '__main__': conditionliveFile = sys.argv[1] conditiondeathFile = sys.argv[2] outFile_live = sys.argv[3] outFile_death = sys.argv[4] binary_count = sys.argv[5] if binary_count != 'binary' and binary_count != 'count': print('You must choose either binary or count.') sys.exit() live_person_condition = {} death_person_condition = {} '''condition_type is a list of unique concept_ids in first-seen order''' condition_type = [] '''condition_index maps concept_id -> column, e.g. {434354: 2}''' condition_index = {} live_person = [] live_person_seen = set() death_person = [] death_person_seen = set() '''for the alive cohort''' print('read condition_alive') with open(conditionliveFile, 'r') as infd: infd.readline() i = 0 for line in infd: i = i + 1 print('line number: {}'.format(i)) tokens = line.strip().split(',') person_id = int(float(tokens[1])) condition = int(float(tokens[2])) # dict/set membership is O(1); scanning the lists was O(n) per row if condition not in condition_index: condition_index[condition] = len(condition_type) condition_type.append(condition) if person_id not in live_person_seen: live_person_seen.add(person_id) live_person.append(person_id) if person_id in live_person_condition: live_person_condition[person_id].append(condition) else: live_person_condition[person_id] = [condition] with open("live_person_id_list.csv", "w") as f: w1 = csv.writer(f) w1.writerows(map(lambda x: [x], live_person)) '''for the deceased cohort''' with open(conditiondeathFile, 'r') as infd: infd.readline() i = 0 for line in infd: i = i + 1 print('line number: {}'.format(i)) tokens = line.strip().split(',') person_id = int(float(tokens[1])) condition = int(float(tokens[2])) if condition not in condition_index: condition_index[condition] = len(condition_type) condition_type.append(condition) if person_id not in death_person_seen: death_person_seen.add(person_id) death_person.append(person_id) if person_id in death_person_condition: death_person_condition[person_id].append(condition) else: death_person_condition[person_id] = [condition] with open("death_person_id_list.csv", "w") as f: w1 = csv.writer(f) w1.writerows(map(lambda x: [x], death_person)) with open("condition_id_list.csv", "w") as f: w2 = csv.writer(f) for key, val in condition_index.items(): w2.writerow([key, val]) print('Constructing the matrix for deceased patients ') num_person = len(death_person) num_condition = len(condition_type) matrix = np.zeros((num_person, num_condition)).astype('float32') for i, person_id in enumerate(death_person): for code in death_person_condition[person_id]: code_index = condition_index[code] if binary_count == 'binary': matrix[i][code_index] = 1 else: matrix[i][code_index] += 1 np.save(outFile_death, matrix) print('Constructing the matrix for alive patients ') num_person = len(live_person) num_condition = len(condition_type) matrix = np.zeros((num_person, num_condition)).astype('float32') for i, person_id in enumerate(live_person): for code in live_person_condition[person_id]: code_index = condition_index[code] if binary_count == 'binary': matrix[i][code_index] = 1 else: matrix[i][code_index] += 1 np.save(outFile_live, matrix)
[ "yy6linda@gmail.com" ]
yy6linda@gmail.com
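A toy illustration of the person-by-condition matrix layout the script builds (made-up IDs; 'count' mode shown, 'binary' mode would clamp every nonzero cell to 1):

import numpy as np

person_condition = {1: [101, 102, 101], 2: [102]}   # person -> concept_ids
condition_index = {101: 0, 102: 1}                  # concept_id -> column
persons = [1, 2]                                    # row order
matrix = np.zeros((len(persons), len(condition_index)), dtype='float32')
for i, pid in enumerate(persons):
    for code in person_condition[pid]:
        matrix[i][condition_index[code]] += 1
# matrix == [[2., 1.],
#            [0., 1.]]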
224ba3d8ce19e43d06f7731760ffd3185fa30a80
782de1b97644773691934d4668e37ccf37c81708
/docs/source/conf.py
6d91a3c2803a9b3f4f921bc162741d682e716967
[ "MIT" ]
permissive
cssd2019/trafficjam
d2b92eadae1d7cb4e1e1ef6492f614a628a429ec
aeb17a456ead5d5b76cb8449e3e054a641bd3eef
refs/heads/master
2021-12-01T00:49:09.008481
2019-03-14T12:31:05
2019-03-14T12:31:05
174,993,413
2
1
MIT
2021-11-17T18:06:19
2019-03-11T12:15:42
Python
UTF-8
Python
false
false
5,718
py
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import recommonmark from recommonmark.transform import AutoStructify import os import sys sys.path.insert(0, os.path.abspath('../../trafficjam/')) # -- Project information ----------------------------------------------------- project = 'Traffic Jam' copyright = '2019, CSSD2019' author = 'CSSD2019' # The short X.Y version version = '' # The full version, including alpha/beta/rc tags release = '' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', 'recommonmark', 'sphinx.ext.napoleon', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] # source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = None # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # html_theme = 'pyramid' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['.static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. 
htmlhelp_basename = 'TrafficJamdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'TrafficJam.tex', 'Traffic Jam Documentation', 'CSSD2019', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'trafficjam', 'Traffic Jam Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'TrafficJam', 'Traffic Jam Documentation', author, 'TrafficJam', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ------------------------------------------------- # Bibliographic Dublin Core info. epub_title = project # The unique identifier of the text. This can be an ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # -- Extension configuration ------------------------------------------------- # Base URL prepended to relative links by the recommonmark url_resolver # below; set it to the project's online docs root if cross-repository # links are needed (an empty string leaves URLs unchanged). github_doc_root = '' def setup(app): app.add_config_value('recommonmark_config', { 'url_resolver': lambda url: github_doc_root + url, 'auto_toc_tree_section': 'Contents', }, True) app.add_transform(AutoStructify)
[ "c.m.jones@dur.ac.uk" ]
c.m.jones@dur.ac.uk
956561ff3b37b66a21027676f8da8935b4ca8f81
2d3ce0f749d04b66a07c8d97b5663f89e01a4321
/movie/urls.py
63123de7c2929ad462cf48d71511ebb2138e275c
[]
no_license
timdvd/Django_movies
1e7b057bf020c06a4932452313905e92af2af87a
bdbdf43d05986ed6b53cf0f4b284f7aa9018cd6e
refs/heads/main
2023-04-13T19:46:57.036602
2021-04-16T12:15:46
2021-04-16T12:15:46
335,655,761
0
0
null
null
null
null
UTF-8
Python
false
false
546
py
from django.urls import path from .views import MovieSlugView, MovieListView, FilterMoviesView, AddStarRating, SearchListView, movieComment urlpatterns = [ path('', MovieListView.as_view(), name='home'), path('filter/', FilterMoviesView.as_view(), name='filter'), path('add-rating/', AddStarRating.as_view(), name='add_rating'), path('movie/<slug:slug>/', MovieSlugView.as_view(), name='movie_detail'), path('search/', SearchListView.as_view(), name='search'), path('review/', movieComment, name='PostComment'), ]
[ "artemsabadash4@gmail.com" ]
artemsabadash4@gmail.com
84e9f0c78b55827ad2ec651e35b65bf45d5e280b
e09092b96eb5b9297d21bd2cb66d21a1017630a5
/tinkoff/investments/model/operations.py
6f54717a252fa009a293d6789671d7c3e0cab139
[ "Apache-2.0" ]
permissive
zcoder/tinkoff-api
70af3ea422cb19934fcd6c0659a077dd5be24112
362f73a64e16fa4da87e7f1a59d290037108252d
refs/heads/master
2021-02-22T18:47:00.835488
2020-03-09T16:32:52
2020-03-09T16:32:52
245,382,676
0
0
Apache-2.0
2020-03-20T18:43:10
2020-03-06T09:46:23
Python
UTF-8
Python
false
false
1,979
py
from dataclasses import dataclass from enum import Enum from typing import List, Optional from tinkoff.investments.model.base import ( BaseModel, ISODateTime, MoneyAmount, Currency, FigiName, InstrumentType, ) OperationID = str TradeID = str class OperationStatus(Enum): DONE = 'Done' DECLINE = 'Decline' PROGRESS = 'Progress' class OperationType(Enum): BUY = 'Buy' SELL = 'Sell' class OperationTypeWithCommission(Enum): BUY = 'Buy' BUY_CARD = 'BuyCard' SELL = 'Sell' BROKER_COMMISSION = 'BrokerCommission' EXCHANGE_COMMISSION = 'ExchangeCommission' SERVICE_COMMISSION = 'ServiceCommission' MARGIN_COMMISSION = 'MarginCommission' OTHER_COMMISSION = 'OtherCommission' PAY_IN = 'PayIn' PAY_OUT = 'PayOut' TAX = 'Tax' TAX_LUCRE = 'TaxLucre' TAX_DIVIDEND = 'TaxDividend' TAX_COUPON = 'TaxCoupon' TAX_BACK = 'TaxBack' REPAYMENT = 'Repayment' PART_REPAYMENT = 'PartRepayment' COUPON = 'Coupon' DIVIDEND = 'Dividend' SECURITY_IN = 'SecurityIn' SECURITY_OUT = 'SecurityOut' @dataclass class OperationTrade(BaseModel): tradeId: TradeID date: ISODateTime price: float quantity: int @dataclass class Operation(BaseModel): id: OperationID status: OperationStatus currency: Currency payment: float isMarginCall: bool date: ISODateTime trades: Optional[List[OperationTrade]] = None commission: Optional[MoneyAmount] = None price: Optional[float] = None quantity: Optional[int] = None figi: Optional[FigiName] = None instrumentType: Optional[InstrumentType] = None operationType: Optional[OperationTypeWithCommission] = None @dataclass class Operations(BaseModel): operations: List[Operation] __all__ = [ 'OperationID', 'TradeID', 'OperationStatus', 'OperationType', 'OperationTypeWithCommission', 'OperationTrade', 'Operation', 'Operations', ]
[ "fatal1ty.rnd@gmail.com" ]
fatal1ty.rnd@gmail.com
37d0b92cb46a7e7b7f5100eabf0c733065b3cab5
1141c0c420f0c57dd31ae28a82bea29557a235d4
/crustcrawler_gazebo/scripts/pick_and_place.py
cd46dbcafcd05aff520567e5022cf341232cb2c2
[]
no_license
bluetronics-India/crustcrawler
bdac92236e5de28dcc227742d63beaedbf0b58a4
18ed43fef4507101447f7c5968b8bc1c24949a39
refs/heads/master
2020-03-18T04:08:51.724588
2018-02-27T17:26:50
2018-02-27T17:26:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
13,945
py
#!/usr/bin/env python import rospy from moveit_commander import RobotCommander, PlanningSceneInterface from moveit_commander import roscpp_initialize, roscpp_shutdown from actionlib import SimpleActionClient, GoalStatus from geometry_msgs.msg import Pose, PoseStamped, PoseArray, Quaternion from moveit_msgs.msg import PickupAction, PickupGoal from moveit_msgs.msg import PlaceAction, PlaceGoal from moveit_msgs.msg import PlaceLocation from moveit_msgs.msg import MoveItErrorCodes from moveit_simple_grasps.msg import GenerateGraspsAction, GenerateGraspsGoal, GraspGeneratorOptions from tf.transformations import quaternion_from_euler import sys import copy import numpy # Create dict with human readable MoveIt! error codes: moveit_error_dict = {} for name in MoveItErrorCodes.__dict__.keys(): if not name[:1] == '_': code = MoveItErrorCodes.__dict__[name] moveit_error_dict[code] = name class Pick_Place: def __init__(self): # Retrieve params: self._table_object_name = rospy.get_param('~table_object_name', 'Grasp_Table') self._grasp_object_name = rospy.get_param('~grasp_object_name', 'Grasp_Object') self._grasp_object_width = rospy.get_param('~grasp_object_width', 0.01) self._arm_group = rospy.get_param('~arm', 'arm') self._gripper_group = rospy.get_param('~gripper', 'gripper') self._approach_retreat_desired_dist = rospy.get_param('~approach_retreat_desired_dist', 0.2) self._approach_retreat_min_dist = rospy.get_param('~approach_retreat_min_dist', 0.1) # Create (debugging) publishers: self._grasps_pub = rospy.Publisher('grasps', PoseArray, queue_size=1, latch=True) self._places_pub = rospy.Publisher('places', PoseArray, queue_size=1, latch=True) # Create planning scene and robot commander: self._scene = PlanningSceneInterface() self._robot = RobotCommander() rospy.sleep(1.0) # Clean the scene: self._scene.remove_world_object(self._table_object_name) self._scene.remove_world_object(self._grasp_object_name) # Add table and Coke can objects to the planning scene: self._pose_table = self._add_table(self._table_object_name) self._pose_coke_can = self._add_grasp_block_(self._grasp_object_name) rospy.sleep(1.0) # Define target place pose: self._pose_place = Pose() #self._pose_place.position.x = self._pose_coke_can.position.x #self._pose_place.position.y = self._pose_coke_can.position.y - 0.06 #self._pose_place.position.z = self._pose_coke_can.position.z self._pose_place.position.x = 0.2 self._pose_place.position.y = 0.3 self._pose_place.position.z = 0.0 self._pose_place.orientation = Quaternion(*quaternion_from_euler(0.0, 0.0, 0.0)) # Retrieve groups (arm and gripper): self._arm = self._robot.get_group(self._arm_group) self._gripper = self._robot.get_group(self._gripper_group) # Create grasp generator 'generate' action client: self._grasps_ac = SimpleActionClient('/moveit_simple_grasps_server/generate', GenerateGraspsAction) if not self._grasps_ac.wait_for_server(rospy.Duration(5.0)): rospy.logerr('Grasp generator action client not available!') rospy.signal_shutdown('Grasp generator action client not available!') return # Create move group 'pickup' action client: self._pickup_ac = SimpleActionClient('/pickup', PickupAction) if not self._pickup_ac.wait_for_server(rospy.Duration(5.0)): rospy.logerr('Pick up action client not available!') rospy.signal_shutdown('Pick up action client not available!') return # Create move group 'place' action client: self._place_ac = SimpleActionClient('/place', PlaceAction) if not self._place_ac.wait_for_server(rospy.Duration(5.0)): rospy.logerr('Place action client not 
available!') rospy.signal_shutdown('Place action client not available!') return # Pick Coke can object: while not self._pickup(self._arm_group, self._grasp_object_name, self._grasp_object_width): rospy.logwarn('Pick up failed! Retrying ...') rospy.sleep(1.0) rospy.loginfo('Pick up successfully') self._scene.remove_world_object(self._table_object_name) # Place Coke can object on another place on the support surface (table): while not self._place(self._arm_group, self._grasp_object_name, self._pose_place): rospy.logwarn('Place failed! Retrying ...') rospy.sleep(1.0) rospy.loginfo('Place successfully') def __del__(self): # Clean the scene: self._scene.remove_world_object(self._grasp_object_name) self._scene.remove_world_object(self._table_object_name) def _add_table(self, name): p = PoseStamped() p.header.frame_id = self._robot.get_planning_frame() p.header.stamp = rospy.Time.now() p.pose.position.x = 0.2 p.pose.position.y = 0.0 p.pose.position.z = 0.0 q = quaternion_from_euler(0.0, 0.0, 0.0) p.pose.orientation = Quaternion(*q) # Table size from ~/.gazebo/models/table/model.sdf, using the values # for the surface link. self._scene.add_box(name, p, (0.5, 1.5, 0.02)) return p.pose def _add_grasp_block_(self, name): p = PoseStamped() p.header.frame_id = self._robot.get_planning_frame() p.header.stamp = rospy.Time.now() p.pose.position.x = 0.2 p.pose.position.y = -0.3 p.pose.position.z = 0.0 q = quaternion_from_euler(0.0, 0.0, 0.0) p.pose.orientation = Quaternion(*q) # Coke can size from ~/.gazebo/models/coke_can/meshes/coke_can.dae, # using the measure tape tool from meshlab. # The box is the bounding box of the coke cylinder. # The values are taken from the cylinder base diameter and height. self._scene.add_box(name, p, (0.03, 0.03, 0.09)) return p.pose def _generate_grasps(self, pose, width): """ Generate grasps by using the grasp generator generate action; based on server_test.py example on moveit_simple_grasps pkg. """ # Create goal: goal = GenerateGraspsGoal() goal.pose = pose goal.width = width options = GraspGeneratorOptions() # simple_graps.cpp doesn't implement GRASP_AXIS_Z! 
#options.grasp_axis = GraspGeneratorOptions.GRASP_AXIS_Z options.grasp_direction = GraspGeneratorOptions.GRASP_DIRECTION_UP options.grasp_rotation = GraspGeneratorOptions.GRASP_ROTATION_FULL # @todo disabled because it works better with the default options #goal.options.append(options) # Send goal and wait for result: state = self._grasps_ac.send_goal_and_wait(goal) if state != GoalStatus.SUCCEEDED: rospy.logerr('Grasp goal failed!: %s' % self._grasps_ac.get_goal_status_text()) return None grasps = self._grasps_ac.get_result().grasps # Publish grasps (for debugging/visualization purposes): self._publish_grasps(grasps) return grasps def _generate_places(self, target): """ Generate places (place locations), based on https://github.com/davetcoleman/baxter_cpp/blob/hydro-devel/ baxter_pick_place/src/block_pick_place.cpp """ # Generate places: places = [] now = rospy.Time.now() for angle in numpy.arange(0.0, numpy.deg2rad(360.0), numpy.deg2rad(1.0)): # Create place location: place = PlaceLocation() place.place_pose.header.stamp = now place.place_pose.header.frame_id = self._robot.get_planning_frame() # Set target position: place.place_pose.pose = copy.deepcopy(target) # Generate orientation (wrt Z axis): q = quaternion_from_euler(0.0, 0.0, angle ) place.place_pose.pose.orientation = Quaternion(*q) # Generate pre place approach: place.pre_place_approach.desired_distance = self._approach_retreat_desired_dist place.pre_place_approach.min_distance = self._approach_retreat_min_dist place.pre_place_approach.direction.header.stamp = now place.pre_place_approach.direction.header.frame_id = self._robot.get_planning_frame() place.pre_place_approach.direction.vector.x = 0 place.pre_place_approach.direction.vector.y = 0 place.pre_place_approach.direction.vector.z = 0.2 # Generate post place approach: place.post_place_retreat.direction.header.stamp = now place.post_place_retreat.direction.header.frame_id = self._robot.get_planning_frame() place.post_place_retreat.desired_distance = self._approach_retreat_desired_dist place.post_place_retreat.min_distance = self._approach_retreat_min_dist place.post_place_retreat.direction.vector.x = 0 place.post_place_retreat.direction.vector.y = 0 place.post_place_retreat.direction.vector.z = 0.2 # Add place: places.append(place) # Publish places (for debugging/visualization purposes): self._publish_places(places) return places def _create_pickup_goal(self, group, target, grasps): """ Create a MoveIt! PickupGoal """ # Create goal: goal = PickupGoal() goal.group_name = group goal.target_name = target goal.possible_grasps.extend(grasps) goal.allowed_touch_objects.append(target) goal.support_surface_name = self._table_object_name # Configure goal planning options: goal.allowed_planning_time = 7.0 goal.planning_options.planning_scene_diff.is_diff = True goal.planning_options.planning_scene_diff.robot_state.is_diff = True goal.planning_options.plan_only = False goal.planning_options.replan = True goal.planning_options.replan_attempts = 20 return goal def _create_place_goal(self, group, target, places): """ Create a MoveIt! 
PlaceGoal """ # Create goal: goal = PlaceGoal() goal.group_name = group goal.attached_object_name = target goal.place_locations.extend(places) # Configure goal planning options: goal.allowed_planning_time = 7.0 goal.planning_options.planning_scene_diff.is_diff = True goal.planning_options.planning_scene_diff.robot_state.is_diff = True goal.planning_options.plan_only = False goal.planning_options.replan = True goal.planning_options.replan_attempts = 20 return goal def _pickup(self, group, target, width): """ Pick up a target using the planning group """ # Obtain possible grasps from the grasp generator server: grasps = self._generate_grasps(self._pose_coke_can, width) # Create and send Pickup goal: goal = self._create_pickup_goal(group, target, grasps) state = self._pickup_ac.send_goal_and_wait(goal) if state != GoalStatus.SUCCEEDED: rospy.logerr('Pick up goal failed!: %s' % self._pickup_ac.get_goal_status_text()) return None result = self._pickup_ac.get_result() # Check for error: err = result.error_code.val if err != MoveItErrorCodes.SUCCESS: rospy.logwarn('Group %s cannot pick up target %s!: %s' % (group, target, str(moveit_error_dict[err]))) return False return True def _place(self, group, target, place): """ Place a target using the planning group """ # Obtain possible places: places = self._generate_places(place) # Create and send Place goal: goal = self._create_place_goal(group, target, places) state = self._place_ac.send_goal_and_wait(goal) if state != GoalStatus.SUCCEEDED: rospy.logerr('Place goal failed!: %s' % self._place_ac.get_goal_status_text()) return None result = self._place_ac.get_result() # Check for error: err = result.error_code.val if err != MoveItErrorCodes.SUCCESS: rospy.logwarn('Group %s cannot place target %s!: %s' % (group, target, str(moveit_error_dict[err]))) return False return True def _publish_grasps(self, grasps): """ Publish grasps as poses, using a PoseArray message """ if self._grasps_pub.get_num_connections() > 0: msg = PoseArray() msg.header.frame_id = self._robot.get_planning_frame() msg.header.stamp = rospy.Time.now() for grasp in grasps: p = grasp.grasp_pose.pose msg.poses.append(Pose(p.position, p.orientation)) self._grasps_pub.publish(msg) def _publish_places(self, places): """ Publish places as poses, using a PoseArray message """ if self._places_pub.get_num_connections() > 0: msg = PoseArray() msg.header.frame_id = self._robot.get_planning_frame() msg.header.stamp = rospy.Time.now() for place in places: msg.poses.append(place.place_pose.pose) self._places_pub.publish(msg) def main(): p = Pick_Place() rospy.spin() if __name__ == '__main__': roscpp_initialize(sys.argv) rospy.init_node('pick_and_place') main() roscpp_shutdown()
[ "ghanimmukhtar@gmail.com" ]
ghanimmukhtar@gmail.com
67022d884040ac0bea1e479ccba8c4beed336dff
0667af1539008f9c6c0dcde2d3f50e8bbccf97f3
/source/rttov_test/profile-datasets-py/div52_zen30deg/049.py
014b3e6b8fe6e2eda40e57e97cf86c2fc8d20d1e
[ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause" ]
permissive
bucricket/projectMAScorrection
bc6b90f07c34bf3e922225b2c7bd680955f901ed
89489026c8e247ec7c364e537798e766331fe569
refs/heads/master
2021-01-22T03:54:21.557485
2019-03-10T01:47:32
2019-03-10T01:47:32
81,468,938
0
0
null
null
null
null
UTF-8
Python
false
false
8,426
py
""" Profile ../profile-datasets-py/div52_zen30deg/049.py file automaticaly created by prof_gen.py script """ self["ID"] = "../profile-datasets-py/div52_zen30deg/049.py" self["Q"] = numpy.array([ 1.607768, 4.908026, 6.733366, 6.729941, 5.497174, 6.103365, 6.266938, 6.73851 , 6.225587, 6.872485, 6.837179, 6.74052 , 6.169154, 6.034617, 5.829579, 5.663851, 5.53396 , 5.176779, 4.848233, 4.401532, 4.313137, 4.319633, 4.306272, 4.124386, 4.132007, 4.248602, 4.224276, 4.187893, 4.192073, 4.190996, 4.131251, 4.073452, 4.272927, 4.476245, 4.537179, 4.536375, 4.470891, 4.271882, 4.078291, 4.227251, 4.40695 , 4.554993, 4.668807, 4.78664 , 5.228308, 5.65959 , 5.42058 , 4.69921 , 4.09998 , 4.213713, 4.324987, 4.636491, 5.027322, 5.148917, 4.628259, 4.117955, 4.32809 , 4.6135 , 5.271461, 6.238979, 7.311918, 8.84027 , 10.3409 , 12.02073 , 13.69421 , 15.51443 , 17.41121 , 19.1762 , 20.74295 , 22.1121 , 22.55744 , 22.99555 , 22.99909 , 22.99909 , 25.63594 , 28.60045 , 37.32157 , 47.18073 , 68.09528 , 91.02165 , 124.3059 , 159.2714 , 209.2297 , 257.3883 , 302.4606 , 342.3991 , 378.7939 , 408.2828 , 428.29 , 438.2003 , 435.468 , 425.7007 , 402.7602 , 377.0934 , 354.2179 , 331.1122 , 287.748 , 105.4062 , 102.5456 , 99.79895 , 97.16031 ]) self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02, 7.69000000e-02, 1.37000000e-01, 2.24400000e-01, 3.45400000e-01, 5.06400000e-01, 7.14000000e-01, 9.75300000e-01, 1.29720000e+00, 1.68720000e+00, 2.15260000e+00, 2.70090000e+00, 3.33980000e+00, 4.07700000e+00, 4.92040000e+00, 5.87760000e+00, 6.95670000e+00, 8.16550000e+00, 9.51190000e+00, 1.10038000e+01, 1.26492000e+01, 1.44559000e+01, 1.64318000e+01, 1.85847000e+01, 2.09224000e+01, 2.34526000e+01, 2.61829000e+01, 2.91210000e+01, 3.22744000e+01, 3.56504000e+01, 3.92566000e+01, 4.31001000e+01, 4.71882000e+01, 5.15278000e+01, 5.61259000e+01, 6.09895000e+01, 6.61252000e+01, 7.15398000e+01, 7.72395000e+01, 8.32310000e+01, 8.95203000e+01, 9.61138000e+01, 1.03017000e+02, 1.10237000e+02, 1.17777000e+02, 1.25646000e+02, 1.33846000e+02, 1.42385000e+02, 1.51266000e+02, 1.60496000e+02, 1.70078000e+02, 1.80018000e+02, 1.90320000e+02, 2.00989000e+02, 2.12028000e+02, 2.23441000e+02, 2.35234000e+02, 2.47408000e+02, 2.59969000e+02, 2.72919000e+02, 2.86262000e+02, 3.00000000e+02, 3.14137000e+02, 3.28675000e+02, 3.43618000e+02, 3.58966000e+02, 3.74724000e+02, 3.90892000e+02, 4.07474000e+02, 4.24470000e+02, 4.41882000e+02, 4.59712000e+02, 4.77961000e+02, 4.96630000e+02, 5.15720000e+02, 5.35232000e+02, 5.55167000e+02, 5.75525000e+02, 5.96306000e+02, 6.17511000e+02, 6.39140000e+02, 6.61192000e+02, 6.83667000e+02, 7.06565000e+02, 7.29886000e+02, 7.53627000e+02, 7.77789000e+02, 8.02371000e+02, 8.27371000e+02, 8.52788000e+02, 8.78620000e+02, 9.04866000e+02, 9.31523000e+02, 9.58591000e+02, 9.86066000e+02, 1.01395000e+03, 1.04223000e+03, 1.07092000e+03, 1.10000000e+03]) self["CO2"] = numpy.array([ 402.6514, 402.65 , 402.6493, 402.6493, 402.6498, 402.6495, 402.6495, 402.6493, 402.6495, 402.6492, 402.6492, 402.6493, 402.6495, 402.6496, 402.6497, 402.6497, 402.6498, 402.6499, 402.65 , 402.6502, 402.6503, 402.6503, 402.6503, 402.6503, 402.6503, 402.6503, 402.6503, 402.6503, 402.6503, 402.6503, 402.6503, 402.6504, 402.6503, 402.6502, 402.6502, 402.6502, 402.6502, 402.6503, 402.6504, 402.6503, 402.6502, 402.6502, 402.8541, 403.0691, 403.2929, 403.5277, 403.7728, 404.0281, 404.2953, 404.5723, 404.8612, 405.1611, 405.473 , 405.7959, 406.1311, 406.4773, 406.8362, 407.2071, 407.5899, 407.9855, 408.393 , 408.8134, 409.2468, 409.6931, 
410.1514, 410.6236, 411.1078, 411.6061, 412.1175, 412.6429, 412.6427, 412.6425, 412.6425, 412.6425, 412.6414, 412.6402, 412.6366, 412.6325, 412.6239, 412.6144, 412.6007, 412.5863, 412.5657, 412.5458, 412.5272, 412.5107, 412.4957, 412.4835, 412.4753, 412.4712, 412.4723, 412.4763, 412.4858, 412.4964, 412.5058, 412.5154, 412.5333, 412.6085, 412.6097, 412.6108, 412.6119]) self["T"] = numpy.array([ 191.9 , 194.971, 214.676, 232.668, 247.134, 257.929, 267.219, 275.26 , 269.608, 256.747, 238.008, 217.863, 204.403, 201.578, 202.417, 203.532, 204.792, 206.342, 207.787, 209.12 , 211.257, 212.987, 214.253, 214.548, 215.387, 216.528, 218.117, 219.678, 220.765, 221.726, 221.621, 221.519, 221.818, 222.124, 222.632, 223.218, 223.781, 224.315, 224.835, 225.588, 226.347, 226.808, 226.944, 227.102, 228.452, 229.77 , 229.975, 229.376, 228.928, 229.404, 229.869, 230.256, 230.606, 230.394, 228.827, 227.291, 227.133, 227.128, 227.256, 227.496, 227.656, 227.525, 227.397, 227.016, 226.613, 226.23 , 225.861, 225.593, 225.49 , 225.433, 225.612, 225.789, 226.285, 226.776, 227.483, 228.211, 228.928, 229.634, 230.414, 231.199, 232.261, 233.357, 234.722, 236.098, 237.526, 238.872, 240.145, 241.146, 241.906, 242.42 , 242.648, 242.721, 242.486, 242.136, 241.812, 241.448, 240.382, 232.582, 232.582, 232.582, 232.582]) self["O3"] = numpy.array([ 0.9018584 , 0.9253054 , 0.9724086 , 1.053735 , 1.221718 , 1.408538 , 1.628711 , 1.903526 , 2.572281 , 3.725894 , 5.2405 , 6.655694 , 7.27563 , 7.336641 , 7.260848 , 7.043845 , 6.794437 , 6.604772 , 6.506652 , 6.487888 , 6.510639 , 6.495311 , 6.443474 , 6.314456 , 6.117427 , 5.884136 , 5.641625 , 5.407327 , 5.205067 , 5.010887 , 4.836422 , 4.667606 , 4.427792 , 4.192354 , 3.894453 , 3.574684 , 3.246262 , 2.889858 , 2.543128 , 2.193935 , 1.85263 , 1.550604 , 1.290632 , 1.040684 , 0.9736954 , 0.9082754 , 0.8842645 , 0.8902574 , 0.8971674 , 0.9108957 , 0.9243223 , 0.9590142 , 1.002112 , 1.009824 , 0.9328668 , 0.8574364 , 0.7027789 , 0.5420919 , 0.4055246 , 0.2894571 , 0.1885779 , 0.1397144 , 0.09173698, 0.07123856, 0.05412134, 0.04552073, 0.04211557, 0.04043137, 0.04159171, 0.04313536, 0.04677356, 0.05035215, 0.05464051, 0.0588663 , 0.06264894, 0.06631882, 0.06924002, 0.07196378, 0.07383947, 0.07551883, 0.07640026, 0.07714695, 0.07727824, 0.0773451 , 0.07727043, 0.07718047, 0.07710588, 0.07735876, 0.07777884, 0.078181 , 0.07815527, 0.077355 , 0.07544401, 0.07282737, 0.07122564, 0.07165137, 0.07417858, 0.07730014, 0.07730036, 0.07730057, 0.07730078]) self["CTP"] = 500.0 self["CFRACTION"] = 0.0 self["IDG"] = 0 self["ISH"] = 0 self["ELEVATION"] = 0.0 self["S2M"]["T"] = 232.582 self["S2M"]["Q"] = 105.460586896 self["S2M"]["O"] = 0.077300136162 self["S2M"]["P"] = 1013.42 self["S2M"]["U"] = 1.91405 self["S2M"]["V"] = 3.93846 self["S2M"]["WFETC"] = 100000.0 self["SKIN"]["SURFTYPE"] = 0 self["SKIN"]["WATERTYPE"] = 1 self["SKIN"]["T"] = 228.51 self["SKIN"]["SALINITY"] = 35.0 self["SKIN"]["FOAM_FRACTION"] = 0.0 self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3]) self["ZENANGLE"] = 30.0 self["AZANGLE"] = 0.0 self["SUNZENANGLE"] = 0.0 self["SUNAZANGLE"] = 0.0 self["LATITUDE"] = 76.807 self["GAS_UNITS"] = 2 self["BE"] = 0.0 self["COSBK"] = 0.0 self["DATE"] = numpy.array([1993, 12, 15]) self["TIME"] = numpy.array([0, 0, 0])
[ "bucricket@gmail.com" ]
bucricket@gmail.com
c185feffbf83d2dc23be9133e83ddf70b12a619d
968104eadb9ee676a907ac97b6950f52c0186850
/clairvoyance/apps.py
9e8ce3925a4585d4461610597d93a3f59bf7ccc4
[]
no_license
CasacoVermelho/Tiresias
3193ceb66cc8c3f9f2631517d88506c42cd793d9
0a71278ebc2659d8c9c34272215c7a56c6cf4cee
refs/heads/master
2022-11-29T18:43:02.596161
2020-07-23T02:19:17
2020-07-23T02:19:17
236,080,355
0
0
null
2022-11-22T05:16:34
2020-01-24T20:44:43
JavaScript
UTF-8
Python
false
false
99
py
from django.apps import AppConfig class ClairvoyanceConfig(AppConfig): name = 'clairvoyance'
[ "alexandrebmjr@gmail.com" ]
alexandrebmjr@gmail.com
21b690382ed272ee0d80a05bc289a485d453263f
f98fbc26f8e68d1e35ac1317b8eaa1dffe61a3c6
/resources/item.py
2f72f7cd283cbc74583dbc9c9988d77a0236ffe7
[]
no_license
latfamt/stores-rest-api-test
cbac8b767abe2d7245d8c3b21c467941da8dfa4e
f5d510b06a5727eb43db0059f350a641f4b44f62
refs/heads/master
2020-03-10T00:01:16.259573
2018-04-11T11:29:28
2018-04-11T11:29:28
129,072,029
0
0
null
null
null
null
UTF-8
Python
false
false
1,865
py
from flask_restful import Resource, reqparse from flask_jwt import jwt_required from models.item import ItemModel class Item(Resource): parser = reqparse.RequestParser() parser.add_argument('price', type=float, required=True, help="This field cannot be left blank!") parser.add_argument('store_id', type=int, required=True, help="Every item needs a store id.") @jwt_required() # a valid JWT must be sent in the request header to get correct responses def get(self, name): item = ItemModel.find_by_name(name) if item: return item.json() return {'message': 'Item not found'}, 404 def post(self, name): if ItemModel.find_by_name(name): return {'message': "An item with name '{}' already exists.".format(name)}, 400 data = Item.parser.parse_args() item = ItemModel(name, **data) try: item.save_to_db() except Exception: return {"message": "An error occurred inserting the item."}, 500 return item.json(), 201 def delete(self, name): item = ItemModel.find_by_name(name) if item: item.delete_from_db() return {'message': 'Item deleted'} return {'message': 'Item not found'}, 404 def put(self, name): data = Item.parser.parse_args() item = ItemModel.find_by_name(name) if item is None: item = ItemModel(name, **data) else: item.price = data['price'] item.save_to_db() return item.json() class ItemList(Resource): def get(self): return {'items': [x.json() for x in ItemModel.query.all()]}
[ "spirina@bradburylab.com" ]
spirina@bradburylab.com
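For orientation, resources like these are typically mounted on a Flask-RESTful Api; a hypothetical wiring sketch (the app module, routes, and secret key are assumptions, and the flask_jwt JWT(app, ...) setup that jwt_required depends on is omitted here):

from flask import Flask
from flask_restful import Api

from resources.item import Item, ItemList

app = Flask(__name__)
app.secret_key = 'change-me'          # placeholder; required by flask_jwt
api = Api(app)
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')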
5e4c077b8c07fb81f995271c73fec4e20eae5052
ee3e0a69093e82deff1bddf607f6ce0dde372c48
/BOJ/Greedy/BOJ_1931.py
a9e899115d8fd8f195ea833774f17eecb63e34a7
[]
no_license
cndqjacndqja/algorithm_python
202f9990ea367629aecdd14304201eb6fa2aa37e
843269cdf8fb9d4c215c92a97fc2d007a8f96699
refs/heads/master
2023-06-24T08:12:29.639424
2021-07-24T05:08:46
2021-07-24T05:08:46
255,552,956
0
0
null
null
null
null
UTF-8
Python
false
false
302
py
n = int(input()) data = [] for _ in range(n): data.append(list(map(int, input().split()))) data.sort(key=lambda x: x[0]) data.sort(key=lambda x: x[1]) time = data[0][1] count = 1 for i in range(1, len(data)): if data[i][0] >= time: count += 1 time = data[i][1] print(count)
[ "cndqjacndqja@naver.com" ]
cndqjacndqja@naver.com
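A note on BOJ_1931.py above: because Python's sort is stable, sorting by start time and then by end time is equivalent to one sort keyed on (end, start). A minimal sketch of that single-sort variant of the same greedy (the sample meetings are the classic ones from the problem statement, assumed here):

# Activity selection: sort by (end, start), then greedily take every
# meeting that starts at or after the last chosen end time.
def max_meetings(meetings):
    meetings = sorted(meetings, key=lambda m: (m[1], m[0]))
    count, last_end = 0, float('-inf')
    for start, end in meetings:
        if start >= last_end:
            count += 1
            last_end = end
    return count

print(max_meetings([(1, 4), (3, 5), (0, 6), (5, 7), (3, 8), (5, 9),
                    (6, 10), (8, 11), (8, 12), (2, 13), (12, 14)]))  # 4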
e58b7a43eb61bde822062cc463c22bece162affd
5ece3476103a28ead61ada84c9de11e18266a44c
/day19.py
d44bff46503ac2079730004441cff59472239c80
[]
no_license
BenjiKCF/Codewars
4950b134d9be1cec131ecfe3a2ac277dd35b7daf
7c5cd51ce0ba81a7013731bed29c2f849b5dd875
refs/heads/master
2021-01-21T08:02:28.923264
2018-10-09T08:16:17
2018-10-09T08:16:17
83,334,242
1
0
null
null
null
null
UTF-8
Python
false
false
1,304
py
s = "abcd\nefgh\nijkl\nmnop" print s + '\n' def diag_1_sym(s): splited = s.split('\n') matelem = len(splited) #4 elemlen = len(splited[0]) #4 diag = [''.join ([splited[i][j] for i in range(matelem)]) + '\n' for j in range(matelem)] last = diag[-1][:-1] diag.pop() diag.append(last) return''.join(diag) print diag_1_sym(s) print '\n' + '\n' def rot_90_clock(s): splited = s.split('\n') matelem = len(splited) #4 elemlen = len(splited[0]) #4 ndiag = ''.join([''.join ([splited[i][j] for i in range(matelem)]) + '\n' for j in range(matelem)]) nsplited = ndiag.split('\n') return '\n'.join([nsplited[i][::-1] for i in range(matelem)]) print rot_90_clock(s) print '\n' + '\n' def selfie_and_diag1(s): splited = s.split('\n') matelem = len(splited) elemlen = len(splited[0]) diag1 = diag_1_sym(s) diag2 = diag1.split('\n') return '\n'.join([splited[i] + '|' + diag2[i] for i in range(matelem)]) print selfie_and_diag1(s) def oper(fct, s): return fct(s) ############################ rot_90_clock = lambda s: zip(*s[::-1]) diag_1_sym = lambda s: zip(*s) selfie_and_diag1 = lambda s: (tuple(l + '|') + r for l, r in zip(s, diag_1_sym(s))) oper = lambda func, s: '\n'.join(map(''.join, func(s.split('\n'))))
[ "noreply@github.com" ]
BenjiKCF.noreply@github.com
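The lambda one-liners at the end of day19.py hinge on zip(*...) transposition. A small standalone check of the two core identities (written in Python 3 syntax, unlike the Python 2 file above):

m = ["abcd", "efgh", "ijkl", "mnop"]

# transpose == symmetry about the main diagonal
print(["".join(col) for col in zip(*m)])        # ['aeim', 'bfjn', 'cgko', 'dhlp']

# reverse row order first, then transpose == rotate 90 degrees clockwise
print(["".join(col) for col in zip(*m[::-1])])  # ['miea', 'njfb', 'okgc', 'plhd']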
e29a580539d21683bfcdc0617b8abd960098f9f3
7bf4d85808daf8ee3c36ea08ae989a24855cb900
/api/views/driver_views.py
42a3231ecfafc62f5c120893d8f5ffd10c35411a
[ "BSD-3-Clause" ]
permissive
newtonjain/hacktheplanet
4dc4d8c0ef9551c73ab7d2371fb3a83850216f70
c93f9afcc3fc435ee4646a294397120ed9a41f15
refs/heads/master
2016-09-05T10:43:08.881129
2015-09-25T06:24:12
2015-09-25T06:24:12
40,760,284
5
1
null
2015-09-16T23:38:59
2015-08-15T13:11:22
JavaScript
UTF-8
Python
false
false
1,052
py
from rest_framework.generics import (
    RetrieveUpdateDestroyAPIView,
    ListCreateAPIView
)
from django.http import Http404

from bmw.models import Driver
from api.serializers import drivers


class DriverListCreateView(ListCreateAPIView):
    serializer_class = drivers.DriverSerializer
    queryset = Driver.objects.all()

    def list(self, request, *args, **kwargs):
        return ListCreateAPIView.list(self, request, *args, **kwargs)

    def create(self, request, *args, **kwargs):
        return ListCreateAPIView.create(self, request, *args, **kwargs)


class DriverDetailView(RetrieveUpdateDestroyAPIView):
    serializer_class = drivers.DriverSerializer
    queryset = Driver.objects.all()

    def get_object(self):
        facebook_id = int(self.kwargs.get('pk'))
        obj = Driver.objects.filter(facebook_id=facebook_id).first()
        if not obj:
            raise Http404
        return obj

    def retrieve(self, request, *args, **kwargs):
        return RetrieveUpdateDestroyAPIView.retrieve(self, request, *args, **kwargs)
[ "moteygo@gmail.com" ]
moteygo@gmail.com
eca4a497a33c98b58d02ea6d3bfc5cf0f95e71b9
db6e64b04c0965e577034773b38a1039a264afbf
/source/schedule/migrations/0013_nbareddit_created.py
0bfb26b14cb289f74cc62fcdd2ba4db76a14d804
[]
no_license
suizo12/hs17-bkuehnis
264bdc53e7e8fb3da00a1ff7271318649b20ff66
7cab2071cd798072ef1bc0f934b97b3ed8ccc1d3
refs/heads/master
2023-01-05T21:47:12.623324
2017-12-31T16:58:43
2017-12-31T16:58:43
102,214,689
0
0
null
2022-12-27T14:57:59
2017-09-02T18:07:11
JavaScript
UTF-8
Python
false
false
478
py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-29 19:12
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('schedule', '0012_nbareddit'),
    ]

    operations = [
        migrations.AddField(
            model_name='nbareddit',
            name='created',
            field=models.FloatField(default=0),
            preserve_default=False,
        ),
    ]
[ "benjaminkuehnis@Benjamins-MacBook-Pro.local" ]
benjaminkuehnis@Benjamins-MacBook-Pro.local
cf93fbd876708c4e349b036a5b95d404569516b0
ad5864ed68f520e0737915aee4bf721835374ca9
/products/models.py
3f96c4313d6e5496534f0506a289d15d631eb752
[]
no_license
birtasu/shop
076ba6a77a29d773e4f1d7013d83891fd65d2408
5d22b20781363e16b2e763dfb9c3a55b84086bab
refs/heads/master
2022-12-05T08:50:16.121544
2020-08-24T19:15:01
2020-08-24T19:15:01
290,011,965
0
0
null
null
null
null
UTF-8
Python
false
false
1,190
py
from django.db import models


class Product(models.Model):
    name = models.CharField(max_length=64, blank=True, null=True, default=None)
    description = models.TextField(blank=True, null=True, default=None)
    price = models.DecimalField(max_digits=10, decimal_places=2, default=0)
    is_active = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)

    def __str__(self):
        return "%s" % self.name

    class Meta:
        verbose_name = 'Товар'
        verbose_name_plural = 'Товары'


class ProductImage(models.Model):
    product = models.ForeignKey(Product, blank=True, null=True, default=None, on_delete=models.CASCADE)
    image = models.ImageField(upload_to='products_images/')
    is_active = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)

    def __str__(self):
        return "%s" % self.id

    class Meta:
        verbose_name = 'Фотографія'
        verbose_name_plural = 'Фотографії'
[ "birt.asu@gmail.com" ]
birt.asu@gmail.com
32c077badc2f7d9512e3a68206522eb2f284bfe4
50ffc2724864d30e3b658e3f811cac513c91769f
/oops.py
daae0365b8f57d2603afca0227d5fba9a7e59fd0
[]
no_license
sreekripa/core2
bd71486f3e4185dbcecdb830255d06d6cb7ee983
6eed9dae2ef0cb72309d5982e728649444ea1b10
refs/heads/master
2023-05-04T11:15:47.786904
2021-05-20T12:33:20
2021-05-20T12:33:20
369,200,852
0
0
null
null
null
null
UTF-8
Python
false
false
315
py
class student:
    def __init__(self, name, mark):
        self.name = name
        self.mark = mark

    def getdata(self):
        self.name = input("enter the name")
        self.mark = input("enter the mark")

    def putdata(self):
        print(self.name, "\n", self.mark)


obj = student('', '')
obj.getdata()
obj.putdata()
[ "kripas1990@gmail.com" ]
kripas1990@gmail.com
33db89fb0758dcdce2642c390cefa673200abdec
bd455e366e474682faf738b536f4c30f7dc28277
/rayvision_sync/tests/__init__.py
145df2799f231756a0a34e96e04e1042b1d3ce5e
[ "Apache-2.0" ]
permissive
renderbus/rayvision_sync
a21018bb279a82457ba4a976648e66bf93795e23
0ddc138f2eef62349385fde2222d1045cc4cd59c
refs/heads/master
2023-05-25T06:20:51.679338
2021-08-03T10:40:56
2021-08-03T10:40:56
221,392,908
1
1
Apache-2.0
2023-05-22T23:20:33
2019-11-13T06:55:49
Python
UTF-8
Python
false
false
37
py
# -*- coding: utf-8 -*-
"""Tests."""
[ "ding625yutao@163.com" ]
ding625yutao@163.com
4633fff12c40bec683d0fca712a2a69bd0bdda3e
53ec961695a5663f5b42dd6e5289b8d1dd7e35e1
/robber.py
49c9477181c9c6119024da2b983cacddb0c292e5
[]
no_license
fchikwekwe/InterviewPrep
a5ab4123142d1f46e3223348a31ad96d03fd6716
dce45d87dbc55fa2a55435d0feec9e907d622135
refs/heads/master
2020-04-13T19:52:51.022675
2019-07-24T18:13:17
2019-07-24T18:13:17
163,414,755
0
0
null
null
null
null
UTF-8
Python
false
false
2,159
py
""" The thief has found himself a new place for his thievery again. There is only one entrance to this area, called the "root." Besides the root, each house has one and only one parent house. After a tour, the smart thief realized that "all houses in this place forms a binary tree". It will automatically contact the police if two directly-linked houses were broken into on the same night. Determine the maximum amount of money the thief can rob tonight without alerting the police. Example 1: Input: [3,2,3,null,3,null,1] 3 / \ 2 3 \ \ 3 1 Output: 7 Explanation: Maximum amount of money the thief can rob = 3 + 3 + 1 = 7. Example 2: Input: [3,4,5,1,3,null,1] [8,9,5,1,3,null,1] 3 / \ 4 5 / \ \ 1 3 1 Output: 9 Explanation: Maximum amount of money the thief can rob = 4 + 5 = 9. """ # 2 * index + 1 to get left child # 2 * index + 2 to get right child # Pseudocode ''' Pseudocode - max_vals = [] - loop over all nodes and add to queue - for node in queue - create a robbed_houses set {} - traverse all other nodes (while there are nodes in tree to check) - check that node is neither parent nor child of any node in robbed houses if not: then add value and add node to robbed houses - add the total value to max vals, return the max of max_vals write a more complicated algo to keep vals in order ''' def max_robbery(tree): max_vals = [] for node_index, node in enumerate(tree): if node: # store houses robbed on that night w/ index in dict robbed_houses = {} robbed_houses[node_index] = node for i in range(len(tree)): for ind, node in robbed_houses: left_child = 2 * ind + 1 # index right_child = 2 * ind + 2 if ind == 1 or ind == 2: parent = 0 elif ind % 2 == 1: # odd parent = (ind - 1) / 2 else: # even parent = (ind - 2) / 2 if i == left_child or i == right_child and i != parent:
[ "faithchikwekwe01@gmail.com" ]
faithchikwekwe01@gmail.com
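robber.py above is truncated mid-function, and the brute force it sketches would be exponential anyway. The problem it describes (LeetCode 337, House Robber III) is usually solved with a post-order DP that returns, for each node, the best loot with and without robbing that node. A sketch against the same array-encoded tree, reusing the 2*i+1 / 2*i+2 child layout from the file's own comments:

def rob(tree):
    """Max loot from an array-encoded binary tree; None marks a missing node."""
    def dfs(i):
        # returns (best if house i is robbed, best if house i is skipped)
        if i >= len(tree) or tree[i] is None:
            return (0, 0)
        rob_l, skip_l = dfs(2 * i + 1)
        rob_r, skip_r = dfs(2 * i + 2)
        robbed = tree[i] + skip_l + skip_r           # children must be skipped
        skipped = max(rob_l, skip_l) + max(rob_r, skip_r)
        return (robbed, skipped)

    return max(dfs(0))

print(rob([3, 2, 3, None, 3, None, 1]))  # 7, matching Example 1
print(rob([3, 4, 5, 1, 3, None, 1]))     # 9, matching Example 2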
4724acadc22fed4801330a23b3851fd883c4a845
b565eb043002c89f4d6d667069e14ab28083b76e
/Working with MongoDB/range_queries.py
94b428e1ce8087356e562e0607b46a0a10f101be
[]
no_license
jreiher2003/Data-Wrangling-with-MongoDB
002a240e7a350d2cfc3514c89bc7f42c435fe5eb
95b46f30421dbfc11298aaafbca04829b58e1212
refs/heads/master
2021-01-10T22:05:47.907618
2015-05-23T20:19:34
2015-05-23T20:19:34
35,671,850
0
0
null
null
null
null
UTF-8
Python
false
false
791
py
from pymongo import MongoClient
from pprint import pprint
from datetime import datetime

client = MongoClient("mongodb://localhost:27017")
db = client.uda


def find():
    # returns pop between 250k and 500k
    # query = {"populationTotal": {"$gt": 250000, "$lte": 500000}}

    # returns all city names that start with the letter X
    # query = {"name": {"$gt": "X", "$lte": "Y"}}

    # returns all cities with founding date between these
    # query = {"foundingDate": {"$gt": datetime(1800, 1, 1),
    #                           "$lt": datetime(1937, 12, 31)
    #                           }
    #          }

    query = {"country_label": {"$ne": "United States"}}

    cities = db.cities.find(query)

    num_cities = 0
    for c in cities:
        pprint(c)
        num_cities += 1

    print "number of cities matching: %d\n" % num_cities


if __name__ == "__main__":
    find()
[ "jeffreiher@gmail.com" ]
jeffreiher@gmail.com
229f9e9e7a5fcbf9280d61fe89ad3345a29a70b6
009e3b3bc22b2cb9d2b24740a97470691fc5c449
/AlexeiSorokin/views.py
14b0e7e151aa9541f073a735c0c791bb1aadc77e
[]
no_license
ivanm11/codeSamples
e925cec66119621ac6c59315c8dc859b74076790
40016f4a5a38408fbe433f9859b45ea25fb9f96a
refs/heads/master
2021-01-23T11:33:47.219705
2017-06-02T09:37:15
2017-06-02T09:37:15
93,149,950
0
0
null
null
null
null
UTF-8
Python
false
false
13,416
py
import datetime
import time
import io
import csv

from rest_framework import generics, permissions, status
from rest_framework.authentication import SessionAuthentication
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework_jwt.serializers import JSONWebTokenSerializer, RefreshJSONWebTokenSerializer
from rest_framework_jwt.settings import api_settings
from rest_framework.response import Response
from rest_framework_jwt.views import JSONWebTokenAPIView
from rest_framework_jwt.utils import jwt_decode_handler

from accounts.api.utils.jwt import jwt_payload_handler
from accounts.api.v1.serializers import UserFullSerializer

from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper

from jwt.exceptions import ExpiredSignatureError

from accounts.licensing.util import increment_limitable_feature_usage
from accounts.models import CustomUser
from accounts.licensing.models import ProductFeatureDailyCounter, LicensingWarning
from accounts.licensing.util import get_usage_count_from_db, check_record_limit
from logger import logger

from . import serializers

from django.contrib.auth.tokens import default_token_generator
from django.utils.http import urlsafe_base64_decode
from django.core.exceptions import ValidationError
from accounts.api.utils import email_helper

jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER


class JSONWebTokenBase(object):
    """
    Modified standard JWT methods.
    """

    def get_request_data(self, request):
        if hasattr(request, 'data'):
            data = request.data
        elif hasattr(request, 'DATA'):
            data = request.DATA
        else:
            data = None
        return data


class ObtainJSONWebToken(JSONWebTokenAPIView, JSONWebTokenBase):
    """
    API View that receives a POST with a user's username and password.

    Returns a JSON Web Token that can be used for authenticated requests.
    """
    serializer_class = JSONWebTokenSerializer

    def post(self, request):
        data = self.get_request_data(request)
        if data is None:
            return Response({'error': 'No data in response'}, status=status.HTTP_400_BAD_REQUEST)

        serializer = self.get_serializer(
            data=data
        )

        if serializer.is_valid():
            user = serializer.object.get('user') or request.user
            token = serializer.object.get('token')
            response_data = jwt_response_payload_handler(token, user, request)

            try:
                increment_limitable_feature_usage(user, 'SIGN_IN', used=1)
            except AttributeError:
                return Response({u'non_field_errors': [u'In order to log in, user should belong to Organization. \
Please contact support team.']}, status=status.HTTP_400_BAD_REQUEST)

            warnings = LicensingWarning.objects.filter(user=user, seen=False)
            for warning in warnings:
                warning.seen = True
                warning.save()

            return Response(response_data)

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


class PasswordResetView(generics.GenericAPIView):
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication,)
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = serializers.ResetPasswordSerializer

    def put(self, request, *args, **kwargs):
        uidb64 = kwargs.get('uidb64')
        token = kwargs.get('token')
        try:
            uid = urlsafe_base64_decode(uidb64)
            user = CustomUser.objects.get(pk=uid)
            if user and default_token_generator.check_token(user, token):
                serializer = serializers.ResetPasswordSerializer(user)
                serializer.save(validated_data=request.data, instance=user)
                payload = jwt_payload_handler(user)
                return Response({'token': jwt_encode_handler(payload)})
            else:
                return Response({'non_field_errors': 'Password reset has expired'},
                                status=status.HTTP_400_BAD_REQUEST)
        except (TypeError, ValueError, OverflowError) as e:
            logger.exception(e)
            return Response(status=status.HTTP_404_NOT_FOUND)


class PasswordResetRequestView(generics.GenericAPIView):
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication,)
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = serializers.ResetPasswordRequestSerializer

    def post(self, request):
        serializer = self.get_serializer(
            data=request.data
        )
        if serializer.is_valid():
            serializer.save(validated_data=request.data)
            return Response(status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


class UserCreate(generics.CreateAPIView):
    """
    POST /api/v1/account/token/register/
    """
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication,)
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = serializers.UserCreateSerializer


class MaskBase(object):
    """
    Base class for mask methods.
    """

    def get_http_auth_token(self, request):
        if not 'HTTP_AUTHORIZATION' in request.META:
            raise ValidationError({'No HTTP_AUTHORIZATION in request.META'})
        return request.META['HTTP_AUTHORIZATION'][4:]

    def encode_token(self, payload):
        payload = jwt_payload_handler(payload)
        return jwt_encode_handler(payload)

    def decode_token(self, token):
        return jwt_decode_handler(token)


class RefreshJSONWebToken(JSONWebTokenAPIView, MaskBase, JSONWebTokenBase):
    """
    GET /api/v1/account/token/refresh/
    POST /api/v1/account/token/refresh/
    """
    serializer_class = RefreshJSONWebTokenSerializer

    def post(self, request):
        data = self.get_request_data(request)
        if data is None:
            return Response(data={'error': 'No data in response'}, status=status.HTTP_400_BAD_REQUEST)

        serializer = self.get_serializer(
            data=data
        )

        if serializer.is_valid():
            try:
                payload = self.decode_token(data['token'])
            except ExpiredSignatureError:
                return Response(data={'error': 'Expired token'}, status=status.HTTP_400_BAD_REQUEST)

            user = serializer.object.get('user') or request.user
            if 'is_masked' in payload:
                token = self.encode_token(payload)
            else:
                token = serializer.object.get('token')
            response_data = jwt_response_payload_handler(token, user, request)
            return Response(response_data)

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


class MaskUser(generics.GenericAPIView, MaskBase):
    """
    POST /api/v1/account/mask-user/
    """
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated, permissions.IsAdminUser)

    def post(self, request, format=None):
        if 'email' in request.data:
            email = request.data.get('email')
            if not CustomUser.objects.get(email=request.user.email).is_staff:
                return Response(status=status.HTTP_400_BAD_REQUEST,
                                data={'error': 'Only admin users allowed'})
            try:
                masked_user = CustomUser.objects.get(email=email)
            except CustomUser.DoesNotExist as e:
                return Response(status=status.HTTP_400_BAD_REQUEST,
                                data={'error': 'No user with %s email' % email})

            original_payload = self.decode_token(self.get_http_auth_token(request))
            masked_payload = UserFullSerializer(masked_user).data

            if 'original_user' not in original_payload:
                masked_payload['original_user'] = original_payload['email']
            else:
                masked_payload['original_user'] = original_payload['original_user']

            masked_payload['is_masked'] = True
            masked_payload['is_staff'] = True

            masked_user_token = self.encode_token(masked_payload)
            return Response({'token': masked_user_token})

        return Response(status=status.HTTP_400_BAD_REQUEST, data={'error': 'No user email in request'})

    def get_queryset(self):
        return CustomUser.objects.all()


class UnmaskUser(generics.GenericAPIView, MaskBase):
    """
    POST /api/v1/account/unmask-user/
    """
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)

    def post(self, request, format=None):
        payload = self.decode_token(self.get_http_auth_token(request))
        if 'original_user' in payload:
            payload['email'] = payload['original_user']
            try:
                user = CustomUser.objects.get(email=payload['original_user'])
            except CustomUser.DoesNotExist as e:
                logger.exception(e)
                return Response(status=status.HTTP_400_BAD_REQUEST,
                                data={'error': 'No user with %s email' % payload['original_user']})

            token = self.encode_token(user)
            del payload['original_user']
            del payload['is_masked']
            return Response({'token': token})

        return Response(status=status.HTTP_400_BAD_REQUEST, data={'error': 'No original_user in payload'})


class UserListView(generics.ListAPIView, MaskBase):
    """
    GET /api/v1/account/users/
    """
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = UserFullSerializer

    def get_queryset(self):
        return CustomUser.objects.filter(organization_id=self.request.user.organization_id).select_related(
            'organization')


class UserRetrieveUpdateAPIView(generics.RetrieveUpdateAPIView):
    """
    PUT /api/v1/account/users/<user_id>/
    """
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = serializers.UserFullSerializer

    def get_queryset(self):
        return CustomUser.objects.filter(organization_id=self.request.user.organization_id)


def reports_beta(request):
    def _get_product_last_used(organization, product_name):
        pfdc = ProductFeatureDailyCounter.objects.filter(organization=organization,
                                                         feature__product__name=product_name)\
            .exclude(counter=0).order_by('date').last()
        if not pfdc:
            return None
        return pfdc.date.strftime("%Y-%m-%d %H:%M:%S")

    if request.GET.get('beta') != 'false':
        users = CustomUser.objects.filter(pk__gt=0, organization__is_beta=True)
    else:
        users = CustomUser.objects.filter(pk__gt=0)

    headers = [
        u'account_id',
        u'username',
        u'last_login',
        u'logins_count',
        u'dc_last_used',
        u'dc_run_match_count',
        u'uu_last_used',
        u'uu_run_match_count',
        u'w2l_last_used',
        u'w2l_submissions'
    ]

    today = datetime.datetime.now().date()
    week_delta = datetime.timedelta(days=7)
    range_start = today - week_delta
    range_end = today

    with io.BytesIO() as f:
        csv_writer = csv.DictWriter(f, headers)
        csv_writer.writeheader()

        for user in users:
            user_dict = {
                'account_id': user.organization.rl_salesforce_account_id,
                'username': user.email,
                'last_login': user.last_client_login.strftime("%Y-%m-%d %H:%M:%S") if user.last_client_login else '',
                'logins_count': get_usage_count_from_db(user.organization, 'GENERAL', 'SIGN_IN', range_start, range_end),
                'dc_last_used': _get_product_last_used(user.organization, 'DC'),
                'uu_last_used': _get_product_last_used(user.organization, 'UU'),
                'dc_run_match_count': get_usage_count_from_db(user.organization, 'DC', 'RUN_MATCH', range_start, range_end),
                'uu_run_match_count': get_usage_count_from_db(user.organization, 'UU', 'RUN_MATCH', range_start, range_end),
                'w2l_last_used': _get_product_last_used(user.organization, 'W2L'),
                'w2l_submissions': 0
            }

            print '=' * 50
            print u'User {}'.format(user.email)
            print user_dict
            print '=' * 50

            csv_writer.writerow(user_dict)

        f.seek(0)
        wrapper = FileWrapper(f)
        response = HttpResponse(wrapper, content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=reports_beta_'+time.strftime("%Y%m%d-%H%M%S")+'.csv'
        return response
[ "ivan@coretech.io" ]
ivan@coretech.io
9849284726b176e10ef02919e1e5ca3f027e480b
9fb7a51b32282541435111de70ef905a77463852
/string/14_longest_common_prefix.py
9bec8bda7a409a1c5094a40b6fea24ff00488db8
[]
no_license
dlin94/leetcode
874b9213e72e7baa8b75c8de8bc24afb253f3fbd
8e0ce1c4321125dc67813c2543dd7e21eefe38b1
refs/heads/master
2021-01-18T22:23:58.744203
2017-04-05T19:59:08
2017-04-05T19:59:08
87,051,753
0
0
null
null
null
null
UTF-8
Python
false
false
723
py
class Solution(object):
    def longestCommonPrefix(self, strs):
        """
        :type strs: List[str]
        :rtype: str
        """
        if strs == []:
            return ""

        min = None
        for string in strs:
            if min == None:
                min = len(string)
            else:
                if min > len(string):
                    min = len(string)

        prefix = ""
        for i in range(0, min):
            for string in strs:
                if len(prefix) == i:
                    prefix += string[i]
                else:
                    if prefix[i] != string[i]:
                        prefix = prefix[:-1]
                        return prefix
        return prefix
[ "devin.c.lin.94@gmail.com" ]
devin.c.lin.94@gmail.com
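The longestCommonPrefix solution above works but tracks the minimum length by hand (and shadows the built-in min). A more idiomatic sketch of the same column-by-column scan, using zip to walk the i-th characters of every string at once:

def longest_common_prefix(strs):
    prefix = []
    for chars in zip(*strs):        # i-th characters of every string; stops at the shortest
        if len(set(chars)) != 1:    # mismatch in this column
            break
        prefix.append(chars[0])
    return "".join(prefix)

print(longest_common_prefix(["flower", "flow", "flight"]))  # "fl"
print(longest_common_prefix([]))                            # ""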
f88b58cb39682671a044c1c50e1980bf7b0a26a3
acc4b52e9eb1012fba5e90de36ca29c1077e1df1
/HackerRank_Puzzles/collections-counter.py
db30732e2dfe073e9c2d829c2ae83e34a671ea57
[]
no_license
catomania/Random-late-night-time-wasters
41182d7d19a97fcc453915b45daf71fef1dc410e
7a0d83cca294b0c7232b9999f10773ffca45a760
refs/heads/master
2021-01-17T14:09:37.180173
2016-03-07T06:54:25
2016-03-07T06:54:25
41,021,346
0
0
null
2015-08-23T07:18:08
2015-08-19T08:04:53
Python
UTF-8
Python
false
false
709
py
# https://www.hackerrank.com/challenges/collections-counter
# where can I use Counter?
from collections import Counter

number_of_shoes = int(raw_input())
shoe_sizes = list(map(int, raw_input().split(" ")))
customers = int(raw_input())

money_earned = []

for x in range(0, customers):
    size, price = map(int, raw_input().split(" "))
    # check to see if the size is available in our inventory
    if size in shoe_sizes:
        shoe_sizes.remove(size)  # if yes, then remove it from our inventory records
        # and add it to our profit
        money_earned.append(price)
    else:
        pass

# print out money earned
print sum(money_earned)
#print Counter(money_earned)  # I don't really see where I need to use the Counter?
[ "catomania@users.noreply.github.com" ]
catomania@users.noreply.github.com
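To answer the question the file above asks itself ("where can I use Counter?"): Counter turns the inventory into a size-to-count map, so each lookup is O(1) instead of list.remove's O(n) scan. A sketch of the same loop with Counter, using the HackerRank sample data inline rather than raw_input (Python 3 syntax assumed):

from collections import Counter

def shoe_shop(shoe_sizes, requests):
    inventory = Counter(shoe_sizes)      # size -> number of pairs in stock
    earned = 0
    for size, price in requests:
        if inventory[size] > 0:          # missing keys default to 0
            inventory[size] -= 1
            earned += price
    return earned

print(shoe_shop([2, 3, 4, 5, 6, 8, 7, 6, 5, 18],
                [(6, 55), (6, 45), (6, 55), (4, 40), (18, 60), (10, 50)]))  # 200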
624701b78801d3a93b864bc83b98ba77a6f100c5
1a669b7b1311a44b168f4a43dfe67963e017d181
/Simon_discuss_01/Simon_discuss_01_1.py
ab84e1a05d03335552b5b1e921fa8a5477f42a0d
[]
no_license
JumiHsu/HeadFirst_python
4a52433000b2ca2292742a0902ad65b51e6a4cbe
110ce5f639f23ed525f41500f33890d354bbcac3
refs/heads/master
2020-04-01T18:26:05.011410
2019-05-31T10:23:58
2019-05-31T10:24:04
153,492,193
0
0
null
null
null
null
UTF-8
Python
false
false
2,776
py
import random
import math

A = []
B = []

# # first randomly draw an N; N falls within [1,100000]; let N1=100000
# # the length of A is within 100000
# # each element of A falls within [0,10000]; let N2=10000
# # ====================================================================
# # figure out how to generate a vector A with random length < N1
# # and random element values that must be < N2
# # ====================================================================
# N1=8 #100000
# N2=5 #10000
# print("length N1=", N1, ", element value upper bound N2=", N2)
# print("\n")
# # don't know how to declare an empty int variable, so set it to 0 first
# fn=0
# k=0
# # build an array; the while below uses two ANDed conditions
# while k<N1:
#     # N3 must be < N2
#     N3=random.sample(range(1,N2),1)
#     N3=int(N3[0])
#     print("element", k+1, "= vector position k=", k, ", value at position k = N3=", N3)
#     A.append(N3)
#     fn += 2**A[k]
#     k += 1
# print("vector A =", A)
# print("binarianA= fn=", fn)

# ====================================================================
# next, use fn to recover B
# ====================================================================

# use a fixed vector A as the test subject
A = [1, 0, 2, 0, 0, 2]
print(A)

# compute what the fn of this vector A is
n = len(A)
fn = 0
sum = 0
while sum < n:
    fn += 2**A[sum]
    sum += 1
print("vector A =", A, ", length of A =", n, "; corresponding fn =", fn)
print("\n")

bn = 0
sumb = 0
while bn < fn:
    # take log2 of fn, floor the value, put it into B[0]
    print("fn =", fn)
    B.append(int(math.log(fn, 2)))  # first put the floored log2 value into B[0]
    bn += 2**B[sumb]  # also compute bn along the way; sumb=0 at this point
    sumb += 1  # count + 1
    print("B[0] =", B[0])
    print("\n")

    # take log2 of fn-2**B[0], floor it the same way, put it into B[1]
    print("fn-2**B[0] =", fn - 2**B[0])
    B.append(int(math.log(fn - 2**B[0], 2)))
    print("B[1] =", B[1])
    print("\n")

    # take log2 of fn-2**B[1]-2**B[0]... repeat the cycle above
    print("fn-2**B[1]-2**B[0] =", fn - 2**B[1] - 2**B[0])
    B.append(int(math.log(fn - 2**B[1] - 2**B[0], 2)))
    print("B[2] =", B[2])
    print("\n")
    # until fn-sum(2**B[i])=0, then this loop stops

# compute what the fn of this vector B is now
n_b = len(B)
fn_b = 0
sum_b = 0
while sum_b < n_b:
    fn_b += 2**B[sum_b]
    sum_b += 1

'''Store the generated B: save its length len(B) into the integer value b,
and keep the vector B as B '''
'''Do this recursively; each time it runs, compare len(B) with the previous b.
If the new one is smaller, assign the smaller value to b and replace the old
vector B with the new vector B. Recurse N1 times. '''
'''
g=type(A[1])
print(g)
B=random.sample(A,len(A)-2)
print(B)
# floats were produced earlier so fn became a float XD
fn=2**A[0]+2**A[1]+2**A[2]+2**A[3]+2**A[4]+2**A[5]
print(fn)
'''
[ "scm80507211@gmail.com" ]
scm80507211@gmail.com
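The script above encodes A as fn = sum(2**a for a in A) and then peels powers of two back out with hand-unrolled log2 steps. The clean way to decode is to read the set bits of fn directly; a compact sketch of both directions (note that fn only preserves the *bit pattern*, not multiplicities: duplicates in A carry into higher bits, which is why A=[1,0,2,0,0,2] decodes to [3,2,0]):

def encode(A):
    return sum(2 ** a for a in A)

def decode(fn):
    # positions of the set bits of fn, highest first (what the log2 loop computes)
    B = []
    while fn:
        b = fn.bit_length() - 1   # floor(log2(fn)) without floating point
        B.append(b)
        fn -= 1 << b
    return B

A = [1, 0, 2, 0, 0, 2]
fn = encode(A)           # 13 == 0b1101
print(fn, decode(fn))    # 13 [3, 2, 0]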
576542d6ea686ab041697d184c8f9e89051dccf3
237d8320c3222b53f1a101d9ee1519626b90359c
/run.py
4a2145e43472222aed958cec46485aff15714140
[]
no_license
berni-pl/test10
d29d445676611a06f301b146895e4f5b3d906fac
cef2c0fe70239100a2b41347571cba4690487c2c
refs/heads/master
2022-12-27T00:40:14.900751
2020-10-08T08:46:50
2020-10-08T08:46:50
302,276,558
0
0
null
null
null
null
UTF-8
Python
false
false
119
py
from app import app
from db import db

db.init_app(app)


@app.before_first_request
def create_tables():
    db.create_all()
[ "bernard.wojcik@orange.com" ]
bernard.wojcik@orange.com
2e39a57194aa25779d2d48153df0c71e8707d7a7
e9d6619469178eb01e9c3c21c3551496465bbf52
/postgres_tutorial/update_vendor.py
245ea3c811bde05080240f5edeec930586713461
[]
no_license
jojoprison/OSP_parser
f4787b91979bf83c0b907d1ce9963c8da07e3f1a
671ee942e238f2f1bc75a6da1c8ffa26dd102085
refs/heads/master
2023-02-23T16:13:03.353300
2020-08-04T08:46:35
2020-08-04T08:46:35
282,286,184
0
0
null
null
null
null
UTF-8
Python
false
false
800
py
import psycopg2

from postgres_tutorial import config


def update_vendor(vendor_id, vendor_name):
    """ update vendor name based on the vendor id """
    sql = """ UPDATE vendors
                 SET vendor_name = %s
                 WHERE vendor_id = %s """
    conn = None
    updated_rows = 0
    try:
        params = config.config()
        conn = psycopg2.connect(**params)
        cur = conn.cursor()
        cur.execute(sql, (vendor_name, vendor_id))
        updated_rows = cur.rowcount
        conn.commit()
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()

    # count of rows
    return updated_rows


if __name__ == '__main__':
    print(update_vendor(5, "UPDATED VENDOR"))
[ "egyabig2@gmail.com" ]
egyabig2@gmail.com
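update_vendor above manages commit and close by hand. psycopg2 connections and cursors are also context managers (the connection's with-block commits on success and rolls back on exception, but does not close the connection), so an equivalent hedged sketch is:

import psycopg2
from postgres_tutorial import config  # same helper module as the file above

def update_vendor(vendor_id, vendor_name):
    sql = "UPDATE vendors SET vendor_name = %s WHERE vendor_id = %s"
    conn = psycopg2.connect(**config.config())
    try:
        with conn:                       # commits on success, rolls back on error
            with conn.cursor() as cur:
                cur.execute(sql, (vendor_name, vendor_id))
                return cur.rowcount      # number of rows updated
    finally:
        conn.close()                     # the with-block alone does not close it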
d7df173677d16a737e72b21039c5e416a2a8aac4
f1614f3531701a29a33d90c31ab9dd6211c60c6b
/test/menu_sun_integration/handlers/test_dequeue_order_brf_handler.py
b0b9d80b5b6946e5e623a62ea39f57dd5a4f585e
[]
no_license
pfpacheco/menu-sun-api
8a1e11543b65db91d606b2f3098847e3cc5f2092
9bf2885f219b8f75d39e26fd61bebcaddcd2528b
refs/heads/master
2022-12-29T13:59:11.644409
2020-10-16T03:41:54
2020-10-16T03:41:54
304,511,679
0
0
null
null
null
null
UTF-8
Python
false
false
4,207
py
import pytest
import responses
import json
import os
from mock import patch

from menu_sun_api.application.order_service import OrderService
from menu_sun_api.domain.model.order.order import OrderStatusType
from menu_sun_api.domain.model.order.order_repository import OrderRepository
from menu_sun_api.domain.model.seller.seller import IntegrationType
from menu_sun_integration.application.services.order_integration_service import OrderIntegrationService
from menu_sun_integration.infrastructure.aws.sqs.order_sqs_queue import OrderSQSQueue
from test.menu_sun_api.db.customer_factory import CustomerFactory, CustomerMetafield
from test.menu_sun_api.db.order_factory import OrderFactory, OrderStatusFactory
from test.menu_sun_api.db.seller_factory import SellerFactory
from test.menu_sun_api.integration_test import IntegrationTest
from test.menu_sun_integration.infrastructure.aws.sqs.mocks.order_queue_brf_mock import mock_queue_make_api_call
from datetime import datetime

here = os.path.dirname(os.path.realpath(__file__))


class TestDequeueOrderService(IntegrationTest):

    @responses.activate
    @patch('botocore.client.BaseClient._make_api_call', new=mock_queue_make_api_call)
    def test_dequeue_order_from_aws_sqs_to_brf(self, session):
        seller = SellerFactory.create(id=21, integration_type=IntegrationType.BRF)
        session.commit()

        payment_code = CustomerMetafield(key="payment_code", value="007",
                                         namespace="Pagamento com 07 dias com Boleto Bancário")
        grade = CustomerMetafield(key="grade", value="Seg,Qua,Sex,", namespace="grade")

        customer = CustomerFactory.create(seller_id=seller.id, cep='09185030',
                                          uf='SP', document='00005234000121')
        customer.change_metafield(payment_code)
        customer.change_metafield(grade)

        json_file = open(
            os.path.join(
                here,
                '../infrastructure/brf/brf_response/get_customer_response.json'))
        response = json.load(json_file)

        responses.add(responses.GET,
                      f'https://{os.getenv("BRF_API_URL")}/clients/v1/Client/?document={customer.document}'
                      f'&CEP={customer.cep}',
                      json=response, status=200)

        order = OrderFactory.create(seller_id=seller.id, customer=customer,
                                    integration_date=None,
                                    delivery_date=datetime.utcnow().isoformat(),
                                    statuses=[OrderStatusFactory(status=OrderStatusType.NEW),
                                              OrderStatusFactory(status=OrderStatusType.APPROVED)])
        session.commit()

        json_file = open(
            os.path.join(
                here,
                '../infrastructure/brf/brf_response/send_order_response.json'))
        response = json.load(json_file)

        responses.add(responses.POST,
                      'https://{}/orders/v1/Order'.format(os.getenv("BRF_API_URL")),
                      json=response, status=200)

        order_sqs_queue = OrderSQSQueue(url="https://sqs.us-west-2.amazonaws.com/976847220645/order-queue")
        domain_repository = OrderRepository()
        domain_service = OrderService(repository=domain_repository)
        integration_service = OrderIntegrationService(session,
                                                      platform_service=order_sqs_queue,
                                                      order_service=domain_service)

        integration_service.post_orders_to_seller()

        result = domain_service.get_order(seller.id, order.order_id)
        assert result.value.integration_date is not None
        assert result.value.commissioned
        assert result.value.metafields[0].namespace == "COMMISSION_ATTRIBUTES"
        assert result.value.metafields[0].key == "CUSTOMER_STATUS"
        assert result.value.metafields[0].value == "ALREADY_REGISTERED"
        assert result.value.metafields[1].namespace == "COMMISSION_ATTRIBUTES"
        assert result.value.metafields[1].key == "LAST_ORDERED_DATE"
        assert result.value.metafields[1].value == "20190105"
[ "pfpacheco@gmail.com" ]
pfpacheco@gmail.com
72f34ff1c55d46a4f0a287f128cdd5471cfef0eb
9b59c5ce1b57b8bd066fcee4b55c821893bc50fb
/balloon_learning_environment/env/gym.py
11a1d2a8f067924755b1bb004f5652117e69edcd
[ "Apache-2.0" ]
permissive
google/balloon-learning-environment
b485c62bab04ce8308ed8de3358d4303e601cf18
72082feccf404e5bf946e513e4f6c0ae8fb279ad
refs/heads/master
2023-08-31T04:41:02.819901
2022-12-19T17:14:38
2022-12-19T17:18:57
418,619,484
108
14
Apache-2.0
2023-08-16T23:21:29
2021-10-18T18:20:09
Python
UTF-8
Python
false
false
1,787
py
# coding=utf-8
# Copyright 2022 The Balloon Learning Environment Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Balloon Learning Environment gym utilities."""

import contextlib


def register_env() -> None:
  """Register the Gym environment."""
  # We need to import Gym's registration module inline or else we'll
  # get a circular dependency that will result in an error when importing gym
  from gym.envs import registration  # pylint: disable=g-import-not-at-top

  env_id = 'BalloonLearningEnvironment-v0'
  env_entry_point = 'balloon_learning_environment.env.balloon_env:BalloonEnv'

  # We guard registration by checking if our env is already registered.
  # This is necessary because the plugin system will load our module,
  # which also calls this function. If multiple `register()` calls are
  # made this will result in a warning to the user.
  registered = env_id in registration.registry.env_specs

  if not registered:
    with contextlib.ExitStack() as stack:
      # This is a workaround for Gym 0.21 which didn't support
      # registering into the root namespace with the plugin system.
      if hasattr(registration, 'namespace'):
        stack.enter_context(registration.namespace(None))

      registration.register(id=env_id, entry_point=env_entry_point)
[ "joshgreaves@google.com" ]
joshgreaves@google.com
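Once registration has happened, the environment is created through the normal Gym entry point. A minimal usage sketch, assuming the balloon_learning_environment package is installed; calling register_env() explicitly is safe even if the plugin system already registered the env, because of the guard shown above:

import gym
from balloon_learning_environment.env.gym import register_env

register_env()
env = gym.make('BalloonLearningEnvironment-v0')
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())  # Gym 0.21-era 4-tuple API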
1d5e2354f5745da2b2c4a57982568842e198e1bc
f9bbee3557056875b323369e9f8408bf173274de
/top/api/rest/OpenimChatlogsGetRequest.py
45d22536235d895066739c3d40d7d49e760346c7
[ "MIT" ]
permissive
Akagi201/pycsc
9655ff334c0911c94027adc7dcfc4eb7b79286b6
fb3fb2613e65604a38761c866f910db335298f80
refs/heads/master
2016-09-06T17:02:32.368648
2015-09-18T08:36:32
2015-09-18T08:36:32
42,708,583
1
0
null
null
null
null
UTF-8
Python
false
false
409
py
'''
Created by auto_sdk on 2015.09.11
'''
from top.api.base import RestApi


class OpenimChatlogsGetRequest(RestApi):
    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        self.begin = None
        self.count = None
        self.end = None
        self.next_key = None
        self.user1 = None
        self.user2 = None

    def getapiname(self):
        return 'taobao.openim.chatlogs.get'
[ "akagi201@gmail.com" ]
akagi201@gmail.com
518a2ae9072026482b6974fb55a3ad79a336c96e
42017610bcda1e52f65a0ad922281b4ac186e817
/account/mixins.py
e3ad9cb030f44a9328e4c2f0774b0a092775b5b7
[]
no_license
AmirRezaNasir/juylamish
04a9dfad7139acbdde93668b77ee4852c1d9e20e
2e3b24446904c5d0c7511cf528db7887be980859
refs/heads/master
2023-02-25T07:35:46.619449
2021-02-02T07:31:26
2021-02-02T07:31:26
335,049,307
1
0
null
null
null
null
UTF-8
Python
false
false
1,583
py
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect

from blog.models import Article


class FieldsMixin():
    def dispatch(self, request, *args, **kwargs):
        self.fields = [
            "title",
            "slug",
            "category",
            "description",
            "thumbnail",
            "publish",
            "is_special",
            "status",
        ]

        if request.user.is_superuser:
            self.fields.append("author")

        return super().dispatch(request, *args, **kwargs)


class FormValidMixin():
    def form_valid(self, form):
        if self.request.user.is_superuser:
            form.save()
        else:
            self.obj = form.save(commit=False)
            self.obj.author = self.request.user
            if not self.obj.status == 'i':
                self.obj.status = 'd'
        return super().form_valid(form)


class AuthorAccessMixin():
    def dispatch(self, request, pk, *args, **kwargs):
        article = get_object_or_404(Article, pk=pk)
        if article.author == request.user and article.status in ['b', 'd'] or \
                request.user.is_superuser:
            return super().dispatch(request, *args, **kwargs)
        else:
            raise Http404("You can't see this page.")


class AuthorsAccessMixin():
    def dispatch(self, request, *args, **kwargs):
        if request.user.is_authenticated:
            if request.user.is_superuser or request.user.is_author:
                return super().dispatch(request, *args, **kwargs)
            else:
                return redirect("account:profile")
        else:
            return redirect("account:login")


class SuperUserAccessMixin():
    def dispatch(self, request, *args, **kwargs):
        if request.user.is_superuser:
            return super().dispatch(request, *args, **kwargs)
        else:
            raise Http404("You can't see this page.")
[ "AmirRezaNasiri830@gmail.com" ]
AmirRezaNasiri830@gmail.com
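These mixins are meant to be composed with Django's generic class-based views, and order matters because each dispatch/form_valid defers to super(). A hypothetical view wiring them together (ArticleCreateView and the template path are illustrative assumptions, not part of the file):

from django.views.generic import CreateView
from blog.models import Article


class ArticleCreateView(AuthorsAccessMixin, FieldsMixin, FormValidMixin, CreateView):
    # AuthorsAccessMixin gates dispatch, FieldsMixin injects the field list,
    # FormValidMixin stamps the author and forces draft status for non-admins.
    model = Article
    template_name = "registration/article-create-update.html"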
2a36feee1ef45f6fc4a9241269d207a45aaf7b8e
56c5c7ac9ac16a22d540ffe5dad5288f063dd7c2
/run_raster2file.py
3257ff07092e4141b8c4959084f8b6c131f99257
[]
no_license
multimap-geoservice/mapscript_publisher
5566523c989f788f19133d7e4715b15b41596868
bc8c0b632cf48788bfedd9f0f375eba882b97813
refs/heads/master
2021-07-04T11:41:10.095827
2019-10-03T12:33:56
2019-10-03T12:33:56
173,811,037
1
0
null
null
null
null
UTF-8
Python
false
false
753
py
# -*- coding: utf-8 -*-
# encoding: utf-8

import os, sys

from map_pub import BuildMapRes, PubMapWEB


if __name__ == "__main__":
    """
    script_name db_host
    """
    mapjsonfile = "./maps/raster2file.json"
    db_host = sys.argv[1]

    debug_path = "{}/GIS/mapserver/debug".format(os.environ["HOME"])

    # build map
    builder = BuildMapRes()
    builder.load4file(mapjsonfile)
    #builder.debug = True
    builder.debug = '{}/build.log'.format(debug_path)
    builder.mapjson["VARS"]["db_host"] = db_host
    builder.build()

    # run web
    pubmap = PubMapWEB(builder.mapdict, port=3008)
    pubmap.debug_json_file(debug_path)
    pubmap.debug_python_mapscript(debug_path)
    pubmap.debug_map_file(debug_path)
    pubmap.wsgi()
[ "old_bay@mail.ru" ]
old_bay@mail.ru
93e50eefc2234eec196602851b35aeefcc71aaf7
0839b96b6f5642deb307430838528886d39a216d
/lightbbs/models/role.py
8523442dd3fbeddacd8d15051d3845900dd285ff
[]
no_license
duwen135/lightbbs
e76636f44e437e45be82ccc66f6b441716b644df
798d846b57b8470ec2f6702abf9683a3fece22f2
refs/heads/master
2022-10-01T16:43:56.073984
2019-12-20T15:23:13
2019-12-20T15:23:13
120,587,207
1
0
null
2022-09-16T17:45:48
2018-02-07T08:38:04
HTML
UTF-8
Python
false
false
1,395
py
# -*- coding:utf-8 -*-
__author__ = 'duwen'

from lightbbs import db


class Permission:
    FOLLOW = 0x01
    COMMENT = 0x02
    WRITE_ARTICLES = 0x04
    MODERATE_COMMENTS = 0x08
    ADMINISTER = 0x80


class Role(db.Model):
    __tablename__ = 'lb_roles'
    id = db.Column(db.Integer, primary_key=True)
    role_name = db.Column(db.String(64), unique=True)
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    user_num = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        for r in roles:
            role = Role.query.filter_by(role_name=r).first()
            if role is None:
                role = Role(role_name=r)
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.role_name
[ "duwen135@163.com" ]
duwen135@163.com
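The Permission class above packs each capability into one bit of Role.permissions, so membership checks are bitwise ANDs against the role's mask. A small standalone illustration (the can() helper is hypothetical, not part of the file; the constants mirror the Permission class):

# bit flags mirroring the Permission class above
FOLLOW, COMMENT, WRITE_ARTICLES, MODERATE_COMMENTS, ADMINISTER = 0x01, 0x02, 0x04, 0x08, 0x80

def can(mask, permission):
    return mask & permission == permission   # the permission's bit must be set

user_mask = FOLLOW | COMMENT | WRITE_ARTICLES   # 0x07, the 'User' role
print(can(user_mask, COMMENT))                  # True
print(can(user_mask, MODERATE_COMMENTS))        # False
print(can(0xff, ADMINISTER))                    # True - Administrator grants everything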
5539adec8f877ecb9ac9cc5c4e3ad33124241453
679b58480bd1907f179233b10684ca4ce963d689
/Advanced Algorithms and Complexity/Programming-Assignment-2/diet/diet.py
7e0e535b49bcba815b7491e516bf4a28a0a76f80
[]
no_license
romandadkov/Data-Structures-and-Algorithms
1eaca22a67c4b0a306123f8b92d8a08f761ea59b
a5cae8bd765a404be270403ba08462e2b16988a3
refs/heads/main
2023-06-04T02:11:05.303454
2021-06-27T09:36:54
2021-06-27T09:36:54
355,097,636
0
0
null
null
null
null
UTF-8
Python
false
false
3,328
py
# python3
from sys import stdin
import itertools
import copy

EPS = 1e-18
PRECISION = 18


class Position:
    def __init__(self, row, col):
        self.row = row
        self.col = col


def SelectPivotElement(pivot, a, used_rows):
    while pivot.row < len(a) and (used_rows[pivot.row] or a[pivot.row][pivot.col] == 0):
        pivot.row += 1
    if pivot.row == len(a):
        return False
    else:
        return pivot


# swap row to top of non-pivot rows
def SwapLines(a, b, used_rows, pivot):
    a[pivot.col], a[pivot.row] = a[pivot.row], a[pivot.col]
    b[pivot.col], b[pivot.row] = b[pivot.row], b[pivot.col]
    used_rows[pivot.col], used_rows[pivot.row] = used_rows[pivot.row], used_rows[pivot.col]
    pivot.row = pivot.col


def ProcessPivotElement(a, b, pivot, used_rows):
    scale = a[pivot.row][pivot.col]
    if scale != 1:
        for i in range(len(a)):
            a[pivot.row][i] /= scale
        b[pivot.row] /= scale

    for i in range(len(a)):
        if i != pivot.row:
            multiple = a[i][pivot.col]
            for j in range(len(a)):
                a[i][j] -= a[pivot.row][j] * multiple
            b[i] -= b[pivot.row] * multiple

    used_rows[pivot.row] = True


def FindSubsets(n, m):
    lst = list(range(n + m + 1))
    subsets = list(map(set, itertools.combinations(lst, m)))
    return subsets


def GaussianElimination(subset, A, B):
    # make equation
    a = []
    b = []
    for i in subset:
        a.append(copy.deepcopy(A[i]))
        b.append(copy.deepcopy(B[i]))

    # solve equation
    size = len(a)
    used_rows = [False] * size
    for i in range(size):
        pivot = Position(0, i)
        pivot = SelectPivotElement(pivot, a, used_rows)
        if not pivot:
            return None
        else:
            SwapLines(a, b, used_rows, pivot)
            ProcessPivotElement(a, b, pivot, used_rows)
    return b


def CheckSolution(solution, A, B, m):
    for i in range(len(A)):
        sum = 0
        for j in range(m):
            sum += A[i][j] * solution[j]
        if sum - B[i] > 0.00001:
            return False
    return True


def solve_diet_problem(n, m, A, B, c):
    for i in range(m):
        lst = [0] * m
        lst[i] = -1
        A.append(lst)
        B.append(0)
    A.append([1] * m)
    B.append(1000000001)

    subsets = FindSubsets(n, m)
    solutions = []
    for subset in subsets:
        solution = GaussianElimination(subset, A, B)
        if solution is not None:
            if CheckSolution(solution, A, B, m):
                solutions.append(solution)

    if len(solutions) == 0:
        return [-1, [0] * m]
    else:
        best = float('-inf')
        result = None
        for s in solutions:
            p = 0
            for i in range(m):
                p += c[i] * s[i]
            if p > best:
                best = p
                result = s
        temp = 0
        for e in result:
            temp += e
        if temp > 1000000000:
            return [1, [0] * m]
        else:
            return [0, result]


n, m = list(map(int, stdin.readline().split()))
A = []
for i in range(n):
    A += [list(map(int, stdin.readline().split()))]
b = list(map(int, stdin.readline().split()))
c = list(map(int, stdin.readline().split()))

anst, ansx = solve_diet_problem(n, m, A, b, c)

if anst == -1:
    print("No solution")
if anst == 0:
    print("Bounded solution")
    print(' '.join(list(map(lambda x: '%.18f' % x, ansx))))
if anst == 1:
    print("Infinity")
[ "r.dadkov@gmail.com" ]
r.dadkov@gmail.com
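diet.py above solves the LP by brute force: it enumerates every m-subset of the inequality constraints, solves each as a linear system, and keeps the feasible vertices; the extra constraint sum(x) <= 10^9 + 1 is a sentinel used to detect unboundedness. For comparison, the same problem class can be sketched with scipy's LP solver (scipy is an assumption here and usually unavailable to course graders, which is presumably why the file hand-rolls Gaussian elimination):

import numpy as np
from scipy.optimize import linprog

# maximize c.x subject to A x <= b, x >= 0  ->  minimize (-c).x for linprog
A = [[-1, -1], [1, 0], [0, 1]]   # x1 + x2 >= 1, x1 <= 2, x2 <= 2
b = [-1, 2, 2]
c = [-1, -1]                     # maximize x1 + x2

res = linprog(c=c, A_ub=A, b_ub=b, bounds=(0, None))
if res.status == 3:
    print("Infinity")            # unbounded
elif not res.success:
    print("No solution")         # infeasible
else:
    print("Bounded solution")
    print(res.x)                 # [2. 2.] for this instance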
6559b442e05fe8a5d03fa1a64d5212a1da67aaa6
467f9e8d2181c6cfba59afc4c596e328fa1ddbbc
/api/account_serializers.py
e444315e61ac2edc45a134b47cb87b72041bfb11
[]
no_license
jphalis/oby
40af213349d7d68e748d34b25c14653876874110
20d4b43f7918c98044f82e7bdb9c34dcc10a7994
refs/heads/master
2021-05-21T11:13:58.612908
2016-06-15T22:05:20
2016-06-15T22:05:20
36,603,721
0
0
null
null
null
null
UTF-8
Python
false
false
3,560
py
from rest_framework import serializers
from rest_framework.reverse import reverse as api_reverse

from accounts.models import Advertiser, Follower, MyUser
from photos.models import Photo
from .photo_serializers import PhotoSerializer


class FollowerCreateSerializer(serializers.ModelSerializer):
    user = serializers.CharField(source='user.username', read_only=True)

    class Meta:
        model = Follower
        fields = ('user', 'followers',)


class FollowerSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Follower
        fields = ('get_followers_count', 'get_following_count',
                  'get_followers_info', 'get_following_info',)


class AccountCreateSerializer(serializers.ModelSerializer):
    class Meta:
        model = MyUser
        fields = ('id', 'username', 'email', 'password',)
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        user = MyUser.objects.create(
            username=validated_data['username'],
            email=validated_data['email']
        )
        user.set_password(validated_data['password'])
        user.save()
        return user


class MyUserSerializer(serializers.HyperlinkedModelSerializer):
    account_url = serializers.SerializerMethodField()
    follower = FollowerSerializer(read_only=True)
    photo_set = serializers.SerializerMethodField()
    username = serializers.CharField(read_only=True)
    is_active = serializers.BooleanField(read_only=True)
    is_admin = serializers.BooleanField(read_only=True)
    is_verified = serializers.BooleanField(read_only=True)
    follow_url = serializers.SerializerMethodField()
    is_advertiser = serializers.SerializerMethodField()

    class Meta:
        model = MyUser
        fields = ('id', 'account_url', 'username', 'email', 'full_name',
                  'bio', 'website', 'edu_email', 'gender', 'photo_set',
                  'profile_picture', 'follow_url', 'follower', 'is_active',
                  'is_admin', 'is_verified', 'date_joined', 'modified',
                  'is_advertiser',)

    def get_account_url(self, obj):
        request = self.context['request']
        kwargs = {'username': obj.username}
        return api_reverse('user_account_detail_api', kwargs=kwargs,
                           request=request)

    def get_follow_url(self, obj):
        request = self.context['request']
        kwargs = {'user_pk': obj.pk}
        return api_reverse('follow_create_api', kwargs=kwargs,
                           request=request)

    def get_photo_set(self, request):
        queryset = Photo.objects.own(request.pk)
        serializer = PhotoSerializer(queryset, context=self.context,
                                     many=True, read_only=True)
        return serializer.data

    def validate_edu_email(self, value):
        if value:
            value = value.lower()
            username, domain = value.split('@')
            if not domain.endswith('.edu'):
                raise serializers.ValidationError(
                    "Please use a valid university email.")
            # if domain not in APPROVED_UNIVERSITIES:
            #     raise serializers.ValidationError(
            #         "Sorry, this university isn't registered with us yet. "
            #         "Email us to get it signed up! universities@obystudio.com")
            return value
        else:
            pass

    def get_is_advertiser(self, request):
        return Advertiser.objects.filter(
            user__username=request.username, is_active=True).exists()
[ "jphalisnj@gmail.com" ]
jphalisnj@gmail.com
d9837f06f45adda0950e7aa3239b9d6903b7d8cb
48dffe77dd08813fe644d5130ff49a5a8b3d2bb6
/venv/Scripts/futurize-script.py
4353467a399ddd5449b17e602580add419bc19ba
[]
no_license
mattdickers/MFD
e62b5ea0db1e1e10c451a850bc378d977bf4d097
374b0bd07ae28ca64d7909f29a0309abac54de2f
refs/heads/master
2022-02-08T08:51:54.845939
2019-08-15T16:51:21
2019-08-15T16:51:21
188,462,548
0
0
null
null
null
null
UTF-8
Python
false
false
418
py
#!C:\Users\mattd\Desktop\MFD\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.17.1','console_scripts','futurize'
__requires__ = 'future==0.17.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('future==0.17.1', 'console_scripts', 'futurize')()
    )
[ "mattdickers@gmail.com" ]
mattdickers@gmail.com
262247848853b2f40335bfbbd91c6fde6c4a696a
54e1a6322d37e264b95f7b59d60480c23fda545e
/book_rating_api/wsgi.py
e2b9ceeb95f6a7968150b697988cefabf8eb99ce
[]
no_license
Last-dreamer/Book-Rating-App
269777c50f3c4fc11bb11b56bdcc9caecfa650c0
80d14e1e56b1d44abf7e9e1861d042cbc5ce01c5
refs/heads/master
2022-06-28T20:13:12.028725
2020-05-10T21:52:03
2020-05-10T21:52:03
261,874,418
1
0
null
2020-05-10T21:52:05
2020-05-06T20:44:05
Python
UTF-8
Python
false
false
408
py
""" WSGI config for book_rating_api project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "book_rating_api.settings") application = get_wsgi_application()
[ "asim0000.ak@gmail.com" ]
asim0000.ak@gmail.com
406d38cb6672ca784d26ca77fb9a5119682d990f
d720345cabe437161ec2060ab53dbc8b3a93c9ae
/myapp/urls.py
adac4dce0e4adba0f1c8cbc8e44b1749319e9af7
[]
no_license
azmanabdlh/django-auth-code
5cef46b33f0d61c8b8fe7868830ac3767ad51973
15502069f9a45b4421fe4e22fdfdf01ee9c83b8b
refs/heads/master
2023-03-17T18:56:26.589536
2021-03-05T08:37:48
2021-03-05T08:46:38
344,747,874
0
0
null
null
null
null
UTF-8
Python
false
false
296
py
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', TemplateView.as_view(template_name='index.html')),

    # module apps
    path('', include('account.urls'))
]
[ "azmanabdlh@yahoo.com" ]
azmanabdlh@yahoo.com
6065ba5b60f210b815ce57b5b1d263bbaaf0bca0
0722ff43cd5b64d619122beecf1eee1d56d9fdc3
/blog/forms.py
7f4b95cb524f64f8336f997f1a7cb4bcb8111c56
[]
no_license
ttaerrim/studyCRUDfirstweek
cee9d2638cdbbc940f9e70179ace2c70ae89b8de
46ac70b48c814b184960539972bcba5b9e85e2b8
refs/heads/master
2022-12-10T12:12:58.557801
2019-07-24T07:14:25
2019-07-24T07:14:25
195,625,277
0
0
null
2022-12-08T05:55:17
2019-07-07T07:59:07
JavaScript
UTF-8
Python
false
false
443
py
from django import forms

from .models import Blog, Comment


class BlogForm(forms.ModelForm):
    class Meta:
        model = Blog
        fields = ['title', 'body']


class CommentForm(forms.ModelForm):
    # text = forms.TextInput(label='댓글')

    class Meta:
        model = Comment
        fields = ['text']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['text'].label = '댓글'
[ "ltr0121@naver.com" ]
ltr0121@naver.com
2ab83f6e57a277e5962dea39ecf1dacb75b86a2e
f2cb9b54e51e693e1a1f1c1b327b5b40038a8fbe
/src/bin/shipyard_airflow/tests/unit/control/conftest.py
7f5b0100c9300d2eb866dd8ceba3d8f0359325cb
[ "Apache-2.0" ]
permissive
airshipit/shipyard
869b0c6d331e5b2d1c15145aee73397184290900
81066ae98fe2afd3a9c8c5c8556e9438ac47d5a2
refs/heads/master
2023-08-31T11:46:13.662886
2023-07-01T06:42:55
2023-08-30T16:04:47
133,844,902
6
2
Apache-2.0
2023-09-12T19:09:02
2018-05-17T17:07:36
Python
UTF-8
Python
false
false
1,006
py
# Copyright 2017 AT&T Intellectual Property. All other rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from falcon import testing import pytest from shipyard_airflow.control.start_shipyard import start_shipyard @pytest.fixture() def api_client(): """Testing client for the Shipyard API""" cur_dir = os.path.dirname(__file__) filename = os.path.join(cur_dir, 'test.conf') return testing.TestClient( start_shipyard(default_config_files=[filename]) )
[ "bryan.strassner@gmail.com" ]
bryan.strassner@gmail.com
e386497fed6b4e99ed16997d4577350cdf94eeb4
67a7c314fc99d9cd7a677fcb6bc2b6dfa20a9cff
/spambayes-1.0.4/Outlook2000/oastats.py
c1d415965921fde205934bc289d41cf5bd1794ce
[ "LicenseRef-scancode-unknown-license-reference", "Python-2.0" ]
permissive
Xodarap/Eipi
7ebbb9fd861fdb411c1e273ea5d2a088aa579930
d30997a737912e38316c198531f7cb9c5693c313
refs/heads/master
2016-09-11T06:28:01.333832
2011-05-03T15:35:20
2011-05-03T15:35:20
1,367,645
0
0
null
null
null
null
UTF-8
Python
false
false
3,625
py
# oastats.py - Outlook Addin Stats

class Stats:
    def __init__(self, config):
        self.config = config
        self.Reset()

    def Reset(self):
        self.num_ham = self.num_spam = self.num_unsure = 0
        self.num_deleted_spam = self.num_deleted_spam_fn = 0
        self.num_recovered_good = self.num_recovered_good_fp = 0

    def RecordClassification(self, score):
        score *= 100  # same units as our config values.
        if score >= self.config.filter.spam_threshold:
            self.num_spam += 1
        elif score >= self.config.filter.unsure_threshold:
            self.num_unsure += 1
        else:
            self.num_ham += 1

    def RecordManualClassification(self, recover_as_good, score):
        score *= 100  # same units as our config values.
        if recover_as_good:
            self.num_recovered_good += 1
            # If we are recovering an item that is in the "spam" threshold,
            # then record it as a "false positive"
            if score > self.config.filter.spam_threshold:
                self.num_recovered_good_fp += 1
        else:
            self.num_deleted_spam += 1
            # If we are deleting as Spam an item that was in our "good" range,
            # then record it as a false neg.
            if score < self.config.filter.unsure_threshold:
                self.num_deleted_spam_fn += 1

    def GetStats(self):
        num_seen = self.num_ham + self.num_spam + self.num_unsure
        if num_seen == 0:
            return ["SpamBayes has processed zero messages"]

        chunks = []
        push = chunks.append
        perc_ham = 100.0 * self.num_ham / num_seen
        perc_spam = 100.0 * self.num_spam / num_seen
        perc_unsure = 100.0 * self.num_unsure / num_seen
        format_dict = dict(perc_spam=perc_spam, perc_ham=perc_ham,
                           perc_unsure=perc_unsure, num_seen=num_seen)
        format_dict.update(self.__dict__)

        push("SpamBayes has processed %(num_seen)d messages - " \
             "%(num_ham)d (%(perc_ham).0f%%) good, " \
             "%(num_spam)d (%(perc_spam).0f%%) spam " \
             "and %(num_unsure)d (%(perc_unsure).0f%%) unsure" % format_dict)
        if self.num_recovered_good:
            push("%(num_recovered_good)d message(s) were manually " \
                 "classified as good (with %(num_recovered_good_fp)d " \
                 "being false positives)" % format_dict)
        else:
            push("No messages were manually classified as good")
        if self.num_deleted_spam:
            push("%(num_deleted_spam)d message(s) were manually " \
                 "classified as spam (with %(num_deleted_spam_fn)d " \
                 "being false negatives)" % format_dict)
        else:
            push("No messages were manually classified as spam")
        return chunks


if __name__ == '__main__':
    class FilterConfig:
        unsure_threshold = 15
        spam_threshold = 85

    class Config:
        filter = FilterConfig()

    # processed zero
    s = Stats(Config())
    print "\n".join(s.GetStats())
    # No recovery
    s = Stats(Config())
    s.RecordClassification(.2)
    print "\n".join(s.GetStats())

    s = Stats(Config())
    s.RecordClassification(.2)
    s.RecordClassification(.1)
    s.RecordClassification(.4)
    s.RecordClassification(.9)
    s.RecordManualClassification(True, 0.1)
    s.RecordManualClassification(True, 0.9)
    s.RecordManualClassification(False, 0.1)
    s.RecordManualClassification(False, 0.9)
    print "\n".join(s.GetStats())
[ "eipi@mybox.(none)" ]
eipi@mybox.(none)
0753ae28da11b2b3e2b4b95791fe1e45d45f59fc
b4b3106c36558bb65ff865cf192003895c2ff25d
/ros_bag_utils/bag2video/bag2video_16bit.py
38fe20e2c2e135b3b5b21eed7c7d628921313442
[ "BSD-3-Clause" ]
permissive
HenryZh47/my_utils
fd19ff2638d0784ebb74bd4259b6df8154378432
d21c49d862451b186168b3fb8a4583e07f4705e8
refs/heads/main
2023-05-31T07:36:49.256747
2021-05-27T03:42:36
2021-05-27T03:42:36
300,409,839
0
0
null
null
null
null
UTF-8
Python
false
false
5,001
py
#!/usr/bin/env python
from __future__ import division

import rosbag, rospy, numpy as np
import sys, os, cv2, glob
from itertools import izip, repeat
import argparse

# try to find cv_bridge:
try:
    from cv_bridge import CvBridge
except ImportError:
    # assume we are on an older ROS version, and try loading the dummy manifest
    # to see if that fixes the import error
    try:
        import roslib; roslib.load_manifest("bag2video")
        from cv_bridge import CvBridge
    except:
        print "Could not find ROS package: cv_bridge"
        print "If ROS version is pre-Groovy, try putting this package in ROS_PACKAGE_PATH"
        sys.exit(1)

def get_info(bag, topic=None, start_time=rospy.Time(0), stop_time=rospy.Time(sys.maxint)):
    size = (0, 0)
    times = []

    # read the first message to get the image size
    msg = bag.read_messages(topics=topic).next()[1]
    size = (msg.width, msg.height)

    # now read the rest of the messages for the rates
    iterator = bag.read_messages(topics=topic, start_time=start_time, end_time=stop_time)  # , raw=True)
    for _, msg, _ in iterator:
        time = msg.header.stamp
        times.append(time.to_sec())
        size = (msg.width, msg.height)
    diffs = 1 / np.diff(times)
    return np.median(diffs), min(diffs), max(diffs), size, times

def calc_n_frames(times, precision=10):
    # the smallest interval should be one frame, larger intervals more
    intervals = np.diff(times)
    return np.int64(np.round(precision * intervals / min(intervals)))

def write_frames(bag, writer, total, clahe_obj, topic=None, nframes=repeat(1),
                 start_time=rospy.Time(0), stop_time=rospy.Time(sys.maxint),
                 viz=False, encoding='bgr8'):
    MIN_INTENSITY = 16000
    MAX_INTENSITY = 25000
    bridge = CvBridge()
    if viz:
        cv2.namedWindow('win')
    count = 1
    iterator = bag.read_messages(topics=topic, start_time=start_time, end_time=stop_time)
    for (topic, msg, time), reps in izip(iterator, nframes):
        print '\rWriting frame %s of %s at time %s' % (count, total, time),
        # henryzh47: cv bridge has issue with 16UC1
        msg.encoding = 'mono16'
        img = np.asarray(bridge.imgmsg_to_cv2(msg, 'mono16'))
        # cap to min and max
        img = np.clip(img, MIN_INTENSITY, MAX_INTENSITY)
        img = img - MIN_INTENSITY
        img = img / (MAX_INTENSITY - MIN_INTENSITY)
        img = (img * 255).astype(np.uint8)
        img = clahe_obj.apply(img)
        # img = cv2.equalizeHist(img)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        for rep in range(reps):
            writer.write(img)
        imshow('win', img)
        count += 1

def imshow(win, img):
    cv2.imshow(win, img)
    cv2.waitKey(1)

def noshow(win, img):
    pass

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Extract and encode video from bag files.')
    parser.add_argument('--outfile', '-o', action='store', default=None,
                        help='Destination of the video file. Defaults to the location of the input file.')
    parser.add_argument('--precision', '-p', action='store', default=10, type=int,
                        help='Precision of variable framerate interpolation. Higher numbers '
                             'match the actual framerate better, but result in larger files '
                             'and slower conversion times.')
    parser.add_argument('--viz', '-v', action='store_true', help='Display frames in a GUI window.')
    parser.add_argument('--start', '-s', action='store', default=rospy.Time(0), type=rospy.Time,
                        help='Rostime representing where to start in the bag.')
    parser.add_argument('--end', '-e', action='store', default=rospy.Time(sys.maxint), type=rospy.Time,
                        help='Rostime representing where to stop in the bag.')
    parser.add_argument('--encoding', choices=('rgb8', 'bgr8', 'mono8'), default='bgr8',
                        help='Encoding of the deserialized image.')
    parser.add_argument('topic')
    parser.add_argument('bagfile')

    args = parser.parse_args()

    if not args.viz:
        imshow = noshow

    for bagfile in glob.glob(args.bagfile):
        print bagfile
        outfile = args.outfile
        if not outfile:
            outfile = os.path.join(*os.path.split(bagfile)[-1].split('.')[:-1]) + '.avi'
        bag = rosbag.Bag(bagfile, 'r')
        print 'Calculating video properties'
        rate, minrate, maxrate, size, times = get_info(bag, args.topic, start_time=args.start, stop_time=args.end)
        nframes = calc_n_frames(times, args.precision)
        print(size)
        writer = cv2.VideoWriter(outfile, cv2.VideoWriter_fourcc(*'DIVX'), np.ceil(maxrate * args.precision), size)
        print 'Writing video'
        my_clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        write_frames(bag, writer, len(times), my_clahe, topic=args.topic, nframes=nframes,
                     start_time=args.start, stop_time=args.end, encoding=args.encoding)
        writer.release()
        print '\n'
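The 16-bit-to-8-bit conversion in write_frames is a fixed intensity window followed by CLAHE. A minimal standalone sketch of just the windowing step, assuming only numpy and reusing the script's hard-coded thermal bounds (the synthetic frame is invented for illustration):

import numpy as np

MIN_INTENSITY = 16000
MAX_INTENSITY = 25000


def window_to_uint8(img16):
    """Clip a 16-bit image to [MIN, MAX] and rescale linearly to 8-bit."""
    img = np.clip(img16.astype(np.float64), MIN_INTENSITY, MAX_INTENSITY)
    img = (img - MIN_INTENSITY) / (MAX_INTENSITY - MIN_INTENSITY)
    return (img * 255).astype(np.uint8)


# a synthetic 16-bit frame
frame = np.random.randint(0, 2**16, size=(4, 4), dtype=np.uint16)
print(window_to_uint8(frame))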
[ "hzhang0407@gmail.com" ]
hzhang0407@gmail.com
38a627238172ddbc69005395e86535c3845cf197
62c78c7289d6c94ffa959c40e9a24dd83a4f6037
/shops/admin.py
a410ba2f0ec7aae22ea6f232b7434e9bb2cdefc5
[]
no_license
geelweb/geodjango-tests
d41f808b0ac79dbc5167e6a07a0c655065c8dcd3
ec24cef70de1f7d7c832f5c3a22c1ed9f1ed2132
refs/heads/master
2021-01-22T03:13:33.957737
2014-05-05T19:10:01
2014-05-05T19:10:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
119
py
from django.contrib import admin

from shops.models import Shop

# Register your models here.
admin.site.register(Shop)
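An equivalent decorator-based registration, sketched here on the assumption that a custom ModelAdmin may be wanted later (the ShopAdmin name is illustrative, not part of the project):

from django.contrib import admin

from shops.models import Shop


@admin.register(Shop)
class ShopAdmin(admin.ModelAdmin):
    pass  # add list_display, search_fields, etc. as the model grows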
[ "guillaume@geelweb.org" ]
guillaume@geelweb.org
c84323a5316bab0165f80ee04c6a1fe80ae335b6
4a5f11b55e23999a82b62f5c72b44e9a36d24f63
/simplemooc/core/views.py
4cd3e744a004ade4daddc01dec6fba412395974c
[]
no_license
diogo-alves/simplemooc
dca62bfcb2ea6357a551a5760778537f083b675c
cfec59f99888e4e23d41f020ff06bfdf39f70203
refs/heads/master
2022-05-10T10:32:18.686313
2019-06-04T19:30:43
2019-06-04T19:30:43
190,260,470
0
0
null
2022-04-22T21:34:44
2019-06-04T18:46:43
Python
UTF-8
Python
false
false
192
py
from django.shortcuts import render


def home(request):
    return render(request, 'home.html', {"usuario": "Diogo Alves"})


def contact(request):
    return render(request, 'contact.html')
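These views still need URL patterns to be reachable; a minimal wiring sketch, assuming Django 2.0+ and a urls.py alongside this module (the route paths and names are the editor's assumption, not taken from the project):

from django.urls import path

from simplemooc.core import views

urlpatterns = [
    path('', views.home, name='home'),
    path('contato/', views.contact, name='contact'),
]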
[ "diogo.alves.ti@gmail.com" ]
diogo.alves.ti@gmail.com
4ffd9449006d47387e59278291b5f49950c2c0b0
3f4b535e537666b669b4cfbfca05c7529d2fb631
/Algorithms/distinct_letters.py
f013f1888b567699951ea216cd4316e4f20be4e5
[]
no_license
iliachigogidze/Python
ded0a78a1751a536fcdf1fd864fc296ef52f6164
6db759b3ee4f4b866421b2cb3a775b7aec32b0c9
refs/heads/master
2020-04-09T08:15:07.107069
2019-03-11T10:35:23
2019-03-11T10:35:23
160,187,366
0
0
null
null
null
null
UTF-8
Python
false
false
430
py
'''
17. Given list of distinct letters, generate all possible words
so that each letter is used only once per word.

Example:
['b', 'a', 'c'] ->
abc
acb
bac
bca
cab
cba
'''

def perms(s):
    if len(s) == 1:
        return [s]
    result = []
    for i, v in enumerate(s):
        print('i: ', i, ' || v: ', v)
        result += [v + p for p in perms(s[:i] + s[i + 1:])]
        print(result)
    return result

print(perms('abc'))
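Because the letters are distinct, perms enumerates exactly the permutations of the string; a quick self-contained cross-check against the standard library:

from itertools import permutations


def perms_stdlib(s):
    return [''.join(p) for p in permutations(s)]


assert sorted(perms_stdlib('abc')) == ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']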
[ "iliachigogidze@gmail.com" ]
iliachigogidze@gmail.com
8cc8bfbdb6d67331445fce9890576672e1983a43
227b20dbbd9b8270910029e4158f501fee0ca677
/applications/models/trade_pool.py
95cd1e63700be18d2611cdfc13fc7042735dbb28
[]
no_license
mattguozhijun/flsk_nft
3bc926cff868a5501d101e42ab45780cbd5440e9
66518feab1ea353585bace3c511595e9a77e80ff
refs/heads/master
2023-08-24T08:59:39.003627
2021-10-26T04:17:26
2021-10-26T04:17:26
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,104
py
# -*- coding: utf-8 -*-
# @Time     : 2021/10/22 10:30 AM
# @Author   : HL
# @Email    : 1277078753@qq.com
# @Describe :
import datetime

from applications.extensions import db


class TradePool(db.Model):
    __tablename__ = 'trade_pool'
    id = db.Column(db.Integer, primary_key=True)
    ico_id = db.Column(db.Integer, db.ForeignKey("inital_digital_offering.id"), comment='ICO primary key')
    min = db.Column(db.Integer, comment='minimum subscription amount')
    max = db.Column(db.Integer, comment='maximum subscription amount')
    type = db.Column(db.String(20), comment='subscription type (public/private)')
    market_value = db.Column(db.Integer, comment='market value')
    circulation = db.Column(db.Integer, comment='circulating supply at opening')
    subscribe = db.Column(db.Integer, comment='total subscription amount')
    unlock = db.Column(db.Text, comment='unlock rules')
    start_at = db.Column(db.DateTime, default=datetime.datetime.now, comment="start time")
    end_at = db.Column(db.DateTime, default=datetime.datetime.now, comment="end time")
    create_at = db.Column(db.DateTime, default=datetime.datetime.now, comment="creation time")
[ "906897944@qq.com" ]
906897944@qq.com
8c02f8221d5f8f8346963f364be23909f8d5991a
960eee7be3078f96e75ecd7f1997a32cef9f12fd
/models/model_ops.py
427a69aac36c37d85ebfc66005b4e37f6bece736
[ "MIT" ]
permissive
wslc1314/atec_nlp_sim
0c7c24aeee5ae13a8d0f7824fd0c557044fae495
6294295a76a65e8535f31d295088f195ba0eadf1
refs/heads/master
2020-03-25T14:48:25.027882
2018-12-17T09:29:37
2018-12-17T09:29:37
143,865,917
2
1
null
null
null
null
UTF-8
Python
false
false
13,472
py
import tensorflow as tf


def entry_stop_gradients(target, mask):
    mask_h = 1 - mask
    mask = tf.cast(mask, dtype=target.dtype)
    mask_h = tf.cast(mask_h, dtype=target.dtype)
    return tf.stop_gradient(mask_h * target) + mask * target


def embedded(X, embeddings, oov_mask, trainable, scope="embedded", reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        # embedding_matrix = tf.get_variable(name="embedding_matrix", trainable=trainable,
        #                                    shape=embeddings.shape, dtype=tf.float32,
        #                                    initializer=tf.constant_initializer(value=embeddings))
        embedding_matrix = tf.get_variable(name="embedding_matrix", trainable=True,
                                           shape=embeddings.shape, dtype=tf.float32,
                                           initializer=tf.constant_initializer(value=embeddings))
        if not trainable:
            embedding_matrix = entry_stop_gradients(embedding_matrix, oov_mask)
        X_embedded = tf.nn.embedding_lookup(embedding_matrix, X)
    return X_embedded, embeddings.shape[1]


def build_loss(labels, logits, focal=True, alpha=0.75, gamma=2):
    logits = tf.reshape(logits, [-1, ])
    ce_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(labels, dtype=tf.float32), logits=logits)
    if focal:
        probs = tf.sigmoid(logits)
        alpha_t = tf.ones_like(logits) * alpha
        alpha_t = tf.where(labels > 0, alpha_t, 1.0 - alpha_t)
        probs_t = tf.where(labels > 0, probs, 1.0 - probs)
        weight_matrix = alpha_t * tf.pow((1.0 - probs_t), gamma)
        loss = tf.reduce_sum(weight_matrix * ce_loss)
    else:
        loss = tf.reduce_sum(ce_loss)
    return loss


def build_summaries():
    loss, f1, acc, pre, recall = None, None, None, None, None
    summaries = tf.Summary()
    summaries.value.add(tag='Loss', simple_value=loss)
    summaries.value.add(tag='F1-score', simple_value=f1)
    summaries.value.add(tag='Accuracy', simple_value=acc)
    summaries.value.add(tag='Precision', simple_value=pre)
    summaries.value.add(tag='Recall', simple_value=recall)
    return summaries


def attention_han(inputs, attention_size, initializer=tf.glorot_uniform_initializer(),
                  scope="attention_han", reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        hidden_size = inputs.shape[-1].value
        w_omega = tf.get_variable(name="weights", shape=[hidden_size, attention_size], dtype=tf.float32,
                                  initializer=initializer)
        b_omega = tf.get_variable(name="biases", shape=[attention_size, ], dtype=tf.float32,
                                  initializer=tf.zeros_initializer())
        u_omega = tf.get_variable(name="context_vector", shape=[attention_size, ], dtype=tf.float32,
                                  initializer=initializer)
        with tf.name_scope('v'):
            v = tf.tanh(tf.tensordot(inputs, w_omega, axes=1) + b_omega)
        vu = tf.tensordot(v, u_omega, axes=1, name='vu')  # (B,T) shape
        alphas = tf.nn.softmax(vu, name='alphas')  # (B,T) shape
        output = tf.reduce_sum(v * tf.expand_dims(alphas, -1), axis=1)
    return output, attention_size


def gru(inputs, inputs_len, state_size_list, return_mode,
        initializer=tf.glorot_uniform_initializer(), keep_prob=1.0,
        scope='gru', reuse=False):
    """:param return_mode:
    0 - return the full sequence
    1 - return the output of the last time step of the sequence
    """
    assert return_mode in [0, 1], "Invalid return mode!"
    with tf.variable_scope(scope, reuse=reuse):
        batch_size, seq_len = tf.shape(inputs)[0], tf.shape(inputs)[1]
        cells_fw = []
        for i in range(len(state_size_list)):
            state_size = state_size_list[i]
            cell_fw = tf.nn.rnn_cell.GRUCell(state_size, kernel_initializer=initializer)
            cell_fw = tf.nn.rnn_cell.DropoutWrapper(cell_fw, output_keep_prob=keep_prob)
            cells_fw.append(cell_fw)
        if len(cells_fw) > 1:
            cells_fw = tf.nn.rnn_cell.MultiRNNCell(cells_fw)
        else:
            cells_fw = cells_fw[0]
        rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=cells_fw, inputs=inputs,
                                                     sequence_length=inputs_len, dtype=tf.float32)
        rnn_outputs_dim = state_size_list[-1]
        if return_mode == 0:
            pass
        else:
            rnn_outputs = tf.gather_nd(params=rnn_outputs,
                                       indices=tf.stack([tf.range(batch_size), inputs_len - 1], axis=1))
    return rnn_outputs, rnn_outputs_dim


def bi_gru(inputs, inputs_len, state_size_list, return_mode,
           initializer=tf.glorot_uniform_initializer(), keep_prob=1.0,
           scope='bi_gru', reuse=False):
    """:param return_mode:
    0 - return the forward and backward sequences separately
    1 - return the concatenated sequence
    2 - return the output of the last time step of the concatenated sequence
    """
    assert return_mode in [0, 1, 2], "Invalid return mode!"
    with tf.variable_scope(scope, reuse=reuse):
        batch_size, seq_len = tf.shape(inputs)[0], tf.shape(inputs)[1]
        cells_fw, cells_bw = [], []
        for i in range(len(state_size_list)):
            state_size = state_size_list[i]
            cell_fw = tf.nn.rnn_cell.GRUCell(state_size, kernel_initializer=initializer)
            cell_bw = tf.nn.rnn_cell.GRUCell(state_size, kernel_initializer=initializer)
            cell_fw = tf.nn.rnn_cell.DropoutWrapper(cell_fw, output_keep_prob=keep_prob)
            cell_bw = tf.nn.rnn_cell.DropoutWrapper(cell_bw, output_keep_prob=keep_prob)
            cells_fw.append(cell_fw)
            cells_bw.append(cell_bw)
        if len(cells_fw) > 1:
            cells_fw = tf.nn.rnn_cell.MultiRNNCell(cells_fw)
            cells_bw = tf.nn.rnn_cell.MultiRNNCell(cells_bw)
        else:
            cells_fw, cells_bw = cells_fw[0], cells_bw[0]
        (rnn_outputs_fw, rnn_outputs_bw), final_state = \
            tf.nn.bidirectional_dynamic_rnn(cell_fw=cells_fw, cell_bw=cells_bw, inputs=inputs,
                                            sequence_length=inputs_len, dtype=tf.float32)
        rnn_outputs_dim = 2 * state_size_list[-1]
        if return_mode == 0:
            rnn_outputs = (rnn_outputs_fw, rnn_outputs_bw)
        elif return_mode == 1:
            rnn_outputs = tf.concat([rnn_outputs_fw, rnn_outputs_bw], axis=-1)
        else:
            rnn_outputs = tf.concat([rnn_outputs_fw, rnn_outputs_bw], axis=-1)
            rnn_outputs = tf.gather_nd(params=rnn_outputs,
                                       indices=tf.stack([tf.range(batch_size), inputs_len - 1], axis=1))
    return rnn_outputs, rnn_outputs_dim


def conv_with_max_pool(X_embedded, filter_size_list, filter_num, with_max_pooling,
                       activation=tf.nn.selu, initializer=tf.glorot_uniform_initializer(),
                       scope="conv_with_max_pool", reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        batch_size, seq_len = tf.shape(X_embedded)[0], tf.shape(X_embedded)[1]
        h_total = []
        for filter_size in filter_size_list:
            h = tf.layers.conv1d(inputs=X_embedded, filters=filter_num, kernel_size=filter_size,
                                 strides=1, padding='same', data_format='channels_last',
                                 activation=activation, use_bias=True, kernel_initializer=initializer)
            if with_max_pooling:
                h = tf.reduce_max(h, axis=-2)
            h_total.append(h)
        out_dim = filter_num * len(h_total)
        if len(h_total) > 1:
            h = tf.concat(h_total, axis=-1)
            if with_max_pooling:
                h = tf.reshape(h, shape=[batch_size, out_dim])
            else:
                h = tf.reshape(h, shape=[batch_size, seq_len, out_dim])
        else:
            h = h_total[0]
    return h, out_dim


def attention_to(Q, A, initializer=tf.glorot_uniform_initializer(), scope="attention_to", reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        Q_dim = Q.shape[-1].value
        A_dim = A.shape[-1].value
        assert Q_dim == A_dim
        weights = tf.get_variable(name="weights", shape=[Q_dim, A_dim], dtype=tf.float32,
                                  initializer=initializer)
        biases = tf.get_variable(name="biases", shape=[A_dim, ], dtype=tf.float32,
                                 initializer=tf.zeros_initializer())
        G = tf.nn.softmax(
            tf.matmul(
                A,
                tf.map_fn(lambda x: tf.matmul(x, weights) + biases, Q),
                transpose_b=True))
        H = tf.matmul(G, Q)
    return H, A_dim


def linear_transform(inputs, out_dim, activation=None, initializer=tf.glorot_uniform_initializer(),
                     scope="linear_transform", reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        in_dim = inputs.shape[-1].value
        W = tf.get_variable(name="weights", shape=[in_dim, out_dim], dtype=tf.float32,
                            initializer=initializer)
        b = tf.get_variable(name="biases", shape=[out_dim], dtype=tf.float32,
                            initializer=tf.zeros_initializer())
        outputs = tf.tensordot(inputs, W, axes=1) + b
    if activation is None:
        return outputs, out_dim
    else:
        return activation(outputs), out_dim


def crnn(X_embedded, X_len, filter_size_list=(3,), filter_num=128, state_size_list=(128,),
         activation=tf.nn.selu, initializer=tf.glorot_uniform_initializer(), keep_prob=1.0,
         scope="crnn", reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        h, h_dim = conv_with_max_pool(X_embedded, filter_size_list, filter_num, False,
                                      activation, initializer, "conv")
        batch_size, seq_len = tf.shape(X_embedded)[0], tf.shape(X_embedded)[1]
        h = tf.reshape(h, shape=[batch_size, seq_len, h_dim])
        out, out_dim = bi_gru(h, X_len, state_size_list, 2, initializer, keep_prob, "bi_gru")
    return out, out_dim


def rcnn(X_embedded, X_len, state_size_list=(128,), hidden_size=256,
         activation=tf.nn.tanh, initializer=tf.glorot_uniform_initializer(), keep_prob=1.0,
         scope="rcnn", reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        batch_size = tf.shape(X_embedded)[0]
        cells_fw, cells_bw, cells_fw_init, cells_bw_init = [], [], [], []
        for i in range(len(state_size_list)):
            state_size = state_size_list[i]
            cell_fw = tf.nn.rnn_cell.BasicRNNCell(num_units=state_size)
            cell_bw = tf.nn.rnn_cell.BasicRNNCell(num_units=state_size)
            init_fw_ = tf.get_variable(name="cell_fw_init_state_" + str(i), dtype=tf.float32,
                                       shape=[1, state_size], trainable=True, initializer=initializer)
            init_fw = tf.tile(init_fw_, multiples=[batch_size, 1])
            init_bw_ = tf.get_variable(name="cell_bw_init_state_" + str(i), dtype=tf.float32,
                                       shape=[1, state_size], trainable=True, initializer=initializer)
            init_bw = tf.tile(init_bw_, multiples=[batch_size, 1])
            cell_fw = tf.nn.rnn_cell.DropoutWrapper(cell_fw, output_keep_prob=keep_prob)
            cell_bw = tf.nn.rnn_cell.DropoutWrapper(cell_bw, output_keep_prob=keep_prob)
            cells_fw.append(cell_fw)
            cells_bw.append(cell_bw)
            cells_fw_init.append(init_fw)
            cells_bw_init.append(init_bw)
        if len(cells_fw) > 1:
            cells_fw = tf.nn.rnn_cell.MultiRNNCell(cells_fw)
            cells_bw = tf.nn.rnn_cell.MultiRNNCell(cells_bw)
            cells_fw_init = tf.nn.rnn_cell.MultiRNNCell(cells_fw_init)
            cells_bw_init = tf.nn.rnn_cell.MultiRNNCell(cells_bw_init)
        else:
            cells_fw, cells_bw, cells_fw_init, cells_bw_init = cells_fw[0], cells_bw[0], \
                                                               cells_fw_init[0], cells_bw_init[0]
        (rnn_outputs_fw, rnn_outputs_bw), final_state = \
            tf.nn.bidirectional_dynamic_rnn(cell_fw=cells_fw, cell_bw=cells_bw, inputs=X_embedded,
                                            sequence_length=X_len,
                                            initial_state_fw=cells_fw_init,
                                            initial_state_bw=cells_bw_init)
        rnn_outputs_fw = tf.concat([tf.expand_dims(cells_fw_init, axis=1), rnn_outputs_fw[:, :-1, :]], axis=1)
        rnn_outputs_bw = tf.concat([rnn_outputs_bw[:, 1:, :], tf.expand_dims(cells_bw_init, axis=1)], axis=1)
        h = tf.concat([rnn_outputs_fw, X_embedded, rnn_outputs_bw], axis=-1)
        h, h_dim = linear_transform(h, hidden_size, activation, initializer, "linear_transform")
        out = tf.reduce_max(h, axis=-2)
    return out, h_dim


def han(X_embedded, X_len, state_size_list=(64,), attention_dim=128,
        initializer=tf.glorot_uniform_initializer(), keep_prob=1.0,
        scope="han", reuse=False):
    """
    Only 1-level attention is used here.
    """
    with tf.variable_scope(scope, reuse=reuse):
        h, h_dim = bi_gru(X_embedded, X_len, state_size_list, 1, initializer, keep_prob, "bi_gru")
        return attention_han(h, attention_dim, initializer, "attention")
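build_loss implements the focal-loss weighting alpha_t * (1 - p_t)^gamma on top of sigmoid cross-entropy. A plain-Python illustration of how that weight behaves (a sketch of the arithmetic only, not of the TensorFlow graph):

def focal_weight(prob, label, alpha=0.75, gamma=2):
    """Weight applied to the cross-entropy term of one example."""
    alpha_t = alpha if label > 0 else 1.0 - alpha
    p_t = prob if label > 0 else 1.0 - prob
    return alpha_t * (1.0 - p_t) ** gamma


# a confidently correct positive contributes almost nothing ...
print(focal_weight(0.95, 1))  # 0.001875
# ... while a badly misclassified positive keeps most of its loss
print(focal_weight(0.10, 1))  # 0.6075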
[ "lichen25@360buyad.local" ]
lichen25@360buyad.local
28cab5cfa39e380d081e71684ff12c838c04ba2f
caab7b34857d090a3c0031665b9b87ada076e459
/src/lab0/client.py
57118035ff4763b299679c13a51ccea758c62f06
[]
no_license
PeyWn/tddd25-dist-sys
eeac81e0a4d99ed10e08b73679578e3d09e7558b
69519d05da783df94e1d7aaa86fdcd6ed135cab2
refs/heads/master
2023-03-16T13:25:55.890120
2020-03-09T08:26:26
2020-03-09T08:26:26
348,323,881
1
0
null
null
null
null
UTF-8
Python
false
false
2,286
py
#!/usr/bin/env python3

# -----------------------------------------------------------------------------
# Distributed Systems (TDDD25)
# -----------------------------------------------------------------------------
# Author: Sergiu Rafiliu (sergiu.rafiliu@liu.se)
# Modified: 24 July 2013
#
# Copyright 2012 Linkoping University
# -----------------------------------------------------------------------------

"""Client reader/writer for a fortune database."""

import argparse
import sys

sys.path.append("../modules")
from Server.database import Database

# -----------------------------------------------------------------------------
# Initialize and read the command line arguments
# -----------------------------------------------------------------------------

description = """\
Client for a fortune database. It reads a random fortune from the database.\
"""

parser = argparse.ArgumentParser(description=description)
parser.add_argument(
    "-w", "--write", metavar="FORTUNE", dest="fortune",
    help="Write a new fortune to the database."
)
parser.add_argument(
    "-i", "--interactive", action="store_true", dest="interactive",
    default=False, help="Interactive session with the fortune database."
)
opts = parser.parse_args()

# -----------------------------------------------------------------------------
# The main program
# -----------------------------------------------------------------------------

# Create the database object
db = Database("dbs/fortune.db")

if not opts.interactive:
    # Run in the normal mode
    if opts.fortune is not None:
        db.write(opts.fortune)
    else:
        print(db.read())
else:
    # Run in the interactive mode
    def menu():
        print("""\
Choose one of the following commands:
    r            ::  read a random fortune from the database,
    w <FORTUNE>  ::  write a new fortune into the database,
    h            ::  print this menu,
    q            ::  exit.\
""")

    command = ""
    menu()
    while command != "q":
        command = input("Command> ")
        if command == "r":
            print(db.read())
        elif (len(command) > 1 and command[0] == "w" and
                command[1] in [" ", "\t"]):
            db.write(command[2:].strip())
        elif command == "h":
            menu()
[ "bjohv276@student.liu.se" ]
bjohv276@student.liu.se
c0e5467451825393d6fb9a5612e6e63b9b4bec71
097408c75211e529d5ed64efea9549485a5103e7
/extjs_lib/old/form.py
313d96e3a444cbe1af0a7164291a28902b48a7b3
[]
no_license
chrislyon/django-test-extjs
bcec59a0df9b991d104f6d77fbdab025abfcb74e
66f5a54ad9120fc28e90a349fdc020442728c816
refs/heads/master
2021-01-22T18:18:27.440734
2013-11-17T16:49:50
2013-11-17T16:49:50
33,318,337
0
0
null
null
null
null
UTF-8
Python
false
false
5,892
py
### ------------------
### EXTJS FORM
### ------------------

import sys
from django.utils import simplejson


class ExtForm(object):
    """ The class for the forms """
    def __init__(self):
        self.titre = "TITRE A CHANGER"
        self.width = 600
        self.height = 300
        self.bodyPadding = 10
        self.renderTo = 'Ext.getBody()'
        self.mode = 'cr'
        self.url = '/'
        self.defaultType = 'textfield'
        self.zones = []
        self.data = []

    def add_zone(self, zone):
        self.zones.append(zone)

    def liste_zones(self):
        return ",".join([c.to_form() for c in self.zones])

    def render(self):
        F_DEBUT = """
Ext.require('Ext.form.Panel');
Ext.require('Ext.form.field.Date');
Ext.onReady(function() {
    var CSRF_TOKEN = Ext.util.Cookies.get('csrftoken');
"""
        S = ""
        DEF_STORE = """
var %s = Ext.create('Ext.data.Store', {
    fields: ['value', 'display'],
    data : [ %s ]
});
"""
        combos = [z for z in self.zones if z.xtype == 'combo']
        for c in combos:
            store_name = "ST_%s" % c.name
            store_data = c.data_to_json()
            S += DEF_STORE % (store_name, store_data)

        S += "Ext.create('Ext.form.Panel', {"
        S += "renderTo: %s, " % self.renderTo
        S += "url: '%s', " % self.url
        S += "height: %s, " % self.height
        S += "width: %s, " % self.width
        S += "bodyPadding: %s, " % self.bodyPadding
        S += "title: '%s', " % self.titre
        S += "defaultType: '%s', " % self.defaultType
        S += """
items: [
"""
        S += self.liste_zones()
        S += """
],
baseParams: {'csrfmiddlewaretoken':CSRF_TOKEN},
buttons: [
    {
        text: 'Submit',
        handler: function() {
            var form = this.up('form').getForm(); // get the basic form
            if (form.isValid()) {
                // make sure the form contains valid data before submitting
                form.submit({
                    success: function(form, action) {
                        Ext.Msg.alert('Success', action.result.msg);
                    },
                    failure: function(form, action) {
                        Ext.Msg.alert('Failed', action.result.msg);
                    }
                });
            } else { // display error alert if the data is invalid
                Ext.Msg.alert('Invalid Data', 'Please correct form errors.')
            }
        }
    }
]
});
"""
        F_FIN = "});"
        return F_DEBUT + S + F_FIN


class Zone(object):
    """ Grid column """
    def __init__(self, name, **kwargs):
        self.name = name
        self.fieldLabel = kwargs.get('fieldLabel', 'Zone %s ' % name)
        self.width = kwargs.get('width', 100)
        self.hidden = kwargs.get('hidden', False)
        self.xtype = kwargs.get('xtype', None)
        self.data = kwargs.get('data', None)

    def data_to_json(self):
        T = ','.join(["{'%s':'%s'}" % d for d in self.data])
        # T = "{"+T+"}"
        return T

    def to_form(self):
        if self.xtype == "combo":
            d = """
Ext.create('Ext.form.field.ComboBox', {
    fieldLabel: 'Colour',
    store: ['Red', 'Yellow', 'Green', 'Brown', 'Blue', 'Pink', 'Black']
})
"""
            d = """
Ext.create('Ext.form.field.ComboBox', {
    fieldLabel: 'Colour',
    store: {
        fields: ['value', 'display'],
        data : [
            {value:'PRO',display:'PROFESSIONEL'},{value:'PERSO',display:'PERSONNEL'},
            {value:'VIP',display:'VIP'},{value:'AUTRE',display:'AUTRE'}
        ]
    },
    queryMode: 'local',
    displayField: 'display',
    valueField: 'value'
})
"""
            s = """
new Ext.form.ComboBox({
    fieldLabel: '%s',
    store: new Ext.data.Store({
        fields: ['value', 'display'],
        data : [ {'PRO':'PROFESSIONEL'},{'PERSO':'PERSONNEL'},{'VIP':'VIP'},{'AUTRE':'AUTRE'} ]
    }),
    queryMode: 'local',
    displayField: 'display',
    valueField: 'value',
    renderTo: Ext.getBody()
})
""" % (self.fieldLabel)
            f = """
{
    fieldLabel: '%s',
    name:'%s',
    xtype:'combo',
    store:ST_%s,
    queryMode: 'local',
    displayField: 'display',
    valueField: 'value',
}
""" % (self.fieldLabel, self.name, self.name)
            return d
        else:
            d = {'fieldLabel': self.fieldLabel, 'name': self.name}
            return simplejson.dumps(d)


def test():
    TYP_CTC = (
        ('PRO', 'PROFESSIONEL'),
        ('PERSO', 'PERSONNEL'),
        ('VIP', 'VIP'),
        ('AUTRE', 'AUTRE'),
    )
    # print simplejson.dumps(T)
    # sys.exit()
    f = ExtForm()
    f.add_zone(Zone('Nom'))
    f.add_zone(Zone('prenom', fieldLabel="Prenom"))
    f.add_zone(Zone('description', fieldLabel="Commentaire"))
    f.add_zone(Zone('typ_contact', fieldLabel="Type Contact", xtype='combo', data=TYP_CTC))
    print f.render()


if __name__ == "__main__":
    test()
[ "chris.lyon.tech@gmail.com" ]
chris.lyon.tech@gmail.com
fcf261b32e262a1295f13a98fe44e0389cecd98f
ac064be78dccedb03809c389dd3c66ce9b1a3fad
/main.py
3e94185dd2addb2f3ed6ef36b07bfd53986b2acb
[]
no_license
ltell/Tip-Percentages-by-Party-Size
ae73570683a02355df6834f3602d4d2da7c9b368
5c8c0ab4dc462f47d1fba2c3dd82bab6fbabe97c
refs/heads/master
2022-11-12T19:36:16.674188
2020-07-07T20:47:33
2020-07-07T20:47:33
277,919,023
0
0
null
null
null
null
UTF-8
Python
false
false
1,075
py
import csv

import numpy as np
import matplotlib.pyplot as plt

with open("tips.csv", "r") as file:
    data = csv.reader(file, delimiter=",")
    headers = next(data)
    data_list = list(data)

data_numpy = np.array(data_list)

size = data_numpy[:, 6]

# select every row (:), and grab index position 1. The CSV file holds strings,
# so convert to float in order to perform calculations.
tips = np.array(data_numpy[:, 1], dtype=float)

# select every row (:), and grab the 0 position index, which will pull the
# "total_bill" values from the dataset.
bills = np.array(data_numpy[:, 0], dtype=float)

tip_percentages = (tips / bills)
# print(tip_percentages)

print(f"The average bill amount is ${round(np.mean(bills), 2)}")
print(f"The median bill amount is ${round(np.median(bills), 2)}")
print(f"The smallest bill is ${round(np.min(bills), 2)}")
print(f"The largest bill is ${round(np.max(bills), 2)}")

plt.scatter(size, tip_percentages, color="orange")
plt.xlabel("Dinner Party Size")
plt.ylabel("Tip Percentage")
plt.title("Tip Percentages by Party Size")
plt.savefig("tip_percentages.png")
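The script computes tip_percentages but only plots it; the same reduction works for summary statistics. A self-contained sketch on hypothetical sample rows (the values are invented for illustration, not taken from tips.csv):

import numpy as np

bills = np.array([16.99, 10.34, 21.01])
tips = np.array([1.01, 1.66, 3.50])
tip_percentages = tips / bills

print(f"The average tip percentage is {round(np.mean(tip_percentages) * 100, 2)}%")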
[ "replituser@example.com" ]
replituser@example.com
8acb9e120c24d78cdb056a635da218f8c9670a05
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/otherforms/_humanest.py
48d7b67f12286175e2622199076e17231ea435b7
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
222
py
# class header
class _HUMANEST():

    def __init__(self,):
        self.name = "HUMANEST"
        self.definitions = ['human']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['human']
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
ccd957c3ecb292462a57712a6ed5c108ee2d01bd
1f0a9aa8cbe2c558fd2751eec29930ce86018f10
/twonicornweb/views/ss.py
4050a0cb868d90b00a66dd30e5b50e67e334f521
[ "Apache-2.0" ]
permissive
UnblockedByOps/twonicorn
26c7cda2b1f1365b32fbd771c79bf9d210d2033f
7e02f9a4de132856c41b539ec1e0483a74cbe9d5
refs/heads/master
2021-06-16T18:03:04.279144
2017-04-12T23:28:21
2017-05-12T20:24:44
32,044,752
0
1
null
null
null
null
UTF-8
Python
false
false
24,692
py
# Copyright 2015 CityGrid Media, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPInternalServerError
import logging
import re
import requests
from requests.auth import HTTPBasicAuth
import time
from twonicornweb.views import (
    site_layout,
    get_user,
    )
from twonicornweb.views.cp_application import (
    create_application,
    )
from twonicornweb.models import (
    DBSession,
    ArtifactType,
    JenkinsInstance,
    JenkinsTemplate,
    )

log = logging.getLogger(__name__)


class UserInput(object):

    def __init__(self,
                 project_type=None,
                 project_name=None,
                 nodegroup='SELF_SERVICE',
                 code_review=None,
                 autosnap=None,
                 job_server=None,
                 job_prefix=None,
                 git_repo_name=None,
                 git_code_repo=None,
                 git_code_repo_url=None,
                 git_conf_repo=None,
                 git_conf_repo_url=None,
                 job_review_name=None,
                 job_autosnap_name=None,
                 job_code_name=None,
                 job_conf_name=None,
                 job_rolling_restart_name=None,
                 job_review_url=None,
                 job_autosnap_url=None,
                 job_code_url=None,
                 job_conf_url=None,
                 job_rolling_restart_url=None,
                 job_ci_base_url=None,
                 job_abs=None,
                 job_abs_name=None,
                 job_abs_base_url=None,
                 job_abs_url=None,
                 deploy_id_code=None,
                 deploy_id_conf=None,
                 dir_app=None,
                 dir_conf=None,
                 app_id=None,
                 app_url=None,
                 ct_class=None):

        self.project_type = project_type
        self.project_name = project_name
        self.nodegroup = nodegroup
        self.code_review = code_review
        self.autosnap = autosnap
        self.job_server = job_server
        self.job_prefix = job_prefix
        self.git_repo_name = git_repo_name
        self.git_code_repo = git_code_repo
        self.git_code_repo_url = git_code_repo_url
        self.git_conf_repo = git_conf_repo
        self.git_conf_repo_url = git_conf_repo_url
        self.job_review_name = job_review_name
        self.job_autosnap_name = job_autosnap_name
        self.job_code_name = job_code_name
        self.job_conf_name = job_conf_name
        self.job_rolling_restart_name = job_rolling_restart_name
        self.job_review_url = job_review_url
        self.job_autosnap_url = job_autosnap_url
        self.job_code_url = job_code_url
        self.job_conf_url = job_conf_url
        self.job_rolling_restart_url = job_rolling_restart_url
        self.job_ci_base_url = job_ci_base_url
        self.job_abs = job_abs
        self.job_abs_name = job_abs_name
        self.job_abs_base_url = job_abs_base_url
        self.job_abs_url = job_abs_url
        self.deploy_id_code = deploy_id_code
        self.deploy_id_conf = deploy_id_conf
        self.dir_app = dir_app
        self.dir_conf = dir_conf
        self.app_id = app_id
        self.app_url = app_url
        self.ct_class = ct_class


def format_user_input(request, ui):

    ui.project_type = request.POST['project_type']
    ui.project_name = request.POST['project_name']
    ui.code_review = request.POST['code_review']
    ui.job_server = request.POST['job_server']
    ui.job_prefix = request.POST['job_prefix'].upper()

    try:
        ui.job_abs = request.POST['job_abs']
    except:
        pass

    try:
        ui.autosnap = request.POST['autosnap']
        ui.code_review = None
    except:
        pass

    try:
        ui.ct_class = request.POST['ct_class']
    except:
        pass

    # Convert camel case, spaces and dashes to underscore for job naming
    # and dir creation.
    a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
    convert = a.sub(r' \1', ui.project_name).lower().strip()
    convert = ' '.join(convert.split())
    convert = convert.replace(" ", "_")
    convert = convert.replace("-", "_")

    if ui.project_type == 'war':
        log.info("self service project type is war")
        ui.dir_app = '/app/tomcat/webapp'
        ui.dir_conf = '/app/tomcat/conf'
    if ui.project_type == 'jar':
        log.info("self service project type is jar")
        ui.dir_app = '/app/{0}/lib'.format(convert)
        ui.dir_conf = '/app/{0}/conf'.format(convert)
    if ui.project_type == 'python':
        log.info("self service project type is python")
        ui.dir_app = '/app/{0}/venv'.format(convert)
        ui.dir_conf = '/app/{0}/conf'.format(convert)
    if ui.project_type == 'tar':
        log.info("self service project type is tar")
        ui.dir_app = '/app/{0}'.format(convert)
        ui.dir_conf = '/app/{0}/conf'.format(convert)

    # underscore to dash
    ui.git_repo_name = convert.replace("_", "-")
    # Camel case to dash
    b = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
    ui.git_repo_name = b.sub(r'-\1', ui.git_repo_name).lower()

    ui.git_code_repo = 'ssh://$USER@{0}:29418/{1}'.format(gerrit_server, ui.git_repo_name)
    ui.git_code_repo_url = 'https://{0}/git/gitweb.cgi?p={1}.git'.format(gerrit_server, ui.git_repo_name)
    ui.git_conf_repo = 'ssh://$USER@{0}:29418/{1}-conf'.format(gerrit_server, ui.git_repo_name)
    ui.git_conf_repo_url = 'https://{0}/git/gitweb.cgi?p={1}-conf.git'.format(gerrit_server, ui.git_repo_name)

    ui.job_ci_base_url = 'https://ci-{0}.prod.cs/'.format(ui.job_server)
    job_base_name = '{0}_{1}'.format(ui.job_prefix, ui.git_repo_name.capitalize())
    ui.job_code_name = job_base_name + '_Build-artifact'
    ui.job_code_url = '{0}job/{1}'.format(ui.job_ci_base_url, ui.job_code_name)
    ui.job_conf_name = job_base_name + '_Build-conf'
    ui.job_conf_url = '{0}job/{1}'.format(ui.job_ci_base_url, ui.job_conf_name)

    if ui.project_type == 'war':
        ui.job_rolling_restart_name = job_base_name + '_Rolling-restart'
        ui.job_rolling_restart_url = '{0}job/{1}'.format(ui.job_ci_base_url, ui.job_rolling_restart_name)

    if ui.autosnap:
        ui.job_autosnap_name = job_base_name + '_Build-release'
        ui.job_autosnap_url = '{0}job/{1}'.format(ui.job_ci_base_url, ui.job_autosnap_name)

    if ui.code_review == 'true' and not ui.autosnap:
        ui.job_review_name = job_base_name + '_Build-review'
        ui.job_review_url = '{0}job/{1}'.format(ui.job_ci_base_url, ui.job_review_name)

    if ui.job_abs:
        ui.job_abs_base_url = 'https://abs-{0}.dev.cs/'.format(ui.job_server)
        ui.job_abs_name = '{0}_{1}_Run'.format(ui.job_prefix, ui.git_repo_name.capitalize())
        ui.job_abs_url = '{0}job/{1}'.format(ui.job_abs_base_url, ui.job_abs_name)

    return ui


def check_all_resources(repo_name, jobs):
    """Make sure that jenkins jobs and git repos don't already exist before beginning"""

    if check_git_repo(repo_name):
        try:
            check_jenkins_jobs(jobs)
            return True
        except Exception, e:
            log.error("Job validation failure: {0}".format(e))
            raise

    return None


def check_git_repo(repo_name):
    """Check and make sure that the code and conf repos don't already exist in gerrit"""

    r = requests.get('https://{0}/projects/{1}'.format(gerrit_server, repo_name), verify=False)
    if r.status_code == 404:
        log.info("repo {0} does not exist, continuing".format(repo_name))
        r = requests.get('https://{0}/projects/{1}-conf'.format(gerrit_server, repo_name), verify=False)
        if r.status_code == 404:
            log.info("repo {0}-conf does not exist, continuing".format(repo_name))
            return True
        else:
            msg = "repo {0}-conf already exists, please choose a unique name".format(repo_name)
            log.error(msg)
            raise Exception(msg)
    else:
        msg = "repo {0} already exists, please choose a unique name".format(repo_name)
        log.error(msg)
        raise Exception(msg)

    return None


def check_jenkins_jobs(jobs):
    """Make sure that jenkins jobs don't already exist before beginning"""

    for j in jobs:
        log.info('Verifying job does not already exist: %s' % j)
        r = requests.get(j, verify=False)
        if r.status_code == requests.codes.ok:
            msg = 'Jenkins job: {0} already exists, please choose a unique name.'.format(j)
            log.error(msg)
            raise Exception(msg)
        else:
            log.info('Jenkins job: %s does not already exist, continuing.' % j)

    return True


def get_last_build(job):
    """get the last build number of a jenkins job"""

    log.info('Retrieving last build id')
    try:
        r = requests.get('{0}/lastBuild/api/json'.format(job), verify=False)
        last = r.json()
        if r.status_code == 200:
            log.info('Last build id is: {0}'.format(last['number']))
            return last['number']
        else:
            msg = 'There was an error querying Jenkins: ' \
                  'http_status_code=%s,reason=%s,request=%s' % (r.status_code, r.reason, job)
            log.info(msg)
            raise Exception(msg)
    except:
        msg = 'Unable to find last build id for job: {0}'.format(job)
        log.error(msg)
        raise Exception(msg)


def check_create_git_repo(git_job, git_repo_name, last_id):
    """Make sure the jenkins job completed successfully"""

    check_id = last_id + 1
    final_id = check_id + 4

    while (check_id < final_id):
        count = 0
        # Try each id for 30 seconds
        while (count < 5):
            log.info('Checking iteration {0} of Job: {1}/{2}'.format(count, git_job, check_id))
            # Start with the build id we got passed plus one, go up from there
            r = requests.get('{0}/{1}/api/json'.format(git_job, check_id), verify=False)
            if r.status_code == 200:
                last = r.json()
                log.info('Checking description: {0} against project name: {1} '
                         'for SUCCESS'.format(last['description'], git_repo_name))
                if last['description'] == git_repo_name and last['result'] == 'SUCCESS':
                    log.info('Found successful git creation job for: {0}'.format(git_repo_name))
                    return True
            count = count + 1
            time.sleep(5)
        check_id = check_id + 1

    msg = 'Unable to find successful git creation job for: {0}'.format(git_repo_name)
    log.error(msg)
    raise Exception(msg)


def create_git_repo(ui, git_job, git_token):

    # Get the last id of the jenkins job to start.
    last_id = get_last_build(git_job)

    log.info("Creating git repos for {0}".format(ui.git_repo_name))
    code_review = 'No-code-review'
    if ui.code_review == 'true':
        code_review = 'Code-review'

    payload = {'token': git_token,
               'PROJECT_TYPE': code_review,
               'PROJECT_NAME': ui.git_repo_name,
               'PROJECT_DESCRIPTION': 'SELF_SERVICE created {0}'.format(ui.project_name),
               'CREATE_CONFIG_REPO': 'true',
               'cause': 'ss_{0}'.format(ui.git_repo_name)
               }

    try:
        log.info('Triggering git repo creation job: '
                 '{0}/buildWithParameters params: {1}'.format(git_job, payload))
        r = requests.get('{0}/buildWithParameters'.format(git_job), params=payload, verify=False)
    except Exception, e:
        log.error("Failed to trigger git repo creation: {0}".format(e))
        raise

    if r.status_code == 200:
        # check to make sure the job succeeded
        log.info("Checking for job success")
        if check_create_git_repo(git_job, ui.git_repo_name, int(last_id)):
            log.info("Git repo creation job finished successfully")
            return True
    else:
        log.error("Failed to create git repos.")


def populate_git_conf_repo(ui, git_job, git_token):

    git_conf_project = "{0}-conf".format(ui.git_repo_name)
    file_suffix = 'properties'
    if ui.project_type == 'python':
        file_suffix = 'ini'

    log.info("Populating git conf repo for {0}".format(git_conf_project))
    payload = {'token': git_token,
               'PROJECT': git_conf_project,
               'PROPERTIES_FILE': '{0}.{1}'.format(ui.git_repo_name, file_suffix),
               'cause': 'ss_{0}'.format(git_conf_project)
               }

    try:
        log.info('Triggering git conf repo population job: '
                 '{0}/buildWithParameters params: {1}'.format(git_job, payload))
        r = requests.get('{0}/buildWithParameters'.format(git_job), params=payload, verify=False)
    except Exception, e:
        log.error("Failed to trigger git repo creation: {0}".format(e))
        raise

    # FIXME: not sure if we care to validate it succeeds. It's not tragic.
    if r.status_code == 200:
        log.info("Git repo population job triggered successfully")
        return True
    else:
        log.error("Failed to trigger git repo population.")


def get_deploy_ids(host, uri):

    try:
        url = 'http://{0}{1}'.format(host, uri)
        log.info("Querying application: {0}".format(url))
        l = requests.get(url, verify=False)
        j = l.json()
        deploy_ids = {j[0]['artifact_type']: j[0]['deploy_id'],
                      j[1]['artifact_type']: j[1]['deploy_id']}
        return deploy_ids
    except Exception, e:
        log.error("Failed to retrieve deploy ids: {0}".format(e))
        raise


def jenkins_get(url):

    url = url + 'config.xml'
    log.info('Requesting data from jenkins: %s' % url)
    r = requests.get(url, verify=False)

    if r.status_code == requests.codes.ok:
        log.info('Response data: %s' % r.status_code)
        return r
    else:
        log.info('There was an error querying Jenkins: '
                 'http_status_code=%s,reason=%s,request=%s'
                 % (r.status_code, r.reason, url))
        return None


def jenkins_post(url, config_xml):

    try:
        log.info('Posting data to jenkins: %s' % url)
        headers = {'Content-Type': 'text/xml'}
        auth = HTTPBasicAuth(jenkins_user, jenkins_pass)
        r = requests.post(url, verify=False, headers=headers, auth=auth, data=config_xml)

        if r.status_code == requests.codes.ok:
            log.info('Success: %s' % r.status_code)
            return r
        else:
            msg = 'There was an error posting to Jenkins: ' \
                  'http_status_code={0},reason={1},request={2}'.format(r.status_code, r.reason, url)
            log.error(msg)
            raise Exception(msg)
    except Exception, e:
        msg = 'Failed to create jenkins conf job: {0}'.format(e)
        log.error(msg)
        raise Exception(msg)


def get_jenkins_template_url(job_type):

    log.info("Retrieving jenkins template job from DB for job type: {0}".format(job_type))
    try:
        q = DBSession.query(JenkinsTemplate)
        q = q.filter(JenkinsTemplate.job_type == job_type)
        job = q.one()
        log.info("Template job is: {0}".format(job.job_url))
        return job.job_url
    except Exception, e:
        msg = 'Failed to retrieve conf template from db: {0}'.format(e)
        log.error(msg)
        raise Exception(msg)


def jenkins_sub_values(**kwargs):

    try:
        url = kwargs.get('url', None)
        project_name = str(kwargs.get('project_name', None))
        git_repo_name = str(kwargs.get('git_repo_name', None))
        app_id = str(kwargs.get('app_id', None))
        deploy_id = str(kwargs.get('deploy_id', None))
        ct_class = str(kwargs.get('ct_class', None))
        rolling_restart_job = str(kwargs.get('rolling_restart_job', None))

        r = jenkins_get(url)
        log.info('Substituting values into template: {0}'.format(url))
        config_xml = r.content.replace('__CHANGE_ME_PACKAGE_NAME__', project_name)
        config_xml = config_xml.replace('__CHANGE_ME_GIT_REPO_NAME__', git_repo_name)
        config_xml = config_xml.replace('__CHANGE_ME_APP_ID__', app_id)
        config_xml = config_xml.replace('__CHANGE_ME_DEPLOY_ID__', deploy_id)
        config_xml = config_xml.replace('__CHANGE_ME_CT_CLASS__', ct_class)
        config_xml = config_xml.replace('__CHANGE_ME_ROLLING_RESTART_JOB__', rolling_restart_job)
        config_xml = config_xml.replace('<disabled>true</disabled>', '<disabled>false</disabled>')
        return config_xml
    except Exception, e:
        msg = 'Failed jenkins template substitution {0}: {1}'.format(url, e)
        log.error(msg)
        raise Exception(msg)


def create_jenkins_jobs(ui):

    log.info("Creating jenkins jobs")
    try:
        url = get_jenkins_template_url('conf')
        config_xml = jenkins_sub_values(url=url,
                                        project_name=ui.project_name,
                                        git_repo_name=ui.git_repo_name + '-conf',
                                        deploy_id=ui.deploy_id_conf)
        url = '{0}createItem?name={1}'.format(ui.job_ci_base_url, ui.job_conf_name)
        jenkins_post(url, config_xml)

        if ui.code_review == 'true' and not ui.autosnap:
            log.info("Creating code review job: {0}".format(ui.job_review_url))
            url = get_jenkins_template_url('{0}_build_review'.format(ui.project_type))
            config_xml = jenkins_sub_values(url=url,
                                            project_name=ui.project_name,
                                            git_repo_name=ui.git_repo_name)
            url = '{0}createItem?name={1}'.format(ui.job_ci_base_url, ui.job_review_name)
            jenkins_post(url, config_xml)

        if ui.autosnap:
            log.info("Creating autosnap release job: {0} for deploy id: {1}".format(ui.job_autosnap_url,
                                                                                    ui.deploy_id_code))
            url = get_jenkins_template_url('{0}_build_autosnap_release'.format(ui.project_type))
            config_xml = jenkins_sub_values(url=url,
                                            project_name=ui.project_name,
                                            git_repo_name=ui.git_repo_name,
                                            deploy_id=ui.deploy_id_code,
                                            rolling_restart_job=ui.job_rolling_restart_name)
            url = '{0}createItem?name={1}'.format(ui.job_ci_base_url, ui.job_autosnap_name)
            jenkins_post(url, config_xml)

            log.info("Creating code autosnap build job: {0} for deploy id: {1}".format(ui.job_code_url,
                                                                                       ui.deploy_id_code))
            url = get_jenkins_template_url('{0}_build_autosnap'.format(ui.project_type))
        else:
            log.info("Creating code build job: {0} for deploy id: {1}".format(ui.job_code_url,
                                                                              ui.deploy_id_code))
            url = get_jenkins_template_url('{0}_build'.format(ui.project_type))

        # The main build job
        config_xml = jenkins_sub_values(url=url,
                                        project_name=ui.project_name,
                                        git_repo_name=ui.git_repo_name,
                                        deploy_id=ui.deploy_id_code,
                                        rolling_restart_job=ui.job_rolling_restart_name)
        url = '{0}createItem?name={1}'.format(ui.job_ci_base_url, ui.job_code_name)
        jenkins_post(url, config_xml)

        # wars need the rolling restart job
        if ui.project_type == 'war':
            url = get_jenkins_template_url('{0}_rolling_restart'.format(ui.project_type))
            config_xml = jenkins_sub_values(url=url,
                                            ct_class=ui.ct_class,
                                            rolling_restart_job=ui.job_rolling_restart_name)
            url = '{0}createItem?name={1}'.format(ui.job_ci_base_url, ui.job_rolling_restart_name)
            jenkins_post(url, config_xml)

        if ui.job_abs:
            log.info('Creating skeleton jenkins abs job')
            url = get_jenkins_template_url('abs')
            config_xml = jenkins_sub_values(url=url, app_id=ui.app_id)
            url = '{0}createItem?name={1}'.format(ui.job_abs_base_url, ui.job_abs_name)
            jenkins_post(url, config_xml)

        return True
    except Exception, e:
        msg = 'Failed to create all jenkins jobs: {0}'.format(e)
        log.error(msg)
        raise Exception(msg)


@view_config(route_name='ss', permission='view', renderer='twonicornweb:templates/ss.pt')
def view_ss(request):

    # Globalizing these. Otherwise will be passing them all over the
    # place for no reason.
    global jenkins_user
    global jenkins_pass
    global gerrit_server
    global verify_ssl
    jenkins_user = request.registry.settings['ss.jenkins_user']
    jenkins_pass = request.registry.settings['ss.jenkins_pass']
    gerrit_server = request.registry.settings['ss.gerrit_server']
    verify_ssl = request.registry.settings['ss.verify_ssl']

    page_title = 'Self Service'
    subtitle = 'Add an application'
    user = get_user(request)

    error_msg = None
    params = {'mode': None,
              'confirm': None,
              'processed': None,
              }
    for p in params:
        try:
            params[p] = request.params[p]
        except:
            pass

    mode = params['mode']
    confirm = params['confirm']
    processed = params['processed']
    ui = UserInput()

    # Build some lists of choices
    q = DBSession.query(ArtifactType)
    q = q.filter(ArtifactType.name != 'conf')
    artifact_types = q.all()

    q = DBSession.query(JenkinsInstance)
    jenkins_instances = q.all()

    if 'form.edit' in request.POST:
        log.info("Editing self service")
        ui = format_user_input(request, ui)

    if 'form.preprocess' in request.POST:
        log.info("Pre-processing self service")
        ui = format_user_input(request, ui)

        # Set up the list of jobs to check
        jobs = [ui.job_code_url, ui.job_conf_url]
        # Optional jobs
        for j in [ui.job_rolling_restart_url, ui.job_abs_url]:
            if j:
                jobs.append(j)
        if ui.code_review == 'true' and not ui.autosnap:
            jobs.append(ui.job_review_url)

        try:
            check_all_resources(ui.git_repo_name, jobs)
            confirm = 'true'
        except Exception, e:
            error_msg = e

    if 'form.confirm' in request.POST:
        log.info("Processing self service request")

        try:
            ui = format_user_input(request, ui)
            # FIXME: Should only set package name for projects that need it.
            log.info("Creating twonicorn application")
            ca = {'application_name': ui.project_name,
                  'nodegroup': 'SELF_SERVICE',
                  'artifact_types': [ui.project_type, 'conf'],
                  'deploy_paths': [ui.dir_app, ui.dir_conf],
                  'package_names': [ui.project_name, ''],
                  'day_start': '1',
                  'day_end': '4',
                  'hour_start': '8',
                  'minute_start': '0',
                  'hour_end': '17',
                  'minute_end': '0',
                  'updated_by': user['login'],
                  'ss': True
                  }
            app = create_application(**ca)
            ui.app_id = app.location.rsplit('=', 1)[1]
            ui.app_url = '/deploys?application_id={0}'.format(ui.app_id)

            if app.status_code == 201:
                log.info("Successfully created application: {0}".format(app.location))
                if create_git_repo(ui, request.registry.settings['ss.git_job'],
                                   request.registry.settings['ss.git_token']):
                    populate_git_conf_repo(ui, request.registry.settings['ss.git_conf_populate_job'],
                                           request.registry.settings['ss.git_token'])
                    deploy_ids = get_deploy_ids(request.host, app.location)
                    if deploy_ids:
                        ui.deploy_id_conf = deploy_ids['conf']
                        ui.deploy_id_code = deploy_ids[ui.project_type]
                        create_jenkins_jobs(ui)
                        processed = 'true'
        except Exception, e:
            error_msg = "Failed to complete self service: {0}".format(e)
            log.error(error_msg)
            raise Exception(error_msg)

    return {'layout': site_layout(),
            'page_title': page_title,
            'user': user,
            'subtitle': subtitle,
            'mode': mode,
            'confirm': confirm,
            'processed': processed,
            'error_msg': error_msg,
            'ui': ui,
            'artifact_types': artifact_types,
            'jenkins_instances': jenkins_instances,
            }
[ "Aaron.Bandt@citygridmedia.com" ]
Aaron.Bandt@citygridmedia.com
c7a5d62ff9c478f7430982313345c1f0adb82459
494406a733bc460244d8da5888ef461cce0a36ce
/task31.py
6a7f71cd5aa40830b3cab1407c3ddc112cc43f9c
[]
no_license
kanatnadyrbekov/Ch1Part2-Task-31
8dcb398ddf6b64e4dc7f6005cc0313b5146b8e05
8bbc0d3dbd39d08e2cdfecc302d6e32bd2f94ff3
refs/heads/master
2020-12-02T01:42:31.539140
2019-12-30T04:23:46
2019-12-30T04:23:46
230,846,949
0
0
null
null
null
null
UTF-8
Python
false
false
470
py
# Write a function that counts the number of lines, words and letters
# in a text file.

text = """Hello my name is Kanat, and I study in Maker's course
mjdbvzjk zkjvasukz ksbvzu ubvu jbvab ajbvuzb
"""

lines = len(text.splitlines())
print(f"Text has: {lines} lines")

words = len(text.split())
print(f"Text has: {words} words")

letters = sum(ch.isalpha() for ch in text)
print(f"Text has: {letters} letters")
[ "kanatnadyrbekov@gmail.com" ]
kanatnadyrbekov@gmail.com
a931d3ef8ecc32157ed01a1bf0210f341b359d6a
b19c38f4864d1cd451ab8215680835ae13e323fb
/12_integer_to_roman/test.py
2e2413643637d76d084242c585bcc27c494fc882
[]
no_license
sh00tg0a1/leet
df412e62b949166e63ce1fec86e8dc3304b1de44
849c1f216dd912e28be35f9579f69975fa7410f9
refs/heads/master
2020-04-09T17:59:16.426779
2019-09-17T08:31:53
2019-09-17T08:31:53
160,498,095
0
0
null
null
null
null
UTF-8
Python
false
false
536
py
import time

from s import Solution


class test(object):
    def setUp(self):
        self.s = Solution()

    def tearDown(self):
        pass

    def test_solution(self):
        assert(self.s.intToRoman(3) == 'III')

    def test_solution2(self):
        assert(self.s.intToRoman(4) == 'IV')

    def test_solution3(self):
        assert(self.s.intToRoman(9) == 'IX')

    def test_solution4(self):
        assert(self.s.intToRoman(58) == 'LVIII')

    def test_solution5(self):
        assert(self.s.intToRoman(1994) == 'MCMXCIV')
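The tests import Solution from s.py, which is not included in this record; a typical greedy implementation that satisfies all five assertions might look like the following (a sketch, not necessarily the author's code):

class Solution(object):
    def intToRoman(self, num):
        values = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
        symbols = ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL',
                   'X', 'IX', 'V', 'IV', 'I']
        out = []
        for v, s in zip(values, symbols):
            # subtract the largest value that still fits, append its symbol
            while num >= v:
                out.append(s)
                num -= v
        return ''.join(out)


assert Solution().intToRoman(1994) == 'MCMXCIV'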
[ "chenxiao911@gmail.com" ]
chenxiao911@gmail.com
0f1be9d33b908108f06481fb69e1756a026d8b88
f989c2238e3e39862f700e2f0c9a5a0ffab86dc9
/CodeFights/functionsComposition.py
66cbf8af3582b250c26de017300eea2d0a773e62
[ "MIT" ]
permissive
makramjandar/Test_Code
8000b6dee8d16f02accd0326c6e1b8397b0e33b3
798efc9fc668ef021736a6d9699ef4713cf8b718
refs/heads/master
2020-09-28T21:33:07.390772
2018-06-13T15:26:30
2018-06-13T15:26:30
226,869,864
1
0
MIT
2019-12-09T12:48:05
2019-12-09T12:48:04
null
UTF-8
Python
false
false
1,063
py
#!/usr/local/bin/python
# Code Fights Functions Composition Problem
from functools import reduce
import math


def compose(functions):
    return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)


def functionsComposition(functions, x):
    return compose(map(eval, functions))(x)


def main():
    tests = [
        [["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
        [["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"], 1,
         math.sin(math.cos((1**2) * 2))],
        [["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
        [["float"], 1000, 1000],
        [["abs"], -20, 20]
    ]

    for t in tests:
        res = functionsComposition(t[0], t[1])
        ans = t[2]
        if ans == res:
            print("PASSED: functionsComposition({}, {}) returned {}"
                  .format(t[0], t[1], res))
        else:
            print("FAILED: functionsComposition({}, {}) returned {},"
                  "answer: {}".format(t[0], t[1], res, ans))


if __name__ == '__main__':
    main()
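Note the composition order: the reduce builds f(g(x)), so the last function in the list runs first and the first one wraps the result, which is why the test ["abs", "math.sin", "lambda x: 3 * x / 2"] evaluates abs(sin(3*pi/2)). A tiny self-contained check (the helper names are illustrative):

from functools import reduce


def compose(functions):
    return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)


double = lambda x: 2 * x
inc = lambda x: x + 1

# inc runs first (innermost), double wraps it: double(inc(3)) == 8
assert compose([double, inc])(3) == 8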
[ "heather.kusmierz@gmail.com" ]
heather.kusmierz@gmail.com
1dd53163bf9d9e727e7d119c3407a993fc43a627
4781d9293b59a5072647bb179195b143c60621bd
/백준/7568_덩치/venv/Scripts/pip-script.py
e089de8832fd2f720f059695debffe5d3ee13f54
[]
no_license
chriskwon96/Algorithm_codes
bf98131f66ca9c091fe63db68b220527800069c9
edb7b803370e87493dad4a38ee858bb7bb3fd31d
refs/heads/master
2023-08-15T18:48:26.809864
2021-10-12T13:43:21
2021-10-12T13:43:21
387,803,476
0
0
null
null
null
null
WINDOWS-1252
Python
false
false
428
py
#!C:\Users\ATIV\Desktop\my_work\백준\7568_덩치\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
[ "chriskwon96@naver.com" ]
chriskwon96@naver.com
867e9b01f0b93683e393cc04db7c18c19e6997d1
dd95ca2932da4670aa05ff9548dfd0262bd88a38
/clinte/models.py
af91fb4f02f34ced1c456a5547a1a1c6aa648485
[]
no_license
kenedycpd/Desafio-Preco-Certo
b61e219b8b9801dfd395ea6f331dc3776f8a02b4
0889112cace4f0f721dcd28fe9d4795da7d8dcc7
refs/heads/master
2021-09-23T16:07:51.839861
2020-02-05T01:16:41
2020-02-05T01:16:41
238,100,346
0
0
null
2021-09-22T18:31:28
2020-02-04T01:46:07
Python
UTF-8
Python
false
false
238
py
from django.db import models


# Cliente model.
class Cliente(models.Model):
    nome = models.CharField('Nome', max_length=50)

    class Meta:
        verbose_name = 'Cliente'
        verbose_name_plural = 'Clientes'

    def __str__(self):
        return self.nome
[ "kenedycelestino89@gmail.com" ]
kenedycelestino89@gmail.com
d51da34c53710f07861163e9b07028efe0fca5a5
67a9cf0184b8f641bf0cd193d32dd6b9c2dbf204
/docs/methods_str/problems_strings/problem_strs_add_input.py
804372ca7518ac022286489373b14208c6fa2a2d
[]
no_license
ruslan-rv-ua/python-starter
b37c95b2568d3429c2b514d5820804e9d646044c
d09da86530c1f49f4bdaa8c2d4f760eec757809c
refs/heads/master
2021-10-23T21:34:25.014741
2021-10-16T06:01:49
2021-10-16T06:01:49
225,604,511
0
1
null
null
null
null
UTF-8
Python
false
false
395
py
'''
Write a program that does the following:

1. Asks the user to enter two integers separated by a space
2. Prints the sum of those numbers

Remember:
1. get the input data
2. compute the result
3. print the result
'''
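The file states the exercise but leaves the body empty; one straightforward solution, sketched here (the variable names are the editor's, not prescribed by the exercise):

raw = input("Enter two integers separated by a space: ")
a, b = (int(part) for part in raw.split())
print(a + b)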
[ "ruslan.rv.ua@gmail.com" ]
ruslan.rv.ua@gmail.com
fb5213e655f9d6ec263226ab1a221b04f63e7d96
84bc04ff957602c7db6694ad684ede501ed9fcfc
/proficia/__init__.py
10c80e4f7d7bc4f824a5803a3af10eb1032e537a
[ "MIT" ]
permissive
endlesslupita/proficia
e9434e62aa0872599e2f05f31063f2ca3ccb9ef9
b93aceacc94c0cd9caa23855031d74d55f849417
refs/heads/master
2020-04-05T00:44:09.924354
2015-09-25T23:29:04
2015-09-25T23:29:04
42,262,803
0
1
null
2015-09-11T17:07:45
2015-09-10T18:28:47
JavaScript
UTF-8
Python
false
false
341
py
# -*- coding: utf-8 -*-

__about__ = """
In addition to what is provided by the "zero" project, this project
provides thorough integration with django-user-accounts, adding
comprehensive account management functionality. It is a foundation
suitable for most sites that have user accounts.
"""

default_app_config = "proficia.apps.AppConfig"
[ "endlesslupita@gmail.com" ]
endlesslupita@gmail.com
4a9ee219d59b7f6d34dfc5f888c702d025c5b5ec
60db9eff105a36b1420f8d8ef7b0a2b8572163b9
/hackerrank/n_numbers.py
e1c5061ba2bebf9c0d9f49f57a4bf10ccff05133
[]
no_license
kruglov-dmitry/algos_playground
8773521b8a9f75b02e612bee3b59b71fd2da60e8
e62afe9a57e99948c286fd24000d4afca1771615
refs/heads/master
2020-04-08T02:04:24.798522
2020-02-23T21:26:51
2020-02-23T21:26:51
158,921,524
0
0
null
null
null
null
UTF-8
Python
false
false
587
py
#
# Given a number N put all elements from 1 to N in array in following order
# 1, 3, ..., 4, 2
# Time: O(N)
# Space: O(1)
#


def n_numbers(N):
    if N < 1:
        return []
    res = [0] * N
    prev = 0
    for idx in xrange(N // 2):
        res[idx] = prev + 1
        res[N - 1 - idx] = res[idx] + 1
        prev = res[N - 1 - idx]
    if N % 2:
        res[N // 2] = N
    return res


N = 4
assert [1, 3, 4, 2] == n_numbers(N)

N = 5
assert [1, 3, 5, 4, 2] == n_numbers(N)

N = 1
assert [1] == n_numbers(N)

N = 10
assert [1, 3, 5, 7, 9, 10, 8, 6, 4, 2] == n_numbers(N)
[ "kruglov.dima@gmail.com" ]
kruglov.dima@gmail.com
5879635f7e37f488cf8a5f9cc3dc537e7512d206
0083ad7bde53f273b1dfe5fd59fed7c5d5bed3a8
/connectcare/mysite/forms.py
8915a3425a18b00cb461039fa124de7ab347df7f
[]
no_license
SanjanaKrishnam/RBAC-Authorization
122e209ab9da5efa7a0188f352785ec6129c4a7e
ecbf359c23677db5104a29e4f3658a600cfe3607
refs/heads/master
2020-05-18T02:18:48.974707
2019-04-29T17:16:31
2019-04-29T17:16:31
184,112,344
1
0
null
null
null
null
UTF-8
Python
false
false
448
py
from django import forms


class UserRegistrationForm(forms.Form):
    username = forms.CharField(
        required=True,
        label='Username',
        max_length=32
    )
    email = forms.CharField(
        required=True,
        label='Email-id',
        max_length=32,
    )
    password = forms.CharField(
        required=True,
        label='Password',
        max_length=32,
        widget=forms.PasswordInput()
    )
[ "sanjana.krishnam@gmail.com" ]
sanjana.krishnam@gmail.com
08dce837f091ba40378cd153987804f1b9320781
fc9de122ad97e3eb8c12f60fe83a79925a96ed4b
/pipeline.py
4415c23a1d087d40e85fc2d97214418ff0f66ebd
[]
no_license
Rajrup/Pose-Estimation
435891d590d5ec1a97ba161d30950234d4cfce71
67c09b4e8bc96efcf1b3318c705bf822a542a08a
refs/heads/main
2023-07-20T02:45:59.218734
2021-08-31T08:56:56
2021-08-31T08:56:56
401,631,077
0
0
null
null
null
null
UTF-8
Python
false
false
1,419
py
import cv2

from module_pose.pose_openpose_tf import PoseOpenpose
from module_pose.pose_recognition_tf import PoseRecognition

input_file = "./images/exercise.avi"
output_file = "./images/exercise_pose.mp4"

in_reader = cv2.VideoCapture(input_file)
frame_height = int(in_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_width = int(in_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = in_reader.get(cv2.CAP_PROP_FPS)
out_writer = cv2.VideoWriter(output_file, fourcc, fps, (frame_width, frame_height))

pose = PoseOpenpose()
pose.Setup()

recog = PoseRecognition()
recog.Setup()

output_frames = []
predict_labels = []
frame_id = 1
while True:
    try:
        ret, image = in_reader.read()
        if ret == False:
            break

        pose.PreProcess(image)
        pose.Apply()
        humans = pose.PostProcess()
        # print(humans)

        recog.PreProcess(humans)
        recog.Apply()
        predict_label = recog.PostProcess()

        ## OUTPUT ##
        print("Processing %dth image" % frame_id)
        print("Pose: {}".format(predict_label))

        output_frames.append(image)
        predict_labels.append(predict_label)
        frame_id += 1
    except KeyboardInterrupt:
        break

for image, predict_label in zip(output_frames, predict_labels):
    cv2.putText(image, predict_label, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                color=(0, 0, 255), thickness=2)
    out_writer.write(image)

in_reader.release()
out_writer.release()
[ "rajrup.withbestwishes@gmail.com" ]
rajrup.withbestwishes@gmail.com
a81a539af0ad008720cc26dbebc755db65aeb05b
423fce12c428d5b47972263b985def5fe7afefb3
/builtins/bins-modules/this/caesar-cipher-lowercase.py
caf699c4cf5848487a3dfec29085551c621406ec
[]
no_license
AmigaTi/Python3Learning
58ed70dc4924c40e9154221cb983650cadbcb073
6a5c2dd8be903edc533292ecf50a0d94db2b4729
refs/heads/master
2021-09-22T13:56:09.215887
2018-09-10T17:49:19
2018-09-10T17:49:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,376
py
#!/usr/bin/python
# -*- coding: utf-8 -*-

import string

'''
The Caesar cipher is a kind of substitution cipher: each letter is replaced
by some other letter.
For example, suppose I agree on a substitution table with the other party:
l -> h, o -> a, v -> t. If I then send "love", the other party can use the
table to see that what I really sent was "hate".
The Caesar cipher shifts the normal 26 English letters by a fixed amount,
usually a shift of 3, i.e. a -> d, b -> e, c -> f ...
'''

lowercase = string.ascii_lowercase


def substitution(text, key_table):
    text = text.lower()
    result = ''
    for l in text:
        i = lowercase.find(l)
        if i < 0:
            result += l
        else:
            result += key_table[i]
    return result


def caesar_cipher_encrypt(text, shift):
    key_table = lowercase[shift:] + lowercase[:shift]
    return substitution(text, key_table)


def caesar_cipher_decrypt(text, shift):
    return caesar_cipher_encrypt(text, -shift)


# Crack: try all 26 shift values and see which one yields meaningful text
def caesar_cipher_crack(text):
    for i in range(26):
        key_table = lowercase[-i:] + lowercase[:-i]
        print(substitution(text, key_table)[:12], '| shift is ', i)


init_text = '''
We intend to begin on the first of February unrestricted submarine warfare.
We shall endeavor in spite of this to keep the United States of America
neutral. In the event of this not succeeding, we make Mexico a proposal of
alliance on the following basis: make war together, make peace together,
generous financial support and an understanding on our part that Mexico is
to reconquer the lost territory in Texas, New Mexico, and Arizona. The
settlement in detail is left to you. You will inform the President of the
above most secretly as soon as the outbreak of war with the United States
of America is certain and add the suggestion that he should, on his own
initiative, invite Japan to immediate adherence and at the same time
mediate between Japan and ourselves. Please call the President's attention
to the fact that the ruthless employment of our submarines now offers the
prospect of compelling England in a few months to make peace.
'''

cipher_text_encrypt = caesar_cipher_encrypt(init_text, 13)
print(cipher_text_encrypt)

cipher_text_decrypt = caesar_cipher_decrypt(cipher_text_encrypt, 13)
print(cipher_text_decrypt)
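caesar_cipher_crack is defined above but never exercised; the same brute-force idea in a self-contained Python 3 sketch using str.translate (the plaintext is invented for illustration):

import string

lowercase = string.ascii_lowercase


def shift_text(text, shift):
    table = str.maketrans(lowercase, lowercase[shift:] + lowercase[:shift])
    return text.lower().translate(table)


secret = shift_text('attack at dawn', 13)
# brute force: exactly one of the 26 candidates reads as English
for s in range(26):
    print(s, shift_text(secret, -s))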
[ "shellever@163.com" ]
shellever@163.com
b6c5f2b49e0ef9d7cbfc4d3caa13a6c45a620e0d
1b01dec8c454337232a6cf1046412ec98269fe5d
/examples/arangodb_aio_example.py
562078912099c5ecc0e6027526c17df12952ee60
[ "BSD-3-Clause" ]
permissive
lietu/shylock
d38710220306af1e4fac638b2d24df8a8fdc3801
5ada3cb4bf75e2395fadb19f68ceff5ff92e6a65
refs/heads/master
2023-08-09T06:58:47.653545
2023-03-05T16:27:30
2023-03-05T16:27:30
243,951,853
5
8
NOASSERTION
2023-07-25T21:10:23
2020-02-29T10:55:05
Python
UTF-8
Python
false
false
1,603
py
import asyncio
from time import time

from aioarangodb import ArangoClient

from shylock import AsyncLock as Lock
from shylock import ShylockAioArangoDBBackend, configure

HOSTS = "http://localhost:8529"
USERNAME = "root"
PASSWORD = ""


async def main():
    print("Start")
    client = ArangoClient(hosts=HOSTS)
    db = await client.db("shylock_test", username=USERNAME, password=PASSWORD)
    configure(await ShylockAioArangoDBBackend.create(db, "shylock"))

    lock_name = "test-lock"
    test_lock = Lock(lock_name)

    try:
        async with Lock(lock_name):
            print("Got lock")
            print("Testing re-lock")
            assert not await test_lock.acquire(False)
            raise ValueError()
    except ValueError:
        print("Caught exception, lock should be released")

    assert await test_lock.acquire(False)
    await test_lock.release()

    locks = [f"test-lock-a{i}" for i in range(3)]

    async def _wait(lock_name: str):
        start = time()
        print(f"Waiting for release of {lock_name}, this might take a while.")
        async with Lock(lock_name):
            elapsed = time() - start
            print(f"Release of {lock_name} took {elapsed:.3f}s")

    waits = []
    for lock_name in locks:
        l = Lock(lock_name)
        await l.acquire()
        waits.append(_wait(lock_name))

    await asyncio.gather(*waits)

    await client.close()


if __name__ == "__main__":
    # Only in Python 3.7 ->
    # asyncio.run(main())

    # Compatible with Python 3.6 ->
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(main())
[ "janne.enberg@lietu.net" ]
janne.enberg@lietu.net
73d1f2135a51c15de594eb6f04e0c740bff20dfd
a3edbba6d69c34ad73b48fae5b23033d7fd35aee
/util/utilurlparse.py
a0443a788b903c9886413dbc03c88f17ea58f227
[]
no_license
a2un/crawlpy
6b13f5dc6113d545c97bb92235323692f0c52691
8dc364dd077907e0e9c6333c09f07ca0bad9dd71
refs/heads/master
2020-03-07T06:32:07.834101
2018-03-29T17:40:15
2018-03-29T17:40:15
127,325,249
0
0
null
null
null
null
UTF-8
Python
false
false
86
py
from urlparse import urlparse


def gethostname(url):
    return urlparse(url).hostname
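urlparse at that import path is Python 2 only; under Python 3 the same function lives in urllib.parse. A drop-in sketch for Python 3:

from urllib.parse import urlparse


def gethostname(url):
    return urlparse(url).hostname


print(gethostname("https://example.com/path"))  # example.com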
[ "encodedgeek@gmail.com" ]
encodedgeek@gmail.com
badaad01eda5e27db0d00c551aa4d65aa0102f15
4bd5e2c5eb12750099438b736f1f8b2236f1eea8
/stockscraper2/stockscraper2/items.py
b839cd24ba69bdc25c71dbf9881f4d19751f87b0
[]
no_license
z2sonee/stock_scrapy
68df05ed7cc0658a9bf33f748f850a3919259777
8b2ece6fd3c2858f0e7e429bd954508726e8bc8c
refs/heads/main
2023-03-08T17:49:54.959095
2021-02-25T07:36:58
2021-02-25T07:36:58
342,159,017
0
0
null
null
null
null
UTF-8
Python
false
false
372
py
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class Stockscraper2Item(scrapy.Item):
    read_time = scrapy.Field()
    stock_volume = scrapy.Field()
    now_price = scrapy.Field()
    max_price = scrapy.Field()
    min_price = scrapy.Field()
    stock_code = scrapy.Field()
[ "noreply@github.com" ]
z2sonee.noreply@github.com