Reminders

1. ReplayBuffer:

  • This is a deque whose maximum length is fixed in advance: once maxlen is reached, further append() calls evict the oldest entries, following the queue's FIFO rule.
  • zip(*transitions) pairs up the elements sitting at the same position across the sampled tuples; intuitively, it swaps the row/column roles of the 2-D tuple (see the sketch after this list).
  • The * operator unpacks the outer tuple, so each of its elements is passed to the function as a separate argument.
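
A minimal sketch (not from the original code; the buffer contents and variable names are made up) showing both behaviours, the deque's FIFO eviction and the transposition done by zip(*transitions):

import collections

# a deque with maxlen=3: appending a fourth element silently drops the oldest one (FIFO)
buf = collections.deque(maxlen=3)
for i in range(4):
    buf.append(i)
print(list(buf))    # [1, 2, 3] -- element 0 was evicted

# zip(*transitions) regroups a list of (state, action, reward) tuples into one tuple per field,
# which is exactly what sample() relies on
transitions = [(0, 'a', 1.0), (1, 'b', 0.5), (2, 'c', 0.0)]
states, actions, rewards = zip(*transitions)
print(states)       # (0, 1, 2)
print(rewards)      # (1.0, 0.5, 0.0)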

2. train_on_policy_agent()

  • A fresh transition_dict is initialized and filled for every trajectory, and the agent is updated only after the complete episode has been collected. So although the algorithm is on-policy, the update itself happens offline (episode by episode rather than step by step), and each episode is used exactly once before being discarded.

3. train_off_policy_agent()

  • The replay_buffer is initialized only once; since its capacity is bounded, the oldest transitions are evicted first (FIFO).
  • Mini-batch gradient descent (MBGD) is performed here, and training does not start until the buffer has reached minimal_size.
  • An episode can only end by reaching a terminal state (as decided by the environment).

Source Code

from tqdm import tqdm  
import numpy as np  
import torch  
import collections  
import random  

class ReplayBuffer:
    ''' Fixed-capacity FIFO experience replay buffer. '''
    def __init__(self, capacity):
        # deque with maxlen: once full, appending evicts the oldest transition
        self.buffer = collections.deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        # store one transition as a tuple
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        # uniformly sample a mini-batch and regroup it field by field
        transitions = random.sample(self.buffer, batch_size)
        state, action, reward, next_state, done = zip(*transitions)
        return np.array(state), np.array(action), np.array(reward), np.array(next_state), np.array(done)

    def size(self):
        return len(self.buffer)

def moving_average(a, window_size):
    ''' Smooth the return curve with a centered moving average (window_size should be odd). '''
    cumulative_sum = np.cumsum(np.insert(a, 0, 0))
    # full windows in the middle of the sequence
    middle = (cumulative_sum[window_size:] - cumulative_sum[:-window_size]) / window_size
    # near the two ends, use progressively growing/shrinking windows of sizes 1, 3, 5, ...
    r = np.arange(1, window_size-1, 2)
    begin = np.cumsum(a[:window_size-1])[::2] / r
    end = (np.cumsum(a[:-window_size:-1])[::2] / r)[::-1]
    return np.concatenate((begin, middle, end))
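
A quick sanity check for the smoother above (example values chosen here, not from the source): on a linear sequence with an odd window, every centered average of consecutive values equals the middle value, so the output reproduces the input.

a = np.arange(10, dtype=float)       # [0., 1., ..., 9.]
print(moving_average(a, 9))          # [0. 1. 2. 3. 4. 5. 6. 7. 8. 9.]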

def train_on_policy_agent(env, agent, num_episodes):  
    return_list = []  
    for i in range(10):  
        with tqdm(total=int(num_episodes/10), desc='Iteration %d' % i) as pbar:  
            for i_episode in range(int(num_episodes/10)):  
                episode_return = 0  
                transition_dict = {'states': [], 'actions': [], 'next_states': [], 'rewards': [], 'dones': []}  #storing trajectories  
                state = env.reset()  
                done = False  
                while not done:  
                    action = agent.take_action(state)   #decision-making  
                    next_state, reward, done, _ = env.step(action)  
                    transition_dict['states'].append(state)  
                    transition_dict['actions'].append(action)  
                    transition_dict['next_states'].append(next_state)  
                    transition_dict['rewards'].append(reward)  
                    transition_dict['dones'].append(done)  
                    state = next_state  
                    episode_return += reward  
                return_list.append(episode_return)  
                agent.update(transition_dict)   #parameter update  
                if (i_episode+1) % 10 == 0:  
                    pbar.set_postfix({'episode': '%d' % (num_episodes/10 * i + i_episode+1), 'return': '%.3f' % np.mean(return_list[-10:])})  
                pbar.update(1)  
    return return_list  
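
To make the loop above concrete, here is a self-contained usage sketch. RandomAgent is a dummy stand-in invented for illustration (any object exposing take_action(state) and update(transition_dict) works), and the call assumes the old Gym API used throughout this code, where env.reset() returns only the state and env.step() returns four values (gym < 0.26).

import gym

class RandomAgent:
    ''' Dummy stand-in; a real on-policy agent (REINFORCE, Actor-Critic, ...) learns in update(). '''
    def __init__(self, action_dim):
        self.action_dim = action_dim

    def take_action(self, state):
        return np.random.randint(self.action_dim)

    def update(self, transition_dict):
        pass  # a learning agent would take a gradient step on the collected episode here

env = gym.make('CartPole-v0')
agent = RandomAgent(action_dim=env.action_space.n)
return_list = train_on_policy_agent(env, agent, num_episodes=100)
smoothed = moving_average(return_list, 9)   # smooth the learning curve for plotting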

def train_off_policy_agent(env, agent, num_episodes, replay_buffer, minimal_size, batch_size):  
    return_list = []  
    for i in range(10):  
        with tqdm(total=int(num_episodes/10), desc='Iteration %d' % i) as pbar:  
            for i_episode in range(int(num_episodes/10)):  
                episode_return = 0  
                state = env.reset()  
                done = False  
                while not done:  
                    action = agent.take_action(state)  
                    next_state, reward, done, _ = env.step(action)  
                    replay_buffer.add(state, action, reward, next_state, done)  
                    state = next_state  
                    episode_return += reward  
                    if replay_buffer.size() > minimal_size:  # start mini-batch updates only once the buffer is warm enough
                        b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)  
                        transition_dict = {'states': b_s, 'actions': b_a, 'next_states': b_ns, 'rewards': b_r, 'dones': b_d}  
                        agent.update(transition_dict)  
                return_list.append(episode_return)  
                if (i_episode+1) % 10 == 0:  
                    pbar.set_postfix({'episode': '%d' % (num_episodes/10 * i + i_episode+1), 'return': '%.3f' % np.mean(return_list[-10:])})  
                pbar.update(1)  
    return return_list
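
The off-policy loop is driven the same way, with a ReplayBuffer in between. This sketch reuses the RandomAgent stand-in from the previous example (in practice a DQN-style agent would go here), and the capacity / minimal_size / batch_size values are only illustrative.

env = gym.make('CartPole-v0')
replay_buffer = ReplayBuffer(capacity=10000)
agent = RandomAgent(action_dim=env.action_space.n)
return_list = train_off_policy_agent(env, agent, num_episodes=100,
                                     replay_buffer=replay_buffer,
                                     minimal_size=500, batch_size=64)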

def compute_advantage(gamma, lmbda, td_delta):  # Generalized Advantage Estimation (GAE)
    td_delta = td_delta.detach().numpy()
    advantage_list = []
    advantage = 0.0
    # traverse the TD errors backwards, applying A_t = delta_t + gamma * lmbda * A_{t+1}
    for delta in td_delta[::-1]:
        advantage = gamma * lmbda * advantage + delta
        advantage_list.append(advantage)
    advantage_list.reverse()
    return torch.tensor(advantage_list, dtype=torch.float)
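
For reference, the loop above implements the standard GAE recurrence. Given the one-step TD errors δ_t = r_t + γ·V(s_{t+1}) − V(s_t), which the caller is expected to have computed and passed in as td_delta, the advantage is accumulated backwards as

    A_t = δ_t + γ·λ·A_{t+1},  with A_T = δ_T for the final step,

which is equivalent to the forward sum A_t = Σ_{l≥0} (γλ)^l · δ_{t+l}.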