function [t, x, u] = hw5_cvutID()
% Discrete system dynamics
f = @(x, u) x - 0.4*x^2 + u;

% Cost functions
% !Choose appropriate cost functions!
L = ;
phi = ;

% Quantize the state and input variables
xb = -1:.2:1;
ub = -.5:0.1:.5;

% Time vector
t = 0:5;

% Initialize the tables containing the optimal control policy and the
% optimal value function. Rows in the matrices uopt and J correspond to
% time and columns to quantized state values. For instance, uopt(i,j)
% corresponds to the optimal control action for the system at state xb(j)
% and time t(i).
uopt = zeros(numel(t)-1, numel(xb));
J = zeros(numel(t), numel(xb));

% Initialize the value function at the final time
for i = 1:numel(xb)
    J(numel(t), i) = phi(xb(i));
end

%% Run the DP algorithm to find an optimal control policy
for k = (numel(t)-1):-1:1
    for i = 1:numel(xb)
        for j = 1:numel(ub)
            % Your code goes here.
            ...
        end
        % ...here,
        J(k, i) = [];
        uopt(k, i) = [];
    end
end

%% Simulation
x = zeros(numel(t), 1);
x(1) = .9;
u = zeros(numel(t)-1, 1);
for i = 2:numel(t)
    % ... and also here
    u(i-1) = 0;
    x(i) = f(x(i-1), u(i-1));
end

figure(1)
clf
subplot(211)
stairs(t, x)
title('State variable')
ylabel('x[k]')
xlabel('Time index k')
grid on

subplot(212)
stairs(t(1:end-1), u)
title('Control variable')
ylabel('u[k]')
xlabel('Time index k')
grid on

end
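
% ------------------------------------------------------------------------
% Hedged reference sketch (not part of the graded template above). The
% local function below shows one possible way to fill in the blanks of the
% template: the quadratic costs L(x,u) = x^2 + u^2 and phi(x) = x^2, the
% nearest-neighbour mapping of successor states onto the grid xb, and the
% name hw5_dp_sketch are illustrative assumptions, not requirements of the
% assignment. To run it on its own, copy it into a file hw5_dp_sketch.m.
function [t, x, u] = hw5_dp_sketch()
% Discrete system dynamics (same as in the template)
f = @(x, u) x - 0.4*x^2 + u;

% Assumed cost functions (illustrative choices only)
L   = @(x, u) x^2 + u^2;   % stage cost
phi = @(x) x^2;            % terminal cost

% Quantized state and input grids, and the time vector
xb = -1:.2:1;
ub = -.5:0.1:.5;
t  = 0:5;

uopt = zeros(numel(t)-1, numel(xb));
J    = zeros(numel(t), numel(xb));

% Terminal condition of the value function
for i = 1:numel(xb)
    J(numel(t), i) = phi(xb(i));
end

% Backward DP recursion over the quantized grids
for k = (numel(t)-1):-1:1
    for i = 1:numel(xb)
        Jcand = inf(numel(ub), 1);
        for j = 1:numel(ub)
            xnext = f(xb(i), ub(j));
            % Map the successor state onto the grid (nearest neighbour);
            % successors outside the grid are effectively clipped.
            [~, inext] = min(abs(xb - xnext));
            Jcand(j) = L(xb(i), ub(j)) + J(k+1, inext);
        end
        % Keep the best candidate cost and the input that achieves it
        [J(k, i), jbest] = min(Jcand);
        uopt(k, i) = ub(jbest);
    end
end

% Simulate the closed loop using the tabulated policy
x = zeros(numel(t), 1);
x(1) = .9;
u = zeros(numel(t)-1, 1);
for i = 2:numel(t)
    [~, istate] = min(abs(xb - x(i-1)));   % nearest grid point to x(i-1)
    u(i-1) = uopt(i-1, istate);
    x(i) = f(x(i-1), u(i-1));
end
end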