尝试使用维基百科上的理论构建完整的加法器。
即。
Sum = A XOR B XOR Cin Cout = A和B或C和(A XOR B)
A和B是32位
这是我的代码
--Addition
for i in 0 to 31 loop
temp(i) <= a(i) XOR b(i) XOR cin;
if (((a(i) AND b(i)) OR (cin AND (a(i) XOR b(i)))) = '1') then
cin <= '1';
else
cin <= '0';
end if;
end loop;
cout <= cin;
当我模拟这个时,cin会被忽略。例如, 1011 + 0100将给出正确答案,但0001 + 0001 = 0。
完整代码:
LIBRARY ieee;
USE ieee.std_logic_1164.ALL;
USE ieee.std_logic_arith.ALL;
USE ieee.std_logic_unsigned.ALL;
ENTITY alu IS
PORT(
a: IN STD_LOGIC_VECTOR(31 DOWNTO 0);
b : IN STD_LOGIC_VECTOR(31 DOWNTO 0);
op : IN STD_LOGIC_VECTOR( 2 DOWNTO 0);
result : OUT STD_LOGIC_VECTOR(31 DOWNTO 0);
cout : OUT STD_LOGIC;
zero : OUT STD_LOGIC);
END alu;
--CIN is being inored for some reason. Must figure that out, and then code will start working.
ARCHITECTURE description OF alu IS
signal temp,cin :STD_LOGIC_VECTOR(31 DOWNTO 0);
BEGIN
process(a,b,op)
begin
if (op = "000") then
temp <= (a AND b);
elsif (op = "001") then
temp <= (a OR b);
elsif (op = "010") then
--Addition
for i in 0 to 31 loop
temp(i) <= a(i) XOR b(i) XOR cin(i);
cin(i+1) <= ( (a(i) AND b(i)) OR (cin AND (a(i) XOR b(i))) )
end loop;
cout <= cin(31);
elsif (op = "110") then
--Subtraction
elsif (op = "100") then
temp <= to_stdlogicvector(to_bitvector(a) sll 1);
elsif (op = "101") then
temp <= to_stdlogicvector(to_bitvector(a) srl 1);
end if;
end process;
result <= temp;
END description;
答案 0 :(得分:1)
cin对于i的每个值都必须是唯一的:
library ieee;
use ieee.std_logic_1164.all;
entity loopy is
end entity;
architecture foo of loopy is
signal a: std_logic_vector (31 downto 0) := x"00000001";
signal b: std_logic_vector (31 downto 0) := x"00000001";
signal sum: std_logic_vector (31 downto 0);
signal cout: std_logic := '0';
signal cin: std_logic_vector (32 downto 0) := (others => '0');
function slv_image(constant inp: std_logic_vector) return string is
variable image_str: string (1 to inp'length);
alias input_str: std_logic_vector (1 to inp'length) is inp;
begin
for i in input_str'range loop
image_str(i) := character'VALUE(std_ulogic'IMAGE(input_str(i)));
end loop;
return image_str;
end function;
begin
LOOPER:
process (a,b,cin)
begin
for i in 0 to 31 loop
sum(i) <= a(i) XOR b(i) XOR cin(i);
cin(i+1) <= (a(i) and b(i)) or ( cin(i) and ( a(i) xor b(i) ) );
end loop;
end process;
cout <= cin(32);
MONITOR:
process (sum)
begin
report "sum = " & slv_image(sum(31 downto 0));
end process;
end architecture;
这就是:
adderloop.vhdl:40:9:@ 0ms :(报告说明):sum = uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu adderloop.vhdl:40:9:@ 0ms :(报告单):sum = 00000000000000000000000000000000
adderloop.vhdl:40:9:@ 0ms :(报告说明):sum = 00000000000000000000000000000010
第一个值没有受到影响的原因是它不使用进位。
这显示了三个delta周期,最后一个报告是正确的值。