mirror of https://github.com/qemu/qemu.git, synced 2025-10-31 12:07:31 +00:00
	target/riscv: vector single-width scaling shift instructions
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20200701152549.1218-29-zhiwei_liu@c-sky.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
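For context: the vssrl/vssra helpers added below compute (a >> shift) + r, where shift is the low log2(SEW) bits of the second operand and r is a rounding increment selected by the fixed-point rounding mode in vxrm (obtained through get_round(), already defined in vector_helper.c). The following is a minimal standalone sketch of that rounding rule with a worked 8-bit case; round_inc and vssrl8_demo are illustrative names, not part of the patch.

/* Illustrative only: rounding increment for (v >> shift) under the four
 * RVV fixed-point rounding modes carried in vxrm. */
#include <assert.h>
#include <stdint.h>

enum { RNU = 0, RNE = 1, RDN = 2, ROD = 3 };

static uint8_t round_inc(int vxrm, uint64_t v, unsigned shift)
{
    if (shift == 0) {
        return 0;                                   /* nothing shifted out */
    }
    uint64_t lost = v & ((1ULL << shift) - 1);      /* bits shifted out */
    uint64_t half = 1ULL << (shift - 1);            /* 0.5 ulp of the result */
    uint64_t lsb  = (v >> shift) & 1;               /* LSB of truncated result */

    switch (vxrm) {
    case RNU: /* round-to-nearest-up: add 0.5 ulp before truncating */
        return (lost & half) != 0;
    case RNE: /* round-to-nearest-even: ties pick the even result */
        return (lost > half) || (lost == half && lsb);
    case ROD: /* round-to-odd ("jam"): set the LSB if anything was lost */
        return !lsb && lost != 0;
    default:  /* RDN: round-down, plain truncation */
        return 0;
    }
}

/* 8-bit unsigned scaling shift, mirroring what vssrl8 below computes. */
static uint8_t vssrl8_demo(int vxrm, uint8_t a, uint8_t b)
{
    unsigned shift = b & 0x7;
    return (a >> shift) + round_inc(vxrm, a, shift);
}

int main(void)
{
    /* 23 >> 2 drops binary 11: the exact value is 5.75 */
    assert(vssrl8_demo(RNU, 23, 2) == 6);
    assert(vssrl8_demo(RNE, 23, 2) == 6);
    assert(vssrl8_demo(RDN, 23, 2) == 5);
    assert(vssrl8_demo(ROD, 23, 2) == 5);   /* 5 is already odd, no jam */
    return 0;
}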
parent 0a1eaf0036
commit 04a614062d
@@ -773,3 +773,20 @@ DEF_HELPER_6(vwsmaccsu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vwsmaccus_vx_w, void, ptr, ptr, tl, ptr, env, i32)

DEF_HELPER_6(vssrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vssrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vssrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vssrl_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vssra_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vssra_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vssra_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vssra_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_6(vssrl_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vssrl_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vssrl_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vssrl_vx_d, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vssra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vssra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vssra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vssra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
@@ -433,6 +433,12 @@ vwsmacc_vx      111101 . ..... ..... 100 ..... 1010111 @r_vm
vwsmaccsu_vv    111110 . ..... ..... 000 ..... 1010111 @r_vm
vwsmaccsu_vx    111110 . ..... ..... 100 ..... 1010111 @r_vm
vwsmaccus_vx    111111 . ..... ..... 100 ..... 1010111 @r_vm
vssrl_vv        101010 . ..... ..... 000 ..... 1010111 @r_vm
vssrl_vx        101010 . ..... ..... 100 ..... 1010111 @r_vm
vssrl_vi        101010 . ..... ..... 011 ..... 1010111 @r_vm
vssra_vv        101011 . ..... ..... 000 ..... 1010111 @r_vm
vssra_vx        101011 . ..... ..... 100 ..... 1010111 @r_vm
vssra_vi        101011 . ..... ..... 011 ..... 1010111 @r_vm

vsetvli         0 ........... ..... 111 ..... 1010111  @r2_zimm
vsetvl          1000000 ..... ..... 111 ..... 1010111  @r
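For reference, the decode rows above split the 32-bit instruction word as funct6 | vm | vs2 | rs1/imm[4:0] | funct3 | vd | opcode, with funct3 011 selecting the immediate form. Below is a small standalone sketch (not part of the patch) that assembles one vssrl.vi encoding from that layout; encode_vssrl_vi is an illustrative name.

/* Standalone sketch: pack one vssrl.vi word following the decode pattern
 * "vssrl_vi  101010 . ..... ..... 011 ..... 1010111". */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t encode_vssrl_vi(unsigned vd, unsigned vs2, unsigned uimm,
                                unsigned vm)
{
    uint32_t insn = 0;
    insn |= 0x2Au << 26;          /* funct6 = 101010 (vssrl)     */
    insn |= (vm & 0x1u) << 25;    /* vm: 1 = unmasked            */
    insn |= (vs2 & 0x1Fu) << 20;  /* vs2                         */
    insn |= (uimm & 0x1Fu) << 15; /* 5-bit shift amount          */
    insn |= 0x3u << 12;           /* funct3 = 011 (immediate op) */
    insn |= (vd & 0x1Fu) << 7;    /* vd                          */
    insn |= 0x57u;                /* opcode = 1010111 (OP-V)     */
    return insn;
}

int main(void)
{
    /* vssrl.vi v2, v4, 3  (unmasked) */
    uint32_t insn = encode_vssrl_vi(2, 4, 3, 1);
    assert((insn & 0x7F) == 0x57);       /* OP-V major opcode */
    assert(((insn >> 12) & 0x7) == 0x3); /* immediate funct3  */
    assert((insn >> 26) == 0x2A);        /* vssrl funct6      */
    printf("vssrl.vi v2, v4, 3 -> 0x%08x\n", insn);
    return 0;
}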
@@ -1767,3 +1767,11 @@ GEN_OPIVX_WIDEN_TRANS(vwsmaccu_vx)
GEN_OPIVX_WIDEN_TRANS(vwsmacc_vx)
GEN_OPIVX_WIDEN_TRANS(vwsmaccsu_vx)
GEN_OPIVX_WIDEN_TRANS(vwsmaccus_vx)

/* Vector Single-Width Scaling Shift Instructions */
GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
GEN_OPIVV_TRANS(vssra_vv, opivv_check)
GEN_OPIVX_TRANS(vssrl_vx, opivx_check)
GEN_OPIVX_TRANS(vssra_vx, opivx_check)
GEN_OPIVI_TRANS(vssrl_vi, 1, vssrl_vx, opivx_check)
GEN_OPIVI_TRANS(vssra_vi, 0, vssra_vx, opivx_check)
@@ -2909,3 +2909,120 @@ RVVCALL(OPIVX3_RM, vwsmaccus_vx_w, WOP_SUS_W, H8, H4, vwsmaccus32)
GEN_VEXT_VX_RM(vwsmaccus_vx_b, 1, 2, clearh)
GEN_VEXT_VX_RM(vwsmaccus_vx_h, 2, 4, clearl)
GEN_VEXT_VX_RM(vwsmaccus_vx_w, 4, 8, clearq)

/* Vector Single-Width Scaling Shift Instructions */
static inline uint8_t
vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
{
    uint8_t round, shift = b & 0x7;
    uint8_t res;

    round = get_round(vxrm, a, shift);
    res   = (a >> shift)  + round;
    return res;
}
static inline uint16_t
vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
{
    uint8_t round, shift = b & 0xf;
    uint16_t res;

    round = get_round(vxrm, a, shift);
    res   = (a >> shift)  + round;
    return res;
}
static inline uint32_t
vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
{
    uint8_t round, shift = b & 0x1f;
    uint32_t res;

    round = get_round(vxrm, a, shift);
    res   = (a >> shift)  + round;
    return res;
}
static inline uint64_t
vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
{
    uint8_t round, shift = b & 0x3f;
    uint64_t res;

    round = get_round(vxrm, a, shift);
    res   = (a >> shift)  + round;
    return res;
}
RVVCALL(OPIVV2_RM, vssrl_vv_b, OP_UUU_B, H1, H1, H1, vssrl8)
RVVCALL(OPIVV2_RM, vssrl_vv_h, OP_UUU_H, H2, H2, H2, vssrl16)
RVVCALL(OPIVV2_RM, vssrl_vv_w, OP_UUU_W, H4, H4, H4, vssrl32)
RVVCALL(OPIVV2_RM, vssrl_vv_d, OP_UUU_D, H8, H8, H8, vssrl64)
GEN_VEXT_VV_RM(vssrl_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vssrl_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vssrl_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vssrl_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2_RM, vssrl_vx_b, OP_UUU_B, H1, H1, vssrl8)
RVVCALL(OPIVX2_RM, vssrl_vx_h, OP_UUU_H, H2, H2, vssrl16)
RVVCALL(OPIVX2_RM, vssrl_vx_w, OP_UUU_W, H4, H4, vssrl32)
RVVCALL(OPIVX2_RM, vssrl_vx_d, OP_UUU_D, H8, H8, vssrl64)
GEN_VEXT_VX_RM(vssrl_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vssrl_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vssrl_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vssrl_vx_d, 8, 8, clearq)

static inline int8_t
vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
{
    uint8_t round, shift = b & 0x7;
    int8_t res;

    round = get_round(vxrm, a, shift);
    res   = (a >> shift)  + round;
    return res;
}
static inline int16_t
vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
{
    uint8_t round, shift = b & 0xf;
    int16_t res;

    round = get_round(vxrm, a, shift);
    res   = (a >> shift)  + round;
    return res;
}
static inline int32_t
vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
    uint8_t round, shift = b & 0x1f;
    int32_t res;

    round = get_round(vxrm, a, shift);
    res   = (a >> shift)  + round;
    return res;
}
static inline int64_t
vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
{
    uint8_t round, shift = b & 0x3f;
    int64_t res;

    round = get_round(vxrm, a, shift);
    res   = (a >> shift)  + round;
    return res;
}
RVVCALL(OPIVV2_RM, vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8)
RVVCALL(OPIVV2_RM, vssra_vv_h, OP_SSS_H, H2, H2, H2, vssra16)
RVVCALL(OPIVV2_RM, vssra_vv_w, OP_SSS_W, H4, H4, H4, vssra32)
RVVCALL(OPIVV2_RM, vssra_vv_d, OP_SSS_D, H8, H8, H8, vssra64)
GEN_VEXT_VV_RM(vssra_vv_b, 1, 1, clearb)
GEN_VEXT_VV_RM(vssra_vv_h, 2, 2, clearh)
GEN_VEXT_VV_RM(vssra_vv_w, 4, 4, clearl)
GEN_VEXT_VV_RM(vssra_vv_d, 8, 8, clearq)

RVVCALL(OPIVX2_RM, vssra_vx_b, OP_SSS_B, H1, H1, vssra8)
RVVCALL(OPIVX2_RM, vssra_vx_h, OP_SSS_H, H2, H2, vssra16)
RVVCALL(OPIVX2_RM, vssra_vx_w, OP_SSS_W, H4, H4, vssra32)
RVVCALL(OPIVX2_RM, vssra_vx_d, OP_SSS_D, H8, H8, vssra64)
GEN_VEXT_VX_RM(vssra_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vssra_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vssra_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vssra_vx_d, 8, 8, clearq)