
Reduce typo count (#9106)

pull/9116/head
Bruce Mitchener, 3 months ago, committed by GitHub
parent commit e2664e55a9
1. cranelift/codegen/meta/src/isa/x86.rs (2 changed lines)
2. cranelift/codegen/src/isa/riscv64/inst/emit.rs (12 changed lines)
3. cranelift/codegen/src/isa/riscv64/inst_vector.isle (2 changed lines)
4. cranelift/codegen/src/isa/s390x/lower.isle (2 changed lines)
5. cranelift/codegen/src/isa/x64/lower.isle (2 changed lines)
6. crates/c-api/src/async.rs (2 changed lines)
7. crates/cache/src/config.rs (4 changed lines)
8. crates/wiggle/tests/lists.rs (12 changed lines)
9. examples/async.cpp (8 changed lines)
10. pulley/README.md (2 changed lines)
11. winch/codegen/src/abi/mod.rs (4 changed lines)

cranelift/codegen/meta/src/isa/x86.rs (2 changed lines)

@@ -284,7 +284,7 @@ pub(crate) fn define() -> TargetIsa {
);
let sapphire_rapids = settings.add_preset(
"sapphirerapids",
- "Saphire Rapids microarchitecture.",
+ "Sapphire Rapids microarchitecture.",
preset!(icelake_server),
);
settings.add_preset(
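For context, presets like this are consumed through Cranelift's `Configurable` interface when constructing a target ISA. A minimal sketch, assuming presets can be enabled by name like boolean flags (the `anyhow` crate is used here only for brevity):

```rust
use cranelift_codegen::isa;
use cranelift_codegen::settings::{self, Configurable};

fn main() -> anyhow::Result<()> {
    // Look up the x86_64 backend, then enable the preset defined above.
    // Assumption: presets are enabled by name, like boolean flags.
    let mut isa_builder = isa::lookup_by_name("x86_64")?;
    isa_builder.enable("sapphirerapids")?;
    let flags = settings::Flags::new(settings::builder());
    let _isa = isa_builder.finish(flags)?;
    Ok(())
}
```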

cranelift/codegen/src/isa/riscv64/inst/emit.rs (12 changed lines)

@@ -149,7 +149,7 @@ impl Inst {
}
}
- /// Returns Some(VState) if this insturction is expecting a specific vector state
+ /// Returns Some(VState) if this instruction is expecting a specific vector state
/// before emission.
fn expected_vstate(&self) -> Option<&VState> {
match self {
@@ -363,13 +363,13 @@ impl Inst {
_ => return None,
};
// The canonical expansion for these instruction has `rd == rs1`, but
- // these are all comutative operations, so we can swap the operands.
+ // these are all commutative operations, so we can swap the operands.
let src = if rd.to_reg() == rs1 { rs2 } else { rs1 };
sink.put2(encode_ca_type(op, rd, src));
}
- // The sub instructions are non comutative, so we can't swap the operands.
+ // The sub instructions are non commutative, so we can't swap the operands.
Inst::AluRRR {
alu_op: alu_op @ (AluOPRRR::Sub | AluOPRRR::Subw),
rd,
@@ -386,7 +386,7 @@ impl Inst {
// c.j
//
- // We don't have a separate JAL as that is only availabile in RV32C
+ // We don't have a separate JAL as that is only available in RV32C
Inst::Jal { label } => {
sink.use_label_at_offset(*start_off, label, LabelUse::RVCJump);
sink.add_uncond_branch(*start_off, *start_off + 2, label);
@@ -870,7 +870,7 @@ impl Inst {
&Inst::RawData { ref data } => {
// Right now we only put a u32 or u64 in this instruction.
// It is not very long, no need to check if need `emit_island`.
- // If data is very long , this is a bug because RawData is typecial
+ // If data is very long , this is a bug because RawData is typically
// use to load some data and rely on some position in the code stream.
// and we may exceed `Inst::worst_case_size`.
// for more information see https://github.com/bytecodealliance/wasmtime/pull/5612.
@@ -1570,7 +1570,7 @@ impl Inst {
(xregs, yregs, condition)
};
- // Unconditonally move one of the values to the destination register.
+ // Unconditionally move one of the values to the destination register.
//
// These moves may not end up being emitted if the source and
// destination registers are the same. That logic is built into
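The commutative/non-commutative distinction above is what makes the compressed encoding legal: CA-type compressed instructions overwrite one of their sources, so `rd` must alias an operand. A standalone sketch of the operand choice (hypothetical `Reg` type and helpers, not the actual emitter):

```rust
// Minimal sketch, not the real emitter. Assumes a `Reg` newtype with equality.
#[derive(Clone, Copy, PartialEq, Eq)]
struct Reg(u8);

/// A CA-type compressed instruction computes `rd = rd op src`, so the
/// compressed form is only usable when `rd` aliases one of the operands.
/// For commutative ops (add, and, or, xor) either operand may serve as `src`.
fn ca_src_for_commutative(rd: Reg, rs1: Reg, rs2: Reg) -> Option<Reg> {
    if rd == rs1 {
        Some(rs2)
    } else if rd == rs2 {
        // Legal only because the operation is commutative:
        // `rd = rs2 op rs1` computes the same value as `rd = rs1 op rs2`.
        Some(rs1)
    } else {
        None // cannot compress; fall back to the full 4-byte encoding
    }
}

/// `sub` is not commutative, so only the canonical `rd == rs1` shape compresses.
fn ca_src_for_sub(rd: Reg, rs1: Reg, rs2: Reg) -> Option<Reg> {
    (rd == rs1).then_some(rs2)
}
```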

cranelift/codegen/src/isa/riscv64/inst_vector.isle (2 changed lines)

@@ -1745,7 +1745,7 @@
(rv_vmfeq_vf y x (unmasked) ty))
;; FloatCC.NotEqual
- ;; Note: This is UnorderedNotEqual. It is the only unoredered comparison that is not named as such.
+ ;; Note: This is UnorderedNotEqual. It is the only unordered comparison that is not named as such.
(rule 0 (gen_fcmp_mask (ty_vec_fits_in_register ty) (FloatCC.NotEqual) x y)
(rv_vmfne_vv x y (unmasked) ty))
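For context on the ordered-vs-unordered distinction this comment makes: IEEE 754 comparisons involving NaN are "unordered", and an unordered not-equal reports true for them, while ordered comparisons report false. A quick scalar illustration in Rust, where `!=` on floats is exactly this unordered not-equal:

```rust
fn main() {
    // Any comparison with NaN is "unordered"; unordered-not-equal is true.
    assert!(f32::NAN != 1.0);
    assert!(f32::NAN != f32::NAN);
    // The ordered comparisons all report false on NaN inputs.
    assert!(!(f32::NAN < 1.0) && !(f32::NAN > 1.0) && !(f32::NAN == 1.0));
}
```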

cranelift/codegen/src/isa/s390x/lower.isle (2 changed lines)

@@ -611,7 +611,7 @@
;; would indeed happen, we have no way of signalling two different trap
;; conditions from the same instruction. By explicitly checking for the
;; integer-overflow case ahead of time, any hardware trap in the divide
- ;; instruction is guaranteed to indicate divison-by-zero.
+ ;; instruction is guaranteed to indicate division-by-zero.
;;
;; In addition, for types smaller than 64 bits we would have to perform
;; the check explicitly anyway, since the instruction provides a 64-bit
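The trap-disambiguation argument in this comment can be sketched in scalar terms: filter out the one overflowing input pattern in software first, so a hardware trap from the divide can only mean a zero divisor. A rough model (the `trap_*` helpers are hypothetical stand-ins for Cranelift trap codes):

```rust
fn checked_sdiv64(n: i64, d: i64) -> i64 {
    // Software check emitted ahead of the divide: i64::MIN / -1 is the one
    // quotient that overflows the result type.
    if n == i64::MIN && d == -1 {
        trap_integer_overflow();
    }
    // In the real lowering the hardware divide itself traps when `d == 0`;
    // it is modeled here as an explicit check. Thanks to the check above,
    // that trap can be reported unambiguously as division-by-zero.
    if d == 0 {
        trap_division_by_zero();
    }
    n / d
}

fn trap_integer_overflow() -> ! {
    panic!("trap: integer overflow")
}

fn trap_division_by_zero() -> ! {
    panic!("trap: integer division by zero")
}
```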

cranelift/codegen/src/isa/x64/lower.isle (2 changed lines)

@@ -3727,7 +3727,7 @@
(tmp1 Xmm (x64_subps tmp1 tmp2))
;; Create mask for all positive lanes to saturate (i.e. greater than
- ;; or equal to the maxmimum allowable unsigned int).
+ ;; or equal to the maximum allowable unsigned int).
(tmp2 Xmm (x64_cmpps tmp2 tmp1 (FcmpImm.LessThanOrEqual)))
;; Convert those set of lanes that have the max_signed_int factored out.
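This hunk sits inside the lowering of a saturating float-to-unsigned conversion; per lane, the behavior being implemented is roughly the following scalar function (a sketch of the semantics, not the SIMD lowering itself):

```rust
/// Scalar model of one lane of the saturating conversion: NaN and negative
/// inputs clamp to zero, and inputs at or above the maximum unsigned value
/// saturate to u32::MAX.
fn fcvt_to_uint_sat(x: f32) -> u32 {
    if x.is_nan() || x <= 0.0 {
        0
    } else if x >= u32::MAX as f32 {
        u32::MAX
    } else {
        x as u32
    }
}
```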

crates/c-api/src/async.rs (2 changed lines)

@@ -126,7 +126,7 @@ async fn invoke_c_async_callback<'a>(
let (params, out_results) = hostcall_val_storage.split_at_mut(params.len());
// Invoke the C function pointer.
- // The result will be a continutation which we will wrap in a Future.
+ // The result will be a continuation which we will wrap in a Future.
let mut caller = wasmtime_caller_t { caller };
let mut trap = None;
extern "C" fn panic_callback(_: *mut c_void) -> bool {

crates/cache/src/config.rs (4 changed lines)

@@ -157,7 +157,7 @@ const ZSTD_COMPRESSION_LEVELS: std::ops::RangeInclusive<i32> = 0..=21;
// so we have at most one module per wasmtime instance
// if changed, update cli-cache.md
const DEFAULT_WORKER_EVENT_QUEUE_SIZE: u64 = 0x10;
- const WORKER_EVENT_QUEUE_SIZE_WARNING_TRESHOLD: u64 = 3;
+ const WORKER_EVENT_QUEUE_SIZE_WARNING_THRESHOLD: u64 = 3;
// should be quick and provide good enough compression
// if changed, update cli-cache.md
const DEFAULT_BASELINE_COMPRESSION_LEVEL: i32 = zstd::DEFAULT_COMPRESSION_LEVEL;
@@ -460,7 +460,7 @@ impl CacheConfig {
self.worker_event_queue_size = Some(DEFAULT_WORKER_EVENT_QUEUE_SIZE);
}
- if self.worker_event_queue_size.unwrap() < WORKER_EVENT_QUEUE_SIZE_WARNING_TRESHOLD {
+ if self.worker_event_queue_size.unwrap() < WORKER_EVENT_QUEUE_SIZE_WARNING_THRESHOLD {
warn!("Detected small worker event queue size. Some messages might be lost.");
}
}

crates/wiggle/tests/lists.rs (12 changed lines)

@@ -44,14 +44,14 @@ impl<'a> lists::Lists for WasiCtx<'a> {
}
#[derive(Debug)]
- struct ReduceExcusesExcercise {
+ struct ReduceExcusesExercise {
excuse_values: Vec<types::Excuse>,
excuse_ptr_locs: Vec<MemArea>,
array_ptr_loc: MemArea,
return_ptr_loc: MemArea,
}
- impl ReduceExcusesExcercise {
+ impl ReduceExcusesExercise {
pub fn strat() -> BoxedStrategy<Self> {
(1..256u32)
.prop_flat_map(|len| {
@@ -126,7 +126,7 @@ impl ReduceExcusesExcercise {
}
proptest! {
#[test]
- fn reduce_excuses(e in ReduceExcusesExcercise::strat()) {
+ fn reduce_excuses(e in ReduceExcusesExercise::strat()) {
e.test()
}
}
@@ -141,12 +141,12 @@ fn excuse_strat() -> impl Strategy<Value = types::Excuse> {
}
#[derive(Debug)]
- struct PopulateExcusesExcercise {
+ struct PopulateExcusesExercise {
array_ptr_loc: MemArea,
elements: Vec<MemArea>,
}
- impl PopulateExcusesExcercise {
+ impl PopulateExcusesExercise {
pub fn strat() -> BoxedStrategy<Self> {
(1..256u32)
.prop_flat_map(|len| {
@@ -214,7 +214,7 @@ impl PopulateExcusesExcercise {
}
proptest! {
#[test]
- fn populate_excuses(e in PopulateExcusesExcercise::strat()) {
+ fn populate_excuses(e in PopulateExcusesExercise::strat()) {
e.test()
}
}

examples/async.cpp (8 changed lines)

@@ -204,13 +204,13 @@ int main() {
host_func_name.data(), host_func_name.size(), functype.get(),
[](void *, wasmtime_caller_t *, const wasmtime_val_t *args, size_t,
wasmtime_val_t *, size_t, wasm_trap_t **trap_ret,
- wasmtime_async_continuation_t *continutation_ret) {
+ wasmtime_async_continuation_t *continuation_ret) {
std::cout << "invoking async host function" << std::endl;
printer_state.set_value_to_print(args[0].of.i32);
- continutation_ret->callback = &poll_print_finished_state;
- continutation_ret->env = new async_call_env{trap_ret};
- continutation_ret->finalizer = [](void *env) {
+ continuation_ret->callback = &poll_print_finished_state;
+ continuation_ret->env = new async_call_env{trap_ret};
+ continuation_ret->finalizer = [](void *env) {
std::cout << "deleting async_call_env" << std::endl;
delete static_cast<async_call_env *>(env);
};

pulley/README.md (2 changed lines)

@@ -97,7 +97,7 @@ interpreter:
perform the work of multiple operations in a single instruction. The more work
we do in each turn of the interpreter loop the less we are impacted by its
overhead. Additionally, Cranelift, as the primary Pulley bytecode producer,
- can leverage ISLE lowering patterns to easily identify opportunites for
+ can leverage ISLE lowering patterns to easily identify opportunities for
emitting super-instructions.
* We do not, in general, define sub-opcodes. There should be only one branch, on
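To illustrate the super-instruction point this README bullet makes: fusing two ops into one opcode halves the dispatch overhead for that pattern. A toy interpreter sketch (hypothetical opcodes, not Pulley's actual instruction set):

```rust
// Toy opcode set: `LoadAdd` is a super-instruction doing the work of a
// `Load` followed by an `Add` in a single dispatch. These opcodes are
// hypothetical, not Pulley's real encoding.
enum Op {
    Load { dst: usize, addr: usize },
    Add { dst: usize, a: usize, b: usize },
    LoadAdd { dst: usize, addr: usize, b: usize },
}

fn step(op: &Op, regs: &mut [u64], mem: &[u64]) {
    match op {
        Op::Load { dst, addr } => regs[*dst] = mem[*addr],
        Op::Add { dst, a, b } => regs[*dst] = regs[*a].wrapping_add(regs[*b]),
        // One turn of the loop, two operations' worth of work.
        Op::LoadAdd { dst, addr, b } => {
            regs[*dst] = mem[*addr].wrapping_add(regs[*b])
        }
    }
}
```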

winch/codegen/src/abi/mod.rs (4 changed lines)

@@ -487,7 +487,7 @@ impl ABIParams {
return Self::with_bytes(initial_bytes);
}
- let regiser_capacity = params.len().min(6);
+ let register_capacity = params.len().min(6);
let (mut operands, mut regs, mut stack_bytes): (
SmallVec<[ABIOperand; 6]>,
HashSet<Reg>,
@@ -495,7 +495,7 @@
) = params.iter().fold(
(
SmallVec::new(),
- HashSet::with_capacity(regiser_capacity),
+ HashSet::with_capacity(register_capacity),
initial_bytes,
),
|(mut operands, mut regs, stack_bytes), arg| {
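The fold in this hunk walks the parameter list once, handing each parameter a register while any remain and a stack slot otherwise. A simplified standalone model of that accumulation (hypothetical types, not Winch's actual ABI logic):

```rust
// Hypothetical stand-ins for Winch's ABIOperand/Reg types.
#[derive(Debug)]
enum Operand {
    Reg(u8),
    Stack { offset: u32 },
}

/// Assign each parameter (given by size in bytes) to the next free register,
/// falling back to stack slots at increasing offsets once registers run out.
fn assign_params(param_sizes: &[u32], mut free_regs: Vec<u8>) -> Vec<Operand> {
    let mut stack_bytes = 0u32;
    param_sizes
        .iter()
        .map(|size| match free_regs.pop() {
            Some(reg) => Operand::Reg(reg),
            None => {
                let op = Operand::Stack { offset: stack_bytes };
                stack_bytes += size;
                op
            }
        })
        .collect()
}
```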
