drm/i915/gt: fix typos in i915/gt files.
Fix all typos in files under drm/i915/gt reported by codespell tool.

v2: Fix grammar mistake in comment. <Andi>
v3: Correct typo in commit log. <Krzysztof Niemiec>

Signed-off-by: Nitin Gote <nitin.r.gote@intel.com>
Reviewed-by: Krzysztof Niemiec <krzysztof.niemiec@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250120081517.3237326-2-nitin.r.gote@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
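For reference, a sweep like this can be reproduced with codespell itself. A minimal sketch, assuming codespell is installed and the commands are run from the kernel source root (the paths below only illustrate this commit's scope):

  $ codespell drivers/gpu/drm/i915/gt/       # report suspected spelling errors (read-only)
  $ codespell -w drivers/gpu/drm/i915/gt/    # optionally rewrite the files in place
  $ git diff drivers/gpu/drm/i915/gt/        # review the suggested fixes before committing
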
@@ -179,7 +179,7 @@ u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 return __gen2_emit_breadcrumb(rq, cs, 8, 8);
 }

-/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
 #define I830_BATCH_LIMIT SZ_256K
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES * SZ_4K, I830_BATCH_LIMIT)
@@ -308,7 +308,7 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
 /*
 * There is a discrepancy here between the size reported
 * by the register and the size of the context layout
-* in the docs. Both are described as authorative!
+* in the docs. Both are described as authoritative!
 *
 * The discrepancy is on the order of a few cachelines,
 * but the total is under one page (4k), which is our
@@ -845,7 +845,7 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
 * Note that we have a catch-22 situation where we need to be able to access
 * the blitter forcewake domain to read the engine fuses, but at the same time
 * we need to know which engines are available on the system to know which
-* forcewake domains are present. We solve this by intializing the forcewake
+* forcewake domains are present. We solve this by initializing the forcewake
 * domains based on the full engine mask in the platform capabilities before
 * calling this function and pruning the domains for fused-off engines
 * afterwards.
@@ -1411,7 +1411,7 @@ create_ggtt_bind_context(struct intel_engine_cs *engine)

 /*
 * MI_UPDATE_GTT can insert up to 511 PTE entries and there could be multiple
-* bind requets at a time so get a bigger ring.
+* bind requests at a time so get a bigger ring.
 */
 return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
 I915_GEM_HWS_GGTT_BIND_ADDR,
@@ -1533,7 +1533,7 @@ int intel_engines_init(struct intel_gt *gt)

 /**
 * intel_engine_cleanup_common - cleans up the engine state created by
-* the common initiailizers.
+* the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
@@ -237,7 +237,7 @@ struct intel_engine_execlists {
 */
 struct i915_request * const *active;
 /**
-* @inflight: the set of contexts submitted and acknowleged by HW
+* @inflight: the set of contexts submitted and acknowledged by HW
 *
 * The set of inflight contexts is managed by reading CS events
 * from the HW. On a context-switch event (not preemption), we
@@ -260,7 +260,7 @@ struct intel_engine_execlists {
 unsigned int port_mask;

 /**
-* @virtual: Queue of requets on a virtual engine, sorted by priority.
+* @virtual: Queue of requests on a virtual engine, sorted by priority.
 * Each RB entry is a struct i915_priolist containing a list of requests
 * of the same priority.
 */
@@ -480,7 +480,7 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
 gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(1), ~gt_interrupts[1], gt_interrupts[1]);
 /*
 * RPS interrupts will get enabled/disabled on demand when RPS itself
-* is enabled/disabled. Same wil be the case for GuC interrupts.
+* is enabled/disabled. Same will be the case for GuC interrupts.
 */
 gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(2), gt->pm_imr, gt->pm_ier);
 gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(3), ~gt_interrupts[3], gt_interrupts[3]);
@@ -239,7 +239,7 @@ static u32 rw_with_mcr_steering_fw(struct intel_gt *gt,
 * to remain in multicast mode for reads. There's no real
 * downside to this, so we'll just go ahead and do so on all
 * platforms; we'll only clear the multicast bit from the mask
-* when exlicitly doing a write operation.
+* when explicitly doing a write operation.
 */
 if (rw_flag == FW_REG_WRITE)
 mcr_mask |= GEN11_MCR_MULTICAST;
@@ -304,7 +304,7 @@ struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
 struct intel_context *ce;

 /*
-* We randomly distribute contexts across the engines upon constrction,
+* We randomly distribute contexts across the engines upon construction,
 * as they all share the same pinned vm, and so in order to allow
 * multiple blits to run in parallel, we must construct each blit
 * to use a different range of the vm for its GTT. This has to be
@@ -646,7 +646,7 @@ calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
 * When CHUNK_SZ is passed all the pages upto CHUNK_SZ
 * will be taken for the blt. in Flat-ccs supported
 * platform Smem obj will have more pages than required
-* for main meory hence limit it to the required size
+* for main memory hence limit it to the required size
 * for main memory
 */
 return min_t(u64, bytes_to_cpy, CHUNK_SZ);
@@ -675,7 +675,7 @@ void intel_mocs_init(struct intel_gt *gt)
 __init_mocs_table(gt->uncore, &table, global_mocs_offset());

 /*
-* Initialize the L3CC table as part of mocs initalization to make
+* Initialize the L3CC table as part of mocs initialization to make
 * sure the LNCFCMOCSx registers are programmed for the subsequent
 * memory transactions including guc transactions
 */
@@ -1098,7 +1098,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
 dma_fence_put(fence);

-/* Restart iteration after droping lock */
+/* Restart iteration after dropping lock */
 spin_lock(&timelines->lock);
 tl = list_entry(&timelines->active_list, typeof(*tl), link);
 }
@@ -236,7 +236,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
 /*
 * In case of resets fails because engine resumes from
 * incorrect RING_HEAD and then GPU may be then fed
-* to invalid instrcutions, which may lead to unrecoverable
+* to invalid instructions, which may lead to unrecoverable
 * hang. So at first write doesn't succeed then try again.
 */
 ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
@@ -40,7 +40,7 @@ enum {
 /**
 * struct intel_rps_freq_caps - rps freq capabilities
 * @rp0_freq: non-overclocked max frequency
-* @rp1_freq: "less than" RP0 power/freqency
+* @rp1_freq: "less than" RP0 power/frequency
 * @min_freq: aka RPn, minimum frequency
 *
 * Freq caps exposed by HW, values are in "hw units" and intel_gpu_freq()
@@ -90,7 +90,7 @@ struct intel_rps {
 u8 boost_freq; /* Frequency to request when wait boosting */
 u8 idle_freq; /* Frequency to request when we are idle */
 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
-u8 rp1_freq; /* "less than" RP0 power/freqency */
+u8 rp1_freq; /* "less than" RP0 power/frequency */
 u8 rp0_freq; /* Non-overclocked max frequency. */
 u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */

@@ -27,7 +27,7 @@ int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,

 /*
 * Standalone media shares the general MMIO space with the primary
-* GT. We'll re-use the primary GT's mapping.
+* GT. We'll reuse the primary GT's mapping.
 */
 uncore->regs = intel_uncore_regs(&i915->uncore);
 if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
@@ -687,7 +687,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
 * According to documentation software must consider the configuration
 * as 2x4x8 and hardware will translate this to 1x8x8.
 *
-* Furthemore, even though SScount is three bits, maximum documented
+* Furthermore, even though SScount is three bits, maximum documented
 * value for it is four. From this some rules/restrictions follow:
 *
 * 1.
@@ -1318,7 +1318,7 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
 * We'll do our default/implicit steering based on GSLICE (in the
 * sliceid field) and DSS (in the subsliceid field). If we can
 * find overlap between the valid MSLICE and/or LNCF values with
-* a suitable GSLICE, then we can just re-use the default value and
+* a suitable GSLICE, then we can just reuse the default value and
 * skip and explicit steering at runtime.
 *
 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
@@ -53,7 +53,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 if (i915_request_completed(rq)) /* that was quick! */
 return 0;

-/* Wait until the HW has acknowleged the submission (or err) */
+/* Wait until the HW has acknowledged the submission (or err) */
 intel_engine_flush_submission(engine);
 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
 return 0;
@@ -548,7 +548,7 @@ static int igt_reset_fail_engine(void *arg)
 struct intel_engine_cs *engine;
 enum intel_engine_id id;

-/* Check that we can recover from engine-reset failues */
+/* Check that we can recover from engine-reset failures */

 if (!intel_has_reset_engine(gt))
 return 0;
@@ -63,7 +63,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 if (i915_request_completed(rq)) /* that was quick! */
 return 0;

-/* Wait until the HW has acknowleged the submission (or err) */
+/* Wait until the HW has acknowledged the submission (or err) */
 intel_engine_flush_submission(engine);
 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
 return 0;
@@ -211,7 +211,7 @@ int live_rc6_ctx_wa(void *arg)
 i915_reset_engine_count(error, engine);
 const u32 *res;

-/* Use a sacrifical context */
+/* Use a sacrificial context */
 ce = intel_context_create(engine);
 if (IS_ERR(ce)) {
 err = PTR_ERR(ce);
@@ -22,7 +22,7 @@
 #include "selftests/igt_spinner.h"
 #include "selftests/librapl.h"

-/* Try to isolate the impact of cstates from determing frequency response */
+/* Try to isolate the impact of cstates from determining frequency response */
 #define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */

 static void dummy_rps_work(struct work_struct *wrk)
@@ -10,7 +10,7 @@ i915/gt/shaders/clear_kernel directory.

 The generated .c files should never be modified directly. Instead, any modification
 needs to be done on the on their respective ASM files and build instructions below
-needes to be followed.
+needs to be followed.

 Building
 ========
@@ -24,7 +24,7 @@ on building.
 Please make sure your Mesa tool is compiled with "-Dtools=intel" and
 "-Ddri-drivers=i965", and run this script from IGT source root directory"

-The instructions bellow assume:
+The instructions below assume:
 * IGT gpu tools source code is located on your home directory (~) as ~/igt
 * Mesa source code is located on your home directory (~) as ~/mesa
 and built under the ~/mesa/build directory
@@ -43,4 +43,4 @@ igt $ ./scripts/generate_clear_kernel.sh -g ivb \
 ~/igt/lib/i915/shaders/clear_kernel/hsw.asm
 ~ $ cd ~/igt
 igt $ ./scripts/generate_clear_kernel.sh -g hsw \
--m ~/mesa/build/src/intel/tools/i965_asm
+-m ~/mesa/build/src/intel/tools/i965_asm
@@ -24,7 +24,7 @@ mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
 * DW 1.4 - Rsvd (intended for context ID)
 * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
 * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
-* DW 1.7 - Rsvd MBZ (inteded for Total Thread Count)
+* DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
 *
 * Binding Table
 *
@@ -24,7 +24,7 @@ mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
 * DW 1.4 - Rsvd (intended for context ID)
 * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
 * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
-* DW 1.7 - Rsvd MBZ (inteded for Total Thread Count)
+* DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
 *
 * Binding Table
 *
@@ -81,7 +81,7 @@ struct guc_debug_capture_list {
 *
 * intel_guc_capture module uses these structures to maintain static
 * tables (per unique platform) that consists of lists of registers
-* (offsets, names, flags,...) that are used at the ADS regisration
+* (offsets, names, flags,...) that are used at the ADS registration
 * time as well as during runtime processing and reporting of error-
 * capture states generated by GuC just prior to engine reset events.
 */
@@ -200,7 +200,7 @@ struct intel_guc_state_capture {
 * dynamically allocate new nodes when receiving the G2H notification
 * because the event handlers for all G2H event-processing is called
 * by the ct processing worker queue and when that queue is being
-* processed, there is no absoluate guarantee that we are not in the
+* processed, there is no absolute guarantee that we are not in the
 * midst of a GT reset operation (which doesn't allow allocations).
 */
 struct list_head cachelist;
@@ -690,7 +690,7 @@ int intel_guc_suspend(struct intel_guc *guc)
 * H2G MMIO command completes.
 *
 * Don't abort on a failure code from the GuC. Keep going and do the
-* clean up in santize() and re-initialisation on resume and hopefully
+* clean up in sanitize() and re-initialisation on resume and hopefully
 * the error here won't be problematic.
 */
 ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
@@ -295,7 +295,7 @@ struct intel_guc {
 */
 struct work_struct dead_guc_worker;
 /**
-* @last_dead_guc_jiffies: timestamp of previous 'dead guc' occurrance
+* @last_dead_guc_jiffies: timestamp of previous 'dead guc' occurrence
 * used to prevent a fundamentally broken system from continuously
 * reloading the GuC.
 */
@@ -408,7 +408,7 @@ enum guc_capture_type {
 GUC_CAPTURE_LIST_TYPE_MAX,
 };

-/* Class indecies for capture_class and capture_instance arrays */
+/* Class indices for capture_class and capture_instance arrays */
 enum {
 GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
 GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
@@ -1223,7 +1223,7 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
 * determine validity of these values. Instead we read the values multiple times
 * until they are consistent. In test runs, 3 attempts results in consistent
 * values. The upper bound is set to 6 attempts and may need to be tuned as per
-* any new occurences.
+* any new occurrences.
 */
 static void __get_engine_usage_record(struct intel_engine_cs *engine,
 u32 *last_in, u32 *id, u32 *total)
@@ -2995,7 +2995,7 @@ static int __guc_context_pin(struct intel_context *ce,

 /*
 * GuC context gets pinned in guc_request_alloc. See that function for
-* explaination of why.
+* explanation of why.
 */

 return lrc_pin(ce, engine, vaddr);
@@ -512,7 +512,7 @@ static int __uc_init_hw(struct intel_uc *uc)
 ERR_PTR(ret), attempts);
 }

-/* Did we succeded or run out of retries? */
+/* Did we succeed or run out of retries? */
 if (ret)
 goto err_log_capture;

@@ -64,7 +64,7 @@ static int intel_hang_guc(void *arg)
 old_beat = engine->props.heartbeat_interval_ms;
 ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
 if (ret) {
-gt_err(gt, "Failed to boost heatbeat interval: %pe\n", ERR_PTR(ret));
+gt_err(gt, "Failed to boost heartbeat interval: %pe\n", ERR_PTR(ret));
 goto err;
 }
