+ * the doorbell, workqueue and process descriptor; additionally, it stores
+ * information about all possible HW contexts that use it (64 x number of
+ * engine classes of guc_execlist_context structs).
+ *
+ * The idea is that every direct-submission GuC client gets one SW Context ID
+ * and every HW context created by that client gets one SW Counter. The "SW
+ * Context ID" and "SW Counter" to use now get passed on every work queue item.
+ *
+ * But we don't have direct submission yet: does that mean we are limited to 64
+ * contexts in total (one client)? Not really: we can use extra GuC context
+ * descriptors to store more HW contexts. They are special in that they don't
+ * have their own work queue, doorbell or process descriptor. Instead, these
+ * "principal" GuC context descriptors use the one that belongs to the client
+ * as a "proxy" for submission (a generalization of the old proxy submission).
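+ *
+ * Roughly, and using only the fields touched by this code (illustrative
+ * sketch, not the full descriptor layout):
+ *
+ *   proxy descriptor (one per client):
+ *     GUC_STAGE_DESC_ATTR_PROXY, doorbell, process descriptor, work queue
+ *   principal descriptor (one per gem context):
+ *     GUC_STAGE_DESC_ATTR_PRINCIPAL, proxy_id = owning client's stage_id,
+ *     and the lrc[engine class][SW Counter] array holding HW context info
+ *
+ * Work is still submitted through the proxy client's work queue and
+ * doorbell; a principal descriptor only tells the GuC where the HW
+ * contexts (lrcs) of a given gem context live.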
*
* The Scratch registers:
* There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
@@ -164,11 +169,28 @@ static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
+static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc,
+ u32 index)
+{
+ struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;
+
+ GEM_BUG_ON(!USES_GUC_SUBMISSION(guc_to_i915(guc)));
+ GEM_BUG_ON(index >= GUC_MAX_STAGE_DESCRIPTORS);
+
+ return &base[index];
+}
+
+static struct guc_stage_desc *__get_proxy_stage_desc(struct intel_guc_client *client)
{
- struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
+ GEM_BUG_ON(!I915_SELFTEST_ONLY(client->guc->starting_proxy_id) &&
+ client->stage_id < GUC_MAX_PPAL_STAGE_DESCRIPTORS);
+ return __get_stage_desc(client->guc, client->stage_id);
+}
- return &base[client->stage_id];
+static struct guc_stage_desc *__get_ppal_stage_desc(struct intel_guc *guc,
+ u32 index)
+{
+ GEM_BUG_ON(index >= GUC_MAX_PPAL_STAGE_DESCRIPTORS);
+ return __get_stage_desc(guc, index);
}
/*
@@ -183,7 +205,7 @@ static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
struct guc_stage_desc *desc;
/* Update the GuC's idea of the doorbell ID */
- desc = __get_stage_desc(client);
+ desc = __get_proxy_stage_desc(client);
desc->db_id = new_id;
}
@@ -329,14 +351,12 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc)
guc->stage_desc_pool = vma;
guc->stage_desc_pool_vaddr = vaddr;
- ida_init(&guc->stage_ids);
return 0;
}
static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
- ida_destroy(&guc->stage_ids);
i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}
@@ -347,78 +367,26 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
* data structures relating to this client (doorbell, process descriptor,
* write queue, etc).
*/
-static void guc_stage_desc_init(struct intel_guc_client *client)
+static void guc_proxy_stage_desc_init(struct intel_guc_client *client)
{
- struct intel_guc *guc = client->guc;
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx = client->owner;
struct guc_stage_desc *desc;
- unsigned int tmp;
u32 gfx_addr;
- desc = __get_stage_desc(client);
+ desc = __get_proxy_stage_desc(client);
memset(desc, 0, sizeof(*desc));
desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
+ GUC_STAGE_DESC_ATTR_PROXY |
GUC_STAGE_DESC_ATTR_KERNEL;
- if (is_high_priority(client))
- desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
desc->stage_id = client->stage_id;
desc->priority = client->priority;
desc->db_id = client->doorbell_id;
- for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
- struct intel_context *ce = to_intel_context(ctx, engine);
- u32 guc_engine_id = engine->guc_id;
- struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
-
- /* TODO: We have a design issue to be solved here. Only when we
- * receive the first batch, we know which engine is used by the
- * user. But here GuC expects the lrc and ring to be pinned. It
- * is not an issue for default context, which is the only one
- * for now who owns a GuC client. But for future owner of GuC
- * client, need to make sure lrc is pinned prior to enter here.
- */
- if (!ce->state)
- break; /* XXX: continue? */
-
- /*
- * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
- * submission or, in other words, not using a direct submission
- * model) the KMD's LRCA is not used for any work submission.
- * Instead, the GuC uses the LRCA of the user mode context (see
- * guc_add_request below).
- */
- lrc->context_desc = lower_32_bits(ce->lrc_desc);
-
- /* The state page is after PPHWSP */
- lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) +
- LRC_STATE_PN * PAGE_SIZE;
-
- /* XXX: In direct submission, the GuC wants the HW context id
- * here. In proxy submission, it wants the stage id
- */
- lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
- (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
-
- lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
- lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
- lrc->ring_next_free_location = lrc->ring_begin;
- lrc->ring_current_tail_pointer_value = 0;
-
- desc->engines_used |= (1 << guc_engine_id);
- }
-
- DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
- client->engines, desc->engines_used);
- WARN_ON(desc->engines_used == 0);
-
/*
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
*/
- gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
+ gfx_addr = intel_guc_ggtt_offset(client->guc, client->vma);
desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
@@ -430,11 +398,11 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
desc->desc_private = ptr_to_u64(client);
}
-static void guc_stage_desc_fini(struct intel_guc_client *client)
+static void guc_proxy_stage_desc_fini(struct intel_guc_client *client)
{
struct guc_stage_desc *desc;
- desc = __get_stage_desc(client);
+ desc = __get_proxy_stage_desc(client);
memset(desc, 0, sizeof(*desc));
}
@@ -553,7 +521,7 @@ static void inject_preempt_context(struct work_struct *work)
struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
- struct guc_stage_desc *stage_desc = __get_stage_desc(client);
+ struct guc_stage_desc *stage_desc = __get_proxy_stage_desc(client);
struct intel_context *ce = to_intel_context(client->owner, engine);
u32 data[7];
@@ -919,6 +887,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
struct i915_vma *vma;
void *vaddr;
int ret;
+ u32 starting_id;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
@@ -931,8 +900,11 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
client->doorbell_id = GUC_DOORBELL_INVALID;
spin_lock_init(&client->wq_lock);
- ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
- GFP_KERNEL);
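+ /*
+ * Client (proxy) stage ids are allocated above the range reserved
+ * for principal descriptors, unless a selftest overrides the start.
+ */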
+ if (!I915_SELFTEST_ONLY(starting_id = guc->starting_proxy_id))
+ starting_id = GUC_MAX_PPAL_STAGE_DESCRIPTORS;
+
+ ret = ida_simple_get(&guc->client_ids, starting_id,
+ GUC_MAX_STAGE_DESCRIPTORS, GFP_KERNEL);
if (ret < 0)
goto err_client;
@@ -983,7 +955,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
err_vma:
i915_vma_unpin_and_release(&client->vma, 0);
err_id:
- ida_simple_remove(&guc->stage_ids, client->stage_id);
+ ida_simple_remove(&guc->client_ids, client->stage_id);
err_client:
kfree(client);
return ERR_PTR(ret);
@@ -993,7 +965,7 @@ static void guc_client_free(struct intel_guc_client *client)
{
unreserve_doorbell(client);
i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
- ida_simple_remove(&client->guc->stage_ids, client->stage_id);
+ ida_simple_remove(&client->guc->client_ids, client->stage_id);
kfree(client);
}
@@ -1063,7 +1035,7 @@ static int __guc_client_enable(struct intel_guc_client *client)
int ret;
guc_proc_desc_init(client);
- guc_stage_desc_init(client);
+ guc_proxy_stage_desc_init(client);
ret = create_doorbell(client);
if (ret)
@@ -1072,7 +1044,7 @@ static int __guc_client_enable(struct intel_guc_client *client)
return 0;
fail:
- guc_stage_desc_fini(client);
+ guc_proxy_stage_desc_fini(client);
guc_proc_desc_fini(client);
return ret;
}
@@ -1089,7 +1061,7 @@ static void __guc_client_disable(struct intel_guc_client *client)
else
__destroy_doorbell(client);
- guc_stage_desc_fini(client);
+ guc_proxy_stage_desc_fini(client);
guc_proc_desc_fini(client);
}
@@ -1145,6 +1117,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
GEM_BUG_ON(!guc->stage_desc_pool);
WARN_ON(!guc_verify_doorbells(guc));
+
+ ida_init(&guc->client_ids);
+
ret = guc_clients_create(guc);
if (ret)
goto err_pool;
@@ -1157,6 +1132,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
return 0;
err_pool:
+ ida_destroy(&guc->client_ids);
guc_stage_desc_pool_destroy(guc);
return ret;
}
@@ -1173,6 +1149,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_clients_destroy(guc);
WARN_ON(!guc_verify_doorbells(guc));
+ ida_destroy(&guc->client_ids);
+
if (guc->stage_desc_pool)
guc_stage_desc_pool_destroy(guc);
}
@@ -1257,6 +1235,203 @@ static void guc_submission_unpark(struct intel_engine_cs *engine)
intel_engine_pin_breadcrumbs_irq(engine);
}
+static void guc_map_gem_ctx_to_ppal_stage(struct intel_guc *guc,
+ struct guc_stage_desc *desc,
+ u32 id)
+{
+ GEM_BUG_ON(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE);
+
+ desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
+ GUC_STAGE_DESC_ATTR_PRINCIPAL |
+ GUC_STAGE_DESC_ATTR_KERNEL;
+ desc->stage_id = id;
+
+ /* all ppal contexts will be submitted through the execbuf client */
+ desc->proxy_id = guc->execbuf_client->stage_id;
+
+ /*
+ * max_lrc_per_class is used in GuC to cut short loops over the
+ * lrc_bitmap when only a small amount of lrcs are used. We could
+ * recalculate this value every time an lrc is added or removed, but
+ * given the fact that we only have a max number of lrcs per stage_desc
+ * equal to the max number of instances of a class (because we map
+ * gem_context 1:1 with stage_desc) and that the GuC loops only in
+ * specific cases, redoing the calculation each time doesn't give us a
+ * big benefit for the cost so we can just use a static value.
+ */
+ desc->max_lrc_per_class = MAX_ENGINE_INSTANCE + 1;
+}
+
+static void guc_unmap_gem_ctx_from_ppal_stage(struct intel_guc *guc,
+ struct guc_stage_desc *desc)
+{
+ GEM_BUG_ON(!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE));
+ GEM_BUG_ON(desc->lrc_count > 0);
+
+ memset(desc, 0, sizeof(*desc));
+}
+
+static inline void guc_ppal_stage_lrc_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce)
+{
+ struct intel_guc *guc = &ctx->i915->guc;
+ struct guc_stage_desc *desc;
+ struct guc_execlist_context *lrc;
+ u8 guc_class = engine->class;
+
+ /* 1:1 gem_context to ppal mapping */
+ GEM_BUG_ON(ce->sw_counter > MAX_ENGINE_INSTANCE);
+
+ desc = __get_ppal_stage_desc(guc, ce->sw_context_id);
+ GEM_BUG_ON(desc->lrc_alloc_map[guc_class].bitmap & BIT(ce->sw_counter));
+
+ if (!desc->lrc_count++)
+ guc_map_gem_ctx_to_ppal_stage(guc, desc, ce->sw_context_id);
+
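+ /*
+ * The lrc slot for this HW context is selected by engine class and the
+ * context's SW Counter within its principal descriptor.
+ */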
+ lrc = &desc->lrc[guc_class][ce->sw_counter];
+ lrc->hw_context_desc = ce->lrc_desc;
+ lrc->ring_lrc = intel_guc_ggtt_offset(guc, ce->state) +
+ LRC_STATE_PN * PAGE_SIZE;
+ lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
+ lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
+
+ desc->lrc_alloc_map[guc_class].bitmap |= BIT(ce->sw_counter);
+}
+
+static inline void guc_ppal_stage_lrc_unpin(struct intel_context *ce)
+{
+ struct i915_gem_context *ctx = ce->gem_context;
+ struct intel_guc *guc = &ctx->i915->guc;
+ struct intel_engine_cs *engine = ctx->i915->engine[ce - ctx->__engine];
+ struct guc_stage_desc *desc;
+ struct guc_execlist_context *lrc;
+ u8 guc_class = engine->class;
+
+ desc = __get_ppal_stage_desc(guc, ce->sw_context_id);
+ GEM_BUG_ON(!(desc->lrc_alloc_map[guc_class].bitmap & BIT(ce->sw_counter)));
+
+ lrc = &desc->lrc[guc_class][ce->sw_counter];
+
+ /*
+ * GuC needs us to keep the lrc mapped until it has finished processing
+ * the ctx switch interrupt. When executing nop or very small workloads
+ * it is possible (but quite unlikely) that 2 contexts on different
+ * ELSPs of the same engine complete before the GuC manages to process
+ * the interrupt for the first completion. Experiments show this happens
+ * for ~0.2% of contexts when executing nop workloads on different
+ * contexts back to back on the same engine. When submitting nop
+ * workloads on all engines at the same time the hit-rate goes up to
+ * ~0.7%. In all the observed cases GuC required < 100us to catch up,
+ * with the single engine case being always below 20us.
+ *
+ * The completion of the request on the second lrc will reduce our
+ * pin_count on the first lrc to zero, thus triggering a call to this
+ * function potentially before GuC has had time to process the
+ * interrupt. To avoid this, we could get an extra pin on the context or
+ * delay the unpin when GuC is in use, but given that the issue is
+ * limited to pathological scenarios and has a very low hit rate even
+ * there, we can just introduce a small delay when it happens to give
+ * GuC time to catch up. Note also that since the requests have
+ * completed on the HW we've most likely already sent GuC the next
+ * contexts to be executed, so it is unlikely that by waiting we'll add
+ * bubbles in the HW execution.
+ */
+ WARN_ON(wait_for_us(lrc->is_present_in_sq == 0, 1000));
+
+ desc->lrc_alloc_map[guc_class].bitmap &= ~BIT(ce->sw_counter);
+ memset(lrc, 0, sizeof(*lrc));
+
+ if (!--desc->lrc_count)
+ guc_unmap_gem_ctx_from_ppal_stage(guc, desc);
+}
+
+static inline void guc_init_lrc_mapping(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ enum intel_engine_id id;
+
+ /*
+ * Some contexts (e.g. the kernel_context) might have been pinned before
+ * we enabled GuC submission, so we need to add them to the GuC
+ * bookkeeping. Also, after a GuC reset we want to make sure that the
+ * information shared with GuC is properly reset.
+ *
+ * NOTE: the code below assumes a 1:1 mapping between ppal descriptors
+ * and gem contexts for simplicity.
+ */
+ list_for_each_entry(ctx, &i915->contexts.list, link) {
+ if (atomic_read(&ctx->hw_id_pin_count)) {
+ struct guc_stage_desc *desc;
+
+ /* make sure the descriptor is clean... */
+ GEM_BUG_ON(ctx->hw_id >= GUC_MAX_PPAL_STAGE_DESCRIPTORS);
+ desc = __get_ppal_stage_desc(guc, ctx->hw_id);
+ memset(desc, 0, sizeof(*desc));
+
+ /* ...and then (re-)pin all the lrcs */
+ for_each_engine(engine, i915, id) {
+ ce = to_intel_context(ctx, engine);
+ if (ce->pin_count > 0)
+ guc_ppal_stage_lrc_pin(engine, ctx, ce);
+ }
+ }
+ }
+}
+
+static inline void guc_fini_lrc_mapping(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ enum intel_engine_id id;
+
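+ /*
+ * Mirror of guc_init_lrc_mapping: drop the GuC bookkeeping for any
+ * context that is still pinned.
+ */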
+ list_for_each_entry(ctx, &i915->contexts.list, link) {
+ if (atomic_read(&ctx->hw_id_pin_count)) {
+ for_each_engine(engine, i915, id) {
+ ce = to_intel_context(ctx, engine);
+ if (ce->pin_count > 0)
+ guc_ppal_stage_lrc_unpin(ce);
+ }
+ }
+ }
+}
+
+static void guc_context_unpin(struct intel_context *ce)
+{
+ guc_ppal_stage_lrc_unpin(ce);
+ intel_execlists_context_unpin(ce);
+}
+
+static const struct intel_context_ops guc_context_ops = {
+ .unpin = guc_context_unpin,
+ .destroy = intel_execlists_context_destroy,
+};
+
+static struct intel_context *guc_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
+{
+ struct intel_context *ce = to_intel_context(ctx, engine);
+
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+ if (likely(ce->pin_count++))
+ return ce;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+
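+ /*
+ * Switch to the GuC context ops before pinning, so that the eventual
+ * unpin also goes through guc_ppal_stage_lrc_unpin.
+ */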
+ ce->ops = &guc_context_ops;
+
+ ce = intel_execlists_context_pin(engine, ctx, ce);
+ if (!IS_ERR(ce))
+ guc_ppal_stage_lrc_pin(engine, ctx, ce);
+
+ return ce;
+}
+
static void guc_set_default_submission(struct intel_engine_cs *engine)
{
/*
@@ -1274,6 +1449,8 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->execlists.tasklet.func = guc_submission_tasklet;
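+ /*
+ * Pin contexts via the GuC path so that their LRCs get registered in
+ * the principal stage descriptors.
+ */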
+ engine->context_pin = guc_context_pin;
+
engine->park = guc_submission_park;
engine->unpark = guc_submission_unpark;
@@ -1320,6 +1497,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
engine->set_default_submission(engine);
}
+ guc_init_lrc_mapping(guc);
+
return 0;
}
@@ -1329,6 +1508,7 @@ void intel_guc_submission_disable(struct intel_guc *guc)
GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
+ guc_fini_lrc_mapping(guc);
guc_interrupts_release(dev_priv);
guc_clients_disable(guc);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 48e0cdf42221..444bc83554c5 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1071,7 +1071,7 @@ static void execlists_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static void execlists_context_destroy(struct intel_context *ce)
+void intel_execlists_context_destroy(struct intel_context *ce)
{
GEM_BUG_ON(ce->pin_count);
@@ -1084,7 +1084,7 @@ static void execlists_context_destroy(struct intel_context *ce)
i915_gem_object_put(ce->state->obj);
}
-static void execlists_context_unpin(struct intel_context *ce)
+void intel_execlists_context_unpin(struct intel_context *ce)
{
struct intel_engine_cs *engine;
@@ -1141,10 +1141,10 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
return i915_vma_pin(vma, 0, 0, flags);
}
-static struct intel_context *
-__execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- struct intel_context *ce)
+struct intel_context *
+intel_execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce)
{
void *vaddr;
int ret;
@@ -1205,8 +1205,8 @@ __execlists_context_pin(struct intel_engine_cs *engine,
}
static const struct intel_context_ops execlists_context_ops = {
- .unpin = execlists_context_unpin,
- .destroy = execlists_context_destroy,
+ .unpin = intel_execlists_context_unpin,
+ .destroy = intel_execlists_context_destroy,
};
static struct intel_context *
@@ -1224,7 +1224,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
ce->ops = &execlists_context_ops;
- return __execlists_context_pin(engine, ctx, ce);
+ return intel_execlists_context_pin(engine, ctx, ce);
}
static int execlists_request_alloc(struct i915_request *request)
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index f5a5502ecf70..178b181ea651 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -104,4 +104,11 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv);
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
+struct intel_context *
+intel_execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx,
+ struct intel_context *ce);
+void intel_execlists_context_unpin(struct intel_context *ce);
+void intel_execlists_context_destroy(struct intel_context *ce);
+
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index bf27162fb327..eb4e8bbe8c82 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -301,6 +301,7 @@ static int igt_guc_doorbells(void *arg)
if (err)
goto unlock;
+ guc->starting_proxy_id = GUC_MAX_PPAL_STAGE_DESCRIPTORS - ATTEMPTS;