diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 0b1a3f9..d7884b1 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1645,7 +1645,9 @@ the amount of locking which needs to be done.
all the readers who were traversing the list when we deleted the
element are finished. We use call_rcu() to
register a callback which will actually destroy the object once
- the readers are finished.
+ all pre-existing readers are finished. Alternatively,
+ synchronize_rcu() may be used to block until
+ all pre-existing readers are finished.
But how does Read Copy Update know when the readers are
@@ -1714,7 +1716,7 @@ the amount of locking which needs to be done.
- object_put(obj);
+ list_del_rcu(&obj->list);
cache_num--;
-+ call_rcu(&obj->rcu, cache_delete_rcu, obj);
++ call_rcu(&obj->rcu, cache_delete_rcu);
}
/* Must be holding cache_lock */
@@ -1725,14 +1727,6 @@ the amount of locking which needs to be done.
if (++cache_num > MAX_CACHE_SIZE) {
struct object *i, *outcast = NULL;
list_for_each_entry(i, &cache, list) {
-@@ -85,6 +94,7 @@
- obj->popularity = 0;
- atomic_set(&obj->refcnt, 1); /* The cache holds a reference */
- spin_lock_init(&obj->lock);
-+ INIT_RCU_HEAD(&obj->rcu);
-
- spin_lock_irqsave(&cache_lock, flags);
- __cache_add(obj);
@@ -104,12 +114,11 @@
struct object *cache_find(int id)
{
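
As a concrete reference for the two-argument call_rcu() form that the
corrected example above settles on, the pieces fit together roughly as
follows. This is a minimal sketch: the struct layout and the kfree()
in the callback are illustrative assumptions, not part of the patch.

	struct object {
		struct list_head list;
		struct rcu_head rcu;
		int id;
	};

	/* Invoked after a grace period, once every reader that could
	 * have seen the object on the list has finished. */
	static void cache_delete_rcu(struct rcu_head *head)
	{
		struct object *obj = container_of(head, struct object, rcu);

		kfree(obj);
	}

	/* Must be holding the update-side lock. */
	static void cache_delete(struct object *obj)
	{
		list_del_rcu(&obj->list);
		call_rcu(&obj->rcu, cache_delete_rcu);
	}
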
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 790d1a8..0c134f8 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -218,13 +218,22 @@ over a rather long period of time, but improvements are always welcome!
include:
a. Keeping a count of the number of data-structure elements
- used by the RCU-protected data structure, including those
- waiting for a grace period to elapse. Enforce a limit
- on this number, stalling updates as needed to allow
- previously deferred frees to complete.
-
- Alternatively, limit only the number awaiting deferred
- free rather than the total number of elements.
+ used by the RCU-protected data structure, including
+ those waiting for a grace period to elapse. Enforce a
+ limit on this number, stalling updates as needed to allow
+ previously deferred frees to complete. Alternatively,
+ limit only the number awaiting deferred free rather than
+ the total number of elements.
+
+ One way to stall the updates is to acquire the update-side
+ mutex. (Don't try this with a spinlock -- other CPUs
+ spinning on the lock could prevent the grace period
+ from ever ending.) Another way to stall the updates
+ is for the updates to use a wrapper function around
+ the memory allocator, so that this wrapper function
+ simulates OOM when there is too much memory awaiting an
+ RCU grace period. There are of course many other
+ variations on this theme.
b. Limiting update rate. For example, if updates occur only
once per hour, then no explicit rate limiting is required,
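
To make the stall techniques in (a) concrete, here is a minimal sketch
of the update-side-mutex approach, assuming a hypothetical list of
struct object instances with an embedded rcu_head (all of the names
below are illustrative, not from the checklist):

	static DEFINE_MUTEX(update_mutex);	/* never a spinlock here */
	static atomic_t pending_frees = ATOMIC_INIT(0);
	#define PENDING_FREES_MAX 1000

	static void object_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct object, rcu));
		atomic_dec(&pending_frees);
	}

	static void object_remove(struct object *obj)
	{
		mutex_lock(&update_mutex);
		list_del_rcu(&obj->list);
		atomic_inc(&pending_frees);
		call_rcu(&obj->rcu, object_free_rcu);

		/* Stall further updates (other updaters block on the
		 * mutex) until enough deferred frees have drained. */
		while (atomic_read(&pending_frees) > PENDING_FREES_MAX)
			synchronize_rcu();
		mutex_unlock(&update_mutex);
	}
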
@@ -365,3 +374,26 @@ over a rather long period of time, but improvements are always welcome!
and the compiler to freely reorder code into and out of RCU
read-side critical sections. It is the responsibility of the
RCU update-side primitives to deal with this.
+
+17. Use CONFIG_PROVE_RCU, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and
+ the __rcu sparse checks to validate your RCU code. These
+ can help find problems as follows:
+
+ CONFIG_PROVE_RCU: check that accesses to RCU-protected data
+ structures are carried out under the proper RCU
+ read-side critical section, while holding the right
+ combination of locks, or whatever other conditions
+ are appropriate.
+
+ CONFIG_DEBUG_OBJECTS_RCU_HEAD: check that you don't pass the
+ same object to call_rcu() (or friends) before an RCU
+ grace period has elapsed since the last time that you
+ passed that same object to call_rcu() (or friends).
+
+ __rcu sparse checks: tag the pointer to the RCU-protected data
+ structure with __rcu, and sparse will warn you if you
+ access that pointer without the services of one of the
+ variants of rcu_dereference().
+
+ These debugging aids can help you find problems that are
+ otherwise extremely difficult to spot.
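
As a minimal sketch of the __rcu annotation described in item 17 (the
pointer and lock names below are hypothetical), sparse, run via
"make C=1", will warn on any access that bypasses the
rcu_dereference() family:

	struct foo {
		int a;
	};

	static DEFINE_SPINLOCK(foo_lock);
	static struct foo __rcu *global_foo;	/* RCU-protected pointer */

	int read_foo_a(void)
	{
		struct foo *p;
		int a = -1;

		rcu_read_lock();
		p = rcu_dereference(global_foo);  /* sparse-clean access */
		/* By contrast, a plain "p = global_foo;" would draw an
		 * address-space warning from sparse. */
		if (p)
			a = p->a;
		rcu_read_unlock();
		return a;
	}

	void set_foo(struct foo *new)
	{
		spin_lock(&foo_lock);
		rcu_assign_pointer(global_foo, new);	/* publish */
		spin_unlock(&foo_lock);
	}
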
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 44c6dcc..862c08e 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -80,6 +80,24 @@ o A CPU looping with bottom halves disabled. This condition can
o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
without invoking schedule().
+o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
+ happen to preempt a low-priority task in the middle of an RCU
+ read-side critical section. This is especially damaging if
+ that low-priority task is not permitted to run on any other CPU,
+ in which case the next RCU grace period can never complete, which
+ will eventually cause the system to run out of memory and hang.
+ While the system is in the process of running itself out of
+ memory, you might see stall-warning messages.
+
+o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
+ is running at a higher priority than the RCU softirq threads.
+ This will prevent RCU callbacks from ever being invoked,
+ and in a CONFIG_TREE_PREEMPT_RCU kernel will further prevent
+ RCU grace periods from ever completing. Either way, the
+ system will eventually run out of memory and hang. In the
+ CONFIG_TREE_PREEMPT_RCU case, you might see stall-warning
+ messages.
+
o A bug in the RCU implementation.
o A hardware failure. This is quite unlikely, but has occurred
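
To make the looping cases above concrete: on a !CONFIG_PREEMPT kernel,
a hypothetical kthread like the first one below never passes through a
quiescent state on its CPU, so grace periods stall and the system
eventually hangs. The usual fix is the cond_resched() shown in the
second version:

	/* Buggy: busy-waits forever without scheduling. */
	static int poll_thread(void *unused)
	{
		while (!kthread_should_stop())
			cpu_relax();
		return 0;
	}

	/* Fixed: cond_resched() lets the scheduler run, providing
	 * the quiescent state that RCU needs. */
	static int poll_thread_fixed(void *unused)
	{
		while (!kthread_should_stop()) {
			cpu_relax();
			cond_resched();
		}
		return 0;
	}
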
diff --git a/Makefile b/Makefile
index 2c0299f..f3bdff8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 36
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc1
NAME = Sheep on Meth
# *DOCUMENTATION*
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index a46cb35..1f11f5c 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -40,7 +40,6 @@
static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
-EXPORT_SYMBOL_GPL(xstate_size);
unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
static struct i387_fxsave_struct fx_scratch __cpuinitdata;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index ddeb231..0fd6378 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -697,7 +697,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
pit->wq = create_singlethread_workqueue("kvm-pit-wq");
if (!pit->wq) {
mutex_unlock(&pit->pit_state.lock);
- kvm_free_irq_source_id(kvm, pit->irq_source_id);
kfree(pit);
return NULL;
}
@@ -743,7 +742,7 @@ fail:
kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
kvm_free_irq_source_id(kvm, pit->irq_source_id);
- destroy_workqueue(pit->wq);
+
kfree(pit);
return NULL;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3a09c62..25f1907 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2387,7 +2387,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
if (cpu_has_xsave)
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->xsave,
- xstate_size);
+ sizeof(struct xsave_struct));
else {
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->fxsave,
@@ -2405,7 +2405,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
if (cpu_has_xsave)
memcpy(&vcpu->arch.guest_fpu.state->xsave,
- guest_xsave->region, xstate_size);
+ guest_xsave->region, sizeof(struct xsave_struct));
else {
if (xstate_bv & ~XSTATE_FPSSE)
return -EINVAL;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 710af89..ddf5def 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -819,16 +819,13 @@ static const struct intel_driver_description {
"Sandybridge", NULL, &intel_gen6_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
"Sandybridge", NULL, &intel_gen6_driver },
- { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG,
- "Sandybridge", NULL, &intel_gen6_driver },
{ 0, 0, NULL, NULL, NULL }
};
static int __devinit intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge)
{
- int i, mask;
-
+ int i;
bridge->driver = NULL;
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -848,19 +845,14 @@ static int __devinit intel_gmch_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
- if (bridge->driver->mask_memory == intel_gen6_mask_memory)
- mask = 40;
- else if (bridge->driver->mask_memory == intel_i965_mask_memory)
- mask = 36;
- else
- mask = 32;
-
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
- dev_err(&intel_private.pcidev->dev,
- "set gfx device dma mask %d-bit failed!\n", mask);
- else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(mask));
+ if (bridge->driver->mask_memory == intel_i965_mask_memory) {
+ if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+ dev_err(&intel_private.pcidev->dev,
+ "set gfx device dma mask 36bit failed!\n");
+ else
+ pci_set_consistent_dma_mask(intel_private.pcidev,
+ DMA_BIT_MASK(36));
+ }
return 1;
}
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 08d4753..c05e3e5 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -204,7 +204,6 @@
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG 0x0126
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5c8e534..da78f2c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -8,7 +8,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
- i915_gem_evict.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
@@ -19,7 +18,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
- intel_panel.o \
intel_i2c.o \
intel_fb.o \
intel_tv.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 8c2ad01..0d6ff64 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -30,17 +30,20 @@
#include "intel_drv.h"
struct intel_dvo_device {
- const char *name;
+ char *name;
int type;
/* DVOA/B/C output register */
u32 dvo_reg;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
+ struct i2c_adapter *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
- struct i2c_adapter *i2c_bus;
+
+ struct drm_display_mode *panel_fixed_mode;
+ bool panel_wants_dither;
};
struct intel_dvo_dev_ops {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 92d5605..9214119 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -467,9 +467,6 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
- if (error->overlay)
- intel_overlay_print_error_state(m, error->overlay);
-
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 44af317..f19ffe8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -499,13 +499,6 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
}
}
-
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
i915_emit_breadcrumb(dev);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 00befce..5044f65 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -181,7 +181,6 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
- INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
{0, 0, 0}
};
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 047cd7c..906663b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -113,9 +113,6 @@ struct intel_opregion {
int enabled;
};
-struct intel_overlay;
-struct intel_overlay_error_state;
-
struct drm_i915_master_private {
drm_local_map_t *sarea;
struct _drm_i915_sarea *sarea_priv;
@@ -169,7 +166,6 @@ struct drm_i915_error_state {
u32 purgeable:1;
} *active_bo;
u32 active_bo_count;
- struct intel_overlay_error_state *overlay;
};
struct drm_i915_display_funcs {
@@ -190,6 +186,8 @@ struct drm_i915_display_funcs {
/* clock gating init */
};
+struct intel_overlay;
+
struct intel_device_info {
u8 is_mobile : 1;
u8 is_i8xx : 1;
@@ -244,7 +242,6 @@ typedef struct drm_i915_private {
struct pci_dev *bridge_dev;
struct intel_ring_buffer render_ring;
struct intel_ring_buffer bsd_ring;
- uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
void *seqno_page;
@@ -254,7 +251,6 @@ typedef struct drm_i915_private {
drm_local_map_t hws_map;
struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
- struct drm_gem_object *renderctx;
struct resource mch_res;
@@ -289,9 +285,6 @@ typedef struct drm_i915_private {
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
- u32 flush_rings;
-#define FLUSH_RENDER_RING 0x1
-#define FLUSH_BSD_RING 0x2
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
@@ -575,6 +568,8 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;
+ uint32_t next_gem_seqno;
+
/**
* Waiting sequence number, if any
*/
@@ -615,8 +610,6 @@ typedef struct drm_i915_private {
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
- /* Panel fitter placement and size for Ironlake+ */
- u32 pch_pf_pos, pch_pf_size;
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -676,8 +669,6 @@ struct drm_i915_gem_object {
struct list_head list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
- /** This object's place on eviction list */
- struct list_head evict_list;
/**
* This is set if the object is on the active or flushing lists
@@ -987,7 +978,6 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
-int i915_gpu_idle(struct drm_device *dev);
int i915_gem_idle(struct drm_device *dev);
uint32_t i915_add_request(struct drm_device *dev,
struct drm_file *file_priv,
@@ -1001,9 +991,7 @@ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
- int id,
- int align);
+ struct drm_gem_object *obj, int id);
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
@@ -1015,11 +1003,6 @@ int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
-/* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
-int i915_gem_evict_everything(struct drm_device *dev);
-int i915_gem_evict_inactive(struct drm_device *dev);
-
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
@@ -1083,10 +1066,6 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
-/* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
-extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
-
/**
* Lock test for when it's just for synchronization of ring access.
*
@@ -1113,26 +1092,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
#define I915_VERBOSE 0
#define BEGIN_LP_RING(n) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
- intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
+ intel_ring_begin(dev, &dev_priv->render_ring, (n)); \
} while (0)
#define OUT_RING(x) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
- intel_ring_emit(dev, &dev_priv__->render_ring, x); \
+ intel_ring_emit(dev, &dev_priv->render_ring, x); \
} while (0)
#define ADVANCE_LP_RING() do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
+ drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG("ADVANCE_LP_RING %x\n", \
- dev_priv__->render_ring.tail); \
- intel_ring_advance(dev, &dev_priv__->render_ring); \
+ dev_priv->render_ring.tail); \
+ intel_ring_advance(dev, &dev_priv->render_ring); \
} while(0)
/**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df5a713..0758c78 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,7 +35,6 @@
#include <linux/swap.h>
#include <linux/pci.h>
-static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -49,6 +48,8 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
@@ -57,14 +58,6 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
-static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
-{
- return obj_priv->gtt_space &&
- !obj_priv->active &&
- obj_priv->pin_count == 0;
-}
-
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
@@ -320,8 +313,7 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
if (ret == -ENOMEM) {
struct drm_device *dev = obj->dev;
- ret = i915_gem_evict_something(dev, obj->size,
- i915_gem_get_gtt_alignment(obj));
+ ret = i915_gem_evict_something(dev, obj->size);
if (ret)
return ret;
@@ -1044,11 +1036,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
-
- /* Maintain LRU order of "inactive" objects */
- if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1150,7 +1137,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
@@ -1168,6 +1155,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unlock;
+ list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
ret = i915_gem_object_set_to_gtt_domain(obj, write);
if (ret)
goto unlock;
@@ -1180,9 +1169,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
- if (i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
page_offset;
@@ -1377,6 +1363,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_mmap_gtt *args = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret;
@@ -1422,6 +1409,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return ret;
}
+ list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
}
drm_gem_object_unreference(obj);
@@ -1505,16 +1493,9 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
- /* Our goal here is to return as much of the memory as
- * is possible back to the system as we are called from OOM.
- * To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*. Here we mirror the actions taken
- * when by shmem_delete_inode() to release the backing store.
- */
inode = obj->filp->f_path.dentry->d_inode;
- truncate_inode_pages(inode->i_mapping, 0);
- if (inode->i_op->truncate_range)
- inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+ if (inode->i_op->truncate)
+ inode->i_op->truncate(inode);
obj_priv->madv = __I915_MADV_PURGED;
}
@@ -1906,6 +1887,19 @@ i915_gem_flush(struct drm_device *dev,
flush_domains);
}
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains,
+ struct intel_ring_buffer *ring)
+{
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+ ring->flush(dev, ring,
+ invalidate_domains,
+ flush_domains);
+}
+
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
@@ -1979,6 +1973,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* cause memory corruption through use-after-free.
*/
+ BUG_ON(obj_priv->active);
+
/* release the fence reg _after_ flushing */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj);
@@ -2014,7 +2010,34 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
-int
+static struct drm_gem_object *
+i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *best = NULL;
+ struct drm_gem_object *first = NULL;
+
+ /* Try to find the smallest clean object */
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ struct drm_gem_object *obj = &obj_priv->base;
+ if (obj->size >= min_size) {
+ if ((!obj_priv->dirty ||
+ i915_gem_object_is_purgeable(obj_priv)) &&
+ (!best || obj->size < best->size)) {
+ best = obj;
+ if (best->size == min_size)
+ return best;
+ }
+ if (!first)
+ first = obj;
+ }
+ }
+
+ return best ? best : first;
+}
+
+static int
i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2055,6 +2078,155 @@ i915_gpu_idle(struct drm_device *dev)
return ret;
}
+static int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ bool lists_empty;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return -ENOSPC;
+
+ /* Flush everything (on to the inactive lists) and evict */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret)
+ return ret;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->render_ring.active_list) &&
+ (!HAS_BSD(dev)
+ || list_empty(&dev_priv->bsd_ring.active_list)));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ BUG_ON(!lists_empty);
+
+ return 0;
+}
+
+static int
+i915_gem_evict_something(struct drm_device *dev, int min_size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ int ret;
+
+ struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
+ for (;;) {
+ i915_gem_retire_requests(dev);
+
+ /* If there's an inactive buffer available now, grab it
+ * and be done.
+ */
+ obj = i915_gem_find_inactive_object(dev, min_size);
+ if (obj) {
+ struct drm_i915_gem_object *obj_priv;
+
+#if WATCH_LRU
+ DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+ obj_priv = to_intel_bo(obj);
+ BUG_ON(obj_priv->pin_count != 0);
+ BUG_ON(obj_priv->active);
+
+ /* Wait on the rendering and unbind the buffer. */
+ return i915_gem_object_unbind(obj);
+ }
+
+ /* If we didn't get anything, but the ring is still processing
+ * things, wait for the next to finish and hopefully leave us
+ * a buffer to evict.
+ */
+ if (!list_empty(&render_ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&render_ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev,
+ request->seqno, request->ring);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+
+ if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&bsd_ring->request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev,
+ request->seqno, request->ring);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+
+ /* If we didn't have anything on the request list but there
+ * are buffers awaiting a flush, emit one and try again.
+ * When we wait on it, those buffers waiting for that flush
+ * will get moved to inactive.
+ */
+ if (!list_empty(&dev_priv->mm.flushing_list)) {
+ struct drm_i915_gem_object *obj_priv;
+
+ /* Find an object that we can immediately reuse */
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+ obj = &obj_priv->base;
+ if (obj->size >= min_size)
+ break;
+
+ obj = NULL;
+ }
+
+ if (obj != NULL) {
+ uint32_t seqno;
+
+ i915_gem_flush_ring(dev,
+ obj->write_domain,
+ obj->write_domain,
+ obj_priv->ring);
+ seqno = i915_add_request(dev, NULL,
+ obj->write_domain,
+ obj_priv->ring);
+ if (seqno == 0)
+ return -ENOMEM;
+ continue;
+ }
+ }
+
+ /* If we didn't do any of the above, there's no single buffer
+ * large enough to swap out for the new one, so just evict
+ * everything and start again. (This should be rare.)
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list))
+ return i915_gem_evict_from_inactive_list(dev);
+ else
+ return i915_gem_evict_everything(dev);
+ }
+}
+
int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
@@ -2494,7 +2666,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, obj->size);
if (ret)
return ret;
@@ -2512,8 +2684,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
- ret = i915_gem_evict_something(dev, obj->size,
- alignment);
+ ret = i915_gem_evict_something(dev, obj->size);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
@@ -2543,7 +2714,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, obj->size);
if (ret)
return ret;
@@ -2552,9 +2723,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
atomic_inc(&dev->gtt_count);
atomic_add(obj->size, &dev->gtt_memory);
- /* keep track of bounds object by adding it to the inactive list */
- list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
@@ -2949,7 +3117,6 @@ static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
@@ -3012,13 +3179,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->pending_write_domain = obj->write_domain;
obj->read_domains = obj->pending_read_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS) {
- if (obj_priv->ring == &dev_priv->render_ring)
- dev_priv->flush_rings |= FLUSH_RENDER_RING;
- else if (obj_priv->ring == &dev_priv->bsd_ring)
- dev_priv->flush_rings |= FLUSH_BSD_RING;
- }
-
dev->invalidate_domains |= invalidate_domains;
dev->flush_domains |= flush_domains;
#if WATCH_BUF
@@ -3558,6 +3718,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->render_ring;
}
+
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
@@ -3731,7 +3892,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
dev->invalidate_domains = 0;
dev->flush_domains = 0;
- dev_priv->flush_rings = 0;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
@@ -3752,14 +3912,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev_priv->flush_rings & FLUSH_RENDER_RING)
- (void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->render_ring);
- if (dev_priv->flush_rings & FLUSH_BSD_RING)
+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
(void)i915_add_request(dev, file_priv,
- dev->flush_domains,
- &dev_priv->bsd_ring);
+ dev->flush_domains,
+ &dev_priv->render_ring);
+
+ if (HAS_BSD(dev))
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains,
+ &dev_priv->bsd_ring);
+ }
}
for (i = 0; i < args->buffer_count; i++) {
@@ -4030,10 +4192,6 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
if (obj_priv->gtt_offset & (alignment - 1)) {
- WARN(obj_priv->pin_count,
- "bo is already pinned with incorrect alignment:"
- " offset=%x, req.alignment=%x\n",
- obj_priv->gtt_offset, alignment);
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
@@ -4055,7 +4213,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
atomic_inc(&dev->pin_count);
atomic_add(obj->size, &dev->pin_memory);
if (!obj_priv->active &&
- (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
+ !list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4200,34 +4359,22 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
-
- /* Count all active objects as busy, even if they are currently not used
- * by the gpu. Users of this interface expect objects to eventually
- * become non-busy without any further actions, therefore emit any
- * necessary flushes here.
+ /* Update the active list for the hardware's current position.
+ * Otherwise this only updates on a delayed timer or when irqs are
+ * actually unmasked, and our working set ends up being larger than
+ * required.
*/
- obj_priv = to_intel_bo(obj);
- args->busy = obj_priv->active;
- if (args->busy) {
- /* Unconditionally flush objects, even when the gpu still uses this
- * object. Userspace calling this function indicates that it wants to
- * use this buffer rather sooner than later, so issuing the required
- * flush earlier is beneficial.
- */
- if (obj->write_domain) {
- i915_gem_flush(dev, 0, obj->write_domain);
- (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
- }
-
- /* Update the active list for the hardware's current position.
- * Otherwise this only updates on a delayed timer or when irqs
- * are actually unmasked, and our working set ends up being
- * larger than required.
- */
- i915_gem_retire_requests_ring(dev, obj_priv->ring);
+ i915_gem_retire_requests(dev);
- args->busy = obj_priv->active;
- }
+ obj_priv = to_intel_bo(obj);
+ /* Don't count being on the flushing list against the object being
+ * done. Otherwise, a buffer left on the flushing list but not getting
+ * flushed (because nobody's flushing that domain) won't ever return
+ * unbusy and get reused by libdrm's bo cache. The other expected
+ * consumer of this interface, OpenGL's occlusion queries, also specs
+ * that the objects get unbusy "eventually" without any interference.
+ */
+ args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -4367,6 +4514,30 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_free_object_tail(obj);
}
+/** Unbinds all inactive objects. */
+static int
+i915_gem_evict_from_inactive_list(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ while (!list_empty(&dev_priv->mm.inactive_list)) {
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = &list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list)->base;
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret != 0) {
+ DRM_ERROR("Error unbinding object: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
int
i915_gem_idle(struct drm_device *dev)
{
@@ -4391,7 +4562,7 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_inactive(dev);
+ ret = i915_gem_evict_from_inactive_list(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4509,8 +4680,6 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
goto cleanup_render_ring;
}
- dev_priv->next_seqno = 1;
-
return 0;
cleanup_render_ring:
@@ -4672,7 +4841,7 @@ i915_gem_load(struct drm_device *dev)
* e.g. for cursor + overlay regs
*/
int i915_gem_init_phys_object(struct drm_device *dev,
- int id, int size, int align)
+ int id, int size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
@@ -4687,7 +4856,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj->id = id;
- phys_obj->handle = drm_pci_alloc(dev, size, align);
+ phys_obj->handle = drm_pci_alloc(dev, size, 0);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
@@ -4769,9 +4938,7 @@ out:
int
i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
- int id,
- int align)
+ struct drm_gem_object *obj, int id)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
@@ -4790,10 +4957,11 @@ i915_gem_attach_phys_object(struct drm_device *dev,
i915_gem_detach_phys_object(dev, obj);
}
+
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
- obj->size, align);
+ obj->size);
if (ret) {
DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
goto out;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
deleted file mode 100644
index 72cae3c..0000000
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright © 2008-2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt
- * Chris Wilson
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drv.h"
-#include "i915_drm.h"
-
-static struct drm_i915_gem_object *
-i915_gem_next_active_object(struct drm_device *dev,
- struct list_head **render_iter,
- struct list_head **bsd_iter)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
-
- if (*render_iter != &dev_priv->render_ring.active_list)
- render_obj = list_entry(*render_iter,
- struct drm_i915_gem_object,
- list);
-
- if (HAS_BSD(dev)) {
- if (*bsd_iter != &dev_priv->bsd_ring.active_list)
- bsd_obj = list_entry(*bsd_iter,
- struct drm_i915_gem_object,
- list);
-
- if (render_obj == NULL) {
- *bsd_iter = (*bsd_iter)->next;
- return bsd_obj;
- }
-
- if (bsd_obj == NULL) {
- *render_iter = (*render_iter)->next;
- return render_obj;
- }
-
- /* XXX can we handle seqno wrapping? */
- if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
- *render_iter = (*render_iter)->next;
- return render_obj;
- } else {
- *bsd_iter = (*bsd_iter)->next;
- return bsd_obj;
- }
- } else {
- *render_iter = (*render_iter)->next;
- return render_obj;
- }
-}
-
-static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
- struct list_head *unwind)
-{
- list_add(&obj_priv->evict_list, unwind);
- return drm_mm_scan_add_block(obj_priv->gtt_space);
-}
-
-#define i915_for_each_active_object(OBJ, R, B) \
- *(R) = dev_priv->render_ring.active_list.next; \
- *(B) = dev_priv->bsd_ring.active_list.next; \
- while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
-
-int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
- struct list_head *render_iter, *bsd_iter;
- int ret = 0;
-
- i915_gem_retire_requests(dev);
-
- /* Re-check for free space after retiring requests */
- if (drm_mm_search_free(&dev_priv->mm.gtt_space,
- min_size, alignment, 0))
- return 0;
-
- /*
- * The goal is to evict objects and amalgamate space in LRU order.
- * The oldest idle objects reside on the inactive list, which is in
- * retirement order. The next objects to retire are those on the (per
- * ring) active list that do not have an outstanding flush. Once the
- * hardware reports completion (the seqno is updated after the
- * batchbuffer has been finished) the clean buffer objects would
- * be retired to the inactive list. Any dirty objects would be added
- * to the tail of the flushing list. So after processing the clean
- * active objects we need to emit a MI_FLUSH to retire the flushing
- * list, hence the retirement order of the flushing list is in
- * advance of the dirty objects on the active lists.
- *
- * The retirement sequence is thus:
- * 1. Inactive objects (already retired)
- * 2. Clean active objects
- * 3. Flushing list
- * 4. Dirty active objects.
- *
- * On each list, the oldest objects lie at the HEAD with the freshest
- * object on the TAIL.
- */
-
- INIT_LIST_HEAD(&unwind_list);
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
-
- /* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- if (mark_free(obj_priv, &unwind_list))
- goto found;
- }
-
- /* Now merge in the soon-to-be-expired objects... */
- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
- /* Does the object require an outstanding flush? */
- if (obj_priv->base.write_domain || obj_priv->pin_count)
- continue;
-
- if (mark_free(obj_priv, &unwind_list))
- goto found;
- }
-
- /* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- if (obj_priv->pin_count)
- continue;
-
- if (mark_free(obj_priv, &unwind_list))
- goto found;
- }
- i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
- if (! obj_priv->base.write_domain || obj_priv->pin_count)
- continue;
-
- if (mark_free(obj_priv, &unwind_list))
- goto found;
- }
-
- /* Nothing found, clean up and bail out! */
- list_for_each_entry(obj_priv, &unwind_list, evict_list) {
- ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
- BUG_ON(ret);
- }
-
- /* We expect the caller to unpin, evict all and try again, or give up.
- * So calling i915_gem_evict_everything() is unnecessary.
- */
- return -ENOSPC;
-
-found:
- INIT_LIST_HEAD(&eviction_list);
- list_for_each_entry_safe(obj_priv, tmp_obj_priv,
- &unwind_list, evict_list) {
- if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
- /* drm_mm doesn't allow any other other operations while
- * scanning, therefore store to be evicted objects on a
- * temporary list. */
- list_move(&obj_priv->evict_list, &eviction_list);
- }
- }
-
- /* Unbinding will emit any required flushes */
- list_for_each_entry_safe(obj_priv, tmp_obj_priv,
- &eviction_list, evict_list) {
-#if WATCH_LRU
- DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
- ret = i915_gem_object_unbind(&obj_priv->base);
- if (ret)
- return ret;
- }
-
- /* The just created free hole should be on the top of the free stack
- * maintained by drm_mm, so this BUG_ON actually executes in O(1).
- * Furthermore all accessed data has just recently been used, so it
- * should be really fast, too. */
- BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
- alignment, 0));
-
- return 0;
-}
-
-int
-i915_gem_evict_everything(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
- bool lists_empty;
-
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
-
- if (lists_empty)
- return -ENOSPC;
-
- /* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev);
- if (ret)
- return ret;
-
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
- ret = i915_gem_evict_inactive(dev);
- if (ret)
- return ret;
-
- spin_lock(&dev_priv->mm.active_list_lock);
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- (!HAS_BSD(dev)
- || list_empty(&dev_priv->bsd_ring.active_list)));
- spin_unlock(&dev_priv->mm.active_list_lock);
- BUG_ON(!lists_empty);
-
- return 0;
-}
-
-/** Unbinds all inactive objects. */
-int
-i915_gem_evict_inactive(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- while (!list_empty(&dev_priv->mm.inactive_list)) {
- struct drm_gem_object *obj;
- int ret;
-
- obj = &list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list)->base;
-
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object: %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 16861b8..85785a8 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -425,11 +425,9 @@ static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
struct drm_gem_object *src)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst;
struct drm_i915_gem_object *src_priv;
int page, page_count;
- u32 reloc_offset;
if (src == NULL)
return NULL;
@@ -444,27 +442,18 @@ i915_error_object_create(struct drm_device *dev,
if (dst == NULL)
return NULL;
- reloc_offset = src_priv->gtt_offset;
for (page = 0; page < page_count; page++) {
+ void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
unsigned long flags;
- void __iomem *s;
- void *d;
- d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
if (d == NULL)
goto unwind;
-
local_irq_save(flags);
- s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc_offset,
- KM_IRQ0);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s, KM_IRQ0);
+ s = kmap_atomic(src_priv->pages[page], KM_IRQ0);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s, KM_IRQ0);
local_irq_restore(flags);
-
dst->pages[page] = d;
-
- reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
dst->gtt_offset = src_priv->gtt_offset;
@@ -500,7 +489,6 @@ i915_error_state_free(struct drm_device *dev,
i915_error_object_free(error->batchbuffer[1]);
i915_error_object_free(error->ringbuffer);
kfree(error->active_bo);
- kfree(error->overlay);
kfree(error);
}
@@ -624,57 +612,18 @@ static void i915_capture_error_state(struct drm_device *dev)
if (batchbuffer[1] == NULL &&
error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd < obj_priv->gtt_offset + obj->size &&
+ batchbuffer[0] != obj)
batchbuffer[1] = obj;
count++;
}
- /* Scan the other lists for completeness for those bizarre errors. */
- if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
- batchbuffer[0] = obj;
-
- if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
- batchbuffer[1] = obj;
-
- if (batchbuffer[0] && batchbuffer[1])
- break;
- }
- }
- if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
- batchbuffer[0] = obj;
-
- if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
- batchbuffer[1] = obj;
-
- if (batchbuffer[0] && batchbuffer[1])
- break;
- }
- }
/* We need to copy these to an anonymous buffer as the simplest
 * method to avoid being overwritten by userspace.
*/
error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
- if (batchbuffer[1] != batchbuffer[0])
- error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
- else
- error->batchbuffer[1] = NULL;
+ error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
/* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev,
@@ -718,8 +667,6 @@ static void i915_capture_error_state(struct drm_device *dev)
do_gettimeofday(&error->time);
- error->overlay = intel_overlay_capture_error_state(dev);
-
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
dev_priv->first_error = error;
@@ -1304,16 +1251,6 @@ void i915_hangcheck_elapsed(unsigned long data)
&dev_priv->render_ring),
i915_get_tail_request(dev)->seqno)) {
dev_priv->hangcheck_count = 0;
-
- /* Issue a wake-up to catch stuck h/w. */
- if (dev_priv->render_ring.waiting_gem_seqno |
- dev_priv->bsd_ring.waiting_gem_seqno) {
- DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
- if (dev_priv->render_ring.waiting_gem_seqno)
- DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
- if (dev_priv->bsd_ring.waiting_gem_seqno)
- DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
- }
return;
}
@@ -1381,17 +1318,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
(void) I915_READ(DEIER);
- /* Gen6 only needs render pipe_control now */
- if (IS_GEN6(dev))
- render_mask = GT_PIPE_NOTIFY;
-
+ /* user interrupt should be enabled, but masked initially */
dev_priv->gt_irq_mask_reg = ~render_mask;
dev_priv->gt_irq_enable_reg = render_mask;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- if (IS_GEN6(dev))
- I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index ea5d3fe..d1bf92b 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -114,6 +114,10 @@ struct opregion_asle {
#define ASLE_REQ_MSK 0xf
/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAIL (2<<10)
+#define ASLE_BACKLIGHT_FAIL (2<<12)
+#define ASLE_PFIT_FAIL (2<<14)
+#define ASLE_PWM_FREQ_FAIL (2<<16)
#define ASLE_ALS_ILLUM_FAILED (1<<10)
#define ASLE_BACKLIGHT_FAILED (1<<12)
#define ASLE_PFIT_FAILED (1<<14)
@@ -151,11 +155,11 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
u32 max_backlight, level, shift;
if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAILED;
+ return ASLE_BACKLIGHT_FAIL;
bclp &= ASLE_BCLP_MSK;
if (bclp < 0 || bclp > 255)
- return ASLE_BACKLIGHT_FAILED;
+ return ASLE_BACKLIGHT_FAIL;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
@@ -207,7 +211,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
if (!(pfit & ASLE_PFIT_VALID))
- return ASLE_PFIT_FAILED;
+ return ASLE_PFIT_FAIL;
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 67e3ec1..281db6e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -170,7 +170,6 @@
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
-#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
@@ -181,12 +180,6 @@
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
-#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
-#define MI_MM_SPACE_GTT (1<<8)
-#define MI_MM_SPACE_PHYSICAL (0<<8)
-#define MI_SAVE_EXT_STATE_EN (1<<3)
-#define MI_RESTORE_EXT_STATE_EN (1<<2)
-#define MI_RESTORE_INHIBIT (1<<0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -1107,11 +1100,6 @@
#define PEG_BAND_GAP_DATA 0x14d68
/*
- * Logical Context regs
- */
-#define CCID 0x2180
-#define CCID_EN (1<<0)
-/*
* Overlay regs
*/
@@ -2081,7 +2069,6 @@
#define PIPE_DITHER_TYPE_ST01 (1 << 2)
/* Pipe A */
#define PIPEADSL 0x70000
-#define DSL_LINEMASK 0x00000fff
#define PIPEACONF 0x70008
#define PIPEACONF_ENABLE (1<<31)
#define PIPEACONF_DISABLE 0
@@ -2941,7 +2928,6 @@
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
-#define TRANS_DP_SYNC_MASK (3<<3)
/* SNB eDP training params */
/* SNB A-stepping */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2c6b98f..6e20252 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,7 +34,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
} else {
dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
+ if (!IS_IRONLAKE(dev))
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
+ if (!IS_IRONLAKE(dev))
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
fpb1_reg = FPB1;
}
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
}
@@ -395,20 +395,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_a_reg);
- udelay(150);
+ DRM_UDELAY(150);
}
I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
- POSTING_READ(dpll_a_reg);
- udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ DRM_UDELAY(150);
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- POSTING_READ(DPLL_A_MD);
- }
- udelay(150);
+ DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
@@ -417,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- if (!HAS_PCH_SPLIT(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -464,20 +460,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
- POSTING_READ(dpll_b_reg);
- udelay(150);
+ DRM_UDELAY(150);
}
I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
- POSTING_READ(dpll_b_reg);
- udelay(150);
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+ DRM_UDELAY(150);
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- POSTING_READ(DPLL_B_MD);
- }
- udelay(150);
+ DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
@@ -486,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- if (!HAS_PCH_SPLIT(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -554,14 +546,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
} else {
dev_priv->saveADPA = I915_READ(ADPA);
}
/* LVDS state */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -579,10 +571,10 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveLVDS = I915_READ(LVDS);
}
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -610,7 +602,7 @@ void i915_save_display(struct drm_device *dev)
/* Only save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
} else if (IS_GM45(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
@@ -626,7 +618,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -668,24 +660,24 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
else
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
- if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -716,7 +708,7 @@ void i915_restore_display(struct drm_device *dev)
/* only restore FBC info on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
ironlake_disable_fbc(dev);
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
@@ -731,15 +723,14 @@ void i915_restore_display(struct drm_device *dev)
}
}
/* VGA state */
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
- POSTING_READ(VGA_PD);
- udelay(150);
+ DRM_UDELAY(150);
i915_restore_vga(dev);
}
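Editor's note: the restore hunks above replace each POSTING_READ()-plus-udelay() pair with a bare DRM_UDELAY(). The idiom being removed exists because MMIO writes can be posted by the bus; reading the register back forces the write out to the device before the delay starts, so the delay times the hardware settling rather than the bus. A hedged sketch of that idiom, not taken from the patch, using standard MMIO accessors:

	#include <linux/io.h>
	#include <linux/delay.h>

	static void write_then_settle(void __iomem *reg, u32 val)
	{
		writel(val, reg);
		(void)readl(reg);	/* flush the posted write to the device */
		udelay(150);		/* PLL settle time the driver uses */
	}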
@@ -757,7 +748,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
/* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveDEIER = I915_READ(DEIER);
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
@@ -771,7 +762,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
/* Cache mode state */
@@ -829,7 +820,7 @@ int i915_restore_state(struct drm_device *dev)
i915_restore_display(dev);
/* Interrupt state */
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(DEIER, dev_priv->saveDEIER);
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -844,7 +835,7 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE_M(dev))
ironlake_enable_drps(dev);
/* Cache mode state */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4b77351..ee0732b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -160,20 +160,19 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa, temp;
bool ret;
- bool turn_off_dac = false;
temp = adpa = I915_READ(PCH_ADPA);
- if (HAS_PCH_SPLIT(dev))
- turn_off_dac = true;
-
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- if (turn_off_dac)
- adpa &= ~ADPA_DAC_ENABLE;
-
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
+ if (HAS_PCH_CPT(dev)) {
+ /* Disable DAC before force detect */
+ I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
+ (void)I915_READ(PCH_ADPA);
+ } else {
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
+ }
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -186,11 +185,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
I915_WRITE(PCH_ADPA, adpa);
- if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000, 1))
- DRM_ERROR("timed out waiting for FORCE_TRIGGER");
+ while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
+ ;
- if (turn_off_dac) {
+ if (HAS_PCH_CPT(dev)) {
I915_WRITE(PCH_ADPA, temp);
(void)I915_READ(PCH_ADPA);
}
@@ -239,13 +237,17 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
for (i = 0; i < tries ; i++) {
+ unsigned long timeout;
/* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ timeout = jiffies + msecs_to_jiffies(1000);
/* wait for FORCE_DETECT to go off */
- if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
- CRT_HOTPLUG_FORCE_DETECT) == 0,
- 1000, 1))
- DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
+ do {
+ if (!(I915_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT))
+ break;
+ msleep(1);
+ } while (time_after(timeout, jiffies));
}
stat = I915_READ(PORT_HOTPLUG_STAT);
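Editor's note: the hunk above replaces the driver's wait_for() macro with an open-coded jiffies-based poll. The same idiom, as a standalone sketch (poll_status() is a hypothetical callback standing in for I915_READ(); this is not a driver API):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	/* Poll until (poll_status() & mask) clears, or timeout_ms elapses. */
	static int poll_until_clear(u32 (*poll_status)(void), u32 mask,
				    unsigned int timeout_ms)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

		do {
			if (!(poll_status() & mask))
				return 0;	/* condition met in time */
			msleep(1);		/* sleep rather than spin */
		} while (time_after(timeout, jiffies));

		return -ETIMEDOUT;
	}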
@@ -329,7 +331,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
/* Wait for next Vblank to substitute
* border color for Color info */
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev);
st00 = I915_READ8(VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
@@ -506,8 +508,17 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs
.best_encoder = intel_attached_encoder,
};
+static void intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_crt_enc_destroy,
};
void intel_crt_init(struct drm_device *dev)
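Editor's note: intel_crt_enc_destroy() above reinstates a per-encoder destroy hook in place of the shared intel_encoder_destroy() that this patch removes from intel_display.c. A minimal sketch of the same shape, assuming a hypothetical wrapper type (it mirrors the pattern, it is not driver code):

	#include <linux/slab.h>
	#include "drmP.h"

	struct my_encoder {
		struct drm_encoder base;
		/* per-encoder private state would live here */
	};

	static void my_encoder_destroy(struct drm_encoder *encoder)
	{
		struct my_encoder *enc =
			container_of(encoder, struct my_encoder, base);

		/* release private resources first (i2c buses, etc.), then: */
		drm_encoder_cleanup(encoder);	/* unlink from the mode config */
		kfree(enc);			/* free the embedding object */
	}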
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 23157e1..5ec10e0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -29,7 +29,6 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/vgaarb.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
@@ -977,54 +976,14 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
-/**
- * intel_wait_for_vblank - wait for vblank on a given pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * Wait for vblank to occur on a given pipe. Needed for various bits of
- * mode setting code.
- */
-void intel_wait_for_vblank(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
-
- /* Wait for vblank interrupt bit to set */
- if (wait_for((I915_READ(pipestat_reg) &
- PIPE_VBLANK_INTERRUPT_STATUS) == 0,
- 50, 0))
- DRM_DEBUG_KMS("vblank wait timed out\n");
-}
-
-/**
- * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
- * @dev: drm device
- * @pipe: pipe to wait for
- *
- * After disabling a pipe, we can't wait for vblank in the usual way,
- * spinning on the vblank interrupt status bit, since we won't actually
- * see an interrupt when the pipe is disabled.
- *
- * So this function waits for the display line value to settle (it
- * usually ends up stopping at the start of the next frame).
- */
-void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+void
+intel_wait_for_vblank(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
- u32 last_line;
-
- /* Wait for the display line to settle */
- do {
- last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
- mdelay(5);
- } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
- time_after(timeout, jiffies));
-
- if (time_after(jiffies, timeout))
- DRM_DEBUG_KMS("vblank wait timed out\n");
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ if (in_dbg_master())
+ mdelay(20); /* The kernel debugger cannot call msleep() */
+ else
+ msleep(20);
}
/* Parameters have changed, update FBC info */
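Editor's note: the replacement intel_wait_for_vblank() above waits a fixed 20 ms (one frame period at 50 Hz) instead of polling pipe status, and must special-case the kernel debugger, which cannot schedule. The general shape of that choice, as a hedged sketch (delay_ms() is a hypothetical helper, not a kernel API):

	#include <linux/delay.h>
	#include <linux/kgdb.h>

	static void delay_ms(unsigned int ms)
	{
		if (in_dbg_master())
			mdelay(ms);	/* kgdb context: cannot sleep, must spin */
		else
			msleep(ms);	/* normal context: yield the CPU */
	}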
@@ -1078,6 +1037,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1);
u32 fbc_ctl;
if (!I915_HAS_FBC(dev))
@@ -1092,11 +1052,16 @@ void i8xx_disable_fbc(struct drm_device *dev)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
+ while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
+ if (time_after(jiffies, timeout)) {
+ DRM_DEBUG_DRIVER("FBC idle timed out\n");
+ break;
+ }
+ ; /* do nothing */
}
+ intel_wait_for_vblank(dev);
+
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1153,6 +1118,7 @@ void g4x_disable_fbc(struct drm_device *dev)
dpfc_ctl = I915_READ(DPFC_CONTROL);
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1213,6 +1179,7 @@ void ironlake_disable_fbc(struct drm_device *dev)
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ intel_wait_for_vblank(dev);
DRM_DEBUG_KMS("disabled FBC\n");
}
@@ -1511,7 +1478,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if ((IS_I965G(dev) || plane == 0))
intel_update_fbc(crtc, &crtc->mode);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
intel_increase_pllclock(crtc, true);
return 0;
@@ -1618,18 +1585,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
Start, Offset, x, y, crtc->fb->pitch);
I915_WRITE(dspstride, crtc->fb->pitch);
if (IS_I965G(dev)) {
+ I915_WRITE(dspbase, Offset);
+ I915_READ(dspbase);
I915_WRITE(dspsurf, Start);
+ I915_READ(dspsurf);
I915_WRITE(dsptileoff, (y << 16) | x);
- I915_WRITE(dspbase, Offset);
} else {
I915_WRITE(dspbase, Start + Offset);
+ I915_READ(dspbase);
}
- POSTING_READ(dspbase);
if ((IS_I965G(dev) || plane == 0))
intel_update_fbc(crtc, &crtc->mode);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev);
if (old_fb) {
intel_fb = to_intel_framebuffer(old_fb);
@@ -1658,6 +1627,54 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga (struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 sr1;
+ u32 vga_reg;
+
+ if (HAS_PCH_SPLIT(dev))
+ vga_reg = CPU_VGACNTRL;
+ else
+ vga_reg = VGACNTRL;
+
+ if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
+ return;
+
+ I915_WRITE8(VGA_SR_INDEX, 1);
+ sr1 = I915_READ8(VGA_SR_DATA);
+ I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
+ udelay(100);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+}
+
+static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ DRM_DEBUG_KMS("\n");
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl &= ~DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+}
+
+static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpa_ctl;
+
+ dpa_ctl = I915_READ(DP_A);
+ dpa_ctl |= DP_PLL_ENABLE;
+ I915_WRITE(DP_A, dpa_ctl);
+ udelay(200);
+}
+
+
static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
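Editor's note: the i915_disable_vga() added in the hunk above pokes the VGA sequencer through the driver's MMIO-mapped VGA_SR_INDEX/VGA_SR_DATA pair before writing VGA_DISP_DISABLE. For reference, 0x3C4/0x3C5 are the standard sequencer index/data ports and bit 5 of SR01 is the screen-off bit; the same access over legacy I/O looks roughly like this sketch (assuming port access is permitted, e.g. under the VGA arbiter):

	#include <linux/io.h>

	static void vga_screen_off_legacy(void)
	{
		u8 sr1;

		outb(1, 0x3c4);			/* select SR01, the clocking-mode register */
		sr1 = inb(0x3c5);
		outb(sr1 | (1 << 5), 0x3c5);	/* set bit 5: screen off */
	}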
@@ -1928,6 +1945,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
int trans_dpll_sel = (pipe == 0) ? 0 : 1;
u32 temp;
+ int n;
u32 pipe_bpc;
temp = I915_READ(pipeconf_reg);
@@ -1940,7 +1958,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
+ DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
temp = I915_READ(PCH_LVDS);
@@ -1950,7 +1968,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
- if (!HAS_eDP) {
+ if (HAS_eDP) {
+ /* enable eDP PLL */
+ ironlake_enable_pll_edp(crtc);
+ } else {
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
@@ -1984,13 +2005,15 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Enable panel fitting for LVDS */
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
|| HAS_eDP || intel_pch_has_edp(crtc)) {
- if (dev_priv->pch_pf_size) {
- temp = I915_READ(pf_ctl_reg);
- I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos);
- I915_WRITE(pf_win_size, dev_priv->pch_pf_size);
- } else
- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+ temp = I915_READ(pf_ctl_reg);
+ I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
+
+ /* currently full aspect */
+ I915_WRITE(pf_win_pos, 0);
+
+ I915_WRITE(pf_win_size,
+ (dev_priv->panel_fixed_mode->hdisplay << 16) |
+ (dev_priv->panel_fixed_mode->vdisplay));
}
/* Enable CPU pipe */
@@ -2074,10 +2097,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int reg;
reg = I915_READ(trans_dp_ctl);
- reg &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK);
- reg |= (TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_ENH_FRAMING);
+ reg &= ~TRANS_DP_PORT_SEL_MASK;
+ reg = TRANS_DP_OUTPUT_ENABLE |
+ TRANS_DP_ENH_FRAMING;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2115,17 +2137,18 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
- if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0))
- DRM_ERROR("failed to enable transcoder\n");
+ while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
+ ;
+
}
intel_crtc_load_lut(crtc);
intel_update_fbc(crtc, &crtc->mode);
- break;
+ break;
case DRM_MODE_DPMS_OFF:
- DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
+ DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
drm_vblank_off(dev, pipe);
/* Disable display plane */
@@ -2141,14 +2164,26 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
+ i915_disable_vga(dev);
+
/* disable cpu pipe, disable after all planes disabled */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-
+ I915_READ(pipeconf_reg);
+ n = 0;
/* wait for cpu pipe off, pipe state */
- if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
- DRM_ERROR("failed to turn off cpu pipe\n");
+ while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
+ n++;
+ if (n < 60) {
+ udelay(500);
+ continue;
+ } else {
+ DRM_DEBUG_KMS("pipe %d off delay\n",
+ pipe);
+ break;
+ }
+ }
} else
DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
@@ -2209,10 +2244,20 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
temp = I915_READ(transconf_reg);
if ((temp & TRANS_ENABLE) != 0) {
I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
-
+ I915_READ(transconf_reg);
+ n = 0;
/* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
- DRM_ERROR("failed to disable transcoder\n");
+ while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
+ n++;
+ if (n < 60) {
+ udelay(500);
+ continue;
+ } else {
+ DRM_DEBUG_KMS("transcoder %d off "
+ "delay\n", pipe);
+ break;
+ }
+ }
}
temp = I915_READ(transconf_reg);
@@ -2249,6 +2294,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
I915_READ(pch_dpll_reg);
+ if (HAS_eDP) {
+ ironlake_disable_pll_edp(crtc);
+ }
+
/* Switch from PCDclk to Rawclk */
temp = I915_READ(fdi_rx_reg);
temp &= ~FDI_SEL_PCDCLK;
@@ -2323,6 +2372,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
+ intel_update_watermarks(dev);
+
/* Enable the DPLL */
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) == 0) {
@@ -2362,6 +2413,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_crtc_dpms_overlay(intel_crtc, true);
break;
case DRM_MODE_DPMS_OFF:
+ intel_update_watermarks(dev);
+
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, false);
drm_vblank_off(dev, pipe);
@@ -2370,6 +2423,9 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
+ /* Disable the VGA plane that we never use */
+ i915_disable_vga(dev);
+
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2379,8 +2435,10 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
- /* Wait for vblank for the disable to take effect */
- intel_wait_for_vblank_off(dev, pipe);
+ if (!IS_I9XX(dev)) {
+ /* Wait for vblank for the disable to take effect */
+ intel_wait_for_vblank(dev);
+ }
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipeconf_reg == PIPEACONF &&
@@ -2395,7 +2453,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
}
/* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank_off(dev, pipe);
+ intel_wait_for_vblank(dev);
temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2411,6 +2469,9 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
/**
* Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
*/
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
@@ -2421,26 +2482,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
int pipe = intel_crtc->pipe;
bool enabled;
- intel_crtc->dpms_mode = mode;
- intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
-
- /* When switching on the display, ensure that SR is disabled
- * with multiple pipes prior to enabling to new pipe.
- *
- * When switching off the display, make sure the cursor is
- * properly hidden prior to disabling the pipe.
- */
- if (mode == DRM_MODE_DPMS_ON)
- intel_update_watermarks(dev);
- else
- intel_crtc_update_cursor(crtc);
-
dev_priv->display.dpms(crtc, mode);
- if (mode == DRM_MODE_DPMS_ON)
- intel_crtc_update_cursor(crtc);
- else
- intel_update_watermarks(dev);
+ intel_crtc->dpms_mode = mode;
if (!dev->primary->master)
return;
@@ -2492,20 +2536,6 @@ void intel_encoder_commit (struct drm_encoder *encoder)
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
-void intel_encoder_destroy(struct drm_encoder *encoder)
-{
- struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
- if (intel_encoder->ddc_bus)
- intel_i2c_destroy(intel_encoder->ddc_bus);
-
- if (intel_encoder->i2c_bus)
- intel_i2c_destroy(intel_encoder->i2c_bus);
-
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -2837,7 +2867,7 @@ struct cxsr_latency {
unsigned long cursor_hpll_disable;
};
-static const struct cxsr_latency cxsr_latency_table[] = {
+static struct cxsr_latency cxsr_latency_table[] = {
{1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
{1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
{1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
@@ -2875,13 +2905,11 @@ static const struct cxsr_latency cxsr_latency_table[] = {
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
-static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
- int is_ddr3,
- int fsb,
- int mem)
+static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
+ int fsb, int mem)
{
- const struct cxsr_latency *latency;
int i;
+ struct cxsr_latency *latency;
if (fsb == 0 || mem == 0)
return NULL;
@@ -2902,9 +2930,13 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg;
/* deactivate cxsr */
- I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+ reg = I915_READ(DSPFW3);
+ reg &= ~(PINEVIEW_SELF_REFRESH_EN);
+ I915_WRITE(DSPFW3, reg);
+ DRM_INFO("Big FIFO is disabled\n");
}
/*
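Editor's note: pineview_disable_cxsr() above, and its enable counterpart later in this hunk series, open-code the same read-modify-write of DSPFW3. The generic shape of that access, as a minimal sketch with standard MMIO accessors:

	#include <linux/io.h>

	static void rmw_bits(void __iomem *reg, u32 clear, u32 set)
	{
		u32 v = readl(reg);

		v &= ~clear;
		v |= set;
		writel(v, reg);
	}

Disabling self-refresh would then be, in these hypothetical terms, rmw_bits(dspfw3, PINEVIEW_SELF_REFRESH_EN, 0).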
@@ -2992,12 +3024,12 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
+ struct cxsr_latency *latency;
int sr_clock;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
@@ -3043,8 +3075,9 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
/* activate cxsr */
- I915_WRITE(DSPFW3,
- I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+ reg = I915_READ(DSPFW3);
+ reg |= PINEVIEW_SELF_REFRESH_EN;
+ I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("Self-refresh is enabled\n");
} else {
pineview_disable_cxsr(dev);
@@ -3321,11 +3354,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
int line_count;
int planea_htotal = 0, planeb_htotal = 0;
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
/* Need htotal for all active display plane */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled) {
if (intel_crtc->plane == 0)
planea_htotal = crtc->mode.htotal;
else
@@ -3485,6 +3519,7 @@ static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
int sr_hdisplay = 0;
unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
int enabled = 0, pixel_size = 0;
@@ -3495,8 +3530,8 @@ static void intel_update_watermarks(struct drm_device *dev)
/* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled) {
enabled++;
if (intel_crtc->plane == 0) {
DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
@@ -3931,7 +3966,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll_reg = pch_dpll_reg;
}
- if (!is_edp) {
+ if (is_edp) {
+ ironlake_disable_pll_edp(crtc);
+ } else if ((dpll & DPLL_VCO_ENABLE)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
I915_READ(dpll_reg);
@@ -4130,7 +4167,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(pipeconf_reg, pipeconf);
I915_READ(pipeconf_reg);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev);
if (IS_IRONLAKE(dev)) {
/* enable address swizzle for tiling buffer */
@@ -4143,6 +4180,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Flush the plane changes */
ret = intel_pipe_set_base(crtc, x, y, old_fb);
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
intel_update_watermarks(dev);
drm_vblank_post_modeset(dev, pipe);
@@ -4176,62 +4216,6 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
}
}
-static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- bool visible = base != 0;
- u32 cntl;
-
- if (intel_crtc->cursor_visible == visible)
- return;
-
- cntl = I915_READ(CURACNTR);
- if (visible) {
- /* On these chipsets we can only modify the base whilst
- * the cursor is disabled.
- */
- I915_WRITE(CURABASE, base);
-
- cntl &= ~(CURSOR_FORMAT_MASK);
- /* XXX width must be 64, stride 256 => 0x00 << 28 */
- cntl |= CURSOR_ENABLE |
- CURSOR_GAMMA_ENABLE |
- CURSOR_FORMAT_ARGB;
- } else
- cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
- I915_WRITE(CURACNTR, cntl);
-
- intel_crtc->cursor_visible = visible;
-}
-
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- bool visible = base != 0;
-
- if (intel_crtc->cursor_visible != visible) {
- uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
- if (base) {
- cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
- cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
- cntl |= pipe << 28; /* Connect to correct pipe */
- } else {
- cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
- cntl |= CURSOR_MODE_DISABLE;
- }
- I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
-
- intel_crtc->cursor_visible = visible;
- }
- /* and commit changes on next vblank */
- I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
-}
-
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc)
{
@@ -4241,12 +4225,12 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
- u32 base, pos;
+ uint32_t base, pos;
bool visible;
pos = 0;
- if (intel_crtc->cursor_on && crtc->fb) {
+ if (crtc->fb) {
base = intel_crtc->cursor_addr;
if (x > (int) crtc->fb->width)
base = 0;
@@ -4275,14 +4259,37 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
pos |= y << CURSOR_Y_SHIFT;
visible = base != 0;
- if (!visible && !intel_crtc->cursor_visible)
+ if (!visible && !intel_crtc->cursor_visble)
return;
I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
- if (IS_845G(dev) || IS_I865G(dev))
- i845_update_cursor(crtc, base);
- else
- i9xx_update_cursor(crtc, base);
+ if (intel_crtc->cursor_visble != visible) {
+ uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+ if (base) {
+ /* Hooray for CUR*CNTR differences */
+ if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ cntl |= pipe << 28; /* Connect to correct pipe */
+ } else {
+ cntl &= ~(CURSOR_FORMAT_MASK);
+ cntl |= CURSOR_ENABLE;
+ cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
+ }
+ } else {
+ if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ } else {
+ cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+ }
+ }
+ I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+
+ intel_crtc->cursor_visble = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
if (visible)
intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
@@ -4347,10 +4354,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = obj_priv->gtt_offset;
} else {
- int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, bo,
- (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
- align);
+ (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
@@ -4539,7 +4544,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
encoder_funcs->commit(encoder);
}
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
return crtc;
}
@@ -4744,7 +4749,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
dpll = I915_READ(dpll_reg);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev);
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -4788,7 +4793,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
dpll = I915_READ(dpll_reg);
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
@@ -5078,16 +5083,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->pending_flip_obj = obj;
if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
- if (IS_GEN3(dev) || IS_GEN2(dev)) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
+ /* Wait for any previous flip to finish */
+ if (IS_GEN3(dev))
+ while (I915_READ(ISR) & flip_mask)
+ ;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = obj_priv->gtt_offset;
@@ -5101,14 +5104,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(offset | obj_priv->tiling_mode);
pipesrc = I915_READ(pipesrc_reg);
OUT_RING(pipesrc & 0x0fff0fff);
- } else if (IS_GEN3(dev)) {
- OUT_RING(MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(offset);
- OUT_RING(MI_NOOP);
} else {
- OUT_RING(MI_DISPLAY_FLIP |
+ OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(offset);
@@ -5435,37 +5432,37 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
};
static struct drm_gem_object *
-intel_alloc_context_page(struct drm_device *dev)
+intel_alloc_power_context(struct drm_device *dev)
{
- struct drm_gem_object *ctx;
+ struct drm_gem_object *pwrctx;
int ret;
- ctx = i915_gem_alloc_object(dev, 4096);
- if (!ctx) {
+ pwrctx = i915_gem_alloc_object(dev, 4096);
+ if (!pwrctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
}
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(ctx, 4096);
+ ret = i915_gem_object_pin(pwrctx, 4096);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
}
- ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+ ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
if (ret) {
DRM_ERROR("failed to set-domain on power context: %d\n", ret);
goto err_unpin;
}
mutex_unlock(&dev->struct_mutex);
- return ctx;
+ return pwrctx;
err_unpin:
- i915_gem_object_unpin(ctx);
+ i915_gem_object_unpin(pwrctx);
err_unref:
- drm_gem_object_unreference(ctx);
+ drm_gem_object_unreference(pwrctx);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
@@ -5497,6 +5494,7 @@ void ironlake_enable_drps(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
+ int i = 0;
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
@@ -5540,8 +5538,13 @@ void ironlake_enable_drps(struct drm_device *dev)
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
- if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
- DRM_ERROR("stuck trying to change perf mode\n");
+ while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
+ if (i++ > 100) {
+ DRM_ERROR("stuck trying to change perf mode\n");
+ break;
+ }
+ msleep(1);
+ }
msleep(1);
ironlake_set_drps(dev, fstart);
@@ -5722,8 +5725,7 @@ void intel_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
- if (IS_GEN6(dev))
- return;
+ return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5766,31 +5768,6 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
- if (IS_IRONLAKE_M(dev)) {
- if (dev_priv->renderctx == NULL)
- dev_priv->renderctx = intel_alloc_context_page(dev);
- if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
- obj_priv = to_intel_bo(dev_priv->renderctx);
- if (obj_priv) {
- BEGIN_LP_RING(4);
- OUT_RING(MI_SET_CONTEXT);
- OUT_RING(obj_priv->gtt_offset |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
- OUT_RING(MI_NOOP);
- OUT_RING(MI_FLUSH);
- ADVANCE_LP_RING();
- }
- } else {
- DRM_DEBUG_KMS("Failed to allocate render context."
- "Disable RC6\n");
- return;
- }
- }
-
if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_i915_gem_object *obj_priv = NULL;
@@ -5799,7 +5776,7 @@ void intel_init_clock_gating(struct drm_device *dev)
} else {
struct drm_gem_object *pwrctx;
- pwrctx = intel_alloc_context_page(dev);
+ pwrctx = intel_alloc_power_context(dev);
if (pwrctx) {
dev_priv->pwrctx = pwrctx;
obj_priv = to_intel_bo(pwrctx);
@@ -5971,29 +5948,6 @@ static void intel_init_quirks(struct drm_device *dev)
}
}
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u8 sr1;
- u32 vga_reg;
-
- if (HAS_PCH_SPLIT(dev))
- vga_reg = CPU_VGACNTRL;
- else
- vga_reg = VGACNTRL;
-
- vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
- outb(1, VGA_SR_INDEX);
- sr1 = inb(VGA_SR_DATA);
- outb(sr1 | 1<<5, VGA_SR_DATA);
- vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
- udelay(300);
-
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
-}
-
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6042,9 +5996,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_clock_gating(dev);
- /* Just disable it once at startup */
- i915_disable_vga(dev);
-
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
@@ -6083,16 +6034,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(dev_priv->renderctx);
- I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
- I915_READ(CCID);
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(dev_priv->renderctx);
- }
-
if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj_priv;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 9caccd0..40be1fa 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,11 +42,10 @@
#define DP_LINK_CONFIGURATION_SIZE 9
-#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
-#define IS_PCH_eDP(i) ((i)->is_pch_edp)
+#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
+#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp)
-struct intel_dp {
- struct intel_encoder base;
+struct intel_dp_priv {
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
@@ -55,39 +54,40 @@ struct intel_dp {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
+ struct intel_encoder *intel_encoder;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
};
-static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
-}
+static void
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
-static void intel_dp_link_train(struct intel_dp *intel_dp);
-static void intel_dp_link_down(struct intel_dp *intel_dp);
+static void
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
void
intel_edp_link_config (struct intel_encoder *intel_encoder,
- int *lane_num, int *link_bw)
+ int *lane_num, int *link_bw)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- *lane_num = intel_dp->lane_count;
- if (intel_dp->link_bw == DP_LINK_BW_1_62)
+ *lane_num = dp_priv->lane_count;
+ if (dp_priv->link_bw == DP_LINK_BW_1_62)
*link_bw = 162000;
- else if (intel_dp->link_bw == DP_LINK_BW_2_7)
+ else if (dp_priv->link_bw == DP_LINK_BW_2_7)
*link_bw = 270000;
}
static int
-intel_dp_max_lane_count(struct intel_dp *intel_dp)
+intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
{
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int max_lane_count = 4;
- if (intel_dp->dpcd[0] >= 0x11) {
- max_lane_count = intel_dp->dpcd[2] & 0x1f;
+ if (dp_priv->dpcd[0] >= 0x11) {
+ max_lane_count = dp_priv->dpcd[2] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
@@ -99,9 +99,10 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
}
static int
-intel_dp_max_link_bw(struct intel_dp *intel_dp)
+intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
{
- int max_link_bw = intel_dp->dpcd[1];
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ int max_link_bw = dp_priv->dpcd[1];
switch (max_link_bw) {
case DP_LINK_BW_1_62:
@@ -125,11 +126,13 @@ intel_dp_link_clock(uint8_t link_bw)
/* I think this is a fiction */
static int
-intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct drm_device *dev,
+ struct intel_encoder *intel_encoder, int pixel_clock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv))
return (pixel_clock * dev_priv->edp_bpp) / 8;
else
return pixel_clock * 3;
@@ -146,13 +149,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
- int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
+ int max_lanes = intel_dp_max_lane_count(intel_encoder);
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+ if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
dev_priv->panel_fixed_mode) {
if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
return MODE_PANEL;
@@ -163,8 +167,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
/* only refuse the mode on non-eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
- if (!IS_eDP(intel_dp) &&
- (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
+ if (!IS_eDP(intel_encoder) &&
+ (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
@@ -228,12 +232,13 @@ intel_hrawclk(struct drm_device *dev)
}
static int
-intel_dp_aux_ch(struct intel_dp *intel_dp,
+intel_dp_aux_ch(struct intel_encoder *intel_encoder,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
- uint32_t output_reg = intel_dp->output_reg;
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ uint32_t output_reg = dp_priv->output_reg;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
@@ -248,7 +253,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
*/
- if (IS_eDP(intel_dp)) {
+ if (IS_eDP(intel_encoder)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
else
@@ -339,7 +344,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Write data to the aux channel in native mode */
static int
-intel_dp_aux_native_write(struct intel_dp *intel_dp,
+intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
@@ -356,7 +361,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
for (;;) {
- ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
+ ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -371,15 +376,15 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
/* Write a single byte to the aux channel in native mode */
static int
-intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
+intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t byte)
{
- return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
+ return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
}
/* read bytes from a native aux channel */
static int
-intel_dp_aux_native_read(struct intel_dp *intel_dp,
+intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
@@ -398,7 +403,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
reply_bytes = recv_bytes + 1;
for (;;) {
- ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
+ ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
return -EPROTO;
@@ -421,9 +426,10 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- struct intel_dp *intel_dp = container_of(adapter,
- struct intel_dp,
- adapter);
+ struct intel_dp_priv *dp_priv = container_of(adapter,
+ struct intel_dp_priv,
+ adapter);
+ struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
@@ -462,7 +468,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
for (;;) {
- ret = intel_dp_aux_ch(intel_dp,
+ ret = intel_dp_aux_ch(intel_encoder,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
@@ -490,42 +496,57 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-intel_dp_i2c_init(struct intel_dp *intel_dp,
+intel_dp_i2c_init(struct intel_encoder *intel_encoder,
struct intel_connector *intel_connector, const char *name)
{
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+
DRM_DEBUG_KMS("i2c_init %s\n", name);
- intel_dp->algo.running = false;
- intel_dp->algo.address = 0;
- intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
-
- memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
- intel_dp->adapter.owner = THIS_MODULE;
- intel_dp->adapter.class = I2C_CLASS_DDC;
- strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
- intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
- intel_dp->adapter.algo_data = &intel_dp->algo;
- intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
-
- return i2c_dp_aux_add_bus(&intel_dp->adapter);
+ dp_priv->algo.running = false;
+ dp_priv->algo.address = 0;
+ dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
+
+ memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
+ dp_priv->adapter.owner = THIS_MODULE;
+ dp_priv->adapter.class = I2C_CLASS_DDC;
+ strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
+ dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
+ dp_priv->adapter.algo_data = &dp_priv->algo;
+ dp_priv->adapter.dev.parent = &intel_connector->base.kdev;
+
+ return i2c_dp_aux_add_bus(&dp_priv->adapter);
}
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int lane_count, clock;
- int max_lane_count = intel_dp_max_lane_count(intel_dp);
- int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int max_lane_count = intel_dp_max_lane_count(intel_encoder);
+ int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+ if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
dev_priv->panel_fixed_mode) {
- intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
- intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
- mode, adjusted_mode);
+ struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
/*
* the mode->clock is used to calculate the Data&Link M/N
* of the pipe. For the eDP the fixed clock should be used.
@@ -537,33 +558,31 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
+ if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
<= link_avail) {
- intel_dp->link_bw = bws[clock];
- intel_dp->lane_count = lane_count;
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ dp_priv->link_bw = bws[clock];
+ dp_priv->lane_count = lane_count;
+ adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
DRM_DEBUG_KMS("Display port link bw %02x lane "
"count %d clock %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
+ dp_priv->link_bw, dp_priv->lane_count,
adjusted_mode->clock);
return true;
}
}
}
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
/* okay we failed just pick the highest */
- intel_dp->lane_count = max_lane_count;
- intel_dp->link_bw = bws[max_clock];
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ dp_priv->lane_count = max_lane_count;
+ dp_priv->link_bw = bws[max_clock];
+ adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
"count %d clock %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
+ dp_priv->link_bw, dp_priv->lane_count,
adjusted_mode->clock);
-
return true;
}
-
return false;
}
@@ -607,14 +626,17 @@ bool intel_pch_has_edp(struct drm_crtc *crtc)
struct drm_encoder *encoder;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp_priv *dp_priv;
- if (encoder->crtc != crtc)
+ if (!encoder || encoder->crtc != crtc)
continue;
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
- return intel_dp->is_pch_edp;
+ intel_encoder = enc_to_intel_encoder(encoder);
+ dp_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT)
+ return dp_priv->is_pch_edp;
}
return false;
}
@@ -635,15 +657,18 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* Find the lane count in the intel_encoder private
*/
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
+ struct intel_encoder *intel_encoder;
+ struct intel_dp_priv *dp_priv;
if (encoder->crtc != crtc)
continue;
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
- lane_count = intel_dp->lane_count;
- if (IS_PCH_eDP(intel_dp))
+ intel_encoder = enc_to_intel_encoder(encoder);
+ dp_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = dp_priv->lane_count;
+ if (IS_PCH_eDP(dp_priv))
bpp = dev_priv->edp_bpp;
break;
}
@@ -699,114 +724,107 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_crtc *crtc = intel_dp->base.enc.crtc;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct drm_crtc *crtc = intel_encoder->enc.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_dp->DP = (DP_VOLTAGE_0_4 |
+ dp_priv->DP = (DP_VOLTAGE_0_4 |
DP_PRE_EMPHASIS_0);
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- intel_dp->DP |= DP_SYNC_HS_HIGH;
+ dp_priv->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- intel_dp->DP |= DP_SYNC_VS_HIGH;
+ dp_priv->DP |= DP_SYNC_VS_HIGH;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
- intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
+ dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT;
else
- intel_dp->DP |= DP_LINK_TRAIN_OFF;
+ dp_priv->DP |= DP_LINK_TRAIN_OFF;
- switch (intel_dp->lane_count) {
+ switch (dp_priv->lane_count) {
case 1:
- intel_dp->DP |= DP_PORT_WIDTH_1;
+ dp_priv->DP |= DP_PORT_WIDTH_1;
break;
case 2:
- intel_dp->DP |= DP_PORT_WIDTH_2;
+ dp_priv->DP |= DP_PORT_WIDTH_2;
break;
case 4:
- intel_dp->DP |= DP_PORT_WIDTH_4;
+ dp_priv->DP |= DP_PORT_WIDTH_4;
break;
}
- if (intel_dp->has_audio)
- intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ if (dp_priv->has_audio)
+ dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
- memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
- intel_dp->link_configuration[0] = intel_dp->link_bw;
- intel_dp->link_configuration[1] = intel_dp->lane_count;
+ memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ dp_priv->link_configuration[0] = dp_priv->link_bw;
+ dp_priv->link_configuration[1] = dp_priv->lane_count;
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
- if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
- intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- intel_dp->DP |= DP_ENHANCED_FRAMING;
+ if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+ dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ dp_priv->DP |= DP_ENHANCED_FRAMING;
}
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
- intel_dp->DP |= DP_PIPEB_SELECT;
+ dp_priv->DP |= DP_PIPEB_SELECT;
- if (IS_eDP(intel_dp)) {
+ if (IS_eDP(intel_encoder)) {
/* don't miss the required settings for eDP */
- intel_dp->DP |= DP_PLL_ENABLE;
+ dp_priv->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
- intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ dp_priv->DP |= DP_PLL_FREQ_160MHZ;
else
- intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ dp_priv->DP |= DP_PLL_FREQ_270MHZ;
}
}
static void ironlake_edp_panel_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp;
+ unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+ u32 pp, pp_status;
- if (I915_READ(PCH_PP_STATUS) & PP_ON)
+ pp_status = I915_READ(PCH_PP_STATUS);
+ if (pp_status & PP_ON)
return;
pp = I915_READ(PCH_PP_CONTROL);
-
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
-
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
+ do {
+ pp_status = I915_READ(PCH_PP_STATUS);
+ } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
- if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
- DRM_ERROR("panel on wait timed out: 0x%08x\n",
- I915_READ(PCH_PP_STATUS));
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status);
pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
- pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
}
static void ironlake_edp_panel_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp;
+ unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+ u32 pp, pp_status;
pp = I915_READ(PCH_PP_CONTROL);
-
- /* ILK workaround: disable reset around power sequence */
- pp &= ~PANEL_POWER_RESET;
- I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
-
pp &= ~POWER_TARGET_ON;
I915_WRITE(PCH_PP_CONTROL, pp);
+ do {
+ pp_status = I915_READ(PCH_PP_STATUS);
+ } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
- if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
- DRM_ERROR("panel off wait timed out: 0x%08x\n",
- I915_READ(PCH_PP_STATUS));
+ if (time_after(jiffies, timeout))
+ DRM_DEBUG_KMS("panel off wait timed out\n");
/* Make sure VDD is enabled so DP AUX will work */
- pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
+ pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
- POSTING_READ(PCH_PP_CONTROL);
}
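
Both panel helpers above now open-code the same bounded poll: take a deadline in jiffies, spin on the status register, and merely log a timeout. A standalone sketch of the idiom, with read_pp_status() standing in for the I915_READ(PCH_PP_STATUS) access; note that, unlike the wait_for() helper removed from intel_drv.h below, this loop never sleeps, so it can busy-wait a CPU for the full five seconds:

unsigned long timeout = jiffies + msecs_to_jiffies(5000);  /* 5 s deadline */
u32 status;

do {
        status = read_pp_status();      /* assumed register-read helper */
} while ((status & PP_ON) == 0 && !time_after(jiffies, timeout));

if (time_after(jiffies, timeout))
        pr_debug("panel on wait timed out: 0x%08x\n", status);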
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -831,87 +849,33 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
I915_WRITE(PCH_PP_CONTROL, pp);
}
-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- DRM_DEBUG_KMS("\n");
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl &= ~DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
-}
-
-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 dpa_ctl;
-
- dpa_ctl = I915_READ(DP_A);
- dpa_ctl |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, dpa_ctl);
- udelay(200);
-}
-
-static void intel_dp_prepare(struct drm_encoder *encoder)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
-
- if (IS_eDP(intel_dp)) {
- ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_on(dev);
- ironlake_edp_pll_on(encoder);
- }
- if (dp_reg & DP_PORT_EN)
- intel_dp_link_down(intel_dp);
-}
-
-static void intel_dp_commit(struct drm_encoder *encoder)
-{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
-
- if (!(dp_reg & DP_PORT_EN)) {
- intel_dp_link_train(intel_dp);
- }
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- ironlake_edp_backlight_on(dev);
-}
-
static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ uint32_t dp_reg = I915_READ(dp_priv->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
- ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_off(dev);
+ if (dp_reg & DP_PORT_EN) {
+ intel_dp_link_down(intel_encoder, dp_priv->DP);
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
+ ironlake_edp_backlight_off(dev);
+ ironlake_edp_panel_off(dev);
+ }
}
- if (dp_reg & DP_PORT_EN)
- intel_dp_link_down(intel_dp);
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
- ironlake_edp_pll_off(encoder);
} else {
if (!(dp_reg & DP_PORT_EN)) {
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
+ intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
ironlake_edp_panel_on(dev);
- intel_dp_link_train(intel_dp);
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
ironlake_edp_backlight_on(dev);
+ }
}
}
- intel_dp->dpms_mode = mode;
+ dp_priv->dpms_mode = mode;
}
/*
@@ -919,12 +883,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
* link status information
*/
static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp,
+intel_dp_get_link_status(struct intel_encoder *intel_encoder,
uint8_t link_status[DP_LINK_STATUS_SIZE])
{
int ret;
- ret = intel_dp_aux_native_read(intel_dp,
+ ret = intel_dp_aux_native_read(intel_encoder,
DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE);
if (ret != DP_LINK_STATUS_SIZE)
@@ -1001,7 +965,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
+intel_get_adjust_train(struct intel_encoder *intel_encoder,
uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane_count,
uint8_t train_set[4])
@@ -1137,27 +1101,27 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
}
static bool
-intel_dp_set_link_train(struct intel_dp *intel_dp,
+intel_dp_set_link_train(struct intel_encoder *intel_encoder,
uint32_t dp_reg_value,
uint8_t dp_train_pat,
uint8_t train_set[4],
bool first)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
- I915_WRITE(intel_dp->output_reg, dp_reg_value);
- POSTING_READ(intel_dp->output_reg);
+ I915_WRITE(dp_priv->output_reg, dp_reg_value);
+ POSTING_READ(dp_priv->output_reg);
if (first)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
- intel_dp_aux_native_write_1(intel_dp,
+ intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
- ret = intel_dp_aux_native_write(intel_dp,
+ ret = intel_dp_aux_native_write(intel_encoder,
DP_TRAINING_LANE0_SET, train_set, 4);
if (ret != 4)
return false;
@@ -1166,10 +1130,12 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
}
static void
-intel_dp_link_train(struct intel_dp *intel_dp)
+intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
int i;
@@ -1179,15 +1145,13 @@ intel_dp_link_train(struct intel_dp *intel_dp)
bool first = true;
int tries;
u32 reg;
- uint32_t DP = intel_dp->DP;
/* Write the link configuration data */
- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
- intel_dp->link_configuration,
- DP_LINK_CONFIGURATION_SIZE);
+ intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET,
+ link_configuration, DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -1198,39 +1162,39 @@ intel_dp_link_train(struct intel_dp *intel_dp)
for (;;) {
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
- if (!intel_dp_set_link_train(intel_dp, reg,
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_1, train_set, first))
break;
first = false;
/* Set training pattern 1 */
udelay(100);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_encoder, link_status))
break;
- if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
clock_recovery = true;
break;
}
/* Check to see if we've tried the max voltage */
- for (i = 0; i < intel_dp->lane_count; i++)
+ for (i = 0; i < dp_priv->lane_count; i++)
if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == intel_dp->lane_count)
+ if (i == dp_priv->lane_count)
break;
/* Check to see if we've tried the same voltage 5 times */
@@ -1243,7 +1207,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+ intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
}
/* channel equalization */
@@ -1253,30 +1217,30 @@ intel_dp_link_train(struct intel_dp *intel_dp)
/* Use train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
- if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
+ if (IS_GEN6(dev) && IS_eDP(intel_encoder)) {
signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
+ signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
/* channel eq pattern */
- if (!intel_dp_set_link_train(intel_dp, reg,
+ if (!intel_dp_set_link_train(intel_encoder, reg,
DP_TRAINING_PATTERN_2, train_set,
false))
break;
udelay(400);
- if (!intel_dp_get_link_status(intel_dp, link_status))
+ if (!intel_dp_get_link_status(intel_encoder, link_status))
break;
- if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
channel_eq = true;
break;
}
@@ -1286,53 +1250,53 @@ intel_dp_link_train(struct intel_dp *intel_dp)
break;
/* Compute new train_set as requested by target */
- intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
+ intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
++tries;
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
- I915_WRITE(intel_dp->output_reg, reg);
- POSTING_READ(intel_dp->output_reg);
- intel_dp_aux_native_write_1(intel_dp,
+ I915_WRITE(dp_priv->output_reg, reg);
+ POSTING_READ(dp_priv->output_reg);
+ intel_dp_aux_native_write_1(intel_encoder,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}
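
The restored trainer keeps the usual two-phase DisplayPort sequence: pattern 1 until clock recovery locks, raising voltage swing and pre-emphasis as the sink requests (and giving up once every lane reports max swing, or the same voltage has been tried five times), then pattern 2 until channel equalization holds. A compressed sketch of that control flow, with placeholder helpers rather than the driver's own functions:

/* Phase 1: clock recovery on training pattern 1. */
for (;;) {
        set_link_train(DP_TRAINING_PATTERN_1, train_set);
        udelay(100);
        if (!read_link_status(link_status))
                break;                          /* AUX failure: abort */
        if (clock_recovery_ok(link_status, lane_count))
                break;                          /* locked, go equalize */
        if (all_lanes_at_max_swing(train_set, lane_count))
                break;                          /* nothing left to try */
        adjust_train_set(link_status, train_set);  /* per sink's request */
}

/* Phase 2: channel equalization on training pattern 2, bounded tries. */
for (tries = 0; tries < 5; tries++) {
        set_link_train(DP_TRAINING_PATTERN_2, train_set);
        udelay(400);
        if (!read_link_status(link_status))
                break;
        if (channel_eq_ok(link_status, lane_count))
                break;                          /* done, turn training off */
        adjust_train_set(link_status, train_set);
}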
static void
-intel_dp_link_down(struct intel_dp *intel_dp)
+intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
{
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t DP = intel_dp->DP;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
DRM_DEBUG_KMS("\n");
- if (IS_eDP(intel_dp)) {
+ if (IS_eDP(intel_encoder)) {
DP &= ~DP_PLL_ENABLE;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ I915_WRITE(dp_priv->output_reg, DP);
+ POSTING_READ(dp_priv->output_reg);
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
- I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
- POSTING_READ(intel_dp->output_reg);
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ POSTING_READ(dp_priv->output_reg);
} else {
DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
- POSTING_READ(intel_dp->output_reg);
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ POSTING_READ(dp_priv->output_reg);
}
udelay(17000);
- if (IS_eDP(intel_dp))
+ if (IS_eDP(intel_encoder))
DP |= DP_LINK_TRAIN_OFF;
- I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
- POSTING_READ(intel_dp->output_reg);
+ I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(dp_priv->output_reg);
}
/*
@@ -1345,39 +1309,41 @@ intel_dp_link_down(struct intel_dp *intel_dp)
*/
static void
-intel_dp_check_link_status(struct intel_dp *intel_dp)
+intel_dp_check_link_status(struct intel_encoder *intel_encoder)
{
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint8_t link_status[DP_LINK_STATUS_SIZE];
- if (!intel_dp->base.enc.crtc)
+ if (!intel_encoder->enc.crtc)
return;
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
- intel_dp_link_down(intel_dp);
+ if (!intel_dp_get_link_status(intel_encoder, link_status)) {
+ intel_dp_link_down(intel_encoder, dp_priv->DP);
return;
}
- if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
- intel_dp_link_train(intel_dp);
+ if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
+ intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
}
static enum drm_connector_status
ironlake_dp_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
enum drm_connector_status status;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
+ if (intel_dp_aux_native_read(intel_encoder,
+ 0x000, dp_priv->dpcd,
+ sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
{
- if (intel_dp->dpcd[0] != 0)
+ if (dp_priv->dpcd[0] != 0)
status = connector_status_connected;
}
- DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
- intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
+ DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0],
+ dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]);
return status;
}
@@ -1391,18 +1357,19 @@ static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
uint32_t temp, bit;
enum drm_connector_status status;
- intel_dp->has_audio = false;
+ dp_priv->has_audio = false;
if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
- switch (intel_dp->output_reg) {
+ switch (dp_priv->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
break;
@@ -1422,11 +1389,11 @@ intel_dp_detect(struct drm_connector *connector)
return connector_status_disconnected;
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
+ if (intel_dp_aux_native_read(intel_encoder,
+ 0x000, dp_priv->dpcd,
+ sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
{
- if (intel_dp->dpcd[0] != 0)
+ if (dp_priv->dpcd[0] != 0)
status = connector_status_connected;
}
return status;
@@ -1435,17 +1402,18 @@ intel_dp_detect(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct drm_device *dev = intel_dp->base.enc.dev;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
- ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
+ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (ret) {
- if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
+ if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
!dev_priv->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
@@ -1462,7 +1430,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1484,9 +1452,9 @@ intel_dp_destroy (struct drm_connector *connector)
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
- .prepare = intel_dp_prepare,
+ .prepare = intel_encoder_prepare,
.mode_set = intel_dp_mode_set,
- .commit = intel_dp_commit,
+ .commit = intel_encoder_commit,
};
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -1502,17 +1470,27 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
.best_encoder = intel_attached_encoder,
};
+static void intel_dp_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_dp_enc_destroy,
};
void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
- intel_dp_check_link_status(intel_dp);
+ if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
+ intel_dp_check_link_status(intel_encoder);
}
/* Return which DP Port should be selected for Transcoder DP control */
@@ -1522,18 +1500,18 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
-
if (encoder->crtc != crtc)
continue;
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
- return intel_dp->output_reg;
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ return dp_priv->output_reg;
+ }
}
-
return -1;
}
@@ -1562,28 +1540,30 @@ intel_dp_init(struct drm_device *dev, int output_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_dp *intel_dp;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
+ struct intel_dp_priv *dp_priv;
const char *name = NULL;
int type;
- intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
- if (!intel_dp)
+ intel_encoder = kcalloc(sizeof(struct intel_encoder) +
+ sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
+ if (!intel_encoder)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_dp);
+ kfree(intel_encoder);
return;
}
- intel_encoder = &intel_dp->base;
- if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+ dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
+
+ if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D))
if (intel_dpd_is_edp(dev))
- intel_dp->is_pch_edp = true;
+ dp_priv->is_pch_edp = true;
- if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+ if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
@@ -1604,16 +1584,18 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
- if (IS_eDP(intel_dp))
+ if (IS_eDP(intel_encoder))
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- intel_dp->output_reg = output_reg;
- intel_dp->has_audio = false;
- intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
+ dp_priv->intel_encoder = intel_encoder;
+ dp_priv->output_reg = output_reg;
+ dp_priv->has_audio = false;
+ dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
+ intel_encoder->dev_priv = dp_priv;
drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
@@ -1648,12 +1630,12 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
- intel_dp_i2c_init(intel_dp, intel_connector, name);
+ intel_dp_i2c_init(intel_encoder, intel_connector, name);
- intel_encoder->ddc_bus = &intel_dp->adapter;
+ intel_encoder->ddc_bus = &dp_priv->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
- if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
+ if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =
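
The init path above returns to a single allocation carrying the generic encoder and the DP private back to back, with the untyped dev_priv pointer aimed at the tail. (The kcalloc() call passes the combined size as the count and 1 as the element size; the product is the same, though a plain kzalloc(size, ...) would read more naturally.) A sketch of the layout, written with kzalloc for clarity:

struct intel_encoder *intel_encoder;
struct intel_dp_priv *dp_priv;

/* One zeroed block: the encoder, immediately followed by its private. */
intel_encoder = kzalloc(sizeof(*intel_encoder) + sizeof(*dp_priv),
                        GFP_KERNEL);
if (!intel_encoder)
        return;

dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);  /* tail of block */
intel_encoder->dev_priv = dp_priv;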
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0e92aa0..b219014 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,20 +32,6 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
-
-#define wait_for(COND, MS, W) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
- int ret__ = 0; \
- while (! (COND)) { \
- if (time_after(jiffies, timeout__)) { \
- ret__ = -ETIMEDOUT; \
- break; \
- } \
- if (W) msleep(W); \
- } \
- ret__; \
-})
-
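
For reference, the wait_for() macro deleted here is a GCC statement expression: it polls COND for up to MS milliseconds, sleeping W ms between reads when W is non-zero, and evaluates to 0 on success or -ETIMEDOUT on timeout. Typical use, as it appeared before this revert:

if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 10))
        DRM_ERROR("timed out waiting for panel to power on\n");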
/*
* Display related stuff
*/
@@ -116,6 +102,7 @@ struct intel_encoder {
struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
+ void *dev_priv;
void (*hot_plug)(struct intel_encoder *);
int crtc_mask;
int clone_mask;
@@ -123,6 +110,7 @@ struct intel_encoder {
struct intel_connector {
struct drm_connector base;
+ void *dev_priv;
};
struct intel_crtc;
@@ -168,7 +156,7 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
- bool cursor_visible, cursor_on;
+ bool cursor_visble;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -200,18 +188,10 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
-extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode);
-extern void intel_pch_panel_fitting(struct drm_device *dev,
- int fitting_mode,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
-extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
@@ -219,8 +199,7 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
-extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+extern void intel_wait_for_vblank(struct drm_device *dev);
extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a399f4b..227feca 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -38,7 +38,7 @@
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
-static const struct intel_dvo_device intel_dvo_devices[] = {
+static struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
@@ -77,33 +77,20 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
}
};
-struct intel_dvo {
- struct intel_encoder base;
-
- struct intel_dvo_device dev;
-
- struct drm_display_mode *panel_fixed_mode;
- bool panel_wants_dither;
-};
-
-static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
-}
-
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
- u32 dvo_reg = intel_dvo->dev.dvo_reg;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ u32 dvo_reg = dvo->dvo_reg;
u32 temp = I915_READ(dvo_reg);
if (mode == DRM_MODE_DPMS_ON) {
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
+ dvo->dev_ops->dpms(dvo, mode);
} else {
- intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
+ dvo->dev_ops->dpms(dvo, mode);
I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
I915_READ(dvo_reg);
}
@@ -113,36 +100,38 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* XXX: Validate clock range */
- if (intel_dvo->panel_fixed_mode) {
- if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
+ if (dvo->panel_fixed_mode) {
+ if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
+ if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
- return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
+ return dvo->dev_ops->mode_valid(dvo, mode);
}
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- if (intel_dvo->panel_fixed_mode != NULL) {
-#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
+ if (dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x
C(hdisplay);
C(hsync_start);
C(hsync_end);
@@ -156,8 +145,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
#undef C
}
- if (intel_dvo->dev.dev_ops->mode_fixup)
- return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
+ if (dvo->dev_ops->mode_fixup)
+ return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode);
return true;
}
@@ -169,10 +158,11 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
int pipe = intel_crtc->pipe;
u32 dvo_val;
- u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
+ u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
switch (dvo_reg) {
@@ -188,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
break;
}
- intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
+ dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
@@ -224,38 +214,40 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
- return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
+ return dvo->dev_ops->detect(dvo);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (!list_empty(&connector->probed_modes))
return 1;
- if (intel_dvo->panel_fixed_mode != NULL) {
+
+ if (dvo->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
+ mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
}
-
return 0;
}
-static void intel_dvo_destroy(struct drm_connector *connector)
+static void intel_dvo_destroy (struct drm_connector *connector)
{
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -285,20 +277,28 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-
- if (intel_dvo->dev.dev_ops->destroy)
- intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
-
- kfree(intel_dvo->panel_fixed_mode);
-
- intel_encoder_destroy(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+
+ if (dvo) {
+ if (dvo->dev_ops->destroy)
+ dvo->dev_ops->destroy(dvo);
+ if (dvo->panel_fixed_mode)
+ kfree(dvo->panel_fixed_mode);
+ }
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
+
/**
* Attempts to get a fixed panel timing for LVDS (currently only the i830).
*
@@ -306,13 +306,15 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
* chip being on DVOB/C and having multiple pipes.
*/
static struct drm_display_mode *
-intel_dvo_get_current_mode(struct drm_connector *connector)
+intel_dvo_get_current_mode (struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
- uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dvo_device *dvo = intel_encoder->dev_priv;
+ uint32_t dvo_reg = dvo->dvo_reg;
+ uint32_t dvo_val = I915_READ(dvo_reg);
struct drm_display_mode *mode = NULL;
/* If the DVO port is active, that'll be the LVDS, so we can pull out
@@ -325,6 +327,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
crtc = intel_get_crtc_from_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
+
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -334,32 +337,28 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
}
}
}
-
return mode;
}
void intel_dvo_init(struct drm_device *dev)
{
struct intel_encoder *intel_encoder;
- struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
+ struct intel_dvo_device *dvo;
struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
-
- intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
- if (!intel_dvo)
+ intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
+ if (!intel_encoder)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_dvo);
+ kfree(intel_encoder);
return;
}
- intel_encoder = &intel_dvo->base;
-
/* Set up the DDC bus */
intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
if (!intel_encoder->ddc_bus)
@@ -368,9 +367,10 @@ void intel_dvo_init(struct drm_device *dev)
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
- const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
int gpio;
+ dvo = &intel_dvo_devices[i];
+
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
@@ -393,8 +393,11 @@ void intel_dvo_init(struct drm_device *dev)
continue;
}
- intel_dvo->dev = *dvo;
- ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
+ if (dvo->dev_ops != NULL)
+ ret = dvo->dev_ops->init(dvo, i2cbus);
+ else
+ ret = false;
+
if (!ret)
continue;
@@ -426,6 +429,9 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ intel_encoder->dev_priv = dvo;
+ intel_encoder->i2c_bus = i2cbus;
+
drm_encoder_init(dev, &intel_encoder->enc,
&intel_dvo_enc_funcs, encoder_type);
drm_encoder_helper_add(&intel_encoder->enc,
@@ -441,9 +447,9 @@ void intel_dvo_init(struct drm_device *dev)
* headers, likely), so for now, just get the current
* mode being output through DVO.
*/
- intel_dvo->panel_fixed_mode =
+ dvo->panel_fixed_mode =
intel_dvo_get_current_mode(connector);
- intel_dvo->panel_wants_dither = true;
+ dvo->panel_wants_dither = true;
}
drm_sysfs_connector_add(connector);
@@ -455,6 +461,6 @@ void intel_dvo_init(struct drm_device *dev)
if (i2cbus != NULL)
intel_i2c_destroy(i2cbus);
free_intel:
- kfree(intel_dvo);
+ kfree(intel_encoder);
kfree(intel_connector);
}
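
The enc_to_intel_dvo() helper removed at the top of this file relied on struct intel_dvo embedding its generic base as the first member, so the wrapper can be recovered with container_of(). A minimal sketch of the idiom this revert trades away:

struct intel_dvo {
        struct intel_encoder base;      /* embedded, not a pointer */
        struct intel_dvo_device dev;
};

static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
{
        /* Walk back from the embedded member to its container. */
        return container_of(enc_to_intel_encoder(encoder),
                            struct intel_dvo, base);
}

The container_of() form is checked by the compiler; the void *dev_priv scheme restored here depends on every call site casting to the right type.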
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ccd4c97..197887e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,17 +37,11 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct intel_hdmi {
- struct intel_encoder base;
+struct intel_hdmi_priv {
u32 sdvox_reg;
bool has_hdmi_sink;
};
-static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
-}
-
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -56,7 +50,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
@@ -65,7 +60,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_hdmi->has_hdmi_sink) {
+ if (hdmi_priv->has_hdmi_sink) {
sdvox |= SDVO_AUDIO_ENABLE;
if (HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
@@ -78,25 +73,26 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
sdvox |= SDVO_PIPE_B_SELECT;
}
- I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(hdmi_priv->sdvox_reg, sdvox);
+ POSTING_READ(hdmi_priv->sdvox_reg);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
u32 temp;
- temp = I915_READ(intel_hdmi->sdvox_reg);
+ temp = I915_READ(hdmi_priv->sdvox_reg);
/* HW workaround: the enable bit must be toggled off and on for 12bpc,
 * but doing so unconditionally proved more stable in testing.
 */
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
+ POSTING_READ(hdmi_priv->sdvox_reg);
}
if (mode != DRM_MODE_DPMS_ON) {
@@ -105,15 +101,15 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
temp |= SDVO_ENABLE;
}
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(hdmi_priv->sdvox_reg, temp);
+ POSTING_READ(hdmi_priv->sdvox_reg);
/* HW workaround: write the register twice, since the first write can
 * otherwise be masked.
 */
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(intel_hdmi->sdvox_reg, temp);
- POSTING_READ(intel_hdmi->sdvox_reg);
+ I915_WRITE(hdmi_priv->sdvox_reg, temp);
+ POSTING_READ(hdmi_priv->sdvox_reg);
}
}
@@ -142,17 +138,19 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
- intel_hdmi->has_hdmi_sink = false;
- edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
+ hdmi_priv->has_hdmi_sink = false;
+ edid = drm_get_edid(connector,
+ intel_encoder->ddc_bus);
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
}
connector->display_info.raw_edid = NULL;
kfree(edid);
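
The detect path above is the standard EDID probe: fetch the EDID over the DDC bus, treat a sink with the digital-input bit as connected, and let drm_detect_hdmi_monitor() inspect the CEA extension to distinguish HDMI from plain DVI. Sketched with the surrounding driver state elided:

struct edid *edid = drm_get_edid(connector, ddc_bus);   /* NULL if no sink */

if (edid) {
        if (edid->input & DRM_EDID_INPUT_DIGITAL) {
                status = connector_status_connected;
                has_hdmi_sink = drm_detect_hdmi_monitor(edid);
        }
        kfree(edid);
}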
@@ -164,13 +162,13 @@ intel_hdmi_detect(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
- return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
+ return intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -201,8 +199,18 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
.best_encoder = intel_attached_encoder,
};
+static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_hdmi_enc_destroy,
};
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
@@ -211,19 +219,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
- struct intel_hdmi *intel_hdmi;
+ struct intel_hdmi_priv *hdmi_priv;
- intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
- if (!intel_hdmi)
+ intel_encoder = kcalloc(sizeof(struct intel_encoder) +
+ sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
+ if (!intel_encoder)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_hdmi);
+ kfree(intel_encoder);
return;
}
- intel_encoder = &intel_hdmi->base;
+ hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
+
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
@@ -264,7 +274,8 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (!intel_encoder->ddc_bus)
goto err_connector;
- intel_hdmi->sdvox_reg = sdvox_reg;
+ hdmi_priv->sdvox_reg = sdvox_reg;
+ intel_encoder->dev_priv = hdmi_priv;
drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
@@ -287,7 +298,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
err_connector:
drm_connector_cleanup(connector);
- kfree(intel_hdmi);
+ kfree(intel_encoder);
kfree(intel_connector);
return;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b819c10..0a2e600 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -41,18 +41,12 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds {
- struct intel_encoder base;
+struct intel_lvds_priv {
int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
};
-static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
-}
-
/**
* Sets the backlight level.
*
@@ -96,7 +90,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, status_reg, lvds_reg;
+ u32 pp_status, ctl_reg, status_reg, lvds_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
@@ -114,8 +108,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
POWER_TARGET_ON);
- if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
- DRM_ERROR("timed out waiting to enable LVDS pipe");
+ do {
+ pp_status = I915_READ(status_reg);
+ } while ((pp_status & PP_ON) == 0);
intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
} else {
@@ -123,8 +118,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
~POWER_TARGET_ON);
- if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
- DRM_ERROR("timed out waiting for LVDS pipe to turn off");
+ do {
+ pp_status = I915_READ(status_reg);
+ } while (pp_status & PP_ON);
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
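
Unlike the eDP paths earlier in this patch, these two LVDS loops poll PP_ON with no deadline at all, so a wedged power sequencer would hang here forever. A bounded variant of the power-off wait in the same jiffies style, purely as a sketch (the 1 s figure matches the old wait_for() calls being removed):

unsigned long timeout = jiffies + msecs_to_jiffies(1000);
u32 pp_status;

do {
        pp_status = I915_READ(status_reg);
} while ((pp_status & PP_ON) && !time_after(jiffies, timeout));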
@@ -223,8 +219,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
/* Should never happen!! */
@@ -244,20 +241,26 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
/* If we don't have a panel mode, there is nothing we can do */
if (dev_priv->panel_fixed_mode == NULL)
return true;
-
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
-
- if (HAS_PCH_SPLIT(dev)) {
- intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
- mode, adjusted_mode);
- return true;
- }
+ adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start =
+ dev_priv->panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end =
+ dev_priv->panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start =
+ dev_priv->panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end =
+ dev_priv->panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
+ adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
/* Make sure pre-965s set dither correctly */
if (!IS_I965G(dev)) {
@@ -270,6 +273,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
+ /* full screen scale for now */
+ if (HAS_PCH_SPLIT(dev))
+ goto out;
+
/* 965+ wants fuzzy fitting */
if (IS_I965G(dev))
pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
@@ -281,10 +288,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- I915_WRITE(BCLRPAT_A, 0);
- I915_WRITE(BCLRPAT_B, 0);
+ if (!HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(BCLRPAT_A, 0);
+ I915_WRITE(BCLRPAT_B, 0);
+ }
- switch (intel_lvds->fitting_mode) {
+ switch (lvds_priv->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
@@ -369,8 +378,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
out:
- intel_lvds->pfit_control = pfit_control;
- intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+ lvds_priv->pfit_control = pfit_control;
+ lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
dev_priv->lvds_border_bits = border;
/*
@@ -418,7 +427,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
/*
* The LVDS pin pair will already have been turned on in the
@@ -434,8 +444,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
}
/**
@@ -590,17 +600,18 @@ static int intel_lvds_set_property(struct drm_connector *connector,
connector->encoder) {
struct drm_crtc *crtc = connector->encoder->crtc;
struct drm_encoder *encoder = connector->encoder;
- struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return 0;
}
- if (intel_lvds->fitting_mode == value) {
+ if (lvds_priv->fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
- intel_lvds->fitting_mode = value;
+ lvds_priv->fitting_mode = value;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
@@ -636,8 +647,19 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.destroy = intel_lvds_destroy,
};
+
+static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_lvds_enc_destroy,
};
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
@@ -821,13 +843,13 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
void intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
+ struct intel_lvds_priv *lvds_priv;
u32 lvds;
int pipe, gpio = GPIOC;
@@ -850,20 +872,20 @@ void intel_lvds_init(struct drm_device *dev)
gpio = PCH_GPIOC;
}
- intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
- if (!intel_lvds) {
+ intel_encoder = kzalloc(sizeof(struct intel_encoder) +
+ sizeof(struct intel_lvds_priv), GFP_KERNEL);
+ if (!intel_encoder) {
return;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_lvds);
+ kfree(intel_encoder);
return;
}
- intel_encoder = &intel_lvds->base;
- encoder = &intel_encoder->enc;
connector = &intel_connector->base;
+ encoder = &intel_encoder->enc;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -883,6 +905,8 @@ void intel_lvds_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
+ intel_encoder->dev_priv = lvds_priv;
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
/*
@@ -892,7 +916,7 @@ void intel_lvds_init(struct drm_device *dev)
drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
- intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+ lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -1000,6 +1024,6 @@ failed:
intel_i2c_destroy(intel_encoder->ddc_bus);
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
- kfree(intel_lvds);
+ kfree(intel_encoder);
kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 4f00390..d39aea2 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1367,8 +1367,7 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->flip_addr = overlay->reg_bo->gtt_offset;
} else {
ret = i915_gem_attach_phys_object(dev, reg_bo,
- I915_GEM_PHYS_OVERLAY_REGS,
- 0);
+ I915_GEM_PHYS_OVERLAY_REGS);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
@@ -1417,99 +1416,3 @@ void intel_cleanup_overlay(struct drm_device *dev)
kfree(dev_priv->overlay);
}
}
-
-struct intel_overlay_error_state {
- struct overlay_registers regs;
- unsigned long base;
- u32 dovsta;
- u32 isr;
-};
-
-struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_overlay *overlay = dev_priv->overlay;
- struct intel_overlay_error_state *error;
- struct overlay_registers __iomem *regs;
-
- if (!overlay || !overlay->active)
- return NULL;
-
- error = kmalloc(sizeof(*error), GFP_ATOMIC);
- if (error == NULL)
- return NULL;
-
- error->dovsta = I915_READ(DOVSTA);
- error->isr = I915_READ(ISR);
- if (OVERLAY_NONPHYSICAL(overlay->dev))
- error->base = (long) overlay->reg_bo->gtt_offset;
- else
- error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
-
- regs = intel_overlay_map_regs_atomic(overlay);
- if (!regs)
- goto err;
-
- memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
- intel_overlay_unmap_regs_atomic(overlay);
-
- return error;
-
-err:
- kfree(error);
- return NULL;
-}
-
-void
-intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
-{
- seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
- error->dovsta, error->isr);
- seq_printf(m, " Register file at 0x%08lx:\n",
- error->base);
-
-#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x)
- P(OBUF_0Y);
- P(OBUF_1Y);
- P(OBUF_0U);
- P(OBUF_0V);
- P(OBUF_1U);
- P(OBUF_1V);
- P(OSTRIDE);
- P(YRGB_VPH);
- P(UV_VPH);
- P(HORZ_PH);
- P(INIT_PHS);
- P(DWINPOS);
- P(DWINSZ);
- P(SWIDTH);
- P(SWIDTHSW);
- P(SHEIGHT);
- P(YRGBSCALE);
- P(UVSCALE);
- P(OCLRC0);
- P(OCLRC1);
- P(DCLRKV);
- P(DCLRKM);
- P(SCLRKVH);
- P(SCLRKVL);
- P(SCLRKEN);
- P(OCONFIG);
- P(OCMD);
- P(OSTART_0Y);
- P(OSTART_1Y);
- P(OSTART_0U);
- P(OSTART_0V);
- P(OSTART_1U);
- P(OSTART_1V);
- P(OTILEOFF_0Y);
- P(OTILEOFF_1Y);
- P(OTILEOFF_0U);
- P(OTILEOFF_0V);
- P(OTILEOFF_1U);
- P(OTILEOFF_1V);
- P(FASTHSCALE);
- P(UVSCALEV);
-#undef P
-}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
deleted file mode 100644
index e7f5299..0000000
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright © 2006-2010 Intel Corporation
- * Copyright (c) 2006 Dave Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt
- * Dave Airlie
- * Jesse Barnes
- * Chris Wilson
- */
-
-#include "intel_drv.h"
-
-void
-intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode)
-{
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
-
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
-
- adjusted_mode->clock = fixed_mode->clock;
-
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
-}
-
-/* adjusted_mode has been preset to be the panel's fixed mode */
-void
-intel_pch_panel_fitting(struct drm_device *dev,
- int fitting_mode,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int x, y, width, height;
-
- x = y = width = height = 0;
-
- /* Native modes don't need fitting */
- if (adjusted_mode->hdisplay == mode->hdisplay &&
- adjusted_mode->vdisplay == mode->vdisplay)
- goto done;
-
- switch (fitting_mode) {
- case DRM_MODE_SCALE_CENTER:
- width = mode->hdisplay;
- height = mode->vdisplay;
- x = (adjusted_mode->hdisplay - width + 1)/2;
- y = (adjusted_mode->vdisplay - height + 1)/2;
- break;
-
- case DRM_MODE_SCALE_ASPECT:
- /* Scale but preserve the aspect ratio */
- {
- u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
- u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
- if (scaled_width > scaled_height) { /* pillar */
- width = scaled_height / mode->vdisplay;
- x = (adjusted_mode->hdisplay - width + 1) / 2;
- y = 0;
- height = adjusted_mode->vdisplay;
- } else if (scaled_width < scaled_height) { /* letter */
- height = scaled_width / mode->hdisplay;
- y = (adjusted_mode->vdisplay - height + 1) / 2;
- x = 0;
- width = adjusted_mode->hdisplay;
- } else {
- x = y = 0;
- width = adjusted_mode->hdisplay;
- height = adjusted_mode->vdisplay;
- }
- }
- break;
-
- default:
- case DRM_MODE_SCALE_FULLSCREEN:
- x = y = 0;
- width = adjusted_mode->hdisplay;
- height = adjusted_mode->vdisplay;
- break;
- }
-
-done:
- dev_priv->pch_pf_pos = (x << 16) | y;
- dev_priv->pch_pf_size = (width << 16) | height;
-}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 51e9c9e..26362f8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,35 +33,18 @@
#include "i915_drm.h"
#include "i915_trace.h"
-static u32 i915_gem_get_seqno(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno;
-
- seqno = dev_priv->next_seqno;
-
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
-
- return seqno;
-}
-
static void
render_ring_flush(struct drm_device *dev,
struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 cmd;
-
+	u32 cmd;
+
#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		invalidate_domains, flush_domains);
#endif
-
-	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
+ trace_i915_gem_request_flush(dev, ring->next_seqno,
invalidate_domains, flush_domains);
if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
@@ -250,10 +233,9 @@ render_ring_add_request(struct drm_device *dev,
struct drm_file *file_priv,
u32 flush_domains)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno;
-
- seqno = i915_gem_get_seqno(dev);
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ seqno = intel_ring_get_seqno(dev, ring);
if (IS_GEN6(dev)) {
BEGIN_LP_RING(6);
@@ -423,9 +405,7 @@ bsd_ring_add_request(struct drm_device *dev,
u32 flush_domains)
{
u32 seqno;
-
- seqno = i915_gem_get_seqno(dev);
-
+ seqno = intel_ring_get_seqno(dev, ring);
intel_ring_begin(dev, ring, 4);
intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(dev, ring,
@@ -499,7 +479,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
- trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
count = nbox ? nbox : 1;
@@ -535,16 +515,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_FLUSH |
- MI_NO_WRITE_FLUSH |
- MI_INVALIDATE_ISP );
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
- }
/* XXX breadcrumb */
-
return 0;
}
@@ -617,10 +588,9 @@ err:
int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
+ int ret;
struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
- int ret;
-
ring->dev = dev;
if (I915_NEED_GFX_HWS(dev)) {
@@ -633,14 +603,16 @@ int intel_init_ring_buffer(struct drm_device *dev,
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
ret = -ENOMEM;
- goto err_hws;
+ goto cleanup;
}
ring->gem_object = obj;
ret = i915_gem_object_pin(obj, ring->alignment);
- if (ret)
- goto err_unref;
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ goto cleanup;
+ }
obj_priv = to_intel_bo(obj);
ring->map.size = ring->size;
@@ -652,14 +624,18 @@ int intel_init_ring_buffer(struct drm_device *dev,
drm_core_ioremap_wc(&ring->map, dev);
if (ring->map.handle == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
ret = -EINVAL;
- goto err_unpin;
+ goto cleanup;
}
ring->virtual_start = ring->map.handle;
ret = ring->init(dev, ring);
- if (ret)
- goto err_unmap;
+ if (ret != 0) {
+ intel_cleanup_ring_buffer(dev, ring);
+ return ret;
+ }
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_kernel_lost_context(dev);
@@ -673,15 +649,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
return ret;
-
-err_unmap:
- drm_core_ioremapfree(&ring->map, dev);
-err_unpin:
- i915_gem_object_unpin(obj);
-err_unref:
- drm_gem_object_unreference(obj);
- ring->gem_object = NULL;
-err_hws:
+cleanup:
cleanup_status_page(dev, ring);
return ret;
}
@@ -714,11 +682,9 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
}
virt = (unsigned int *)(ring->virtual_start + ring->tail);
- rem /= 8;
- while (rem--) {
- *virt++ = MI_NOOP;
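+	/* each MI_NOOP is a single 4-byte dword */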
+ rem /= 4;
+ while (rem--)
*virt++ = MI_NOOP;
- }
ring->tail = 0;
ring->space = ring->head - 8;
@@ -763,14 +729,21 @@ void intel_ring_begin(struct drm_device *dev,
intel_wrap_ring_buffer(dev, ring);
if (unlikely(ring->space < n))
intel_wait_ring_buffer(dev, ring, n);
+}
- ring->space -= n;
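+/* Emit one dword at the tail; space accounting now happens here, not in intel_ring_begin(). */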
+void intel_ring_emit(struct drm_device *dev,
+		struct intel_ring_buffer *ring, u32 data)
+{
+ unsigned int *virt = ring->virtual_start + ring->tail;
+ *virt = data;
+ ring->tail += 4;
+ ring->tail &= ring->size - 1;
+ ring->space -= 4;
}
void intel_ring_advance(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- ring->tail &= ring->size - 1;
ring->advance_ring(dev, ring);
}
@@ -789,6 +762,18 @@ void intel_fill_struct(struct drm_device *dev,
intel_ring_advance(dev, ring);
}
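+/* Allocate the next seqno from this ring's own counter. */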
+u32 intel_ring_get_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ u32 seqno;
+ seqno = ring->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++ring->next_seqno == 0)
+ ring->next_seqno = 1;
+ return seqno;
+}
+
struct intel_ring_buffer render_ring = {
.name = "render ring",
.regs = {
@@ -806,6 +791,7 @@ struct intel_ring_buffer render_ring = {
.head = 0,
.tail = 0,
.space = 0,
+ .next_seqno = 1,
.user_irq_refcount = 0,
.irq_gem_seqno = 0,
.waiting_gem_seqno = 0,
@@ -844,6 +830,7 @@ struct intel_ring_buffer bsd_ring = {
.head = 0,
.tail = 0,
.space = 0,
+ .next_seqno = 1,
.user_irq_refcount = 0,
.irq_gem_seqno = 0,
.waiting_gem_seqno = 0,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 525e7d3..d5568d3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -26,6 +26,7 @@ struct intel_ring_buffer {
unsigned int head;
unsigned int tail;
unsigned int space;
+ u32 next_seqno;
struct intel_hw_status_page status_page;
u32 irq_gem_seqno; /* last seq seem at irq time */
@@ -105,16 +106,8 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring);
void intel_ring_begin(struct drm_device *dev,
struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- unsigned int data)
-{
- unsigned int *virt = ring->virtual_start + ring->tail;
- *virt = data;
- ring->tail += 4;
-}
-
+void intel_ring_emit(struct drm_device *dev,
+ struct intel_ring_buffer *ring, u32 data);
void intel_fill_struct(struct drm_device *dev,
struct intel_ring_buffer *ring,
void *data,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 093e914..d9d4d51 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -31,8 +31,8 @@
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
-#include "drm_edid.h"
#include "intel_drv.h"
+#include "drm_edid.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
@@ -47,10 +47,9 @@
#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
-#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
-static const char *tv_format_names[] = {
+static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
"PAL_B" , "PAL_D" , "PAL_G" ,
"PAL_H" , "PAL_I" , "PAL_M" ,
@@ -62,9 +61,7 @@ static const char *tv_format_names[] = {
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
-struct intel_sdvo {
- struct intel_encoder base;
-
+struct intel_sdvo_priv {
u8 slave_addr;
/* Register for the SDVO device: SDVOB or SDVOC */
@@ -98,7 +95,7 @@ struct intel_sdvo {
bool is_tv;
/* This is for current tv format name */
- int tv_format_index;
+ char *tv_format_name;
/**
* This is set if we treat the device as HDMI, instead of DVI.
@@ -135,40 +132,37 @@ struct intel_sdvo {
};
struct intel_sdvo_connector {
- struct intel_connector base;
-
/* Mark the type of connector */
uint16_t output_flag;
/* This contains all current supported TV format */
- u8 tv_format_supported[TV_FORMAT_NUM];
+ char *tv_format_supported[TV_FORMAT_NUM];
int format_supported_num;
- struct drm_property *tv_format;
+ struct drm_property *tv_format_property;
+ struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
+
+ /**
+ * Returned SDTV resolutions allowed for the current format, if the
+ * device reported it.
+ */
+ struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
/* add the property for the SDVO-TV */
- struct drm_property *left;
- struct drm_property *right;
- struct drm_property *top;
- struct drm_property *bottom;
- struct drm_property *hpos;
- struct drm_property *vpos;
- struct drm_property *contrast;
- struct drm_property *saturation;
- struct drm_property *hue;
- struct drm_property *sharpness;
- struct drm_property *flicker_filter;
- struct drm_property *flicker_filter_adaptive;
- struct drm_property *flicker_filter_2d;
- struct drm_property *tv_chroma_filter;
- struct drm_property *tv_luma_filter;
- struct drm_property *dot_crawl;
+ struct drm_property *left_property;
+ struct drm_property *right_property;
+ struct drm_property *top_property;
+ struct drm_property *bottom_property;
+ struct drm_property *hpos_property;
+ struct drm_property *vpos_property;
/* add the property for the SDVO-TV/LVDS */
- struct drm_property *brightness;
+ struct drm_property *brightness_property;
+ struct drm_property *contrast_property;
+ struct drm_property *saturation_property;
+ struct drm_property *hue_property;
/* Add variable to record current setting for the above property */
u32 left_margin, right_margin, top_margin, bottom_margin;
-
/* this is to get the range of margin.*/
u32 max_hscan, max_vscan;
u32 max_hpos, cur_hpos;
@@ -177,54 +171,36 @@ struct intel_sdvo_connector {
u32 cur_contrast, max_contrast;
u32 cur_saturation, max_saturation;
u32 cur_hue, max_hue;
- u32 cur_sharpness, max_sharpness;
- u32 cur_flicker_filter, max_flicker_filter;
- u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
- u32 cur_flicker_filter_2d, max_flicker_filter_2d;
- u32 cur_tv_chroma_filter, max_tv_chroma_filter;
- u32 cur_tv_luma_filter, max_tv_luma_filter;
- u32 cur_dot_crawl, max_dot_crawl;
};
-static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
-}
-
-static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
-{
- return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
-}
-
-static bool
-intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
-static bool
-intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector,
- int type);
static bool
-intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector);
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder,
+ uint16_t flags);
+static void
+intel_sdvo_tv_create_property(struct drm_connector *connector, int type);
+static void
+intel_sdvo_create_enhance_property(struct drm_connector *connector);
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
*/
-static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 bval = val, cval = val;
int i;
- if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
- I915_WRITE(intel_sdvo->sdvo_reg, val);
- I915_READ(intel_sdvo->sdvo_reg);
+ if (sdvo_priv->sdvo_reg == PCH_SDVOB) {
+ I915_WRITE(sdvo_priv->sdvo_reg, val);
+ I915_READ(sdvo_priv->sdvo_reg);
return;
}
- if (intel_sdvo->sdvo_reg == SDVOB) {
+ if (sdvo_priv->sdvo_reg == SDVOB) {
cval = I915_READ(SDVOC);
} else {
bval = I915_READ(SDVOB);
@@ -243,27 +219,33 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
}
}
-static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
+static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
+ u8 *ch)
{
- u8 out_buf[2] = { addr, 0 };
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ u8 out_buf[2];
u8 buf[2];
+ int ret;
+
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
}
};
- int ret;
- if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
{
*ch = buf[0];
return true;
@@ -273,26 +255,35 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
return false;
}
-static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
+static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
+ u8 ch)
{
- u8 out_buf[2] = { addr, ch };
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ u8 out_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
}
};
- return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
+ out_buf[0] = addr;
+ out_buf[1] = ch;
+
+	return i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1;
}
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
u8 cmd;
- const char *name;
+ char *name;
} sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
@@ -337,14 +328,13 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
-
/* Add the op code for SDVO enhancements */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
@@ -363,27 +353,6 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
-
/* HDMI op code */
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
@@ -408,15 +377,17 @@ static const struct _sdvo_cmd_name {
};
#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
-static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
- const void *args, int args_len)
+static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
+ void *args, int args_len)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
DRM_DEBUG_KMS("%s: W: %02X ",
- SDVO_NAME(intel_sdvo), cmd);
+ SDVO_NAME(sdvo_priv), cmd);
for (i = 0; i < args_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
for (; i < 8; i++)
@@ -432,20 +403,19 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
DRM_LOG_KMS("\n");
}
-static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
- const void *args, int args_len)
+static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
+ void *args, int args_len)
{
int i;
- intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+ intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
for (i = 0; i < args_len; i++) {
- if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
- ((u8*)args)[i]))
- return false;
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
+ ((u8*)args)[i]);
}
- return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
}
static const char *cmd_status_names[] = {
@@ -458,13 +428,14 @@ static const char *cmd_status_names[] = {
"Scaling not supported"
};
-static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
void *response, int response_len,
u8 status)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int i;
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
for (i = 0; i < response_len; i++)
DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
for (; i < 8; i++)
@@ -476,8 +447,8 @@ static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
DRM_LOG_KMS("\n");
}
-static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
- void *response, int response_len)
+static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
+ void *response, int response_len)
{
int i;
u8 status;
@@ -486,26 +457,24 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
while (retry--) {
/* Read the command response */
for (i = 0; i < response_len; i++) {
- if (!intel_sdvo_read_byte(intel_sdvo,
- SDVO_I2C_RETURN_0 + i,
- &((u8 *)response)[i]))
- return false;
+ intel_sdvo_read_byte(intel_encoder,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]);
}
/* read the return status */
- if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
- &status))
- return false;
+ intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
+ &status);
- intel_sdvo_debug_response(intel_sdvo, response, response_len,
+ intel_sdvo_debug_response(intel_encoder, response, response_len,
status);
if (status != SDVO_CMD_STATUS_PENDING)
- break;
+ return status;
mdelay(50);
}
- return status == SDVO_CMD_STATUS_SUCCESS;
+ return status;
}
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
@@ -525,36 +494,37 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
* another I2C transaction after issuing the DDC bus switch, it will be
* switched to the internal SDVO register.
*/
-static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
u8 target)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
struct i2c_msg msgs[] = {
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
},
/* the following two are to read the response */
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = cmd_buf,
},
{
- .addr = intel_sdvo->slave_addr >> 1,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = ret_value,
},
};
- intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
&target, 1);
/* write the DDC switch command argument */
- intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
+ intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
out_buf[0] = SDVO_I2C_OPCODE;
out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -563,7 +533,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
ret_value[0] = 0;
ret_value[1] = 0;
- ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
+ ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
if (ret != 3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -577,29 +547,23 @@ static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
return;
}
-static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
{
- if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
- return false;
+ struct intel_sdvo_set_target_input_args targets = {0};
+ u8 status;
- return intel_sdvo_read_response(intel_sdvo, NULL, 0);
-}
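+	/* targeting both inputs at once is not supported */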
+ if (target_0 && target_1)
+		return false;
-static bool
-intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
-{
- if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
- return false;
+ if (target_1)
+ targets.target_1 = 1;
- return intel_sdvo_read_response(intel_sdvo, value, len);
-}
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
+ sizeof(targets));
-static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
-{
- struct intel_sdvo_set_target_input_args targets = {0};
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_TARGET_INPUT,
- &targets, sizeof(targets));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
/**
@@ -608,12 +572,14 @@ static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
* This function is making an assumption about the layout of the response,
* which should be checked against the docs.
*/
-static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
+static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
{
struct intel_sdvo_get_trained_inputs_response response;
+ u8 status;
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
- &response, sizeof(response)))
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
*input_1 = response.input0_trained;
@@ -621,18 +587,21 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i
return true;
}
-static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
u16 outputs)
{
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_ACTIVE_OUTPUTS,
- &outputs, sizeof(outputs));
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
+ sizeof(outputs));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
int mode)
{
- u8 state = SDVO_ENCODER_STATE_ON;
+ u8 status, state = SDVO_ENCODER_STATE_ON;
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -649,63 +618,88 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
break;
}
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
+ sizeof(state));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
int *clock_min,
int *clock_max)
{
struct intel_sdvo_pixel_clock_range clocks;
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ NULL, 0);
+
+ status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
- &clocks, sizeof(clocks)))
+ if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
/* Convert the values from units of 10 kHz to kHz. */
*clock_min = clocks.min * 10;
*clock_max = clocks.max * 10;
+
return true;
}
-static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
u16 outputs)
{
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_TARGET_OUTPUT,
- &outputs, sizeof(outputs));
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
+ sizeof(outputs));
+
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
- intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
}
-static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_sdvo,
+ return intel_sdvo_set_timing(intel_encoder,
SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}
-static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_set_timing(intel_sdvo,
+ return intel_sdvo_set_timing(intel_encoder,
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
static bool
-intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
uint16_t clock,
uint16_t width,
uint16_t height)
{
struct intel_sdvo_preferred_input_timing_args args;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ uint8_t status;
memset(&args, 0, sizeof(args));
args.clock = clock;
@@ -713,32 +707,59 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
args.height = height;
args.interlace = 0;
- if (intel_sdvo->is_lvds &&
- (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
- intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+ if (sdvo_priv->is_lvds &&
+ (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width ||
+ sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
args.scaled = 1;
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
- &args, sizeof(args));
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
}
-static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
struct intel_sdvo_dtd *dtd)
{
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
- &dtd->part1, sizeof(dtd->part1)) &&
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
- &dtd->part2, sizeof(dtd->part2));
+	u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ NULL, 0);
+
+ status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
+ sizeof(dtd->part1));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ NULL, 0);
+
+ status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
+ sizeof(dtd->part2));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+	return true;
}
-static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
+static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
{
- return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
}
static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
- const struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
uint16_t width, height;
uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
@@ -787,7 +808,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
}
static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
- const struct intel_sdvo_dtd *dtd)
+ struct intel_sdvo_dtd *dtd)
{
mode->hdisplay = dtd->part1.h_active;
mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
@@ -819,33 +840,45 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
-static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
struct intel_sdvo_encode *encode)
{
- if (intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPP_ENCODE,
- encode, sizeof(*encode)))
- return true;
+ uint8_t status;
- /* non-support means DVI */
- memset(encode, 0, sizeof(*encode));
- return false;
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
+ if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
+ memset(encode, 0, sizeof(*encode));
+ return false;
+ }
+
+ return true;
}
-static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
uint8_t mode)
{
- return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+ uint8_t status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
-static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
+static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
uint8_t mode)
{
- return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+ uint8_t status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+
+ return (status == SDVO_CMD_STATUS_SUCCESS);
}
#if 0
-static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
{
int i, j;
uint8_t set_buf_index[2];
@@ -854,7 +887,8 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
uint8_t buf[48];
uint8_t *pos;
- intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
+	intel_sdvo_read_response(intel_encoder, &av_split, 1);
for (i = 0; i <= av_split; i++) {
set_buf_index[0] = i; set_buf_index[1] = 0;
@@ -874,7 +908,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
}
#endif
-static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
int index,
uint8_t *data, int8_t size, uint8_t tx_rate)
{
@@ -883,18 +917,15 @@ static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
set_buf_index[0] = index;
set_buf_index[1] = 0;
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return false;
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
for (; size > 0; size -= 8) {
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
- return false;
-
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
data += 8;
}
- return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
}
static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
@@ -969,7 +1000,7 @@ struct dip_infoframe {
} __attribute__ ((packed)) u;
} __attribute__((packed));
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
struct drm_display_mode * mode)
{
struct dip_infoframe avi_if = {
@@ -980,105 +1011,133 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
4 + avi_if.len);
- return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
- 4 + avi_if.len,
- SDVO_HBUF_TX_VSYNC);
+ intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
+ 4 + avi_if.len,
+ SDVO_HBUF_TX_VSYNC);
}
-static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
{
+
struct intel_sdvo_tv_format format;
- uint32_t format_map;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ uint32_t format_map, i;
+ uint8_t status;
- format_map = 1 << intel_sdvo->tv_format_index;
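+	/* recover the format index from the current format name */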
+ for (i = 0; i < TV_FORMAT_NUM; i++)
+ if (tv_format_names[i] == sdvo_priv->tv_format_name)
+ break;
+
+ format_map = 1 << i;
memset(&format, 0, sizeof(format));
- memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+ memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
+ sizeof(format) : sizeof(format_map));
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format,
+ sizeof(format));
- BUILD_BUG_ON(sizeof(format) != 6);
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_TV_FORMAT,
- &format, sizeof(format));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ DRM_DEBUG_KMS("%s: Failed to set TV format\n",
+ SDVO_NAME(sdvo_priv));
}
-static bool
-intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
- struct drm_display_mode *mode)
+static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct intel_sdvo_dtd output_dtd;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
- if (!intel_sdvo_set_target_output(intel_sdvo,
- intel_sdvo->attached_output))
- return false;
+ if (dev_priv->is_tv) {
+ struct intel_sdvo_dtd output_dtd;
+ bool success;
- intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
- if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
- return false;
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
- return true;
-}
-static bool
-intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct intel_sdvo_dtd input_dtd;
+ /* Set output timings */
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ intel_sdvo_set_target_output(intel_encoder,
+ dev_priv->attached_output);
+ intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
- /* Reset the input timing to the screen. Assume always input 0. */
- if (!intel_sdvo_set_target_input(intel_sdvo))
- return false;
+ /* Set the input timing to the screen. Assume always input 0. */
+ intel_sdvo_set_target_input(intel_encoder, true, false);
- if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
- mode->clock / 10,
- mode->hdisplay,
- mode->vdisplay))
- return false;
- if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
- &input_dtd))
- return false;
+ success = intel_sdvo_create_preferred_input_timing(intel_encoder,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay);
+ if (success) {
+ struct intel_sdvo_dtd input_dtd;
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
- intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
+ intel_sdvo_get_preferred_input_timing(intel_encoder,
+ &input_dtd);
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
- drm_mode_set_crtcinfo(adjusted_mode, 0);
- mode->clock = adjusted_mode->clock;
- return true;
-}
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
-static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ mode->clock = adjusted_mode->clock;
- /* We need to construct preferred input timings based on our
- * output timings. To do that, we have to set the output
- * timings, even though this isn't really the right place in
- * the sequence to do it. Oh well.
- */
- if (intel_sdvo->is_tv) {
- if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
+ adjusted_mode->clock *=
+ intel_sdvo_get_pixel_multiplier(mode);
+ } else {
return false;
+ }
+ } else if (dev_priv->is_lvds) {
+ struct intel_sdvo_dtd output_dtd;
+ bool success;
- if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode))
- return false;
- } else if (intel_sdvo->is_lvds) {
- drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
+ drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0);
+ /* Set output timings */
+ intel_sdvo_get_dtd_from_mode(&output_dtd,
+ dev_priv->sdvo_lvds_fixed_mode);
- if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
- intel_sdvo->sdvo_lvds_fixed_mode))
- return false;
+ intel_sdvo_set_target_output(intel_encoder,
+ dev_priv->attached_output);
+ intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
- if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode))
- return false;
- }
+ /* Set the input timing to the screen. Assume always input 0. */
+ intel_sdvo_set_target_input(intel_encoder, true, false);
- /* Make the CRTC code factor in the SDVO pixel multiplier. The
- * SDVO device will be told of the multiplier during mode_set.
- */
- adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ success = intel_sdvo_create_preferred_input_timing(
+ intel_encoder,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay);
+
+ if (success) {
+ struct intel_sdvo_dtd input_dtd;
+
+ intel_sdvo_get_preferred_input_timing(intel_encoder,
+ &input_dtd);
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ mode->clock = adjusted_mode->clock;
+
+ adjusted_mode->clock *=
+ intel_sdvo_get_pixel_multiplier(mode);
+ } else {
+ return false;
+ }
+
+ } else {
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will be told of the multiplier during mode_set.
+ */
+ adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
+ }
return true;
}
@@ -1090,11 +1149,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 sdvox = 0;
- int sdvo_pixel_multiply, rate;
+ int sdvo_pixel_multiply;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd;
+ u8 status;
if (!mode)
return;
@@ -1105,50 +1166,41 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* channel on the motherboard. In a two-input device, the first input
* will be SDVOB and the second SDVOC.
*/
- in_out.in0 = intel_sdvo->attached_output;
+ in_out.in0 = sdvo_priv->attached_output;
in_out.in1 = 0;
- if (!intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_IN_OUT_MAP,
- &in_out, sizeof(in_out)))
- return;
-
- if (intel_sdvo->is_hdmi) {
- if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
- return;
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+ status = intel_sdvo_read_response(intel_encoder, NULL, 0);
+ if (sdvo_priv->is_hdmi) {
+ intel_sdvo_set_avi_infoframe(intel_encoder, mode);
sdvox |= SDVO_AUDIO_ENABLE;
}
/* We have tried to get input timing in mode_fixup, and filled into
adjusted_mode */
- if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
+ if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
+ input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags;
} else
intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
/* If it's a TV, we already set the output timing in mode_fixup.
* Otherwise, the output timing is equal to the input timing.
*/
- if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
+ if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
/* Set the output timing to the screen */
- if (!intel_sdvo_set_target_output(intel_sdvo,
- intel_sdvo->attached_output))
- return;
-
- if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd))
- return;
+ intel_sdvo_set_target_output(intel_encoder,
+ sdvo_priv->attached_output);
+ intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
}
/* Set the input timing to the screen. Assume always input 0. */
- if (!intel_sdvo_set_target_input(intel_sdvo))
- return;
+ intel_sdvo_set_target_input(intel_encoder, true, false);
- if (intel_sdvo->is_tv) {
- if (!intel_sdvo_set_tv_format(intel_sdvo))
- return;
- }
+ if (sdvo_priv->is_tv)
+ intel_sdvo_set_tv_format(intel_encoder);
/* We would like to use intel_sdvo_create_preferred_input_timing() to
* provide the device with a timing it can support, if it supports that
@@ -1165,18 +1217,23 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_set_input_timing(encoder, &input_dtd);
}
#else
- if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
- return;
+ intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
#endif
- sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
- switch (sdvo_pixel_multiply) {
- case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
- case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
- case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+ switch (intel_sdvo_get_pixel_multiplier(mode)) {
+ case 1:
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
+ SDVO_CLOCK_RATE_MULT_1X);
+ break;
+ case 2:
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
+ SDVO_CLOCK_RATE_MULT_2X);
+ break;
+ case 4:
+ intel_sdvo_set_clock_rate_mult(intel_encoder,
+ SDVO_CLOCK_RATE_MULT_4X);
+ break;
}
- if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
- return;
/* Set the SDVO control regs. */
if (IS_I965G(dev)) {
@@ -1186,8 +1243,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
} else {
- sdvox |= I915_READ(intel_sdvo->sdvo_reg);
- switch (intel_sdvo->sdvo_reg) {
+ sdvox |= I915_READ(sdvo_priv->sdvo_reg);
+ switch (sdvo_priv->sdvo_reg) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
break;
@@ -1200,6 +1257,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
+ sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
if (IS_I965G(dev)) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
@@ -1208,28 +1266,28 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
+ if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
sdvox |= SDVO_STALL_SELECT;
- intel_sdvo_write_sdvox(intel_sdvo, sdvox);
+ intel_sdvo_write_sdvox(intel_encoder, sdvox);
}
static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
u32 temp;
if (mode != DRM_MODE_DPMS_ON) {
- intel_sdvo_set_active_outputs(intel_sdvo, 0);
+ intel_sdvo_set_active_outputs(intel_encoder, 0);
if (0)
- intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+ intel_sdvo_set_encoder_power_state(intel_encoder, mode);
if (mode == DRM_MODE_DPMS_OFF) {
- temp = I915_READ(intel_sdvo->sdvo_reg);
+ temp = I915_READ(sdvo_priv->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
- intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
}
}
} else {
@@ -1237,25 +1295,28 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
int i;
u8 status;
- temp = I915_READ(intel_sdvo->sdvo_reg);
+ temp = I915_READ(sdvo_priv->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0)
- intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+ intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
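+	/* give the encoder a couple of vblanks to sync before checking status */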
for (i = 0; i < 2; i++)
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
+
+ status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
+ &input2);
+
- status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
* A lot of SDVO devices fail to notify of sync, but it's
	 * a given that if the status is a success, we succeeded.
*/
if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(intel_sdvo));
+ "sync\n", SDVO_NAME(sdvo_priv));
}
if (0)
- intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
- intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+ intel_sdvo_set_encoder_power_state(intel_encoder, mode);
+ intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output);
}
return;
}
@@ -1264,31 +1325,42 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (intel_sdvo->pixel_clock_min > mode->clock)
+ if (sdvo_priv->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;
- if (intel_sdvo->pixel_clock_max < mode->clock)
+ if (sdvo_priv->pixel_clock_max < mode->clock)
return MODE_CLOCK_HIGH;
- if (intel_sdvo->is_lvds) {
- if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+	if (sdvo_priv->is_lvds) {
+ if (sdvo_priv->sdvo_lvds_fixed_mode == NULL)
return MODE_PANEL;
- if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay)
return MODE_PANEL;
}
return MODE_OK;
}
-static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
+static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
{
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
+ u8 status;
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+
+ return true;
}
/* No use! */
@@ -1296,12 +1368,12 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
- struct intel_sdvo *iout = NULL;
- struct intel_sdvo *sdvo;
+ struct intel_encoder *iout = NULL;
+ struct intel_sdvo_priv *sdvo;
/* find the sdvo connector */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_intel_sdvo(connector);
+ iout = to_intel_encoder(connector);
if (iout->type != INTEL_OUTPUT_SDVO)
continue;
@@ -1323,69 +1395,75 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
{
u8 response[2];
u8 status;
- struct intel_sdvo *intel_sdvo;
+ struct intel_encoder *intel_encoder;
DRM_DEBUG_KMS("\n");
if (!connector)
return 0;
- intel_sdvo = to_intel_sdvo(connector);
+ intel_encoder = to_intel_encoder(connector);
+
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
+
+	if (response[0] != 0)
+ return 1;
- return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- &response, 2) && response[0];
+ return 0;
}
void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
{
u8 response[2];
u8 status;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
+ struct intel_encoder *intel_encoder = to_intel_encoder(connector);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_sdvo, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &response, 2);
if (on) {
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_sdvo, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
} else {
response[0] = 0;
response[1] = 0;
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
}
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_sdvo, &response, 2);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ intel_sdvo_read_response(intel_encoder, &response, 2);
}
#endif
static bool
-intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
+intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int caps = 0;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
caps++;
- if (intel_sdvo->caps.output_flags &
+ if (sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
caps++;
@@ -1397,11 +1475,11 @@ intel_find_analog_connector(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct intel_sdvo *intel_sdvo;
+ struct intel_encoder *intel_encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (encoder == intel_attached_encoder(connector))
return connector;
@@ -1415,8 +1493,8 @@ static int
intel_analog_is_connected(struct drm_device *dev)
{
struct drm_connector *analog_connector;
-
analog_connector = intel_find_analog_connector(dev);
+
if (!analog_connector)
return false;
@@ -1431,52 +1509,54 @@ enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
enum drm_connector_status status = connector_status_connected;
struct edid *edid = NULL;
- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
/* This is only applied to SDVO cards with multiple outputs */
- if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
uint8_t saved_ddc, temp_ddc;
- saved_ddc = intel_sdvo->ddc_bus;
- temp_ddc = intel_sdvo->ddc_bus >> 1;
+ saved_ddc = sdvo_priv->ddc_bus;
+ temp_ddc = sdvo_priv->ddc_bus >> 1;
/*
* Don't use the 1 as the argument of DDC bus switch to get
* the EDID. It is used for SDVO SPD ROM.
*/
while (temp_ddc > 1) {
- intel_sdvo->ddc_bus = temp_ddc;
- edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
+ sdvo_priv->ddc_bus = temp_ddc;
+ edid = drm_get_edid(connector, intel_encoder->ddc_bus);
if (edid) {
/*
* When we can get the EDID, maybe it is the
* correct DDC bus. Update it.
*/
- intel_sdvo->ddc_bus = temp_ddc;
+ sdvo_priv->ddc_bus = temp_ddc;
break;
}
temp_ddc >>= 1;
}
if (edid == NULL)
- intel_sdvo->ddc_bus = saved_ddc;
+ sdvo_priv->ddc_bus = saved_ddc;
}
/* when there is no edid and no monitor is connected with VGA
* port, try to use the CRT ddc to read the EDID for DVI-connector
*/
- if (edid == NULL && intel_sdvo->analog_ddc_bus &&
+ if (edid == NULL && sdvo_priv->analog_ddc_bus &&
!intel_analog_is_connected(connector->dev))
- edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
+ edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
if (edid != NULL) {
bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
+ bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
/* DDC bus is shared, match EDID to connector type */
if (is_digital && need_digital)
- intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
+ sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
else if (is_digital != need_digital)
status = connector_status_disconnected;
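
In the detection path above, the saved_ddc/temp_ddc loop probes alternate DDC bus selectors by repeatedly shifting the current selector right, deliberately stopping before 1 because that value addresses the SDVO SPD ROM. A hedged sketch of the control flow, with probe() standing in for drm_get_edid():

    #include <stdint.h>

    extern int probe(uint8_t bus);  /* hypothetical stand-in for drm_get_edid() */

    /* Walk candidate DDC bus selectors from start/2 downward; return the
     * first one that yields an EDID, or -1 so the caller can restore the
     * saved selector. Selector 1 is skipped (SDVO SPD ROM). */
    static int find_working_ddc_bus(uint8_t start)
    {
            uint8_t bus;

            for (bus = start >> 1; bus > 1; bus >>= 1)
                    if (probe(bus))
                            return bus;
            return -1;
    }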
@@ -1492,29 +1572,33 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
{
uint16_t response;
+ u8 status;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
enum drm_connector_status ret;
- if (!intel_sdvo_write_cmd(intel_sdvo,
- SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
- return connector_status_unknown;
- if (intel_sdvo->is_tv) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+ if (sdvo_priv->is_tv) {
/* add 30ms delay when the output type is SDVO-TV */
mdelay(30);
}
- if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
- return connector_status_unknown;
+ status = intel_sdvo_read_response(intel_encoder, &response, 2);
DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return connector_status_unknown;
+
if (response == 0)
return connector_status_disconnected;
- intel_sdvo->attached_output = response;
+ sdvo_priv->attached_output = response;
- if ((intel_sdvo_connector->output_flag & response) == 0)
+ if ((sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
else if (response & SDVO_TMDS_MASK)
ret = intel_sdvo_hdmi_sink_detect(connector);
@@ -1523,16 +1607,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
/* May update encoder flags, like the clock for SDVO TV, etc. */
if (ret == connector_status_connected) {
- intel_sdvo->is_tv = false;
- intel_sdvo->is_lvds = false;
- intel_sdvo->base.needs_tv_clock = false;
+ sdvo_priv->is_tv = false;
+ sdvo_priv->is_lvds = false;
+ intel_encoder->needs_tv_clock = false;
if (response & SDVO_TV_MASK) {
- intel_sdvo->is_tv = true;
- intel_sdvo->base.needs_tv_clock = true;
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
}
if (response & SDVO_LVDS_MASK)
- intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+ sdvo_priv->is_lvds = true;
}
return ret;
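
The revert also changes the error-handling convention visible in this hunk: instead of bool-returning wrappers, commands go back to an explicit write followed by a read that yields a status byte, and only SDVO_CMD_STATUS_SUCCESS makes the reply buffer meaningful. A compact sketch of that calling pattern, with hypothetical helpers:

    #include <stdint.h>

    enum { CMD_STATUS_SUCCESS = 1 };                 /* illustrative value */

    extern void write_cmd(uint8_t op, const void *args, int len);  /* stand-in */
    extern uint8_t read_response(void *reply, int len);            /* stand-in */

    /* Issue one SDVO-style query: write the opcode, read the reply, and
     * treat the payload as valid only on a SUCCESS status. */
    static int query(uint8_t op, void *reply, int len)
    {
            write_cmd(op, NULL, 0);
            if (read_response(reply, len) != CMD_STATUS_SUCCESS)
                    return -1;      /* reply contents are undefined */
            return 0;
    }

Note that in intel_sdvo_detect() the response is logged before the status check, so the debug line can print stale data on failure, which is worth keeping in mind when reading such logs.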
@@ -1541,11 +1625,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
int num_modes;
/* set the bus switch and get the modes */
- num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+ num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
/*
* Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1554,11 +1639,11 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* which case we'll look there for the digital DDC data.
*/
if (num_modes == 0 &&
- intel_sdvo->analog_ddc_bus &&
+ sdvo_priv->analog_ddc_bus &&
!intel_analog_is_connected(connector->dev)) {
/* Switch to the analog ddc bus and try that
*/
- (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
+ (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus);
}
}
@@ -1630,43 +1715,52 @@ struct drm_display_mode sdvo_tv_modes[] = {
static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct intel_sdvo_sdtv_resolution_request tv_res;
uint32_t reply = 0, format_map = 0;
int i;
+ uint8_t status;
+
/* Read the list of supported input resolutions for the selected TV
* format.
*/
- format_map = 1 << intel_sdvo->tv_format_index;
+ for (i = 0; i < TV_FORMAT_NUM; i++)
+ if (tv_format_names[i] == sdvo_priv->tv_format_name)
+ break;
+
+ format_map = (1 << i);
memcpy(&tv_res, &format_map,
- min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
+ sizeof(struct intel_sdvo_sdtv_resolution_request) >
+ sizeof(format_map) ? sizeof(format_map) :
+ sizeof(struct intel_sdvo_sdtv_resolution_request));
- if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
- return;
+ intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output);
- BUILD_BUG_ON(sizeof(tv_res) != 3);
- if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
- &tv_res, sizeof(tv_res)))
- return;
- if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res));
+ status = intel_sdvo_read_response(intel_encoder, &reply, 3);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
return;
for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
if (reply & (1 << i)) {
struct drm_display_mode *nmode;
nmode = drm_mode_duplicate(connector->dev,
- &sdvo_tv_modes[i]);
+ &sdvo_tv_modes[i]);
if (nmode)
drm_mode_probed_add(connector, nmode);
}
+
}
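
The restored memcpy() in intel_sdvo_get_tv_modes() open-codes min(): it copies the smaller of the resolution-request structure and the format bitmap so neither object is overrun. The equivalent idiom in isolation:

    #include <string.h>

    /* Copy at most the smaller of the two objects' sizes; the ternary in
     * the patch computes exactly this min(). */
    #define COPY_CLAMPED(dst, src)                                        \
            memcpy(&(dst), &(src),                                        \
                   sizeof(dst) < sizeof(src) ? sizeof(dst) : sizeof(src))

The code being removed used the kernel's min() helper directly; the revert returns to the spelled-out comparison.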
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_display_mode *newmode;
/*
@@ -1674,7 +1768,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
+ intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
if (list_empty(&connector->probed_modes) == false)
goto end;
@@ -1693,9 +1787,8 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
end:
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
- intel_sdvo->sdvo_lvds_fixed_mode =
+ sdvo_priv->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
- intel_sdvo->is_lvds = true;
break;
}
}
@@ -1704,67 +1797,66 @@ end:
static int intel_sdvo_get_modes(struct drm_connector *connector)
{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (IS_TV(intel_sdvo_connector))
+ if (IS_TV(sdvo_connector))
intel_sdvo_get_tv_modes(connector);
- else if (IS_LVDS(intel_sdvo_connector))
+ else if (IS_LVDS(sdvo_connector))
intel_sdvo_get_lvds_modes(connector);
else
intel_sdvo_get_ddc_modes(connector);
- return !list_empty(&connector->probed_modes);
+ if (list_empty(&connector->probed_modes))
+ return 0;
+ return 1;
}
-static void
-intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+static void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
struct drm_device *dev = connector->dev;
- if (intel_sdvo_connector->left)
- drm_property_destroy(dev, intel_sdvo_connector->left);
- if (intel_sdvo_connector->right)
- drm_property_destroy(dev, intel_sdvo_connector->right);
- if (intel_sdvo_connector->top)
- drm_property_destroy(dev, intel_sdvo_connector->top);
- if (intel_sdvo_connector->bottom)
- drm_property_destroy(dev, intel_sdvo_connector->bottom);
- if (intel_sdvo_connector->hpos)
- drm_property_destroy(dev, intel_sdvo_connector->hpos);
- if (intel_sdvo_connector->vpos)
- drm_property_destroy(dev, intel_sdvo_connector->vpos);
- if (intel_sdvo_connector->saturation)
- drm_property_destroy(dev, intel_sdvo_connector->saturation);
- if (intel_sdvo_connector->contrast)
- drm_property_destroy(dev, intel_sdvo_connector->contrast);
- if (intel_sdvo_connector->hue)
- drm_property_destroy(dev, intel_sdvo_connector->hue);
- if (intel_sdvo_connector->sharpness)
- drm_property_destroy(dev, intel_sdvo_connector->sharpness);
- if (intel_sdvo_connector->flicker_filter)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
- if (intel_sdvo_connector->flicker_filter_2d)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
- if (intel_sdvo_connector->flicker_filter_adaptive)
- drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
- if (intel_sdvo_connector->tv_luma_filter)
- drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
- if (intel_sdvo_connector->tv_chroma_filter)
- drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
- if (intel_sdvo_connector->dot_crawl)
- drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
- if (intel_sdvo_connector->brightness)
- drm_property_destroy(dev, intel_sdvo_connector->brightness);
+ if (IS_TV(sdvo_priv)) {
+ if (sdvo_priv->left_property)
+ drm_property_destroy(dev, sdvo_priv->left_property);
+ if (sdvo_priv->right_property)
+ drm_property_destroy(dev, sdvo_priv->right_property);
+ if (sdvo_priv->top_property)
+ drm_property_destroy(dev, sdvo_priv->top_property);
+ if (sdvo_priv->bottom_property)
+ drm_property_destroy(dev, sdvo_priv->bottom_property);
+ if (sdvo_priv->hpos_property)
+ drm_property_destroy(dev, sdvo_priv->hpos_property);
+ if (sdvo_priv->vpos_property)
+ drm_property_destroy(dev, sdvo_priv->vpos_property);
+ if (sdvo_priv->saturation_property)
+ drm_property_destroy(dev,
+ sdvo_priv->saturation_property);
+ if (sdvo_priv->contrast_property)
+ drm_property_destroy(dev,
+ sdvo_priv->contrast_property);
+ if (sdvo_priv->hue_property)
+ drm_property_destroy(dev, sdvo_priv->hue_property);
+ }
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
+ if (sdvo_priv->brightness_property)
+ drm_property_destroy(dev,
+ sdvo_priv->brightness_property);
+ }
+ return;
}
static void intel_sdvo_destroy(struct drm_connector *connector)
{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
- if (intel_sdvo_connector->tv_format)
+ if (sdvo_connector->tv_format_property)
drm_property_destroy(connector->dev,
- intel_sdvo_connector->tv_format);
+ sdvo_connector->tv_format_property);
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
@@ -1778,118 +1870,132 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
+ struct drm_crtc *crtc = encoder->crtc;
+ int ret = 0;
+ bool changed = false;
+ uint8_t cmd, status;
uint16_t temp_value;
- uint8_t cmd;
- int ret;
ret = drm_connector_property_set_value(connector, property, val);
- if (ret)
- return ret;
-
-#define CHECK_PROPERTY(name, NAME) \
- if (intel_sdvo_connector->name == property) { \
- if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
- if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
- cmd = SDVO_CMD_SET_##NAME; \
- intel_sdvo_connector->cur_##name = temp_value; \
- goto set_value; \
- }
+ if (ret < 0)
+ goto out;
- if (property == intel_sdvo_connector->tv_format) {
- if (val >= TV_FORMAT_NUM)
- return -EINVAL;
+ if (property == sdvo_connector->tv_format_property) {
+ if (val >= TV_FORMAT_NUM) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (sdvo_priv->tv_format_name ==
+ sdvo_connector->tv_format_supported[val])
+ goto out;
- if (intel_sdvo->tv_format_index ==
- intel_sdvo_connector->tv_format_supported[val])
- return 0;
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val];
+ changed = true;
+ }
- intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
- goto done;
- } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
+ if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) {
+ cmd = 0;
temp_value = val;
- if (intel_sdvo_connector->left == property) {
+ if (sdvo_connector->left_property == property) {
drm_connector_property_set_value(connector,
- intel_sdvo_connector->right, val);
- if (intel_sdvo_connector->left_margin == temp_value)
- return 0;
-
- intel_sdvo_connector->left_margin = temp_value;
- intel_sdvo_connector->right_margin = temp_value;
- temp_value = intel_sdvo_connector->max_hscan -
- intel_sdvo_connector->left_margin;
+ sdvo_connector->right_property, val);
+ if (sdvo_connector->left_margin == temp_value)
+ goto out;
+
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- goto set_value;
- } else if (intel_sdvo_connector->right == property) {
+ } else if (sdvo_connector->right_property == property) {
drm_connector_property_set_value(connector,
- intel_sdvo_connector->left, val);
- if (intel_sdvo_connector->right_margin == temp_value)
- return 0;
-
- intel_sdvo_connector->left_margin = temp_value;
- intel_sdvo_connector->right_margin = temp_value;
- temp_value = intel_sdvo_connector->max_hscan -
- intel_sdvo_connector->left_margin;
+ sdvo_connector->left_property, val);
+ if (sdvo_connector->right_margin == temp_value)
+ goto out;
+
+ sdvo_connector->left_margin = temp_value;
+ sdvo_connector->right_margin = temp_value;
+ temp_value = sdvo_connector->max_hscan -
+ sdvo_connector->left_margin;
cmd = SDVO_CMD_SET_OVERSCAN_H;
- goto set_value;
- } else if (intel_sdvo_connector->top == property) {
+ } else if (sdvo_connector->top_property == property) {
drm_connector_property_set_value(connector,
- intel_sdvo_connector->bottom, val);
- if (intel_sdvo_connector->top_margin == temp_value)
- return 0;
-
- intel_sdvo_connector->top_margin = temp_value;
- intel_sdvo_connector->bottom_margin = temp_value;
- temp_value = intel_sdvo_connector->max_vscan -
- intel_sdvo_connector->top_margin;
+ sdvo_connector->bottom_property, val);
+ if (sdvo_connector->top_margin == temp_value)
+ goto out;
+
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- goto set_value;
- } else if (intel_sdvo_connector->bottom == property) {
+ } else if (sdvo_connector->bottom_property == property) {
drm_connector_property_set_value(connector,
- intel_sdvo_connector->top, val);
- if (intel_sdvo_connector->bottom_margin == temp_value)
- return 0;
-
- intel_sdvo_connector->top_margin = temp_value;
- intel_sdvo_connector->bottom_margin = temp_value;
- temp_value = intel_sdvo_connector->max_vscan -
- intel_sdvo_connector->top_margin;
+ sdvo_connector->top_property, val);
+ if (sdvo_connector->bottom_margin == temp_value)
+ goto out;
+ sdvo_connector->top_margin = temp_value;
+ sdvo_connector->bottom_margin = temp_value;
+ temp_value = sdvo_connector->max_vscan -
+ sdvo_connector->top_margin;
cmd = SDVO_CMD_SET_OVERSCAN_V;
- goto set_value;
+ } else if (sdvo_connector->hpos_property == property) {
+ if (sdvo_connector->cur_hpos == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_POSITION_H;
+ sdvo_connector->cur_hpos = temp_value;
+ } else if (sdvo_connector->vpos_property == property) {
+ if (sdvo_connector->cur_vpos == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_POSITION_V;
+ sdvo_connector->cur_vpos = temp_value;
+ } else if (sdvo_connector->saturation_property == property) {
+ if (sdvo_connector->cur_saturation == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_SATURATION;
+ sdvo_connector->cur_saturation = temp_value;
+ } else if (sdvo_connector->contrast_property == property) {
+ if (sdvo_connector->cur_contrast == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_CONTRAST;
+ sdvo_connector->cur_contrast = temp_value;
+ } else if (sdvo_connector->hue_property == property) {
+ if (sdvo_connector->cur_hue == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_HUE;
+ sdvo_connector->cur_hue = temp_value;
+ } else if (sdvo_connector->brightness_property == property) {
+ if (sdvo_connector->cur_brightness == temp_value)
+ goto out;
+
+ cmd = SDVO_CMD_SET_BRIGHTNESS;
+ sdvo_connector->cur_brightness = temp_value;
+ }
+ if (cmd) {
+ intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
+ status = intel_sdvo_read_response(intel_encoder,
+ NULL, 0);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO command \n");
+ return -EINVAL;
+ }
+ changed = true;
}
- CHECK_PROPERTY(hpos, HPOS)
- CHECK_PROPERTY(vpos, VPOS)
- CHECK_PROPERTY(saturation, SATURATION)
- CHECK_PROPERTY(contrast, CONTRAST)
- CHECK_PROPERTY(hue, HUE)
- CHECK_PROPERTY(brightness, BRIGHTNESS)
- CHECK_PROPERTY(sharpness, SHARPNESS)
- CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
- CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
- CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
- CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
- CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
- CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
}
-
- return -EINVAL; /* unknown property */
-
-set_value:
- if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
- return -EIO;
-
-
-done:
- if (encoder->crtc) {
- struct drm_crtc *crtc = encoder->crtc;
-
+ if (changed && crtc)
drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
- crtc->y, crtc->fb);
- }
-
- return 0;
-#undef CHECK_PROPERTY
+ crtc->y, crtc->fb);
+out:
+ return ret;
}
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
@@ -1916,16 +2022,22 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
- if (intel_sdvo->analog_ddc_bus)
- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+ if (intel_encoder->i2c_bus)
+ intel_i2c_destroy(intel_encoder->i2c_bus);
+ if (intel_encoder->ddc_bus)
+ intel_i2c_destroy(intel_encoder->ddc_bus);
+ if (sdvo_priv->analog_ddc_bus)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
- if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ if (sdvo_priv->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
- intel_sdvo->sdvo_lvds_fixed_mode);
+ sdvo_priv->sdvo_lvds_fixed_mode);
- intel_encoder_destroy(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
}
static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
@@ -1942,7 +2054,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
*/
static void
intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
- struct intel_sdvo *sdvo, u32 reg)
+ struct intel_sdvo_priv *sdvo, u32 reg)
{
struct sdvo_device_mapping *mapping;
@@ -1955,46 +2067,57 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
}
static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device)
{
- return intel_sdvo_set_target_output(intel_sdvo,
- device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
- &intel_sdvo->is_hdmi, 1);
+ struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+ uint8_t status;
+
+ if (device == 0)
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
+ else
+ intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
+
+ intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
+ status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return false;
+ return true;
}
-static struct intel_sdvo *
-intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
+static struct intel_encoder *
+intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
{
struct drm_device *dev = chan->drm_dev;
struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder = NULL;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
- if (intel_sdvo->base.ddc_bus == &chan->adapter)
- return intel_sdvo;
+ intel_encoder = enc_to_intel_encoder(encoder);
+ if (intel_encoder->ddc_bus == &chan->adapter)
+ break;
}
-
- return NULL;
+ return intel_encoder;
}
static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msgs[], int num)
{
- struct intel_sdvo *intel_sdvo;
+ struct intel_encoder *intel_encoder;
+ struct intel_sdvo_priv *sdvo_priv;
struct i2c_algo_bit_data *algo_data;
const struct i2c_algorithm *algo;
algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
- intel_sdvo =
- intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
- (algo_data->data));
- if (intel_sdvo == NULL)
+ intel_encoder =
+ intel_sdvo_chan_to_intel_encoder(
+ (struct intel_i2c_chan *)(algo_data->data));
+ if (intel_encoder == NULL)
return -EINVAL;
- algo = intel_sdvo->base.i2c_bus->algo;
+ sdvo_priv = intel_encoder->dev_priv;
+ algo = intel_encoder->i2c_bus->algo;
- intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
+ intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
return algo->master_xfer(i2c_adap, msgs, num);
}
@@ -2039,9 +2162,27 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
return 0x72;
}
+static bool
+intel_sdvo_connector_alloc(struct intel_connector **ret)
+{
+ struct intel_connector *intel_connector;
+ struct intel_sdvo_connector *sdvo_connector;
+
+ *ret = kzalloc(sizeof(*intel_connector) +
+ sizeof(*sdvo_connector), GFP_KERNEL);
+ if (!*ret)
+ return false;
+
+ intel_connector = *ret;
+ sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
+ intel_connector->dev_priv = sdvo_connector;
+
+ return true;
+}
+
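
intel_sdvo_connector_alloc() above introduces a single-allocation pattern: the generic connector and its SDVO-specific part come from one kzalloc(), and dev_priv simply points just past the base structure. A self-contained sketch of the layout trick:

    #include <stdlib.h>

    struct base { void *dev_priv; /* generic fields ... */ };
    struct priv { int output_flag; /* type-specific fields ... */ };

    /* One zeroed allocation holds both objects; "base + 1" is the first
     * byte after the base struct, which is where the private part lives. */
    static struct base *alloc_with_priv(void)
    {
            struct base *b = calloc(1, sizeof(*b) + sizeof(struct priv));

            if (!b)
                    return NULL;
            b->dev_priv = b + 1;
            return b;
    }

A single free() (kfree() in the driver) then releases both parts, which is why the teardown paths only free the base object.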
static void
-intel_sdvo_connector_init(struct drm_encoder *encoder,
- struct drm_connector *connector)
+intel_sdvo_connector_create(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
connector->connector_type);
@@ -2057,470 +2198,582 @@ intel_sdvo_connector_init(struct drm_encoder *encoder,
}
static bool
-intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct intel_sdvo_connector *sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- if (!intel_sdvo_connector)
+ if (!intel_sdvo_connector_alloc(&intel_connector))
return false;
+ sdvo_connector = intel_connector->dev_priv;
+
if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
} else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
}
- intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
- && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
- && intel_sdvo->is_hdmi) {
+ if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode)
+ && intel_sdvo_get_digital_encoding_mode(intel_encoder, device)
+ && sdvo_priv->is_hdmi) {
/* enable hdmi encoding mode if supported */
- intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
- intel_sdvo_set_colorimetry(intel_sdvo,
+ intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+ intel_sdvo_set_colorimetry(intel_encoder,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_create(encoder, connector);
return true;
}
static bool
-intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct intel_sdvo_connector *sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- if (!intel_sdvo_connector)
- return false;
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
- intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_connector = intel_connector->dev_priv;
- intel_sdvo->controlled_output |= type;
- intel_sdvo_connector->output_flag = type;
+ sdvo_priv->controlled_output |= type;
+ sdvo_connector->output_flag = type;
- intel_sdvo->is_tv = true;
- intel_sdvo->base.needs_tv_clock = true;
- intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ sdvo_priv->is_tv = true;
+ intel_encoder->needs_tv_clock = true;
+ intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_create(encoder, connector);
- if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
- goto err;
+ intel_sdvo_tv_create_property(connector, type);
- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
- goto err;
+ intel_sdvo_create_enhance_property(connector);
return true;
-
-err:
- intel_sdvo_destroy_enhance_property(connector);
- kfree(intel_sdvo_connector);
- return false;
}
static bool
-intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct intel_sdvo_connector *sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- if (!intel_sdvo_connector)
- return false;
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
- intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_DAC;
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ sdvo_connector = intel_connector->dev_priv;
if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
} else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT));
+ intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT);
- intel_sdvo_connector_init(encoder, connector);
+ intel_sdvo_connector_create(encoder, connector);
return true;
}
static bool
-intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device)
{
- struct drm_encoder *encoder = &intel_sdvo->base.enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
struct drm_connector *connector;
struct intel_connector *intel_connector;
- struct intel_sdvo_connector *intel_sdvo_connector;
+ struct intel_sdvo_connector *sdvo_connector;
- intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
- if (!intel_sdvo_connector)
- return false;
+ if (!intel_sdvo_connector_alloc(&intel_connector))
+ return false;
- intel_connector = &intel_sdvo_connector->base;
- connector = &intel_connector->base;
+ connector = &intel_connector->base;
encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+ sdvo_connector = intel_connector->dev_priv;
+
+ sdvo_priv->is_lvds = true;
if (device == 0) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
} else if (device == 1) {
- intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
- intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1;
+ sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT));
-
- intel_sdvo_connector_init(encoder, connector);
- if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
- goto err;
+ intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- return true;
-
-err:
- intel_sdvo_destroy_enhance_property(connector);
- kfree(intel_sdvo_connector);
- return false;
+ intel_sdvo_connector_create(encoder, connector);
+ intel_sdvo_create_enhance_property(connector);
+ return true;
}
static bool
-intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
{
- intel_sdvo->is_tv = false;
- intel_sdvo->base.needs_tv_clock = false;
- intel_sdvo->is_lvds = false;
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+
+ sdvo_priv->is_tv = false;
+ intel_encoder->needs_tv_clock = false;
+ sdvo_priv->is_lvds = false;
/* SDVO requires that the XXX1 function block only exists when the XXX0 function does. */
if (flags & SDVO_OUTPUT_TMDS0)
- if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+ if (!intel_sdvo_dvi_init(intel_encoder, 0))
return false;
if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
- if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+ if (!intel_sdvo_dvi_init(intel_encoder, 1))
return false;
/* TV has no XXX1 function block */
if (flags & SDVO_OUTPUT_SVID0)
- if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0))
return false;
if (flags & SDVO_OUTPUT_CVBS0)
- if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
+ if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0))
return false;
if (flags & SDVO_OUTPUT_RGB0)
- if (!intel_sdvo_analog_init(intel_sdvo, 0))
+ if (!intel_sdvo_analog_init(intel_encoder, 0))
return false;
if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
- if (!intel_sdvo_analog_init(intel_sdvo, 1))
+ if (!intel_sdvo_analog_init(intel_encoder, 1))
return false;
if (flags & SDVO_OUTPUT_LVDS0)
- if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+ if (!intel_sdvo_lvds_init(intel_encoder, 0))
return false;
if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
- if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+ if (!intel_sdvo_lvds_init(intel_encoder, 1))
return false;
if ((flags & SDVO_OUTPUT_MASK) == 0) {
unsigned char bytes[2];
- intel_sdvo->controlled_output = 0;
- memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
+ sdvo_priv->controlled_output = 0;
+ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
- SDVO_NAME(intel_sdvo),
+ SDVO_NAME(sdvo_priv),
bytes[0], bytes[1]);
return false;
}
- intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
return true;
}
-static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector,
- int type)
+static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
struct intel_sdvo_tv_format format;
uint32_t format_map, i;
+ uint8_t status;
- if (!intel_sdvo_set_target_output(intel_sdvo, type))
- return false;
+ intel_sdvo_set_target_output(intel_encoder, type);
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
- &format, sizeof(format)))
- return false;
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &format, sizeof(format));
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ return;
- memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+ memcpy(&format_map, &format, sizeof(format) > sizeof(format_map) ?
+ sizeof(format_map) : sizeof(format));
if (format_map == 0)
- return false;
+ return;
- intel_sdvo_connector->format_supported_num = 0;
+ sdvo_connector->format_supported_num = 0;
for (i = 0 ; i < TV_FORMAT_NUM; i++)
- if (format_map & (1 << i))
- intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
+ if (format_map & (1 << i)) {
+ sdvo_connector->tv_format_supported
+ [sdvo_connector->format_supported_num++] =
+ tv_format_names[i];
+ }
- intel_sdvo_connector->tv_format =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "mode", intel_sdvo_connector->format_supported_num);
- if (!intel_sdvo_connector->tv_format)
- return false;
+ sdvo_connector->tv_format_property =
+ drm_property_create(
+ connector->dev, DRM_MODE_PROP_ENUM,
+ "mode", sdvo_connector->format_supported_num);
- for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+ for (i = 0; i < sdvo_connector->format_supported_num; i++)
drm_property_add_enum(
- intel_sdvo_connector->tv_format, i,
- i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+ sdvo_connector->tv_format_property, i,
+ i, sdvo_connector->tv_format_supported[i]);
- intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
- drm_connector_attach_property(&intel_sdvo_connector->base.base,
- intel_sdvo_connector->tv_format, 0);
- return true;
+ sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0];
+ drm_connector_attach_property(
+ connector, sdvo_connector->tv_format_property, 0);
}
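
intel_sdvo_tv_create_property() converts the supported-format bitmap into a dense array of names so the DRM enum property can be indexed 0..n-1 rather than by raw bit position. The packing step in isolation, with an illustrative format count:

    #include <stdint.h>

    #define FMT_NUM 19   /* stand-in for TV_FORMAT_NUM */

    /* Gather the names of all set bits into a dense array; the return
     * value corresponds to format_supported_num in the patch. */
    static int collect_formats(uint32_t map, const char *names[FMT_NUM],
                               const char *out[FMT_NUM])
    {
            int i, n = 0;

            for (i = 0; i < FMT_NUM; i++)
                    if (map & (1u << i))
                            out[n++] = names[i];
            return n;
    }

One caveat visible in the hunk: drm_property_create() can fail, and the restored code does not check tv_format_property for NULL before calling drm_property_add_enum(), unlike the code being removed.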
-#define ENHANCEMENT(name, NAME) do { \
- if (enhancements.name) { \
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
- !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
- return false; \
- intel_sdvo_connector->max_##name = data_value[0]; \
- intel_sdvo_connector->cur_##name = response; \
- intel_sdvo_connector->name = \
- drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
- if (!intel_sdvo_connector->name) return false; \
- intel_sdvo_connector->name->values[0] = 0; \
- intel_sdvo_connector->name->values[1] = data_value[0]; \
- drm_connector_attach_property(connector, \
- intel_sdvo_connector->name, \
- intel_sdvo_connector->cur_##name); \
- DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
- data_value[0], data_value[1], response); \
- } \
-} while(0)
-
-static bool
-intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector,
- struct intel_sdvo_enhancements_reply enhancements)
+static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
- struct drm_connector *connector = &intel_sdvo_connector->base.base;
+ struct drm_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
+ struct intel_sdvo_enhancements_reply sdvo_data;
+ struct drm_device *dev = connector->dev;
+ uint8_t status;
uint16_t response, data_value[2];
- /* when horizontal overscan is supported, Add the left/right property */
- if (enhancements.overscan_h) {
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_MAX_OVERSCAN_H,
- &data_value, 4))
- return false;
-
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_OVERSCAN_H,
- &response, 2))
- return false;
-
- intel_sdvo_connector->max_hscan = data_value[0];
- intel_sdvo_connector->left_margin = data_value[0] - response;
- intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
- intel_sdvo_connector->left =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left_margin", 2);
- if (!intel_sdvo_connector->left)
- return false;
-
- intel_sdvo_connector->left->values[0] = 0;
- intel_sdvo_connector->left->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- intel_sdvo_connector->left,
- intel_sdvo_connector->left_margin);
-
- intel_sdvo_connector->right =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right_margin", 2);
- if (!intel_sdvo_connector->right)
- return false;
-
- intel_sdvo_connector->right->values[0] = 0;
- intel_sdvo_connector->right->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- intel_sdvo_connector->right,
- intel_sdvo_connector->right_margin);
- DRM_DEBUG_KMS("h_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
+ intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
+ sizeof(sdvo_data));
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS(" incorrect response is returned\n");
+ return;
}
-
- if (enhancements.overscan_v) {
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_MAX_OVERSCAN_V,
- &data_value, 4))
- return false;
-
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_OVERSCAN_V,
- &response, 2))
- return false;
-
- intel_sdvo_connector->max_vscan = data_value[0];
- intel_sdvo_connector->top_margin = data_value[0] - response;
- intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
- intel_sdvo_connector->top =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top_margin", 2);
- if (!intel_sdvo_connector->top)
- return false;
-
- intel_sdvo_connector->top->values[0] = 0;
- intel_sdvo_connector->top->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- intel_sdvo_connector->top,
- intel_sdvo_connector->top_margin);
-
- intel_sdvo_connector->bottom =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom_margin", 2);
- if (!intel_sdvo_connector->bottom)
- return false;
-
- intel_sdvo_connector->bottom->values[0] = 0;
- intel_sdvo_connector->bottom->values[1] = data_value[0];
- drm_connector_attach_property(connector,
- intel_sdvo_connector->bottom,
- intel_sdvo_connector->bottom_margin);
- DRM_DEBUG_KMS("v_overscan: max %d, "
- "default %d, current %d\n",
- data_value[0], data_value[1], response);
+ response = *((uint16_t *)&sdvo_data);
+ if (!response) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return;
}
-
- ENHANCEMENT(hpos, HPOS);
- ENHANCEMENT(vpos, VPOS);
- ENHANCEMENT(saturation, SATURATION);
- ENHANCEMENT(contrast, CONTRAST);
- ENHANCEMENT(hue, HUE);
- ENHANCEMENT(sharpness, SHARPNESS);
- ENHANCEMENT(brightness, BRIGHTNESS);
- ENHANCEMENT(flicker_filter, FLICKER_FILTER);
- ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
- ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
- ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
- ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
-
- if (enhancements.dot_crawl) {
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
- return false;
-
- intel_sdvo_connector->max_dot_crawl = 1;
- intel_sdvo_connector->cur_dot_crawl = response & 0x1;
- intel_sdvo_connector->dot_crawl =
- drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
- if (!intel_sdvo_connector->dot_crawl)
- return false;
-
- intel_sdvo_connector->dot_crawl->values[0] = 0;
- intel_sdvo_connector->dot_crawl->values[1] = 1;
- drm_connector_attach_property(connector,
- intel_sdvo_connector->dot_crawl,
- intel_sdvo_connector->cur_dot_crawl);
- DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ if (IS_TV(sdvo_priv)) {
+ /* when horizontal overscan is supported, add the left/right
+ * property
+ */
+ if (sdvo_data.overscan_h) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO max "
+ "h_overscan\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
+ return;
+ }
+ sdvo_priv->max_hscan = data_value[0];
+ sdvo_priv->left_margin = data_value[0] - response;
+ sdvo_priv->right_margin = sdvo_priv->left_margin;
+ sdvo_priv->left_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left_margin", 2);
+ sdvo_priv->left_property->values[0] = 0;
+ sdvo_priv->left_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->left_property,
+ sdvo_priv->left_margin);
+ sdvo_priv->right_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right_margin", 2);
+ sdvo_priv->right_property->values[0] = 0;
+ sdvo_priv->right_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->right_property,
+ sdvo_priv->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.overscan_v) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO max "
+ "v_overscan\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
+ return;
+ }
+ sdvo_priv->max_vscan = data_value[0];
+ sdvo_priv->top_margin = data_value[0] - response;
+ sdvo_priv->bottom_margin = sdvo_priv->top_margin;
+ sdvo_priv->top_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top_margin", 2);
+ sdvo_priv->top_property->values[0] = 0;
+ sdvo_priv->top_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->top_property,
+ sdvo_priv->top_margin);
+ sdvo_priv->bottom_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom_margin", 2);
+ sdvo_priv->bottom_property->values[0] = 0;
+ sdvo_priv->bottom_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->bottom_property,
+ sdvo_priv->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.position_h) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_POSITION_H, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
+ return;
+ }
+ sdvo_priv->max_hpos = data_value[0];
+ sdvo_priv->cur_hpos = response;
+ sdvo_priv->hpos_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "hpos", 2);
+ sdvo_priv->hpos_property->values[0] = 0;
+ sdvo_priv->hpos_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->hpos_property,
+ sdvo_priv->cur_hpos);
+ DRM_DEBUG_KMS("h_position: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.position_v) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_POSITION_V, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
+ return;
+ }
+ sdvo_priv->max_vpos = data_value[0];
+ sdvo_priv->cur_vpos = response;
+ sdvo_priv->vpos_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "vpos", 2);
+ sdvo_priv->vpos_property->values[0] = 0;
+ sdvo_priv->vpos_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->vpos_property,
+ sdvo_priv->cur_vpos);
+ DRM_DEBUG_KMS("v_position: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.saturation) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_SATURATION, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
+ return;
+ }
+ sdvo_priv->max_saturation = data_value[0];
+ sdvo_priv->cur_saturation = response;
+ sdvo_priv->saturation_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "saturation", 2);
+ sdvo_priv->saturation_property->values[0] = 0;
+ sdvo_priv->saturation_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->saturation_property,
+ sdvo_priv->cur_saturation);
+ DRM_DEBUG_KMS("saturation: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.contrast) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_CONTRAST, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
+ return;
+ }
+ sdvo_priv->max_contrast = data_value[0];
+ sdvo_priv->cur_contrast = response;
+ sdvo_priv->contrast_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "contrast", 2);
+ sdvo_priv->contrast_property->values[0] = 0;
+ sdvo_priv->contrast_property->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->contrast_property,
+ sdvo_priv->cur_contrast);
+ DRM_DEBUG_KMS("contrast: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+ if (sdvo_data.hue) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_HUE, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_HUE, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
+ return;
+ }
+ sdvo_priv->max_hue = data_value[0];
+ sdvo_priv->cur_hue = response;
+ sdvo_priv->hue_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "hue", 2);
+ sdvo_priv->hue_property->values[0] = 0;
+ sdvo_priv->hue_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->hue_property,
+ sdvo_priv->cur_hue);
+ DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
}
-
- return true;
-}
-
-static bool
-intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector,
- struct intel_sdvo_enhancements_reply enhancements)
-{
- struct drm_device *dev = intel_sdvo->base.enc.dev;
- struct drm_connector *connector = &intel_sdvo_connector->base.base;
- uint16_t response, data_value[2];
-
- ENHANCEMENT(brightness, BRIGHTNESS);
-
- return true;
-}
-#undef ENHANCEMENT
-
-static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector *intel_sdvo_connector)
-{
- union {
- struct intel_sdvo_enhancements_reply reply;
- uint16_t response;
- } enhancements;
-
- if (!intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
- &enhancements, sizeof(enhancements)))
- return false;
-
- if (enhancements.response == 0) {
- DRM_DEBUG_KMS("No enhancement is supported\n");
- return true;
+ if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) {
+ if (sdvo_data.brightness) {
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &data_value, 4);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
+ return;
+ }
+ intel_sdvo_write_cmd(intel_encoder,
+ SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
+ status = intel_sdvo_read_response(intel_encoder,
+ &response, 2);
+ if (status != SDVO_CMD_STATUS_SUCCESS) {
+ DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
+ return;
+ }
+ sdvo_priv->max_brightness = data_value[0];
+ sdvo_priv->cur_brightness = response;
+ sdvo_priv->brightness_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "brightness", 2);
+ sdvo_priv->brightness_property->values[0] = 0;
+ sdvo_priv->brightness_property->values[1] =
+ data_value[0];
+ drm_connector_attach_property(connector,
+ sdvo_priv->brightness_property,
+ sdvo_priv->cur_brightness);
+ DRM_DEBUG_KMS("brightness: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
}
-
- if (IS_TV(intel_sdvo_connector))
- return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
- else if(IS_LVDS(intel_sdvo_connector))
- return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
- else
- return true;
-
+ return;
}
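
Each enhancement block in the function above repeats the same five steps: query the maximum (SDVO_CMD_GET_MAX_<NAME>), query the current value (SDVO_CMD_GET_<NAME>), create a 0..max range property, attach it at the current value, and log max/default/current. The ENHANCEMENT() macro deleted by this patch captured exactly that shape; a hedged sketch of the shared helper it amounts to, with hypothetical property helpers:

    #include <stdint.h>

    extern int query(uint8_t op, void *reply, int len);                 /* as sketched earlier */
    extern void *prop_create_range(const char *name, int min, int max); /* stand-in */
    extern void prop_attach(void *prop, uint16_t cur);                  /* stand-in */

    /* One enhancement property: read max and current, then publish a
     * 0..max range attached at the current value. Any query failure
     * aborts, matching the early returns in the patch. */
    static int add_range_prop(uint8_t get_max, uint8_t get_cur,
                              const char *name)
    {
            uint16_t maxdef[2], cur;   /* GET_MAX replies with max and default */

            if (query(get_max, maxdef, sizeof(maxdef)) ||
                query(get_cur, &cur, sizeof(cur)))
                    return -1;
            prop_attach(prop_create_range(name, 0, maxdef[0]), cur);
            return 0;
    }

In the driver itself, maxdef[1] (the hardware default) is used only for the debug message.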
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
- struct intel_sdvo *intel_sdvo;
+ struct intel_sdvo_priv *sdvo_priv;
u8 ch[0x40];
int i;
u32 i2c_reg, ddc_reg, analog_ddc_reg;
- intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
- if (!intel_sdvo)
+ intel_encoder = kcalloc(sizeof(struct intel_encoder) + sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
+ if (!intel_encoder) {
return false;
+ }
- intel_sdvo->sdvo_reg = sdvo_reg;
+ sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
+ sdvo_priv->sdvo_reg = sdvo_reg;
- intel_encoder = &intel_sdvo->base;
+ intel_encoder->dev_priv = sdvo_priv;
intel_encoder->type = INTEL_OUTPUT_SDVO;
if (HAS_PCH_SPLIT(dev)) {
@@ -2542,14 +2795,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (!intel_encoder->i2c_bus)
goto err_inteloutput;
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
+ sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
/* Save the bit-banging i2c functionality for use by the DDC wrapper */
intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
- if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
+ if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err_i2c;
@@ -2559,16 +2812,17 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
/* setup the DDC bus. */
if (IS_SDVOB(sdvo_reg)) {
intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOB/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
- intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
+ sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
"SDVOC/VGA DDC BUS");
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
- if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
+
+ if (intel_encoder->ddc_bus == NULL)
goto err_i2c;
/* Wrap with our custom algo which switches to DDC mode */
@@ -2579,56 +2833,53 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
/* In default case sdvo lvds is false */
- if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
- goto err_enc;
+ intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
- if (intel_sdvo_output_setup(intel_sdvo,
- intel_sdvo->caps.output_flags) != true) {
+ if (intel_sdvo_output_setup(intel_encoder,
+ sdvo_priv->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
- goto err_enc;
+ goto err_i2c;
}
- intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+ intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
/* Set the input timing to the screen. Assume always input 0. */
- if (!intel_sdvo_set_target_input(intel_sdvo))
- goto err_enc;
+ intel_sdvo_set_target_input(intel_encoder, true, false);
+
+ intel_sdvo_get_input_pixel_clock_range(intel_encoder,
+ &sdvo_priv->pixel_clock_min,
+ &sdvo_priv->pixel_clock_max);
- if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
- &intel_sdvo->pixel_clock_min,
- &intel_sdvo->pixel_clock_max))
- goto err_enc;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
- SDVO_NAME(intel_sdvo),
- intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
- intel_sdvo->caps.device_rev_id,
- intel_sdvo->pixel_clock_min / 1000,
- intel_sdvo->pixel_clock_max / 1000,
- (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
- (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ SDVO_NAME(sdvo_priv),
+ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
+ sdvo_priv->caps.device_rev_id,
+ sdvo_priv->pixel_clock_min / 1000,
+ sdvo_priv->pixel_clock_max / 1000,
+ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
/* check currently supported outputs */
- intel_sdvo->caps.output_flags &
+ sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
- intel_sdvo->caps.output_flags &
+ sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+
return true;
-err_enc:
- drm_encoder_cleanup(&intel_encoder->enc);
err_i2c:
- if (intel_sdvo->analog_ddc_bus != NULL)
- intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
+ if (sdvo_priv->analog_ddc_bus != NULL)
+ intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
if (intel_encoder->ddc_bus != NULL)
intel_i2c_destroy(intel_encoder->ddc_bus);
if (intel_encoder->i2c_bus != NULL)
intel_i2c_destroy(intel_encoder->i2c_bus);
err_inteloutput:
- kfree(intel_sdvo);
+ kfree(intel_encoder);
return false;
}
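The revert above also restores the simpler unwinding in intel_sdvo_init(): failures funnel through err_i2c and err_inteloutput rather than a separate err_enc label. The general shape of that idiom, as a rough sketch with entirely hypothetical names (not from the driver), is:

	static bool widget_setup(struct widget *w)
	{
		w->bus_a = bus_create("A");
		if (w->bus_a == NULL)
			goto err_out;
		w->bus_b = bus_create("B");
		if (w->bus_b == NULL)
			goto err_bus_a;
		if (!widget_probe(w))		/* hypothetical probe step */
			goto err_bus_b;
		return true;

	err_bus_b:
		bus_destroy(w->bus_b);		/* unwind in reverse order */
	err_bus_a:
		bus_destroy(w->bus_a);
	err_out:
		return false;
	}

Each failure point jumps to the label that releases exactly what was allocated before it, so no cleanup runs on resources that were never set up.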
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index a386b02..ba5cdf8 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -312,7 +312,7 @@ struct intel_sdvo_set_target_input_args {
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+/** 5 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
@@ -596,32 +596,32 @@ struct intel_sdvo_enhancements_reply {
unsigned int overscan_h:1;
unsigned int overscan_v:1;
- unsigned int hpos:1;
- unsigned int vpos:1;
+ unsigned int position_h:1;
+ unsigned int position_v:1;
unsigned int sharpness:1;
unsigned int dot_crawl:1;
unsigned int dither:1;
- unsigned int tv_chroma_filter:1;
- unsigned int tv_luma_filter:1;
+ unsigned int max_tv_chroma_filter:1;
+ unsigned int max_tv_luma_filter:1;
} __attribute__((packed));
/* Picture enhancement limits below are dependent on the current TV format,
* and thus need to be queried and set after it.
*/
-#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
-#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
-#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
+#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d
+#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b
+#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52
#define SDVO_CMD_GET_MAX_SATURATION 0x55
#define SDVO_CMD_GET_MAX_HUE 0x58
#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
-#define SDVO_CMD_GET_MAX_HPOS 0x67
-#define SDVO_CMD_GET_MAX_VPOS 0x6a
-#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
-#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
-#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
+#define SDVO_CMD_GET_MAX_POSITION_H 0x67
+#define SDVO_CMD_GET_MAX_POSITION_V 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA 0x77
struct intel_sdvo_enhancement_limits_reply {
u16 max_value;
u16 default_value;
@@ -638,10 +638,10 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
-#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
-#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
-#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
-#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
+#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50
+#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51
+#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53
+#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54
#define SDVO_CMD_GET_SATURATION 0x56
#define SDVO_CMD_SET_SATURATION 0x57
#define SDVO_CMD_GET_HUE 0x59
@@ -654,16 +654,16 @@ struct intel_sdvo_enhancement_limits_reply {
#define SDVO_CMD_SET_OVERSCAN_H 0x63
#define SDVO_CMD_GET_OVERSCAN_V 0x65
#define SDVO_CMD_SET_OVERSCAN_V 0x66
-#define SDVO_CMD_GET_HPOS 0x68
-#define SDVO_CMD_SET_HPOS 0x69
-#define SDVO_CMD_GET_VPOS 0x6b
-#define SDVO_CMD_SET_VPOS 0x6c
+#define SDVO_CMD_GET_POSITION_H 0x68
+#define SDVO_CMD_SET_POSITION_H 0x69
+#define SDVO_CMD_GET_POSITION_V 0x6b
+#define SDVO_CMD_SET_POSITION_V 0x6c
#define SDVO_CMD_GET_SHARPNESS 0x6e
#define SDVO_CMD_SET_SHARPNESS 0x6f
-#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
-#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
-#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
-#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
+#define SDVO_CMD_GET_TV_CHROMA 0x75
+#define SDVO_CMD_SET_TV_CHROMA 0x76
+#define SDVO_CMD_GET_TV_LUMA 0x78
+#define SDVO_CMD_SET_TV_LUMA 0x79
struct intel_sdvo_enhancements_arg {
u16 value;
}__attribute__((packed));
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2029ef..cc3726a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -44,9 +44,7 @@ enum tv_margin {
};
/** Private structure for the integrated TV support */
-struct intel_tv {
- struct intel_encoder base;
-
+struct intel_tv_priv {
int type;
char *tv_format;
int margin[4];
@@ -898,11 +896,6 @@ static const struct tv_mode tv_modes[] = {
},
};
-static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
-{
- return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
-}
-
static void
intel_tv_dpms(struct drm_encoder *encoder, int mode)
{
@@ -936,17 +929,19 @@ intel_tv_mode_lookup (char *tv_format)
}
static const struct tv_mode *
-intel_tv_mode_find (struct intel_tv *intel_tv)
+intel_tv_mode_find (struct intel_encoder *intel_encoder)
{
- return intel_tv_mode_lookup(intel_tv->tv_format);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+
+ return intel_tv_mode_lookup(tv_priv->tv_format);
}
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -962,8 +957,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct drm_device *dev = encoder->dev;
struct drm_mode_config *drm_config = &dev->mode_config;
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
struct drm_encoder *other_encoder;
if (!tv_mode)
@@ -988,8 +983,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1005,7 +1001,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
tv_ctl = I915_READ(TV_CTL);
tv_ctl &= TV_CTL_SAVE;
- switch (intel_tv->type) {
+ switch (tv_priv->type) {
default:
case DRM_MODE_CONNECTOR_Unknown:
case DRM_MODE_CONNECTOR_Composite:
@@ -1158,11 +1154,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Wait for vblank for the disable to take effect */
if (!IS_I9XX(dev))
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
/* Wait for vblank for the disable to take effect. */
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
/* Filter ctl must be set before TV_WIN_SIZE */
I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1172,12 +1168,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
ysize = 2*tv_mode->nbr_end + 1;
- xpos += intel_tv->margin[TV_MARGIN_LEFT];
- ypos += intel_tv->margin[TV_MARGIN_TOP];
- xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
- intel_tv->margin[TV_MARGIN_RIGHT]);
- ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
- intel_tv->margin[TV_MARGIN_BOTTOM]);
+ xpos += tv_priv->margin[TV_MARGIN_LEFT];
+ ypos += tv_priv->margin[TV_MARGIN_TOP];
+ xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
+ tv_priv->margin[TV_MARGIN_RIGHT]);
+ ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
+ tv_priv->margin[TV_MARGIN_BOTTOM]);
I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
@@ -1226,12 +1222,11 @@ static const struct drm_display_mode reported_modes[] = {
* \return false if TV is disconnected.
*/
static int
-intel_tv_detect_type (struct intel_tv *intel_tv)
+intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
{
- struct drm_encoder *encoder = &intel_tv->base.enc;
+ struct drm_encoder *encoder = &intel_encoder->enc;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
unsigned long irqflags;
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
@@ -1268,11 +1263,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
DAC_C_0_7_V);
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
tv_dac = I915_READ(TV_DAC);
I915_WRITE(TV_DAC, save_tv_dac);
I915_WRITE(TV_CTL, save_tv_ctl);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ intel_wait_for_vblank(dev);
/*
* A B C
* 0 1 1 Composite
@@ -1309,11 +1304,12 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
static void intel_tv_find_better_format(struct drm_connector *connector)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int i;
- if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+ if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
return;
@@ -1321,12 +1317,12 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
tv_mode = tv_modes + i;
- if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+ if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
tv_mode->component_only)
break;
}
- intel_tv->tv_format = tv_mode->name;
+ tv_priv->tv_format = tv_mode->name;
drm_connector_property_set_value(connector,
connector->dev->mode_config.tv_mode_property, i);
}
@@ -1340,31 +1336,31 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
static enum drm_connector_status
intel_tv_detect(struct drm_connector *connector)
{
+ struct drm_crtc *crtc;
struct drm_display_mode mode;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- int type;
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+ int dpms_mode;
+ int type = tv_priv->type;
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (encoder->crtc && encoder->crtc->enabled) {
- type = intel_tv_detect_type(intel_tv);
+ type = intel_tv_detect_type(encoder->crtc, intel_encoder);
} else {
- struct drm_crtc *crtc;
- int dpms_mode;
-
- crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
+ crtc = intel_get_load_detect_pipe(intel_encoder, connector,
&mode, &dpms_mode);
if (crtc) {
- type = intel_tv_detect_type(intel_tv);
- intel_release_load_detect_pipe(&intel_tv->base, connector,
+ type = intel_tv_detect_type(crtc, intel_encoder);
+ intel_release_load_detect_pipe(intel_encoder, connector,
dpms_mode);
} else
type = -1;
}
- intel_tv->type = type;
+ tv_priv->type = type;
if (type < 0)
return connector_status_disconnected;
@@ -1395,8 +1391,8 @@ intel_tv_chose_preferred_modes(struct drm_connector *connector,
struct drm_display_mode *mode_ptr)
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1421,8 +1417,8 @@ intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
int j, count = 0;
u64 tmp;
@@ -1487,7 +1483,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
{
struct drm_device *dev = connector->dev;
struct drm_encoder *encoder = intel_attached_encoder(connector);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
struct drm_crtc *crtc = encoder->crtc;
int ret = 0;
bool changed = false;
@@ -1497,30 +1494,30 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
goto out;
if (property == dev->mode_config.tv_left_margin_property &&
- intel_tv->margin[TV_MARGIN_LEFT] != val) {
- intel_tv->margin[TV_MARGIN_LEFT] = val;
+ tv_priv->margin[TV_MARGIN_LEFT] != val) {
+ tv_priv->margin[TV_MARGIN_LEFT] = val;
changed = true;
} else if (property == dev->mode_config.tv_right_margin_property &&
- intel_tv->margin[TV_MARGIN_RIGHT] != val) {
- intel_tv->margin[TV_MARGIN_RIGHT] = val;
+ tv_priv->margin[TV_MARGIN_RIGHT] != val) {
+ tv_priv->margin[TV_MARGIN_RIGHT] = val;
changed = true;
} else if (property == dev->mode_config.tv_top_margin_property &&
- intel_tv->margin[TV_MARGIN_TOP] != val) {
- intel_tv->margin[TV_MARGIN_TOP] = val;
+ tv_priv->margin[TV_MARGIN_TOP] != val) {
+ tv_priv->margin[TV_MARGIN_TOP] = val;
changed = true;
} else if (property == dev->mode_config.tv_bottom_margin_property &&
- intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
- intel_tv->margin[TV_MARGIN_BOTTOM] = val;
+ tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
+ tv_priv->margin[TV_MARGIN_BOTTOM] = val;
changed = true;
} else if (property == dev->mode_config.tv_mode_property) {
if (val >= ARRAY_SIZE(tv_modes)) {
ret = -EINVAL;
goto out;
}
- if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
+ if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
goto out;
- intel_tv->tv_format = tv_modes[val].name;
+ tv_priv->tv_format = tv_modes[val].name;
changed = true;
} else {
ret = -EINVAL;
@@ -1556,8 +1553,16 @@ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs =
.best_encoder = intel_attached_encoder,
};
+static void intel_tv_enc_destroy(struct drm_encoder *encoder)
+{
+ struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
- .destroy = intel_encoder_destroy,
+ .destroy = intel_tv_enc_destroy,
};
/*
@@ -1601,9 +1606,9 @@ intel_tv_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- struct intel_tv *intel_tv;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
+ struct intel_tv_priv *tv_priv;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char **tv_format_names;
int i, initial_mode = 0;
@@ -1642,18 +1647,18 @@ intel_tv_init(struct drm_device *dev)
(tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
return;
- intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
- if (!intel_tv) {
+ intel_encoder = kzalloc(sizeof(struct intel_encoder) +
+ sizeof(struct intel_tv_priv), GFP_KERNEL);
+ if (!intel_encoder) {
return;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_tv);
+ kfree(intel_encoder);
return;
}
- intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
@@ -1663,20 +1668,22 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+ tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
- intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
+ intel_encoder->dev_priv = tv_priv;
+ tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
- intel_tv->margin[TV_MARGIN_LEFT] = 54;
- intel_tv->margin[TV_MARGIN_TOP] = 36;
- intel_tv->margin[TV_MARGIN_RIGHT] = 46;
- intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
+ tv_priv->margin[TV_MARGIN_LEFT] = 54;
+ tv_priv->margin[TV_MARGIN_TOP] = 36;
+ tv_priv->margin[TV_MARGIN_RIGHT] = 46;
+ tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
- intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+ tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
@@ -1696,16 +1703,16 @@ intel_tv_init(struct drm_device *dev)
initial_mode);
drm_connector_attach_property(connector,
dev->mode_config.tv_left_margin_property,
- intel_tv->margin[TV_MARGIN_LEFT]);
+ tv_priv->margin[TV_MARGIN_LEFT]);
drm_connector_attach_property(connector,
dev->mode_config.tv_top_margin_property,
- intel_tv->margin[TV_MARGIN_TOP]);
+ tv_priv->margin[TV_MARGIN_TOP]);
drm_connector_attach_property(connector,
dev->mode_config.tv_right_margin_property,
- intel_tv->margin[TV_MARGIN_RIGHT]);
+ tv_priv->margin[TV_MARGIN_RIGHT]);
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
- intel_tv->margin[TV_MARGIN_BOTTOM]);
+ tv_priv->margin[TV_MARGIN_BOTTOM]);
out:
drm_sysfs_connector_add(connector);
}
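Both of the reverts above go back to carving the encoder and its private data out of a single allocation, with dev_priv pointing just past the encoder structure. A condensed restatement of that pattern (tv_priv layout is hypothetical here):

	struct tv_priv {
		int margin[4];
	};

	static struct intel_encoder *encoder_alloc_with_priv(void)
	{
		/* One zeroed allocation covers the encoder and its private
		 * data, mirroring kzalloc(sizeof(*enc) + sizeof(*priv)). */
		struct intel_encoder *enc;

		enc = kzalloc(sizeof(struct intel_encoder) +
			      sizeof(struct tv_priv), GFP_KERNEL);
		if (enc == NULL)
			return NULL;
		enc->dev_priv = (struct tv_priv *)(enc + 1);
		return enc;	/* a single kfree(enc) releases both */
	}

The trade-off against a dedicated containing structure is that container_of() can no longer be used; the dev_priv pointer is the only link from the encoder to its private data.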
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c908c5f..5808731 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -28,7 +28,7 @@ struct evdev {
int minor;
struct input_handle handle;
wait_queue_head_t wait;
- struct evdev_client *grab;
+ struct evdev_client __rcu *grab;
struct list_head client_list;
spinlock_t client_lock; /* protects client_list */
struct mutex mutex;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 29e850a..1318ee0 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -127,7 +127,10 @@ static void handle_tx(struct vhost_net *net)
size_t len, total_len = 0;
int err, wmem;
size_t hdr_size;
- struct socket *sock = rcu_dereference(vq->private_data);
+ struct socket *sock;
+
+ sock = rcu_dereference_check(vq->private_data,
+ lockdep_is_held(&vq->mutex));
if (!sock)
return;
@@ -582,7 +585,10 @@ static void vhost_net_disable_vq(struct vhost_net *n,
static void vhost_net_enable_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
- struct socket *sock = vq->private_data;
+ struct socket *sock;
+
+ sock = rcu_dereference_protected(vq->private_data,
+ lockdep_is_held(&vq->mutex));
if (!sock)
return;
if (vq == n->vqs + VHOST_NET_VQ_TX) {
@@ -598,7 +604,8 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
struct socket *sock;
mutex_lock(&vq->mutex);
- sock = vq->private_data;
+ sock = rcu_dereference_protected(vq->private_data,
+ lockdep_is_held(&vq->mutex));
vhost_net_disable_vq(n, vq);
rcu_assign_pointer(vq->private_data, NULL);
mutex_unlock(&vq->mutex);
@@ -736,7 +743,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
/* start polling new socket */
- oldsock = vq->private_data;
+ oldsock = rcu_dereference_protected(vq->private_data,
+ lockdep_is_held(&vq->mutex));
if (sock != oldsock) {
vhost_net_disable_vq(n, vq);
rcu_assign_pointer(vq->private_data, sock);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e05557d..b5c4947 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -284,7 +284,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
vhost_dev_cleanup(dev);
memory->nregions = 0;
- dev->memory = memory;
+ RCU_INIT_POINTER(dev->memory, memory);
return 0;
}
@@ -316,8 +316,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
- kfree(dev->memory);
- dev->memory = NULL;
+ kfree(rcu_dereference_protected(dev->memory,
+ lockdep_is_held(&dev->mutex)));
+ RCU_INIT_POINTER(dev->memory, NULL);
if (dev->mm)
mmput(dev->mm);
dev->mm = NULL;
@@ -401,14 +402,22 @@ static int vq_access_ok(unsigned int num,
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
- return memory_access_ok(dev, dev->memory, 1);
+ struct vhost_memory *mp;
+
+ mp = rcu_dereference_protected(dev->memory,
+ lockdep_is_held(&dev->mutex));
+ return memory_access_ok(dev, mp, 1);
}
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
{
- return vq_memory_access_ok(log_base, vq->dev->memory,
+ struct vhost_memory *mp;
+
+ mp = rcu_dereference_protected(vq->dev->memory,
+ lockdep_is_held(&vq->mutex));
+ return vq_memory_access_ok(log_base, mp,
vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
(!vq->log_used || log_access_ok(log_base, vq->log_addr,
sizeof *vq->used +
@@ -448,7 +457,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
kfree(newmem);
return -EFAULT;
}
- oldmem = d->memory;
+ oldmem = rcu_dereference_protected(d->memory,
+ lockdep_is_held(&d->mutex));
rcu_assign_pointer(d->memory, newmem);
synchronize_rcu();
kfree(oldmem);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index afd7729..af3c11d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -106,7 +106,7 @@ struct vhost_virtqueue {
* vhost_work execution acts instead of rcu_read_lock() and the end of
* vhost_work execution acts instead of rcu_read_unlock().
* Writers use virtqueue mutex. */
- void *private_data;
+ void __rcu *private_data;
/* Log write descriptors */
void __user *log_base;
struct vhost_log log[VHOST_NET_MAX_SG];
@@ -116,7 +116,7 @@ struct vhost_dev {
/* Readers use RCU to access memory table pointer
* log base pointer and features.
* Writers use mutex below.*/
- struct vhost_memory *memory;
+ struct vhost_memory __rcu *memory;
struct mm_struct *mm;
struct mutex mutex;
unsigned acked_features;
@@ -173,7 +173,11 @@ enum {
static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
- unsigned acked_features = rcu_dereference(dev->acked_features);
+ unsigned acked_features;
+
+ acked_features =
+ rcu_dereference_index_check(dev->acked_features,
+ lockdep_is_held(&dev->mutex));
return acked_features & (1 << bit);
}
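The vhost changes above all follow one pattern: readers use rcu_dereference() under rcu_read_lock() (or its vhost_work equivalent), while the updater, serialized on a mutex, uses rcu_dereference_protected() with lockdep_is_held() and only then publishes or frees. A condensed sketch of the same pattern, with hypothetical structure names:

	#include <linux/mutex.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct cfg {
		int val;
	};

	struct mydev {
		struct mutex mutex;
		struct cfg __rcu *cfg;
	};

	static int mydev_read_val(struct mydev *d)
	{
		struct cfg *c;
		int val = -1;

		rcu_read_lock();
		c = rcu_dereference(d->cfg);	/* barrier + ACCESS_ONCE() */
		if (c != NULL)
			val = c->val;
		rcu_read_unlock();
		return val;
	}

	static void mydev_replace_cfg(struct mydev *d, struct cfg *newc)
	{
		struct cfg *oldc;

		mutex_lock(&d->mutex);
		oldc = rcu_dereference_protected(d->cfg,
						 lockdep_is_held(&d->mutex));
		rcu_assign_pointer(d->cfg, newc);
		mutex_unlock(&d->mutex);
		synchronize_rcu();	/* wait for pre-existing readers */
		kfree(oldc);
	}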
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 4317f17..6af1c00 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -775,7 +775,6 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
start * sects_per_block,
nblocks * sects_per_block,
GFP_NOFS,
- BLKDEV_IFL_WAIT |
BLKDEV_IFL_BARRIER);
if (ret < 0)
return ret;
@@ -786,8 +785,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
ret = blkdev_issue_discard(nilfs->ns_bdev,
start * sects_per_block,
nblocks * sects_per_block,
- GFP_NOFS,
- BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+ GFP_NOFS, BLKDEV_IFL_BARRIER);
return ret;
}
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed3e92e..3cb7d04 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -75,7 +75,7 @@ struct cgroup_subsys_state {
unsigned long flags;
/* ID for this css, if possible */
- struct css_id *id;
+ struct css_id __rcu *id;
};
/* bits in struct cgroup_subsys_state flags field */
@@ -205,7 +205,7 @@ struct cgroup {
struct list_head children; /* my children */
struct cgroup *parent; /* my parent */
- struct dentry *dentry; /* cgroup fs entry, RCU protected */
+ struct dentry __rcu *dentry; /* cgroup fs entry, RCU protected */
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c1a62c5..320d6c9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -16,7 +16,11 @@
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
+#ifdef CONFIG_SPARSE_RCU_POINTER
+# define __rcu __attribute__((noderef, address_space(4)))
+#else
# define __rcu
+#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
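With CONFIG_SPARSE_RCU_POINTER=y, sparse places __rcu pointers in address space 4, so a plain dereference of an annotated pointer now draws a warning. A tiny illustration with hypothetical structures:

	struct foo {
		int a;
	};

	struct bar {
		struct foo __rcu *f;
	};

	static int broken_read(struct bar *b)
	{
		return b->f->a;		/* sparse: incompatible address spaces */
	}

	static int correct_read(struct bar *b)
	{
		int a;

		rcu_read_lock();
		a = rcu_dereference(b->f)->a;	/* strips __rcu after checking */
		rcu_read_unlock();
		return a;
	}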
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4d2c395..4aaeab3 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -84,7 +84,7 @@ struct thread_group_cred {
atomic_t usage;
pid_t tgid; /* thread group process ID */
spinlock_t lock;
- struct key *session_keyring; /* keyring inherited over fork */
+ struct key __rcu *session_keyring; /* keyring inherited over fork */
struct key *process_keyring; /* keyring private to this process */
struct rcu_head rcu; /* RCU deletion hook */
};
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index f59ed29..133c0ba 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -31,7 +31,7 @@ struct embedded_fd_set {
struct fdtable {
unsigned int max_fds;
- struct file ** fd; /* current fd array */
+ struct file __rcu **fd; /* current fd array */
fd_set *close_on_exec;
fd_set *open_fds;
struct rcu_head rcu;
@@ -46,7 +46,7 @@ struct files_struct {
* read mostly part
*/
atomic_t count;
- struct fdtable *fdt;
+ struct fdtable __rcu *fdt;
struct fdtable fdtab;
/*
* written part on a separate cache line in SMP
@@ -55,7 +55,7 @@ struct files_struct {
int next_fd;
struct embedded_fd_set close_on_exec_init;
struct embedded_fd_set open_fds_init;
- struct file * fd_array[NR_OPEN_DEFAULT];
+ struct file __rcu * fd_array[NR_OPEN_DEFAULT];
};
#define rcu_dereference_check_fdtable(files, fdtfd) \
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 76041b6..aa3dc8d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1380,7 +1380,7 @@ struct super_block {
* Saved mount options for lazy filesystems using
* generic_show_options()
*/
- char *s_options;
+ char __rcu *s_options;
};
extern struct timespec current_fs_time(struct super_block *sb);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 5f2f4c4..af3f06b 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -129,8 +129,8 @@ struct blk_scsi_cmd_filter {
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
- struct hd_struct *last_lookup;
- struct hd_struct *part[];
+ struct hd_struct __rcu *last_lookup;
+ struct hd_struct __rcu *part[];
};
struct gendisk {
@@ -149,7 +149,7 @@ struct gendisk {
* non-critical accesses use RCU. Always access through
* helpers.
*/
- struct disk_part_tbl *part_tbl;
+ struct disk_part_tbl __rcu *part_tbl;
struct hd_struct part0;
const struct block_device_operations *fops;
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index d5b3876..1f4517d 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,7 +139,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
#endif
#if defined(CONFIG_NO_HZ)
-#if defined(CONFIG_TINY_RCU)
+#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);
diff --git a/include/linux/idr.h b/include/linux/idr.h
index e968db7..cdb715e 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -50,14 +50,14 @@
struct idr_layer {
unsigned long bitmap; /* A zero bit means "space here" */
- struct idr_layer *ary[1<<IDR_BITS];
+ struct idr_layer __rcu *ary[1<<IDR_BITS];
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ struct mm_struct {
* new_owner->mm == mm
* new_owner->alloc_lock is held
*/
- struct task_struct *owner;
+ struct task_struct __rcu *owner;
#endif
#ifdef CONFIG_PROC_FS
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 508f8cf..d0edf7d 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -185,7 +185,7 @@ struct nfs_inode {
struct nfs4_cached_acl *nfs4_acl;
/* NFSv4 state */
struct list_head open_states;
- struct nfs_delegation *delegation;
+ struct nfs_delegation __rcu *delegation;
fmode_t delegation_state;
struct rw_semaphore rwsem;
#endif /* CONFIG_NFS_V4*/
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index b2f1a4d..2026f9e 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -49,28 +49,28 @@
struct notifier_block {
int (*notifier_call)(struct notifier_block *, unsigned long, void *);
- struct notifier_block *next;
+ struct notifier_block __rcu *next;
int priority;
};
struct atomic_notifier_head {
spinlock_t lock;
- struct notifier_block *head;
+ struct notifier_block __rcu *head;
};
struct blocking_notifier_head {
struct rw_semaphore rwsem;
- struct notifier_block *head;
+ struct notifier_block __rcu *head;
};
struct raw_notifier_head {
- struct notifier_block *head;
+ struct notifier_block __rcu *head;
};
struct srcu_notifier_head {
struct mutex mutex;
struct srcu_struct srcu;
- struct notifier_block *head;
+ struct notifier_block __rcu *head;
};
#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 634b8e6..a39cbed 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -47,6 +47,8 @@ static inline void *radix_tree_indirect_to_ptr(void *ptr)
{
return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
}
+#define radix_tree_indirect_to_ptr(ptr) \
+ radix_tree_indirect_to_ptr((void __force *)(ptr))
static inline int radix_tree_is_indirect_ptr(void *ptr)
{
@@ -61,7 +63,7 @@ static inline int radix_tree_is_indirect_ptr(void *ptr)
struct radix_tree_root {
unsigned int height;
gfp_t gfp_mask;
- struct radix_tree_node *rnode;
+ struct radix_tree_node __rcu *rnode;
};
#define RADIX_TREE_INIT(mask) { \
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4ec3b38..f31ef61 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -10,6 +10,21 @@
#include <linux/rcupdate.h>
/*
+ * Why is there no list_empty_rcu()? Because list_empty() serves this
+ * purpose. The list_empty() function fetches the RCU-protected pointer
+ * and compares it to the address of the list head, but neither dereferences
+ * this pointer itself nor provides this pointer to the caller. Therefore,
+ * it is not necessary to use rcu_dereference(), so that list_empty() can
+ * be used anywhere you would want to use a list_empty_rcu().
+ */
+
+/*
+ * return the ->next pointer of a list_head in an rcu safe
+ * way, we must not access it directly
+ */
+#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
+
+/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
@@ -20,7 +35,7 @@ static inline void __list_add_rcu(struct list_head *new,
{
new->next = next;
new->prev = prev;
- rcu_assign_pointer(prev->next, new);
+ rcu_assign_pointer(list_next_rcu(prev), new);
next->prev = new;
}
@@ -138,7 +153,7 @@ static inline void list_replace_rcu(struct list_head *old,
{
new->next = old->next;
new->prev = old->prev;
- rcu_assign_pointer(new->prev->next, new);
+ rcu_assign_pointer(list_next_rcu(new->prev), new);
new->next->prev = new;
old->prev = LIST_POISON2;
}
@@ -193,7 +208,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
*/
last->next = at;
- rcu_assign_pointer(head->next, first);
+ rcu_assign_pointer(list_next_rcu(head), first);
first->prev = head;
at->prev = last;
}
@@ -208,7 +223,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
- container_of(rcu_dereference_raw(ptr), type, member)
+ ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
+ container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
+ })
/**
* list_first_entry_rcu - get the first element from a list
@@ -225,9 +242,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
list_entry_rcu((ptr)->next, type, member)
#define __list_for_each_rcu(pos, head) \
- for (pos = rcu_dereference_raw((head)->next); \
+ for (pos = rcu_dereference_raw(list_next_rcu(head)); \
pos != (head); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_raw(list_next_rcu((pos))))
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
@@ -257,9 +274,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_continue_rcu(pos, head) \
- for ((pos) = rcu_dereference_raw((pos)->next); \
+ for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
prefetch((pos)->next), (pos) != (head); \
- (pos) = rcu_dereference_raw((pos)->next))
+ (pos) = rcu_dereference_raw(list_next_rcu(pos)))
/**
* list_for_each_entry_continue_rcu - continue iteration over list of given type
@@ -314,12 +331,19 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
new->next = next;
new->pprev = old->pprev;
- rcu_assign_pointer(*new->pprev, new);
+ rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
if (next)
new->next->pprev = &new->next;
old->pprev = LIST_POISON2;
}
+/*
+ * return the first or the next element in an RCU protected hlist
+ */
+#define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first)))
+#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next)))
+#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev)))
+
/**
* hlist_add_head_rcu
* @n: the element to add to the hash list.
@@ -346,7 +370,7 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
n->next = first;
n->pprev = &h->first;
- rcu_assign_pointer(h->first, n);
+ rcu_assign_pointer(hlist_first_rcu(h), n);
if (first)
first->pprev = &n->next;
}
@@ -374,7 +398,7 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
{
n->pprev = next->pprev;
n->next = next;
- rcu_assign_pointer(*(n->pprev), n);
+ rcu_assign_pointer(hlist_pprev_rcu(n), n);
next->pprev = &n->next;
}
@@ -401,15 +425,15 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
{
n->next = prev->next;
n->pprev = &prev->next;
- rcu_assign_pointer(prev->next, n);
+ rcu_assign_pointer(hlist_next_rcu(prev), n);
if (n->next)
n->next->pprev = &n->next;
}
-#define __hlist_for_each_rcu(pos, head) \
- for (pos = rcu_dereference((head)->first); \
- pos && ({ prefetch(pos->next); 1; }); \
- pos = rcu_dereference(pos->next))
+#define __hlist_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference(hlist_first_rcu(head)); \
+ pos && ({ prefetch(pos->next); 1; }); \
+ pos = rcu_dereference(hlist_next_rcu(pos)))
/**
* hlist_for_each_entry_rcu - iterate over rcu list of given type
@@ -422,11 +446,11 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
* as long as the traversal is guarded by rcu_read_lock().
*/
-#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = rcu_dereference_raw((head)->first); \
+#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
+ for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \
pos && ({ prefetch(pos->next); 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_raw(hlist_next_rcu(pos)))
/**
* hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
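The list_next_rcu()/hlist_*_rcu() accessors introduced above exist so that rcu_assign_pointer() and rcu_dereference_raw() see a properly __rcu-annotated lvalue even though struct list_head itself carries no annotation. Read-side use is unchanged; for instance (hypothetical item type, with the update side assumed to hold its own lock and add entries via list_add_rcu()):

	struct item {
		struct list_head list;
		int key;
	};

	static LIST_HEAD(items);

	static bool item_present(int key)
	{
		struct item *it;
		bool found = false;

		rcu_read_lock();
		list_for_each_entry_rcu(it, &items, list) {
			if (it->key == key) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}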
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index b70ffe5..2ae1371 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -37,6 +37,12 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
}
}
+#define hlist_nulls_first_rcu(head) \
+ (*((struct hlist_nulls_node __rcu __force **)&(head)->first))
+
+#define hlist_nulls_next_rcu(node) \
+ (*((struct hlist_nulls_node __rcu __force **)&(node)->next))
+
/**
* hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
@@ -88,7 +94,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
n->next = first;
n->pprev = &h->first;
- rcu_assign_pointer(h->first, n);
+ rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
first->pprev = &n->next;
}
@@ -100,11 +106,11 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
* @member: the name of the hlist_nulls_node within the struct.
*
*/
-#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = rcu_dereference_raw((head)->first); \
- (!is_a_nulls(pos)) && \
+#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
+ for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ (!is_a_nulls(pos)) && \
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
#endif
#endif
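hlist_nulls lists are typically paired with SLAB_DESTROY_BY_RCU, where an object can be freed and reused while a reader is still walking the chain. The nulls value at the end of each chain encodes which bucket the reader finished in, so a lookup that ends on an unexpected nulls marker must retry. A sketch with a hypothetical connection type:

	struct conn {
		struct hlist_nulls_node node;
		int id;
	};

	/* Caller holds rcu_read_lock(); "slot" is the nulls value stored
	 * when this chain was initialized. */
	static struct conn *conn_lookup(struct hlist_nulls_head *chain,
					unsigned long slot, int id)
	{
		struct conn *c;
		struct hlist_nulls_node *pos;

	begin:
		hlist_nulls_for_each_entry_rcu(c, pos, chain, node) {
			if (c->id == id)
				return c;
		}
		if (get_nulls_value(pos) != slot)
			goto begin;	/* entry moved chains: retry */
		return NULL;
	}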
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 9fbc54a..89414d6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -41,11 +41,15 @@
#include
#include
#include
+#include
#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
+#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
+#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+
/**
* struct rcu_head - callback structure for use with RCU
* @next: next update requests in a list
@@ -57,29 +61,94 @@ struct rcu_head {
};
/* Exported common interfaces */
-extern void rcu_barrier(void);
+extern void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu));
+extern void synchronize_sched(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
extern void synchronize_sched_expedited(void);
extern int sched_expedited_torture_stats(char *page);
+static inline void __rcu_read_lock_bh(void)
+{
+ local_bh_disable();
+}
+
+static inline void __rcu_read_unlock_bh(void)
+{
+ local_bh_enable();
+}
+
+#ifdef CONFIG_PREEMPT_RCU
+
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+void synchronize_rcu(void);
+
+/*
+ * Defined as a macro as it is a very low level header included from
+ * areas that don't even know about current. This gives the rcu_read_lock()
+ * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
+ * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+ */
+#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+static inline void __rcu_read_lock(void)
+{
+ preempt_disable();
+}
+
+static inline void __rcu_read_unlock(void)
+{
+ preempt_enable();
+}
+
+static inline void synchronize_rcu(void)
+{
+ synchronize_sched();
+}
+
+static inline int rcu_preempt_depth(void)
+{
+ return 0;
+}
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
/* Internal to kernel */
extern void rcu_init(void);
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
+extern void rcu_check_callbacks(int cpu, int user);
+struct notifier_block;
+
+#ifdef CONFIG_NO_HZ
+
+extern void rcu_enter_nohz(void);
+extern void rcu_exit_nohz(void);
+
+#else /* #ifdef CONFIG_NO_HZ */
+
+static inline void rcu_enter_nohz(void)
+{
+}
+
+static inline void rcu_exit_nohz(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_NO_HZ */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
-#elif defined(CONFIG_TINY_RCU)
+#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif
-#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
-#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
-#define INIT_RCU_HEAD(ptr) do { \
- (ptr)->next = NULL; (ptr)->func = NULL; \
-} while (0)
-
/*
* init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
* initialization and destruction of rcu_head on the stack. rcu_head structures
@@ -120,14 +189,15 @@ extern struct lockdep_map rcu_sched_lock_map;
extern int debug_lockdep_rcu_enabled(void);
/**
- * rcu_read_lock_held - might we be in RCU read-side critical section?
+ * rcu_read_lock_held() - might we be in RCU read-side critical section?
*
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
* read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
* this assumes we are in an RCU read-side critical section unless it can
- * prove otherwise.
+ * prove otherwise. This is useful for debug checks in functions that
+ * require that they be called within an RCU read-side critical section.
*
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled.
*/
static inline int rcu_read_lock_held(void)
@@ -144,14 +214,16 @@ static inline int rcu_read_lock_held(void)
extern int rcu_read_lock_bh_held(void);
/**
- * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
*
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
* RCU-sched read-side critical section. In absence of
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
* critical section unless it can prove otherwise. Note that disabling
* of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section.
+ * read-side critical section. This is useful for debug checks in functions
+ * that require that they be called within an RCU-sched read-side
+ * critical section.
*
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot
* and while lockdep is disabled.
@@ -211,7 +283,11 @@ static inline int rcu_read_lock_sched_held(void)
extern int rcu_my_thread_group_empty(void);
-#define __do_rcu_dereference_check(c) \
+/**
+ * rcu_lockdep_assert - emit lockdep splat if specified condition not met
+ * @c: condition to check
+ */
+#define rcu_lockdep_assert(c) \
do { \
static bool __warned; \
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
@@ -220,41 +296,155 @@ extern int rcu_my_thread_group_empty(void);
} \
} while (0)
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_lockdep_assert(c) do { } while (0)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
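rcu_lockdep_assert() can also be open-coded in functions with unusual protection requirements, for example (reusing the hypothetical struct mydev from the vhost sketch earlier):

	static struct cfg *mydev_get_cfg(struct mydev *d)
	{
		rcu_lockdep_assert(rcu_read_lock_held() ||
				   lockdep_is_held(&d->mutex));
		return rcu_dereference_raw(d->cfg);
	}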
+/*
+ * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
+ * and rcu_assign_pointer(). Some of these could be folded into their
+ * callers, but they are left separate in order to ease introduction of
+ * multiple flavors of pointers to match the multiple flavors of RCU
+ * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
+ * the future.
+ */
+#define __rcu_access_pointer(p, space) \
+ ({ \
+ typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
+ (void) (((typeof (*p) space *)p) == p); \
+ ((typeof(*p) __force __kernel *)(_________p1)); \
+ })
+#define __rcu_dereference_check(p, c, space) \
+ ({ \
+ typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
+ rcu_lockdep_assert(c); \
+ (void) (((typeof (*p) space *)p) == p); \
+ smp_read_barrier_depends(); \
+ ((typeof(*p) __force __kernel *)(_________p1)); \
+ })
+#define __rcu_dereference_protected(p, c, space) \
+ ({ \
+ rcu_lockdep_assert(c); \
+ (void) (((typeof (*p) space *)p) == p); \
+ ((typeof(*p) __force __kernel *)(p)); \
+ })
+
+#define __rcu_dereference_index_check(p, c) \
+ ({ \
+ typeof(p) _________p1 = ACCESS_ONCE(p); \
+ rcu_lockdep_assert(c); \
+ smp_read_barrier_depends(); \
+ (_________p1); \
+ })
+#define __rcu_assign_pointer(p, v, space) \
+ ({ \
+ if (!__builtin_constant_p(v) || \
+ ((v) != NULL)) \
+ smp_wmb(); \
+ (p) = (typeof(*v) __force space *)(v); \
+ })
+
+
/**
- * rcu_dereference_check - rcu_dereference with debug checking
+ * rcu_access_pointer() - fetch RCU pointer with no dereferencing
+ * @p: The pointer to read
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL. Although rcu_access_pointer() may also be used in cases where
+ * update-side locks prevent the value of the pointer from changing, you
+ * should instead use rcu_dereference_protected() for this use case.
+ */
+#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
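For example, a NULL test needs neither rcu_read_lock() nor the dependency barrier, because the pointer's value is never dereferenced (again using the hypothetical struct mydev):

	static bool mydev_cfg_present(struct mydev *d)
	{
		return rcu_access_pointer(d->cfg) != NULL;
	}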
+
+/**
+ * rcu_dereference_check() - rcu_dereference with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Do an rcu_dereference(), but check that the conditions under which the
- * dereference will take place are correct. Typically the conditions indicate
- * the various locking conditions that should be held at that point. The check
- * should return true if the conditions are satisfied.
+ * dereference will take place are correct. Typically the conditions
+ * indicate the various locking conditions that should be held at that
+ * point. The check should return true if the conditions are satisfied.
+ * An implicit check for being in an RCU read-side critical section
+ * (rcu_read_lock()) is included.
*
* For example:
*
- * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
- * lockdep_is_held(&foo->lock));
+ * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
*
* could be used to indicate to lockdep that foo->bar may only be dereferenced
- * if either the RCU read lock is held, or that the lock required to replace
+ * if either rcu_read_lock() is held, or that the lock required to replace
* the bar struct at foo->bar is held.
*
* Note that the list of conditions may also include indications of when a lock
* need not be held, for example during initialisation or destruction of the
* target struct:
*
- * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
- * lockdep_is_held(&foo->lock) ||
+ * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
* atomic_read(&foo->usage) == 0);
+ *
+ * Inserts memory barriers on architectures that require them
+ * (currently only the Alpha), prevents the compiler from refetching
+ * (and from merging fetches), and, more importantly, documents exactly
+ * which pointers are protected by RCU and checks that the pointer is
+ * annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
- ({ \
- __do_rcu_dereference_check(c); \
- rcu_dereference_raw(p); \
- })
+ __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
/**
- * rcu_dereference_protected - fetch RCU pointer when updates prevented
+ * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-bh counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_bh_check(p, c) \
+ __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
+
+/**
+ * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_sched_check(p, c) \
+ __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
+ __rcu)
+
+#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
+
+/**
+ * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Similar to rcu_dereference_check(), but omits the sparse checking.
+ * This allows rcu_dereference_index_check() to be used on integers,
+ * which can then be used as array indices. Attempting to use
+ * rcu_dereference_check() on an integer will give compiler warnings
+ * because the sparse address-space mechanism relies on dereferencing
+ * the RCU-protected pointer. Dereferencing integers is not something
+ * that even gcc will put up with.
+ *
+ * Note that this function does not implicitly check for RCU read-side
+ * critical sections. If this function gains lots of uses, it might
+ * make sense to provide versions for each flavor of RCU, but it does
+ * not make sense as of early 2010.
+ */
+#define rcu_dereference_index_check(p, c) \
+ __rcu_dereference_index_check((p), (c))
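The vhost_has_feature() change earlier in this series is one in-tree user; another plausible shape is an RCU-published hash-table size (hypothetical structure, hash_long() from <linux/hash.h>):

	struct ht {
		int hash_bits;	/* published with update-side ordering */
	};

	static unsigned long ht_bucket(struct ht *t, unsigned long key)
	{
		unsigned long bucket;

		rcu_read_lock();
		bucket = hash_long(key,
				   rcu_dereference_index_check(t->hash_bits,
						rcu_read_lock_held()));
		rcu_read_unlock();
		return bucket;
	}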
+
+/**
+ * rcu_dereference_protected() - fetch RCU pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
* both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
@@ -263,35 +453,61 @@ extern int rcu_my_thread_group_empty(void);
* prevent the compiler from repeating this reference or combining it
* with other references, so it should not be used without protection
* of appropriate locks.
+ *
+ * This function is only for update-side use. Using this function
+ * when protected only by rcu_read_lock() will result in infrequent
+ * but very ugly failures.
*/
#define rcu_dereference_protected(p, c) \
- ({ \
- __do_rcu_dereference_check(c); \
- (p); \
- })
+ __rcu_dereference_protected((p), (c), __rcu)
-#else /* #ifdef CONFIG_PROVE_RCU */
+/**
+ * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-bh counterpart to rcu_dereference_protected().
+ */
+#define rcu_dereference_bh_protected(p, c) \
+ __rcu_dereference_protected((p), (c), __rcu)
-#define rcu_dereference_check(p, c) rcu_dereference_raw(p)
-#define rcu_dereference_protected(p, c) (p)
+/**
+ * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_protected().
+ */
+#define rcu_dereference_sched_protected(p, c) \
+ __rcu_dereference_protected((p), (c), __rcu)
-#endif /* #else #ifdef CONFIG_PROVE_RCU */
/**
- * rcu_access_pointer - fetch RCU pointer with no dereferencing
+ * rcu_dereference() - fetch RCU-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
*
- * Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
- * when the value of this pointer is accessed, but the pointer is not
- * dereferenced, for example, when testing an RCU-protected pointer against
- * NULL. This may also be used in cases where update-side locks prevent
- * the value of the pointer from changing, but rcu_dereference_protected()
- * is a lighter-weight primitive for this use case.
+ * This is a simple wrapper around rcu_dereference_check().
+ */
+#define rcu_dereference(p) rcu_dereference_check(p, 0)
+
+/**
+ * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
+
+/**
+ * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
*/
-#define rcu_access_pointer(p) ACCESS_ONCE(p)
+#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
/**
- * rcu_read_lock - mark the beginning of an RCU read-side critical section.
+ * rcu_read_lock() - mark the beginning of an RCU read-side critical section
*
* When synchronize_rcu() is invoked on one CPU while other CPUs
* are within RCU read-side critical sections, then the
@@ -302,7 +518,7 @@ extern int rcu_my_thread_group_empty(void);
* until after all the other CPUs exit their critical sections.
*
* Note, however, that RCU callbacks are permitted to run concurrently
- * with RCU read-side critical sections. One way that this can happen
+ * with new RCU read-side critical sections. One way that this can happen
* is via the following sequence of events: (1) CPU 0 enters an RCU
* read-side critical section, (2) CPU 1 invokes call_rcu() to register
* an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
@@ -317,7 +533,20 @@ extern int rcu_my_thread_group_empty(void);
* will be deferred until the outermost RCU read-side critical section
* completes.
*
- * It is illegal to block while in an RCU read-side critical section.
+ * You can avoid reading and understanding the next paragraph by
+ * following this rule: don't put anything in an rcu_read_lock() RCU
+ * read-side critical section that would block in a !PREEMPT kernel.
+ * But if you want the full story, read on!
+ *
+ * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
+ * is illegal to block while in an RCU read-side critical section. In
+ * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
+ * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
+ * be preempted, but explicit blocking is illegal. Finally, in preemptible
+ * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
+ * RCU read-side critical sections may be preempted and they may also
+ * block, but only when acquiring spinlocks that are subject to priority
+ * inheritance.
*/
static inline void rcu_read_lock(void)
{
@@ -337,7 +566,7 @@ static inline void rcu_read_lock(void)
*/
/**
- * rcu_read_unlock - marks the end of an RCU read-side critical section.
+ * rcu_read_unlock() - mark the end of an RCU read-side critical section
*
* See rcu_read_lock() for more information.
*/
@@ -349,15 +578,16 @@ static inline void rcu_read_unlock(void)
}
/**
- * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
+ * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
* This is equivalent to rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
- * consider completion of a softirq handler to be a quiescent state,
- * a process in RCU read-side critical section must be protected by
- * disabling softirqs. Read-side critical sections in interrupt context
- * can use just rcu_read_lock().
- *
+ * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
+ * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
+ * softirq handler to be a quiescent state, a process in an RCU read-side
+ * critical section must be protected by disabling softirqs. Read-side
+ * critical sections in interrupt context can use just rcu_read_lock(),
+ * though this should at least be commented to avoid confusing people
+ * reading the code.
*/
static inline void rcu_read_lock_bh(void)
{
@@ -379,13 +609,12 @@ static inline void rcu_read_unlock_bh(void)
}
/**
- * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
+ * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
*
- * Should be used with either
- * - synchronize_sched()
- * or
- * - call_rcu_sched() and rcu_barrier_sched()
- * on the write-side to insure proper synchronization.
+ * This is equivalent to rcu_read_lock(), but to be used when updates
+ * are being done using call_rcu_sched() or synchronize_rcu_sched().
+ * Read-side critical sections can also be introduced by anything that
+ * disables preemption, including local_irq_disable() and friends.
*/
static inline void rcu_read_lock_sched(void)
{
@@ -420,54 +649,14 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
preempt_enable_notrace();
}
-
-/**
- * rcu_dereference_raw - fetch an RCU-protected pointer
- *
- * The caller must be within some flavor of RCU read-side critical
- * section, or must be otherwise preventing the pointer from changing,
- * for example, by holding an appropriate lock. This pointer may later
- * be safely dereferenced. It is the caller's responsibility to have
- * done the right thing, as this primitive does no checking of any kind.
- *
- * Inserts memory barriers on architectures that require them
- * (currently only the Alpha), and, more importantly, documents
- * exactly which pointers are protected by RCU.
- */
-#define rcu_dereference_raw(p) ({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- smp_read_barrier_depends(); \
- (_________p1); \
- })
-
-/**
- * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference(p) \
- rcu_dereference_check(p, rcu_read_lock_held())
-
/**
- * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
+ * rcu_assign_pointer() - assign to RCU-protected pointer
+ * @p: pointer to assign to
+ * @v: value to assign (publish)
*
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference_bh(p) \
- rcu_dereference_check(p, rcu_read_lock_bh_held())
-
-/**
- * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference_sched(p) \
- rcu_dereference_check(p, rcu_read_lock_sched_held())
-
-/**
- * rcu_assign_pointer - assign (publicize) a pointer to a newly
- * initialized structure that will be dereferenced by RCU read-side
- * critical sections. Returns the value assigned.
+ * Assigns the specified value to the specified RCU-protected
+ * pointer, ensuring that any concurrent RCU readers will see
+ * any prior initialization. Returns the value assigned.
*
* Inserts memory barriers on architectures that require them
* (pretty much all of them other than x86), and also prevents
@@ -476,14 +665,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* call documents which pointers will be dereferenced by RCU read-side
* code.
*/
-
#define rcu_assign_pointer(p, v) \
- ({ \
- if (!__builtin_constant_p(v) || \
- ((v) != NULL)) \
- smp_wmb(); \
- (p) = (v); \
- })
+ __rcu_assign_pointer((p), (v), __rcu)
+
+/**
+ * RCU_INIT_POINTER() - initialize an RCU-protected pointer
+ * @p: The pointer to be initialized
+ * @v: The value to initialize the pointer to
+ *
+ * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep
+ * splats.
+ */
+#define RCU_INIT_POINTER(p, v) \
+ p = (typeof(*v) __force __rcu *)(v)
/* Infrastructure to implement the synchronize_() primitives. */
@@ -494,26 +686,37 @@ struct rcu_synchronize {
extern void wakeme_after_rcu(struct rcu_head *head);
+#ifdef CONFIG_PREEMPT_RCU
+
/**
- * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
+ * @func: actual callback function to be invoked after the grace period
*
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed. However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked. RCU read-side critical
* sections are delimited by rcu_read_lock() and rcu_read_unlock(),
* and may be nested.
*/
extern void call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *head));
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/* In classic RCU, call_rcu() is just call_rcu_sched(). */
+#define call_rcu call_rcu_sched
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
/**
- * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
+ * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
+ * @func: actual callback function to be invoked after the grace period
*
- * The update function will be invoked some time after a full grace
+ * The callback function will be invoked some time after a full grace
* period elapses, in other words after all currently executing RCU
* read-side critical sections have completed. call_rcu_bh() assumes
* that the read-side critical sections end on completion of a softirq
@@ -566,37 +769,4 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-#ifndef CONFIG_PROVE_RCU
-#define __do_rcu_dereference_check(c) do { } while (0)
-#endif /* #ifdef CONFIG_PROVE_RCU */
-
-#define __rcu_dereference_index_check(p, c) \
- ({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- __do_rcu_dereference_check(c); \
- smp_read_barrier_depends(); \
- (_________p1); \
- })
-
-/**
- * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * Similar to rcu_dereference_check(), but omits the sparse checking.
- * This allows rcu_dereference_index_check() to be used on integers,
- * which can then be used as array indices. Attempting to use
- * rcu_dereference_check() on an integer will give compiler warnings
- * because the sparse address-space mechanism relies on dereferencing
- * the RCU-protected pointer. Dereferencing integers is not something
- * that even gcc will put up with.
- *
- * Note that this function does not implicitly check for RCU read-side
- * critical sections. If this function gains lots of uses, it might
- * make sense to provide versions for each flavor of RCU, but it does
- * not make sense as of early 2010.
- */
-#define rcu_dereference_index_check(p, c) \
- __rcu_dereference_index_check((p), (c))
-
#endif /* __LINUX_RCUPDATE_H */
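
For reference, the renamed primitives fit together in the classic update
pattern as follows. This is a minimal sketch along the lines of
Documentation/RCU/whatisRCU.txt; struct foo, foo_lock, and the helper
functions are illustrative only, not part of this patch. The reader uses
rcu_dereference(), while the updater publishes with rcu_assign_pointer()
under foo_lock and may therefore use rcu_dereference_protected() with a
lockdep expression:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct foo {
		int a;
		struct rcu_head rcu;
	};

	static struct foo __rcu *global_foo;
	static DEFINE_SPINLOCK(foo_lock);

	static void foo_reclaim(struct rcu_head *rp)
	{
		kfree(container_of(rp, struct foo, rcu));
	}

	/* Reader: must be called within rcu_read_lock(). */
	static int foo_get_a(void)
	{
		int ret;

		rcu_read_lock();
		ret = rcu_dereference(global_foo)->a; /* assumes non-NULL */
		rcu_read_unlock();
		return ret;
	}

	/* Updater: foo_lock excludes other updaters. */
	static void foo_update_a(int new_a)
	{
		struct foo *new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
		struct foo *old_fp;

		if (new_fp == NULL)
			return;	/* error handling elided */
		spin_lock(&foo_lock);
		old_fp = rcu_dereference_protected(global_foo,
						   lockdep_is_held(&foo_lock));
		*new_fp = *old_fp;
		new_fp->a = new_a;
		rcu_assign_pointer(global_foo, new_fp);
		spin_unlock(&foo_lock);
		call_rcu(&old_fp->rcu, foo_reclaim);
	}
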
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e2e8931..13877cb 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,103 +27,101 @@
#include <linux/cache.h>
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
-static inline void rcu_note_context_switch(int cpu)
-{
- rcu_sched_qs(cpu);
-}
+#define rcu_init_sched() do { } while (0)
-#define __rcu_read_lock() preempt_disable()
-#define __rcu_read_unlock() preempt_enable()
-#define __rcu_read_lock_bh() local_bh_disable()
-#define __rcu_read_unlock_bh() local_bh_enable()
-#define call_rcu_sched call_rcu
+#ifdef CONFIG_TINY_RCU
-#define rcu_init_sched() do { } while (0)
-extern void rcu_check_callbacks(int cpu, int user);
+static inline void synchronize_rcu_expedited(void)
+{
+ synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
+}
-static inline int rcu_needs_cpu(int cpu)
+static inline void rcu_barrier(void)
{
- return 0;
+ rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
}
-/*
- * Return the number of grace periods.
- */
-static inline long rcu_batches_completed(void)
+#else /* #ifdef CONFIG_TINY_RCU */
+
+void rcu_barrier(void);
+void synchronize_rcu_expedited(void);
+
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+static inline void synchronize_rcu_bh(void)
{
- return 0;
+ synchronize_sched();
}
-/*
- * Return the number of bottom-half grace periods.
- */
-static inline long rcu_batches_completed_bh(void)
+static inline void synchronize_rcu_bh_expedited(void)
{
- return 0;
+ synchronize_sched();
}
-static inline void rcu_force_quiescent_state(void)
+#ifdef CONFIG_TINY_RCU
+
+static inline void rcu_preempt_note_context_switch(void)
{
}
-static inline void rcu_bh_force_quiescent_state(void)
+static inline void exit_rcu(void)
{
}
-static inline void rcu_sched_force_quiescent_state(void)
+static inline int rcu_needs_cpu(int cpu)
{
+ return 0;
}
-extern void synchronize_sched(void);
+#else /* #ifdef CONFIG_TINY_RCU */
+
+void rcu_preempt_note_context_switch(void);
+extern void exit_rcu(void);
+int rcu_preempt_needs_cpu(void);
-static inline void synchronize_rcu(void)
+static inline int rcu_needs_cpu(int cpu)
{
- synchronize_sched();
+ return rcu_preempt_needs_cpu();
}
-static inline void synchronize_rcu_bh(void)
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+static inline void rcu_note_context_switch(int cpu)
{
- synchronize_sched();
+ rcu_sched_qs(cpu);
+ rcu_preempt_note_context_switch();
}
-static inline void synchronize_rcu_expedited(void)
+/*
+ * Return the number of grace periods.
+ */
+static inline long rcu_batches_completed(void)
{
- synchronize_sched();
+ return 0;
}
-static inline void synchronize_rcu_bh_expedited(void)
+/*
+ * Return the number of bottom-half grace periods.
+ */
+static inline long rcu_batches_completed_bh(void)
{
- synchronize_sched();
+ return 0;
}
-struct notifier_block;
-
-#ifdef CONFIG_NO_HZ
-
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-#else /* #ifdef CONFIG_NO_HZ */
-
-static inline void rcu_enter_nohz(void)
+static inline void rcu_force_quiescent_state(void)
{
}
-static inline void rcu_exit_nohz(void)
+static inline void rcu_bh_force_quiescent_state(void)
{
}
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
-static inline void exit_rcu(void)
+static inline void rcu_sched_force_quiescent_state(void)
{
}
-static inline int rcu_preempt_depth(void)
+static inline void rcu_cpu_stall_reset(void)
{
- return 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
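
One practical note: even on TINY_RCU, where rcu_barrier() reduces to
rcu_barrier_sched() on the lone CPU, module-unload paths still need the
barrier before tearing down anything that queued callbacks might
reference. A minimal sketch, in which example_cache and
example_del_all_entries() are hypothetical:

	#include <linux/module.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	extern struct kmem_cache *example_cache;   /* hypothetical */
	extern void example_del_all_entries(void); /* hypothetical; uses call_rcu() */

	static void __exit example_exit(void)
	{
		example_del_all_entries(); /* queues a callback per entry */
		rcu_barrier();		   /* wait for those callbacks to run */
		kmem_cache_destroy(example_cache);
	}
	module_exit(example_exit);
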
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index c0ed1c0..95518e6 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,64 +30,23 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-struct notifier_block;
-
-extern void rcu_sched_qs(int cpu);
-extern void rcu_bh_qs(int cpu);
extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu);
+extern void rcu_cpu_stall_reset(void);
#ifdef CONFIG_TREE_PREEMPT_RCU
-extern void __rcu_read_lock(void);
-extern void __rcu_read_unlock(void);
-extern void synchronize_rcu(void);
extern void exit_rcu(void);
-/*
- * Defined as macro as it is a very low level header
- * included from areas that don't even know about current
- */
-#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
-
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-static inline void __rcu_read_lock(void)
-{
- preempt_disable();
-}
-
-static inline void __rcu_read_unlock(void)
-{
- preempt_enable();
-}
-
-#define synchronize_rcu synchronize_sched
-
static inline void exit_rcu(void)
{
}
-static inline int rcu_preempt_depth(void)
-{
- return 0;
-}
-
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-static inline void __rcu_read_lock_bh(void)
-{
- local_bh_disable();
-}
-static inline void __rcu_read_unlock_bh(void)
-{
- local_bh_enable();
-}
-
-extern void call_rcu_sched(struct rcu_head *head,
- void (*func)(struct rcu_head *rcu));
extern void synchronize_rcu_bh(void);
-extern void synchronize_sched(void);
extern void synchronize_rcu_expedited(void);
static inline void synchronize_rcu_bh_expedited(void)
@@ -95,7 +54,7 @@ static inline void synchronize_rcu_bh_expedited(void)
synchronize_sched_expedited();
}
-extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_barrier(void);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
@@ -104,18 +63,6 @@ extern void rcu_force_quiescent_state(void);
extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
-#ifdef CONFIG_NO_HZ
-void rcu_enter_nohz(void);
-void rcu_exit_nohz(void);
-#else /* CONFIG_NO_HZ */
-static inline void rcu_enter_nohz(void)
-{
-}
-static inline void rcu_exit_nohz(void)
-{
-}
-#endif /* CONFIG_NO_HZ */
-
/* A context switch is a grace period for RCU-sched and RCU-bh. */
static inline int rcu_blocking_is_gp(void)
{
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1e2a6db..e18473f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1202,11 +1202,13 @@ struct task_struct {
unsigned int policy;
cpumask_t cpus_allowed;
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
char rcu_read_unlock_special;
- struct rcu_node *rcu_blocked_node;
struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+ struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -1288,9 +1290,9 @@ struct task_struct {
struct list_head cpu_timers[3];
/* process credentials */
- const struct cred *real_cred; /* objective and real subjective task
+ const struct cred __rcu *real_cred; /* objective and real subjective task
* credentials (COW) */
- const struct cred *cred; /* effective (overridable) subjective task
+ const struct cred __rcu *cred; /* effective (overridable) subjective task
* credentials (COW) */
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
@@ -1418,7 +1420,7 @@ struct task_struct {
#endif
#ifdef CONFIG_CGROUPS
/* Control Group info protected by css_set_lock */
- struct css_set *cgroups;
+ struct css_set __rcu *cgroups;
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
@@ -1740,7 +1742,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-#ifdef CONFIG_TREE_PREEMPT_RCU
+#ifdef CONFIG_PREEMPT_RCU
#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
@@ -1749,7 +1751,9 @@ static inline void rcu_copy_process(struct task_struct *p)
{
p->rcu_read_lock_nesting = 0;
p->rcu_read_unlock_special = 0;
+#ifdef CONFIG_TREE_PREEMPT_RCU
p->rcu_blocked_node = NULL;
+#endif
INIT_LIST_HEAD(&p->rcu_node_entry);
}
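
The __rcu annotations added here and in the following files are consumed
by sparse (with the new CONFIG_SPARSE_RCU_POINTER checking, invoked by
something like "make C=2"); an annotated pointer may be touched only
through the rcu_*() accessors. A sketch of what passes and what does
not, using an illustrative struct conf:

	#include <linux/rcupdate.h>

	struct conf {
		int threshold;
	};

	static struct conf __rcu *cur_conf;

	static int conf_get_threshold(void)
	{
		int t;

		rcu_read_lock();
		t = rcu_dereference(cur_conf)->threshold; /* clean under sparse */
		rcu_read_unlock();
		return t;
	}

	/*
	 * By contrast, "t = cur_conf->threshold;" would draw an
	 * address-space warning from sparse, because it bypasses the
	 * rcu_*() accessors.
	 */
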
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9f63538..6d14409 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -68,7 +68,7 @@ struct kmem_cache_order_objects {
* Slab cache management.
*/
struct kmem_cache {
- struct kmem_cache_cpu __percpu *cpu_slab;
+ struct kmem_cache_cpu *cpu_slab;
/* Used for retrieving partial slabs etc */
unsigned long flags;
int size; /* The size of an object including meta data */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4d5d2f5..58971e8 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -108,19 +108,43 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/**
- * srcu_dereference - fetch SRCU-protected pointer with checking
+ * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @sp: pointer to the srcu_struct, which is used to check that we
+ * really are in an SRCU read-side critical section.
+ * @c: condition to check for update-side use
*
- * Makes rcu_dereference_check() do the dirty work.
+ * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
+ * critical section will result in an RCU-lockdep splat, unless @c evaluates
+ * to 1. The @c argument will normally be a logical expression containing
+ * lockdep_is_held() calls.
*/
-#define srcu_dereference(p, sp) \
- rcu_dereference_check(p, srcu_read_lock_held(sp))
+#define srcu_dereference_check(p, sp, c) \
+ __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu)
+
+/**
+ * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @sp: pointer to the srcu_struct, which is used to check that we
+ * really are in an SRCU read-side critical section.
+ *
+ * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
+ * is enabled, invoking this outside of an RCU read-side critical
+ * section will result in an RCU-lockdep splat.
+ */
+#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
/**
* srcu_read_lock - register a new reader for an SRCU-protected structure.
* @sp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section. Note that SRCU read-side
- * critical sections may be nested.
+ * critical sections may be nested. However, it is illegal to
+ * call anything that waits on an SRCU grace period for the same
+ * srcu_struct, whether directly or indirectly. Please note that
+ * one way to indirectly wait on an SRCU grace period is to acquire
+ * a mutex that is held elsewhere while calling synchronize_srcu() or
+ * synchronize_srcu_expedited().
*/
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index 671538d..8eee9db 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -69,7 +69,7 @@ struct gss_cl_ctx {
enum rpc_gss_proc gc_proc;
u32 gc_seq;
spinlock_t gc_seq_lock;
- struct gss_ctx *gc_gss_ctx;
+ struct gss_ctx __rcu *gc_gss_ctx;
struct xdr_netobj gc_wire_ctx;
u32 gc_win;
unsigned long gc_expiry;
@@ -80,7 +80,7 @@ struct gss_upcall_msg;
struct gss_cred {
struct rpc_cred gc_base;
enum rpc_gss_svc gc_service;
- struct gss_cl_ctx *gc_ctx;
+ struct gss_cl_ctx __rcu *gc_ctx;
struct gss_upcall_msg *gc_upcall;
unsigned long gc_upcall_timestamp;
unsigned char gc_machine_cred : 1;
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 726cc35..dd1fdb8 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -45,7 +45,8 @@ static inline u32 task_cls_classid(struct task_struct *p)
return 0;
rcu_read_lock();
- id = rcu_dereference(net_cls_subsys_id);
+ id = rcu_dereference_index_check(net_cls_subsys_id,
+ rcu_read_lock_held());
if (id >= 0)
classid = container_of(task_subsys_state(p, id),
struct cgroup_cls_state, css)->classid;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index e624dae..caf17db 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -75,7 +75,7 @@ struct nf_conntrack_helper;
/* nf_conn feature for connections that have a helper */
struct nf_conn_help {
/* Helper. if any */
- struct nf_conntrack_helper *helper;
+ struct nf_conntrack_helper __rcu *helper;
union nf_conntrack_help help;
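
A field that is initialized before its enclosing object becomes visible
to readers does not need rcu_assign_pointer()'s memory barrier; the
RCU_INIT_POINTER() macro introduced above covers exactly this case
without provoking sparse or RCU-lockdep. A sketch with hypothetical
names, loosely patterned on the nf_conn_help change:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct helper;			/* hypothetical helper type */

	struct conn_help {		/* hypothetical */
		struct helper __rcu *helper;
	};

	static struct conn_help *conn_help_alloc(void)
	{
		struct conn_help *h = kmalloc(sizeof(*h), GFP_KERNEL);

		/* Object not yet published, so no memory barrier needed. */
		if (h != NULL)
			RCU_INIT_POINTER(h->helper, NULL);
		return h;
	}
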
diff --git a/init/Kconfig b/init/Kconfig
index 2de5b1c..a619a1a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -340,6 +340,7 @@ choice
config TREE_RCU
bool "Tree-based hierarchical RCU"
+ depends on !PREEMPT && SMP
help
This option selects the RCU implementation that is
designed for very large SMP systems with hundreds or
@@ -347,7 +348,7 @@ config TREE_RCU
smaller systems.
config TREE_PREEMPT_RCU
- bool "Preemptable tree-based hierarchical RCU"
+ bool "Preemptible tree-based hierarchical RCU"
depends on PREEMPT
help
This option selects the RCU implementation that is
@@ -365,8 +366,22 @@ config TINY_RCU
is not required. This option greatly reduces the
memory footprint of RCU.
+config TINY_PREEMPT_RCU
+ bool "Preemptible UP-only small-memory-footprint RCU"
+ depends on !SMP && PREEMPT
+ help
+ This option selects the RCU implementation that is designed
+ for real-time UP systems. This option greatly reduces the
+ memory footprint of RCU.
+
endchoice
+config PREEMPT_RCU
+ def_bool ( TREE_PREEMPT_RCU || TINY_PREEMPT_RCU )
+ help
+ This option enables preemptible-RCU code that is common between
+ the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
+
config RCU_TRACE
bool "Enable tracing for RCU"
depends on TREE_RCU || TREE_PREEMPT_RCU
@@ -387,9 +402,12 @@ config RCU_FANOUT
help
This option controls the fanout of hierarchical implementations
of RCU, allowing RCU to work efficiently on machines with
- large numbers of CPUs. This value must be at least the cube
- root of NR_CPUS, which allows NR_CPUS up to 32,768 for 32-bit
- systems and up to 262,144 for 64-bit systems.
+ large numbers of CPUs. This value must be at least the fourth
+ root of NR_CPUS, which allows NR_CPUS to be insanely large.
+ The default value of RCU_FANOUT should be used for production
+ systems, but if you are stress-testing the RCU implementation
+ itself, small RCU_FANOUT values allow you to test large-system
+ code paths on small(er) systems.
Select a specific number if testing RCU itself.
Take the default if unsure.
diff --git a/kernel/Makefile b/kernel/Makefile
index 0b72d1a..17046b6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_TREE_RCU) += rcutree.o
obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
obj-$(CONFIG_TINY_RCU) += rcutiny.o
+obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o
obj-$(CONFIG_RELAY) += relay.o
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 192f88c..e5c5497 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -138,7 +138,7 @@ struct css_id {
* is called after synchronize_rcu(). But for safe use, css_is_removed()
* or css_tryget() should be used to avoid races.
*/
- struct cgroup_subsys_state *css;
+ struct cgroup_subsys_state __rcu *css;
/*
* ID of this css.
*/
diff --git a/kernel/pid.c b/kernel/pid.c
index d55c6fb..39b65b6 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -401,7 +401,7 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
struct task_struct *result = NULL;
if (pid) {
struct hlist_node *first;
- first = rcu_dereference_check(pid->tasks[type].first,
+ first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
rcu_read_lock_held() ||
lockdep_tasklist_lock_is_held());
if (first)
@@ -416,6 +416,7 @@ EXPORT_SYMBOL(pid_task);
*/
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
+ rcu_lockdep_assert(rcu_read_lock_held());
return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}
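
The rcu_lockdep_assert() added to find_task_by_pid_ns() is a pattern
worth copying: a function whose callers must supply RCU protection can
make that contract checkable under PROVE_RCU. A sketch, with the item
table purely illustrative:

	#include <linux/rcupdate.h>

	#define ITEM_TABLE_SIZE 256	/* illustrative */

	struct item;

	static struct item __rcu *item_table[ITEM_TABLE_SIZE];

	/* Caller must hold rcu_read_lock(); PROVE_RCU complains otherwise. */
	static struct item *item_lookup(unsigned int key)
	{
		rcu_lockdep_assert(rcu_read_lock_held());
		return rcu_dereference(item_table[key % ITEM_TABLE_SIZE]);
	}
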
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 4d16983..6c79e85 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -73,12 +73,14 @@ int debug_lockdep_rcu_enabled(void)
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
/**
- * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
* Check for bottom half being disabled, which covers both the
* CONFIG_PROVE_RCU and not cases. Note that if someone uses
* rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
- * will show the situation.
+ * will show the situation. This is useful for debug checks in functions
+ * that must be called within an RCU read-side critical section.
*
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
*/
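
As the expanded comment suggests, rcu_read_lock_bh_held() lends itself
to debug checks. For instance, a helper that must always run under
rcu_read_lock_bh() (or with BH otherwise disabled) could be guarded as
follows; struct pcpu_stats and cur_stats are illustrative:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>

	struct pcpu_stats {
		unsigned long events;
	};

	static struct pcpu_stats __rcu *cur_stats;

	static void stats_bump(void)
	{
		WARN_ON_ONCE(!rcu_read_lock_bh_held());
		rcu_dereference_bh(cur_stats)->events++;
	}
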
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 196ec02..d806735 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -59,6 +59,14 @@ int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/* Forward declarations for rcutiny_plugin.h. */
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void __call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu),
+ struct rcu_ctrlblk *rcp);
+
+#include "rcutiny_plugin.h"
+
#ifdef CONFIG_NO_HZ
static long rcu_dynticks_nesting = 1;
@@ -140,6 +148,7 @@ void rcu_check_callbacks(int cpu, int user)
rcu_sched_qs(cpu);
else if (!in_softirq())
rcu_bh_qs(cpu);
+ rcu_preempt_check_callbacks();
}
/*
@@ -162,6 +171,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
*rcp->donetail = NULL;
if (rcp->curtail == rcp->donetail)
rcp->curtail = &rcp->rcucblist;
+ rcu_preempt_remove_callbacks(rcp);
rcp->donetail = &rcp->rcucblist;
local_irq_restore(flags);
@@ -182,6 +192,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
{
__rcu_process_callbacks(&rcu_sched_ctrlblk);
__rcu_process_callbacks(&rcu_bh_ctrlblk);
+ rcu_preempt_process_callbacks();
}
/*
@@ -223,15 +234,15 @@ static void __call_rcu(struct rcu_head *head,
}
/*
- * Post an RCU callback to be invoked after the end of an RCU grace
+ * Post an RCU callback to be invoked after the end of an RCU-sched grace
* period. But since we have but one CPU, that would be after any
* quiescent state.
*/
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
__call_rcu(head, func, &rcu_sched_ctrlblk);
}
-EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_sched);
/*
* Post an RCU bottom-half callback to be invoked after any subsequent
@@ -243,20 +254,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
-void rcu_barrier(void)
-{
- struct rcu_synchronize rcu;
-
- init_rcu_head_on_stack(&rcu.head);
- init_completion(&rcu.completion);
- /* Will wake me after RCU finished. */
- call_rcu(&rcu.head, wakeme_after_rcu);
- /* Wait for it. */
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
void rcu_barrier_bh(void)
{
struct rcu_synchronize rcu;
@@ -289,5 +286,3 @@ void __init rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
-
-#include "rcutiny_plugin.h"
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index d223a92..6ceca4f 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -1,7 +1,7 @@
/*
- * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
* Internal non-public definitions that provide either classic
- * or preemptable semantics.
+ * or preemptible semantics.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,11 +17,587 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
- * Copyright IBM Corporation, 2009
+ * Copyright (c) 2010 Linaro
*
* Author: Paul E. McKenney
*/
+#ifdef CONFIG_TINY_PREEMPT_RCU
+
+#include
+
+/* Global control variables for preemptible RCU. */
+struct rcu_preempt_ctrlblk {
+ struct rcu_ctrlblk rcb; /* curtail: ->next ptr of last CB for GP. */
+ struct rcu_head **nexttail;
+ /* Tasks blocked in a preemptible RCU */
+ /* read-side critical section while an */
+ /* preemptible-RCU grace period is in */
+ /* progress must wait for a later grace */
+ /* period. This pointer points to the */
+ /* ->next pointer of the last task that */
+ /* must wait for a later grace period, or */
+ /* to &->rcb.rcucblist if there is no */
+ /* such task. */
+ struct list_head blkd_tasks;
+ /* Tasks blocked in RCU read-side critical */
+ /* section. Tasks are placed at the head */
+ /* of this list and age towards the tail. */
+ struct list_head *gp_tasks;
+ /* Pointer to the first task blocking the */
+ /* current grace period, or NULL if there */
+ /* is no such task. */
+ struct list_head *exp_tasks;
+ /* Pointer to first task blocking the */
+ /* current expedited grace period, or NULL */
+ /* if there is no such task. If there */
+ /* is no current expedited grace period, */
+ /* then there cannot be any such task. */
+ u8 gpnum; /* Current grace period. */
+ u8 gpcpu; /* Last grace period blocked by the CPU. */
+ u8 completed; /* Last grace period completed. */
+ /* If all three are equal, RCU is idle. */
+};
+
+static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
+ .rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+ .rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+ .nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+ .blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
+};
+
+static int rcu_preempted_readers_exp(void);
+static void rcu_report_exp_done(void);
+
+/*
+ * Return true if the CPU has not yet responded to the current grace period.
+ */
+static int rcu_cpu_blocking_cur_gp(void)
+{
+ return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
+}
+
+/*
+ * Check for a running RCU reader. Because there is only one CPU,
+ * there can be but one running RCU reader at a time. ;-)
+ */
+static int rcu_preempt_running_reader(void)
+{
+ return current->rcu_read_lock_nesting;
+}
+
+/*
+ * Check for preempted RCU readers blocking any grace period.
+ * If the caller needs a reliable answer, it must disable hard irqs.
+ */
+static int rcu_preempt_blocked_readers_any(void)
+{
+ return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
+}
+
+/*
+ * Check for preempted RCU readers blocking the current grace period.
+ * If the caller needs a reliable answer, it must disable hard irqs.
+ */
+static int rcu_preempt_blocked_readers_cgp(void)
+{
+ return rcu_preempt_ctrlblk.gp_tasks != NULL;
+}
+
+/*
+ * Return true if another preemptible-RCU grace period is needed.
+ */
+static int rcu_preempt_needs_another_gp(void)
+{
+ return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
+}
+
+/*
+ * Return true if a preemptible-RCU grace period is in progress.
+ * The caller must disable hardirqs.
+ */
+static int rcu_preempt_gp_in_progress(void)
+{
+ return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
+}
+
+/*
+ * Record a preemptible-RCU quiescent state for the specified CPU. Note
+ * that this just means that the task currently running on the CPU is
+ * in a quiescent state. There might be any number of tasks blocked
+ * while in an RCU read-side critical section.
+ *
+ * Unlike the other rcu_*_qs() functions, callers to this function
+ * must disable irqs in order to protect the assignment to
+ * ->rcu_read_unlock_special.
+ *
+ * Because this is a single-CPU implementation, the only way a grace
+ * period can end is if the CPU is in a quiescent state. The reason is
+ * that a blocked preemptible-RCU reader can exit its critical section
+ * only if the CPU is running it at the time. Therefore, when the
+ * last task blocking the current grace period exits its RCU read-side
+ * critical section, neither the CPU nor blocked tasks will be stopping
+ * the current grace period. (In contrast, SMP implementations
+ * might have CPUs running in RCU read-side critical sections that
+ * block later grace periods -- but this is not possible given only
+ * one CPU.)
+ */
+static void rcu_preempt_cpu_qs(void)
+{
+ /* Record both CPU and task as having responded to current GP. */
+ rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
+ current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+
+ /*
+ * If there is no GP, or if blocked readers are still blocking GP,
+ * then there is nothing more to do.
+ */
+ if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
+ return;
+
+ /* Advance callbacks. */
+ rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
+ rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
+ rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;
+
+ /* If there are no blocked readers, next GP is done instantly. */
+ if (!rcu_preempt_blocked_readers_any())
+ rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;
+
+ /* If there are done callbacks, make RCU_SOFTIRQ process them. */
+ if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Start a new RCU grace period if warranted. Hard irqs must be disabled.
+ */
+static void rcu_preempt_start_gp(void)
+{
+ if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {
+
+ /* Official start of GP. */
+ rcu_preempt_ctrlblk.gpnum++;
+
+ /* Any blocked RCU readers block new GP. */
+ if (rcu_preempt_blocked_readers_any())
+ rcu_preempt_ctrlblk.gp_tasks =
+ rcu_preempt_ctrlblk.blkd_tasks.next;
+
+ /* If there is no running reader, CPU is done with GP. */
+ if (!rcu_preempt_running_reader())
+ rcu_preempt_cpu_qs();
+ }
+}
+
+/*
+ * We have entered the scheduler, and the current task might soon be
+ * context-switched away from. If this task is in an RCU read-side
+ * critical section, we will no longer be able to rely on the CPU to
+ * record that fact, so we enqueue the task on the blkd_tasks list.
+ * If the task started after the current grace period began, as recorded
+ * by ->gpcpu, we enqueue at the beginning of the list. Otherwise
+ * before the element referenced by ->gp_tasks (or at the tail if
+ * ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
+ * The task will dequeue itself when it exits the outermost enclosing
+ * RCU read-side critical section. Therefore, the current grace period
+ * cannot be permitted to complete until the ->gp_tasks pointer becomes
+ * NULL.
+ *
+ * Caller must disable preemption.
+ */
+void rcu_preempt_note_context_switch(void)
+{
+ struct task_struct *t = current;
+ unsigned long flags;
+
+ local_irq_save(flags); /* must exclude scheduler_tick(). */
+ if (rcu_preempt_running_reader() &&
+ (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+
+ /* Possibly blocking in an RCU read-side critical section. */
+ t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
+
+ /*
+ * If this CPU has already checked in, then this task
+ * will hold up the next grace period rather than the
+ * current grace period. Queue the task accordingly.
+ * If the task is queued for the current grace period
+ * (i.e., this CPU has not yet passed through a quiescent
+ * state for the current grace period), then as long
+ * as that task remains queued, the current grace period
+ * cannot end.
+ */
+ list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
+ if (rcu_cpu_blocking_cur_gp())
+ rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
+ }
+
+ /*
+ * Either we were not in an RCU read-side critical section to
+ * begin with, or we have now recorded that critical section
+ * globally. Either way, we can now note a quiescent state
+ * for this CPU. Again, if we were in an RCU read-side critical
+ * section, and if that critical section was blocking the current
+ * grace period, then the fact that the task has been enqueued
+ * means that current grace period continues to be blocked.
+ */
+ rcu_preempt_cpu_qs();
+ local_irq_restore(flags);
+}
+
+/*
+ * Tiny-preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+ current->rcu_read_lock_nesting++;
+ barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+/*
+ * Handle special cases during rcu_read_unlock(), such as needing to
+ * notify RCU core processing or task having blocked during the RCU
+ * read-side critical section.
+ */
+static void rcu_read_unlock_special(struct task_struct *t)
+{
+ int empty;
+ int empty_exp;
+ unsigned long flags;
+ struct list_head *np;
+ int special;
+
+ /*
+ * NMI handlers cannot block and cannot safely manipulate state.
+ * They therefore cannot possibly be special, so just leave.
+ */
+ if (in_nmi())
+ return;
+
+ local_irq_save(flags);
+
+ /*
+ * If RCU core is waiting for this CPU to exit critical section,
+ * let it know that we have done so.
+ */
+ special = t->rcu_read_unlock_special;
+ if (special & RCU_READ_UNLOCK_NEED_QS)
+ rcu_preempt_cpu_qs();
+
+ /* Hardware IRQ handlers cannot block. */
+ if (in_irq()) {
+ local_irq_restore(flags);
+ return;
+ }
+
+ /* Clean up if blocked during RCU read-side critical section. */
+ if (special & RCU_READ_UNLOCK_BLOCKED) {
+ t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
+
+ /*
+ * Remove this task from the ->blkd_tasks list and adjust
+ * any pointers that might have been referencing it.
+ */
+ empty = !rcu_preempt_blocked_readers_cgp();
+ empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
+ np = t->rcu_node_entry.next;
+ if (np == &rcu_preempt_ctrlblk.blkd_tasks)
+ np = NULL;
+ list_del(&t->rcu_node_entry);
+ if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
+ rcu_preempt_ctrlblk.gp_tasks = np;
+ if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
+ rcu_preempt_ctrlblk.exp_tasks = np;
+ INIT_LIST_HEAD(&t->rcu_node_entry);
+
+ /*
+ * If this was the last task on the current list, and if
+ * we aren't waiting on the CPU, report the quiescent state
+ * and start a new grace period if needed.
+ */
+ if (!empty && !rcu_preempt_blocked_readers_cgp()) {
+ rcu_preempt_cpu_qs();
+ rcu_preempt_start_gp();
+ }
+
+ /*
+ * If this was the last task on the expedited lists,
+ * then we need to wake up the waiting task.
+ */
+ if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
+ rcu_report_exp_done();
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * Tiny-preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+ struct task_struct *t = current;
+
+ barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
+ --t->rcu_read_lock_nesting;
+ barrier(); /* decrement before load of ->rcu_read_unlock_special */
+ if (t->rcu_read_lock_nesting == 0 &&
+ unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+ rcu_read_unlock_special(t);
+#ifdef CONFIG_PROVE_LOCKING
+ WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
+/*
+ * Check for a quiescent state from the current CPU. When a task blocks,
+ * the task is recorded in the rcu_preempt_ctrlblk structure, which is
+ * checked elsewhere. This is called from the scheduling-clock interrupt.
+ *
+ * Caller must disable hard irqs.
+ */
+static void rcu_preempt_check_callbacks(void)
+{
+ struct task_struct *t = current;
+
+ if (rcu_preempt_gp_in_progress() &&
+ (!rcu_preempt_running_reader() ||
+ !rcu_cpu_blocking_cur_gp()))
+ rcu_preempt_cpu_qs();
+ if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
+ rcu_preempt_ctrlblk.rcb.donetail)
+ raise_softirq(RCU_SOFTIRQ);
+ if (rcu_preempt_gp_in_progress() &&
+ rcu_cpu_blocking_cur_gp() &&
+ rcu_preempt_running_reader())
+ t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
+}
+
+/*
+ * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
+ * update, so this is invoked from __rcu_process_callbacks() to
+ * handle that case. Of course, it is invoked for all flavors of
+ * RCU, but RCU callbacks can appear only on one of the lists, and
+ * neither ->nexttail nor ->donetail can possibly be NULL, so there
+ * is no need for an explicit check.
+ */
+static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
+{
+ if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
+ rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
+}
+
+/*
+ * Process callbacks for preemptible RCU.
+ */
+static void rcu_preempt_process_callbacks(void)
+{
+ __rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
+}
+
+/*
+ * Queue a preemptible-RCU callback for invocation after a grace period.
+ */
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+ unsigned long flags;
+
+ debug_rcu_head_queue(head);
+ head->func = func;
+ head->next = NULL;
+
+ local_irq_save(flags);
+ *rcu_preempt_ctrlblk.nexttail = head;
+ rcu_preempt_ctrlblk.nexttail = &head->next;
+ rcu_preempt_start_gp(); /* checks to see if GP needed. */
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+void rcu_barrier(void)
+{
+ struct rcu_synchronize rcu;
+
+ init_rcu_head_on_stack(&rcu.head);
+ init_completion(&rcu.completion);
+ /* Will wake me after RCU finished. */
+ call_rcu(&rcu.head, wakeme_after_rcu);
+ /* Wait for it. */
+ wait_for_completion(&rcu.completion);
+ destroy_rcu_head_on_stack(&rcu.head);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/*
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void synchronize_rcu(void)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (!rcu_scheduler_active)
+ return;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+ WARN_ON_ONCE(rcu_preempt_running_reader());
+ if (!rcu_preempt_blocked_readers_any())
+ return;
+
+ /* Once we get past the fastpath checks, same code as rcu_barrier(). */
+ rcu_barrier();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
+static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
+static unsigned long sync_rcu_preempt_exp_count;
+static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
+
+/*
+ * Return non-zero if there are any tasks in RCU read-side critical
+ * sections blocking the current preemptible-RCU expedited grace period.
+ * If there is no preemptible-RCU expedited grace period currently in
+ * progress, returns zero unconditionally.
+ */
+static int rcu_preempted_readers_exp(void)
+{
+ return rcu_preempt_ctrlblk.exp_tasks != NULL;
+}
+
+/*
+ * Report the exit from RCU read-side critical section for the last task
+ * that queued itself during or before the current expedited preemptible-RCU
+ * grace period.
+ */
+static void rcu_report_exp_done(void)
+{
+ wake_up(&sync_rcu_preempt_exp_wq);
+}
+
+/*
+ * Wait for an rcu-preempt grace period, but expedite it. The basic idea
+ * is to rely on the fact that there is but one CPU, and that it is
+ * illegal for a task to invoke synchronize_rcu_expedited() while in a
+ * preemptible-RCU read-side critical section. Therefore, any such
+ * critical sections must correspond to blocked tasks, which must therefore
+ * be on the ->blkd_tasks list. So just record the current head of the
+ * list in the ->exp_tasks pointer, and wait for all tasks including and
+ * after the task pointed to by ->exp_tasks to drain.
+ */
+void synchronize_rcu_expedited(void)
+{
+ unsigned long flags;
+ struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
+ unsigned long snap;
+
+ barrier(); /* ensure prior action seen before grace period. */
+
+ WARN_ON_ONCE(rcu_preempt_running_reader());
+
+ /*
+ * Acquire lock so that there is only one preemptible RCU grace
+ * period in flight. Of course, if someone does the expedited
+ * grace period for us while we are acquiring the lock, just leave.
+ */
+ snap = sync_rcu_preempt_exp_count + 1;
+ mutex_lock(&sync_rcu_preempt_exp_mutex);
+ if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
+ goto unlock_mb_ret; /* Others did our work for us. */
+
+ local_irq_save(flags);
+
+ /*
+ * All RCU readers have to already be on blkd_tasks because
+ * we cannot legally be executing in an RCU read-side critical
+ * section.
+ */
+
+ /* Snapshot current head of ->blkd_tasks list. */
+ rpcp->exp_tasks = rpcp->blkd_tasks.next;
+ if (rpcp->exp_tasks == &rpcp->blkd_tasks)
+ rpcp->exp_tasks = NULL;
+ local_irq_restore(flags);
+
+ /* Wait for tail of ->blkd_tasks list to drain. */
+ if (rcu_preempted_readers_exp())
+ wait_event(sync_rcu_preempt_exp_wq,
+ !rcu_preempted_readers_exp());
+
+ /* Clean up and exit. */
+ barrier(); /* ensure expedited GP seen before counter increment. */
+ sync_rcu_preempt_exp_count++;
+unlock_mb_ret:
+ mutex_unlock(&sync_rcu_preempt_exp_mutex);
+ barrier(); /* ensure subsequent action seen after grace period. */
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
+ * Does preemptible RCU need the CPU to stay out of dynticks mode?
+ */
+int rcu_preempt_needs_cpu(void)
+{
+ if (!rcu_preempt_running_reader())
+ rcu_preempt_cpu_qs();
+ return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
+}
+
+/*
+ * Check for a task exiting while in a preemptible-RCU read-side
+ * critical section, clean up if so. No need to issue warnings,
+ * as debug_check_no_locks_held() already does this if lockdep
+ * is enabled.
+ */
+void exit_rcu(void)
+{
+ struct task_struct *t = current;
+
+ if (t->rcu_read_lock_nesting == 0)
+ return;
+ t->rcu_read_lock_nesting = 1;
+ rcu_read_unlock();
+}
+
+#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to check.
+ */
+static void rcu_preempt_check_callbacks(void)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to remove.
+ */
+static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to process.
+ */
+static void rcu_preempt_process_callbacks(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>
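
Both the expedited-grace-period snapshot above (ULONG_CMP_LT() on
sync_rcu_preempt_exp_count) and the rcutorture timing loops below
("jiffies - oldstarttime > ULONG_MAX / 2") rely on the same wrap-safe
comparison idiom: subtract in unsigned arithmetic and test which half of
the range the difference falls into. A standalone illustration, written
as user-space C purely to show the arithmetic:

	#include <limits.h>
	#include <stdio.h>

	/* Same definition as in rcupdate.h. */
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

	int main(void)
	{
		unsigned long before = ULONG_MAX - 1;	/* just before wrap */
		unsigned long after = before + 10;	/* wraps around to 8 */

		/* Plain "<" is fooled by the wrap; the unsigned test is not. */
		printf("plain <: %d  ULONG_CMP_LT: %d\n",
		       before < after ? 1 : 0,		/* 0: wrong */
		       ULONG_CMP_LT(before, after) ? 1 : 0); /* 1: right */
		return 0;
	}
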
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 2e2726d..404413e 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -47,6 +47,7 @@
#include
#include
#include
+#include
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney and "
@@ -64,6 +65,9 @@ static int irqreader = 1; /* RCU readers from irq (timers). */
static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff = 0; /* Hold time within burst (us). */
static int fqs_stutter = 3; /* Wait time between bursts (s). */
+static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
+static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
+static int test_boost_duration = 3; /* Duration of each boost test, seconds. */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */
module_param(nreaders, int, 0444);
@@ -88,6 +92,12 @@ module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
+module_param(test_boost, int, 0444);
+MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
+module_param(test_boost_interval, int, 0444);
+MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
+module_param(test_boost_duration, int, 0444);
+MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
@@ -109,6 +119,7 @@ static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
+static struct task_struct *boost_tasks[NR_CPUS];
#define RCU_TORTURE_PIPE_LEN 10
@@ -134,6 +145,12 @@ static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
+static long n_rcu_torture_boost_ktrerror;
+static long n_rcu_torture_boost_rterror;
+static long n_rcu_torture_boost_allocerror;
+static long n_rcu_torture_boost_afferror;
+static long n_rcu_torture_boost_failure;
+static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;
@@ -147,6 +164,16 @@ static int stutter_pause_test;
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
+#ifdef CONFIG_RCU_BOOST
+#define rcu_can_boost() 1
+#else /* #ifdef CONFIG_RCU_BOOST */
+#define rcu_can_boost() 0
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
+static unsigned long boost_starttime; /* jiffies of next boost test start. */
+DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
+ /* and boost task create/destroy. */
+
/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
@@ -275,6 +302,7 @@ struct rcu_torture_ops {
void (*fqs)(void);
int (*stats)(char *page);
int irq_capable;
+ int can_boost;
char *name;
};
@@ -303,6 +331,10 @@ static void rcu_read_delay(struct rcu_random_state *rrsp)
mdelay(longdelay_ms);
if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
udelay(shortdelay_us);
+#ifdef CONFIG_PREEMPT
+ if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
+ preempt_schedule(); /* No QS if preempt_disable() in effect */
+#endif
}
static void rcu_torture_read_unlock(int idx) __releases(RCU)
@@ -360,6 +392,7 @@ static struct rcu_torture_ops rcu_ops = {
.fqs = rcu_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
+ .can_boost = rcu_can_boost(),
.name = "rcu"
};
@@ -402,6 +435,7 @@ static struct rcu_torture_ops rcu_sync_ops = {
.fqs = rcu_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
+ .can_boost = rcu_can_boost(),
.name = "rcu_sync"
};
@@ -418,6 +452,7 @@ static struct rcu_torture_ops rcu_expedited_ops = {
.fqs = rcu_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
+ .can_boost = rcu_can_boost(),
.name = "rcu_expedited"
};
@@ -536,6 +571,8 @@ static void srcu_read_delay(struct rcu_random_state *rrsp)
delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
if (!delay)
schedule_timeout_interruptible(longdelay);
+ else
+ rcu_read_delay(rrsp);
}
static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
@@ -676,6 +713,134 @@ static struct rcu_torture_ops sched_expedited_ops = {
};
/*
+ * RCU torture priority-boost testing. Runs one real-time thread per
+ * CPU for moderate bursts, repeatedly registering RCU callbacks and
+ * spinning waiting for them to be invoked. If a given callback takes
+ * too long to be invoked, we assume that priority inversion has occurred.
+ */
+
+struct rcu_boost_inflight {
+ struct rcu_head rcu;
+ int inflight;
+};
+
+static void rcu_torture_boost_cb(struct rcu_head *head)
+{
+ struct rcu_boost_inflight *rbip =
+ container_of(head, struct rcu_boost_inflight, rcu);
+
+ smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
+ rbip->inflight = 0;
+}
+
+static int rcu_torture_boost(void *arg)
+{
+ long cpu = (long)arg;
+ unsigned long call_rcu_time = jiffies;
+ unsigned long endtime;
+ int firsttime = 1;
+ cpumask_var_t mask;
+ unsigned long oldstarttime;
+ struct rcu_boost_inflight rbi = { .inflight = 0 };
+ struct sched_param sp;
+
+ VERBOSE_PRINTK_STRING("rcu_torture_boost started");
+
+ /* Set real-time priority. */
+ sp.sched_priority = 1;
+ if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
+ VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
+ n_rcu_torture_boost_rterror++;
+ }
+
+ /* Allocate cpumask and set affinity. */
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+ VERBOSE_PRINTK_STRING("rcu_torture_boost mask alloc failed!");
+ n_rcu_torture_boost_allocerror++;
+ } else {
+ cpumask_set_cpu(cpu, mask);
+ while (sched_setaffinity(0, mask) < 0) {
+ if (cpu_is_offline(cpu)) {
+ /* Race with offline. Wait for online. */
+ schedule_timeout_uninterruptible(1);
+ if (kthread_should_stop() ||
+ fullstop != FULLSTOP_DONTSTOP)
+ goto checkwait;
+ } else {
+ /* There is some chance of false positive. */
+ VERBOSE_PRINTK_STRING("rcu_torture_boost "
+ "affinity failed");
+ n_rcu_torture_boost_afferror++;
+ break;
+ }
+ }
+ }
+
+ /* Each pass through the following loop does one boost-test cycle. */
+ do {
+ /* Wait for the next test interval. */
+ oldstarttime = boost_starttime;
+ while (jiffies - oldstarttime > ULONG_MAX / 2) {
+ schedule_timeout_uninterruptible(1);
+ if (kthread_should_stop() ||
+ fullstop != FULLSTOP_DONTSTOP)
+ goto checkwait;
+ }
+
+ /* Do one boost-test interval. */
+ endtime = oldstarttime + test_boost_duration * HZ;
+ while (jiffies - endtime > ULONG_MAX / 2) {
+ /* If we don't have a callback in flight, post one. */
+ if (!rbi.inflight) {
+ smp_mb(); /* RCU core before ->inflight = 1. */
+ rbi.inflight = 1;
+ call_rcu(&rbi.rcu, rcu_torture_boost_cb);
+ if (firsttime)
+ firsttime = 0;
+ else if (jiffies - call_rcu_time >
+ test_boost_duration * HZ - HZ / 2) {
+ VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
+ n_rcu_torture_boost_failure++;
+ }
+ call_rcu_time = jiffies;
+ }
+ if (kthread_should_stop() ||
+ fullstop != FULLSTOP_DONTSTOP)
+ goto checkwait;
+ }
+
+ /*
+ * Set the start time of the next test interval.
+ * Yes, this is vulnerable to long delays, but such
+ * delays simply cause a false negative for the next
+ * interval. Besides, we are running at RT priority,
+ * so delays should be relatively rare.
+ */
+ while (oldstarttime == boost_starttime) {
+ if (mutex_trylock(&boost_mutex)) {
+ boost_starttime = jiffies +
+ test_boost_interval * HZ;
+ n_rcu_torture_boosts++;
+ mutex_unlock(&boost_mutex);
+ break;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Go do the stutter. */
+checkwait: rcu_stutter_wait("rcu_torture_boost");
+ } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
+
+ /* Clean up and exit. */
+ VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
+ rcutorture_shutdown_absorb("rcu_torture_boost");
+ while (!kthread_should_stop() || rbi.inflight)
+ schedule_timeout_uninterruptible(1);
+ smp_mb(); /* order accesses to ->inflight before stack-frame death. */
+ return 0;
+}
+
+/*
* RCU torture force-quiescent-state kthread. Repeatedly induces
* bursts of calls to force_quiescent_state(), increasing the probability
* of occurrence of some important types of race conditions.
@@ -924,7 +1089,8 @@ rcu_torture_printk(char *page)
cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt],
"rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
- "rtmbe: %d nt: %ld",
+ "rtmbe: %d rtbke: %ld rtbre: %ld rtbae: %ld rtbafe: %ld "
+ "rtbf: %ld rtb: %ld nt: %ld",
rcu_torture_current,
rcu_torture_current_version,
list_empty(&rcu_torture_freelist),
@@ -932,8 +1098,19 @@ rcu_torture_printk(char *page)
atomic_read(&n_rcu_torture_alloc_fail),
atomic_read(&n_rcu_torture_free),
atomic_read(&n_rcu_torture_mberror),
+ n_rcu_torture_boost_ktrerror,
+ n_rcu_torture_boost_rterror,
+ n_rcu_torture_boost_allocerror,
+ n_rcu_torture_boost_afferror,
+ n_rcu_torture_boost_failure,
+ n_rcu_torture_boosts,
n_rcu_torture_timers);
- if (atomic_read(&n_rcu_torture_mberror) != 0)
+ if (atomic_read(&n_rcu_torture_mberror) != 0 ||
+ n_rcu_torture_boost_ktrerror != 0 ||
+ n_rcu_torture_boost_rterror != 0 ||
+ n_rcu_torture_boost_allocerror != 0 ||
+ n_rcu_torture_boost_afferror != 0 ||
+ n_rcu_torture_boost_failure != 0)
cnt += sprintf(&page[cnt], " !!!");
cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
if (i > 1) {
@@ -1085,22 +1262,89 @@ rcu_torture_stutter(void *arg)
}
static inline void
-rcu_torture_print_module_parms(char *tag)
+rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
{
printk(KERN_ALERT "%s" TORTURE_FLAG
"--- %s: nreaders=%d nfakewriters=%d "
"stat_interval=%d verbose=%d test_no_idle_hz=%d "
"shuffle_interval=%d stutter=%d irqreader=%d "
- "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d\n",
+ "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
+ "test_boost=%d/%d test_boost_interval=%d "
+ "test_boost_duration=%d\n",
torture_type, tag, nrealreaders, nfakewriters,
stat_interval, verbose, test_no_idle_hz, shuffle_interval,
- stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter);
+ stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
+ test_boost, cur_ops->can_boost,
+ test_boost_interval, test_boost_duration);
}
-static struct notifier_block rcutorture_nb = {
+static struct notifier_block rcutorture_shutdown_nb = {
.notifier_call = rcutorture_shutdown_notify,
};
+static void rcutorture_booster_cleanup(int cpu)
+{
+ struct task_struct *t;
+
+ if (boost_tasks[cpu] == NULL)
+ return;
+ mutex_lock(&boost_mutex);
+ VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
+ t = boost_tasks[cpu];
+ boost_tasks[cpu] = NULL;
+ mutex_unlock(&boost_mutex);
+
+ /*
+ * This must be outside of the mutex: kthread_stop() waits for the
+ * task to exit, and the task may first need boost_mutex in order
+ * to notice that it should stop.
+ */
+ kthread_stop(t);
+}
+
+static int rcutorture_booster_init(int cpu)
+{
+ int retval;
+
+ if (boost_tasks[cpu] != NULL)
+ return 0; /* Already created, nothing more to do. */
+
+ /* Don't allow time recalculation while creating a new task. */
+ mutex_lock(&boost_mutex);
+ VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
+ boost_tasks[cpu] = kthread_run(rcu_torture_boost, (void *)(long)cpu,
+ "rcu_torture_boost");
+ if (IS_ERR(boost_tasks[cpu])) {
+ retval = PTR_ERR(boost_tasks[cpu]);
+ VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
+ n_rcu_torture_boost_ktrerror++;
+ boost_tasks[cpu] = NULL;
+ mutex_unlock(&boost_mutex);
+ return retval;
+ }
+ mutex_unlock(&boost_mutex);
+ return 0;
+}
+
+static int rcutorture_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ (void)rcutorture_booster_init(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ rcutorture_booster_cleanup(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rcutorture_cpu_nb = {
+ .notifier_call = rcutorture_cpu_notify,
+};
+
static void
rcu_torture_cleanup(void)
{
@@ -1118,7 +1362,7 @@ rcu_torture_cleanup(void)
}
fullstop = FULLSTOP_RMMOD;
mutex_unlock(&fullstop_mutex);
- unregister_reboot_notifier(&rcutorture_nb);
+ unregister_reboot_notifier(&rcutorture_shutdown_nb);
if (stutter_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
kthread_stop(stutter_task);
@@ -1175,6 +1419,12 @@ rcu_torture_cleanup(void)
kthread_stop(fqs_task);
}
fqs_task = NULL;
+ if ((test_boost == 1 && cur_ops->can_boost) ||
+ test_boost == 2) {
+ unregister_cpu_notifier(&rcutorture_cpu_nb);
+ for (i = 0; i < NR_CPUS; i++)
+ rcutorture_booster_cleanup(i);
+ }
/* Wait for all RCU callbacks to fire. */
@@ -1186,9 +1436,9 @@ rcu_torture_cleanup(void)
if (cur_ops->cleanup)
cur_ops->cleanup();
if (atomic_read(&n_rcu_torture_error))
- rcu_torture_print_module_parms("End of test: FAILURE");
+ rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
else
- rcu_torture_print_module_parms("End of test: SUCCESS");
+ rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}
static int __init
@@ -1233,7 +1483,7 @@ rcu_torture_init(void)
nrealreaders = nreaders;
else
nrealreaders = 2 * num_online_cpus();
- rcu_torture_print_module_parms("Start of test");
+ rcu_torture_print_module_parms(cur_ops, "Start of test");
fullstop = FULLSTOP_DONTSTOP;
/* Set up the freelist. */
@@ -1254,6 +1504,12 @@ rcu_torture_init(void)
atomic_set(&n_rcu_torture_free, 0);
atomic_set(&n_rcu_torture_mberror, 0);
atomic_set(&n_rcu_torture_error, 0);
+ n_rcu_torture_boost_ktrerror = 0;
+ n_rcu_torture_boost_rterror = 0;
+ n_rcu_torture_boost_allocerror = 0;
+ n_rcu_torture_boost_afferror = 0;
+ n_rcu_torture_boost_failure = 0;
+ n_rcu_torture_boosts = 0;
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
atomic_set(&rcu_torture_wcount[i], 0);
for_each_possible_cpu(cpu) {
@@ -1367,7 +1623,27 @@ rcu_torture_init(void)
goto unwind;
}
}
- register_reboot_notifier(&rcutorture_nb);
+ if (test_boost_interval < 1)
+ test_boost_interval = 1;
+ if (test_boost_duration < 2)
+ test_boost_duration = 2;
+ if ((test_boost == 1 && cur_ops->can_boost) ||
+ test_boost == 2) {
+ int retval;
+
+ boost_starttime = jiffies + test_boost_interval * HZ;
+ register_cpu_notifier(&rcutorture_cpu_nb);
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_is_offline(i))
+ continue; /* Heuristic: CPU can go offline. */
+ retval = rcutorture_booster_init(i);
+ if (retval < 0) {
+ firsterr = retval;
+ goto unwind;
+ }
+ }
+ }
+ register_reboot_notifier(&rcutorture_shutdown_nb);
mutex_unlock(&fullstop_mutex);
return 0;
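
With the boost kthread and hotplug hooks in place, the new test can be exercised by loading rcutorture with the parameters added above, for example (values arbitrary; names are those printed by rcu_torture_print_module_parms()):

	modprobe rcutorture test_boost=2 test_boost_interval=7 test_boost_duration=4

Per the checks above, test_boost=2 forces boost testing even when the selected RCU flavor does not set ->can_boost, and the interval and duration are in seconds (clamped to at least 1 and 2 seconds, respectively).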
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d5bc439..42140a8 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -143,6 +143,11 @@ module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT;
+module_param(rcu_cpu_stall_suppress, int, 0644);
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
static int rcu_pending(int cpu);
@@ -450,7 +455,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-int rcu_cpu_stall_panicking __read_mostly;
+int rcu_cpu_stall_suppress __read_mostly;
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
@@ -482,8 +487,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
rcu_print_task_stall(rnp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- /* OK, time to rat on our buddy... */
-
+ /*
+ * OK, time to rat on our buddy...
+ * See Documentation/RCU/stallwarn.txt for info on how to debug
+ * RCU CPU stall warnings.
+ */
printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
rsp->name);
rcu_for_each_leaf_node(rsp, rnp) {
@@ -512,6 +520,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
unsigned long flags;
struct rcu_node *rnp = rcu_get_root(rsp);
+ /*
+ * OK, time to rat on ourselves...
+ * See Documentation/RCU/stallwarn.txt for info on how to debug
+ * RCU CPU stall warnings.
+ */
printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
trigger_all_cpu_backtrace();
@@ -530,7 +543,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
long delta;
struct rcu_node *rnp;
- if (rcu_cpu_stall_panicking)
+ if (rcu_cpu_stall_suppress)
return;
delta = jiffies - rsp->jiffies_stall;
rnp = rdp->mynode;
@@ -548,10 +561,26 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
- rcu_cpu_stall_panicking = 1;
+ rcu_cpu_stall_suppress = 1;
return NOTIFY_DONE;
}
+/**
+ * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
+ *
+ * Set the stall-warning timeout way off into the future, thus preventing
+ * any RCU CPU stall-warning messages from appearing in the current set of
+ * RCU grace periods.
+ *
+ * The caller must disable hard irqs.
+ */
+void rcu_cpu_stall_reset(void)
+{
+ rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+ rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+ rcu_preempt_stall_reset();
+}
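
As the kernel-doc above notes, the caller must have hard irqs disabled; a minimal usage sketch (the surrounding context is hypothetical, e.g. code about to stop the machine for a kernel debugger):

	unsigned long flags;

	local_irq_save(flags);
	rcu_cpu_stall_reset();	/* Suppress warnings for current grace periods. */
	/* ... legitimate long stall, e.g. debugger entry ... */
	local_irq_restore(flags);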
+
static struct notifier_block rcu_panic_block = {
.notifier_call = rcu_panic,
};
@@ -571,6 +600,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
}
+void rcu_cpu_stall_reset(void)
+{
+}
+
static void __init check_cpu_stall_init(void)
{
}
@@ -712,7 +745,7 @@ static void
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
__releases(rcu_get_root(rsp)->lock)
{
- struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+ struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
struct rcu_node *rnp = rcu_get_root(rsp);
if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
@@ -960,7 +993,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
{
int i;
- struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+ struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
if (rdp->nxtlist == NULL)
return; /* irqs disabled, so comparison is stable. */
@@ -984,7 +1017,7 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
struct rcu_data *rdp;
raw_spin_lock_irqsave(&rsp->onofflock, flags);
- rdp = rsp->rda[smp_processor_id()];
+ rdp = this_cpu_ptr(rsp->rda);
if (rsp->orphan_cbs_list == NULL) {
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
return;
@@ -1007,7 +1040,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
unsigned long flags;
unsigned long mask;
int need_report = 0;
- struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp;
/* Exclude any attempts to start a new grace period. */
@@ -1226,7 +1259,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
cpu = rnp->grplo;
bit = 1;
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
- if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+ if ((rnp->qsmask & bit) != 0 &&
+ f(per_cpu_ptr(rsp->rda, cpu)))
mask |= bit;
}
if (mask != 0) {
@@ -1402,7 +1436,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
* a quiescent state betweentimes.
*/
local_irq_save(flags);
- rdp = rsp->rda[smp_processor_id()];
+ rdp = this_cpu_ptr(rsp->rda);
rcu_process_gp_end(rsp, rdp);
check_for_new_grace_period(rsp, rdp);
@@ -1701,7 +1735,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
{
unsigned long flags;
int i;
- struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rcu_get_root(rsp);
/* Set up local state, ensuring consistent view of global state. */
@@ -1729,7 +1763,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
{
unsigned long flags;
unsigned long mask;
- struct rcu_data *rdp = rsp->rda[cpu];
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rcu_get_root(rsp);
/* Set up local state, ensuring consistent view of global state. */
@@ -1865,7 +1899,8 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
/*
* Helper function for rcu_init() that initializes one rcu_state structure.
*/
-static void __init rcu_init_one(struct rcu_state *rsp)
+static void __init rcu_init_one(struct rcu_state *rsp,
+ struct rcu_data __percpu *rda)
{
static char *buf[] = { "rcu_node_level_0",
"rcu_node_level_1",
@@ -1918,37 +1953,23 @@ static void __init rcu_init_one(struct rcu_state *rsp)
}
}
+ rsp->rda = rda;
rnp = rsp->level[NUM_RCU_LVLS - 1];
for_each_possible_cpu(i) {
while (i > rnp->grphi)
rnp++;
- rsp->rda[i]->mynode = rnp;
+ per_cpu_ptr(rsp->rda, i)->mynode = rnp;
rcu_boot_init_percpu_data(i, rsp);
}
}
-/*
- * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used
- * nowhere else! Assigns leaf node pointers into each CPU's rcu_data
- * structure.
- */
-#define RCU_INIT_FLAVOR(rsp, rcu_data) \
-do { \
- int i; \
- \
- for_each_possible_cpu(i) { \
- (rsp)->rda[i] = &per_cpu(rcu_data, i); \
- } \
- rcu_init_one(rsp); \
-} while (0)
-
void __init rcu_init(void)
{
int cpu;
rcu_bootup_announce();
- RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
- RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
+ rcu_init_one(&rcu_sched_state, &rcu_sched_data);
+ rcu_init_one(&rcu_bh_state, &rcu_bh_data);
__rcu_init_preempt();
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
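
The rcutree.c changes above all stem from converting rsp->rda from an NR_CPUS-sized array of pointers into a true per-CPU allocation, so every access goes through the per-CPU accessors. The two idioms, in sketch form (rsp and cpu assumed in scope):

	struct rcu_data *rdp;

	rdp = this_cpu_ptr(rsp->rda);		/* The running CPU's rcu_data. */
	rdp = per_cpu_ptr(rsp->rda, cpu);	/* A specific CPU's rcu_data. */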
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 14c040b..7918ba6 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -254,19 +254,23 @@ struct rcu_data {
#define RCU_STALL_DELAY_DELTA 0
#endif
-#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ + RCU_STALL_DELAY_DELTA)
+#define RCU_SECONDS_TILL_STALL_CHECK (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \
+ RCU_STALL_DELAY_DELTA)
/* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA)
+#define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30)
/* for rsp->jiffies_stall */
#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
/* to take at least one */
/* scheduling clock irq */
/* before ratting on them. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR_RUNNABLE
+#define RCU_CPU_STALL_SUPPRESS_INIT 0
+#else
+#define RCU_CPU_STALL_SUPPRESS_INIT 1
+#endif
-#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
-#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
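
For concreteness, with the default CONFIG_RCU_CPU_STALL_TIMEOUT of 60 (see the lib/Kconfig.debug hunk below) and RCU_STALL_DELAY_DELTA of zero, these macros work out as follows (a worked expansion, not part of the patch):

	RCU_SECONDS_TILL_STALL_CHECK   == 60 * HZ	/* First warning after ~60 s. */
	RCU_SECONDS_TILL_STALL_RECHECK == 180 * HZ + 30	/* Then roughly every 180 s. */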
/*
* RCU global state, including node hierarchy. This hierarchy is
@@ -283,7 +287,7 @@ struct rcu_state {
struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
- struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
+ struct rcu_data __percpu *rda; /* Per-CPU rcu_data. */
/* The following fields are guarded by the root rcu_node's lock. */
@@ -365,6 +369,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static void rcu_print_task_stall(struct rcu_node *rnp);
+static void rcu_preempt_stall_reset(void);
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0e4f420..71a4147 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -57,7 +57,7 @@ static void __init rcu_bootup_announce_oddness(void)
printk(KERN_INFO
"\tRCU-based detection of stalled CPUs is disabled.\n");
#endif
-#ifndef CONFIG_RCU_CPU_STALL_VERBOSE
+#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
@@ -154,7 +154,7 @@ static void rcu_preempt_note_context_switch(int cpu)
(t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
/* Possibly blocking in an RCU read-side critical section. */
- rdp = rcu_preempt_state.rda[cpu];
+ rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -201,7 +201,7 @@ static void rcu_preempt_note_context_switch(int cpu)
*/
void __rcu_read_lock(void)
{
- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
+ current->rcu_read_lock_nesting++;
barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -344,7 +344,9 @@ void __rcu_read_unlock(void)
struct task_struct *t = current;
barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
+ --t->rcu_read_lock_nesting;
+ barrier(); /* decrement before load of ->rcu_read_unlock_special */
+ if (t->rcu_read_lock_nesting == 0 &&
unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
@@ -417,6 +419,16 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
}
}
+/*
+ * Suppress preemptible RCU's CPU stall warnings by pushing the
+ * time of the next stall-warning message comfortably far into the
+ * future.
+ */
+static void rcu_preempt_stall_reset(void)
+{
+ rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
+}
+
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
/*
@@ -546,9 +558,11 @@ EXPORT_SYMBOL_GPL(call_rcu);
*
* Control will return to the caller some time after a full grace
* period has elapsed, in other words after all currently executing RCU
- * read-side critical sections have completed. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
+ * read-side critical sections have completed. Note, however, that
+ * upon return from synchronize_rcu(), the caller might well be executing
+ * concurrently with new RCU read-side critical sections that began while
+ * synchronize_rcu() was waiting. RCU read-side critical sections are
+ * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
*/
void synchronize_rcu(void)
{
@@ -771,7 +785,7 @@ static void rcu_preempt_send_cbs_to_orphanage(void)
*/
static void __init __rcu_init_preempt(void)
{
- RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
+ rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}
/*
@@ -865,6 +879,14 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
{
}
+/*
+ * Because preemptible RCU does not exist, there is no need to suppress
+ * its CPU stall warnings.
+ */
+static void rcu_preempt_stall_reset(void)
+{
+}
+
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
/*
@@ -919,15 +941,6 @@ static void rcu_preempt_process_callbacks(void)
}
/*
- * In classic RCU, call_rcu() is just call_rcu_sched().
- */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
-{
- call_rcu_sched(head, func);
-}
-EXPORT_SYMBOL_GPL(call_rcu);
-
-/*
* Wait for an rcu-preempt grace period, but make it happen quickly.
* But because preemptable RCU does not exist, map to rcu-sched.
*/
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 36c95b4..458e032 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -262,7 +262,7 @@ static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
struct rcu_data *rdp;
for_each_possible_cpu(cpu) {
- rdp = rsp->rda[cpu];
+ rdp = per_cpu_ptr(rsp->rda, cpu);
if (rdp->beenonline)
print_one_rcu_pending(m, rdp);
}
diff --git a/kernel/sched.c b/kernel/sched.c
index 41541d7..febe5d3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4903,6 +4903,7 @@ out_put_task:
put_online_cpus();
return retval;
}
+EXPORT_SYMBOL_GPL(sched_setaffinity);
static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
struct cpumask *new_mask)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1b4afd2..52c2172 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -539,6 +539,19 @@ config PROVE_RCU_REPEATEDLY
disabling, allowing multiple RCU-lockdep warnings to be printed
on a single reboot.
+config SPARSE_RCU_POINTER
+ bool "RCU debugging: sparse-based checks for pointer usage"
+ default n
+ help
+ This feature enables the __rcu sparse annotation for
+ RCU-protected pointers. This annotation will cause sparse
+ to flag any non-RCU use of annotated pointers. This can be
+ helpful when debugging RCU usage. Please note that this feature
+ is not intended to enforce code cleanliness; it is instead merely
+ a debugging aid.
+
+ Say Y to make sparse flag questionable use of RCU-protected pointers.
+
Say N if you are unsure.
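
As an illustration of what sparse then flags (the type and variable names here are made up):

	struct foo __rcu *gp;		/* RCU-protected pointer. */
	struct foo *p;

	p = rcu_dereference(gp);	/* OK: strips the __rcu annotation. */
	p = gp;				/* sparse: address-space mismatch. */
	rcu_assign_pointer(gp, p);	/* OK for updaters. */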
config LOCKDEP
@@ -832,6 +845,30 @@ config RCU_CPU_STALL_DETECTOR
Say Y if you are unsure.
+config RCU_CPU_STALL_TIMEOUT
+ int "RCU CPU stall timeout in seconds"
+ depends on RCU_CPU_STALL_DETECTOR
+ range 3 300
+ default 60
+ help
+ If a given RCU grace period extends for more than the specified
+ number of seconds, a CPU stall warning is printed. If the
+ RCU grace period persists, additional CPU stall warnings are
+ printed at more widely spaced intervals.
+
+config RCU_CPU_STALL_DETECTOR_RUNNABLE
+ bool "RCU CPU stall checking starts automatically at boot"
+ depends on RCU_CPU_STALL_DETECTOR
+ default y
+ help
+ If set, start checking for RCU CPU stalls immediately on
+ boot. Otherwise, RCU CPU stall checking must be manually
+ enabled.
+
+ Say Y if you are unsure.
+
+ Say N if you wish to suppress RCU CPU stall checking during boot.
+
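
Because rcu_cpu_stall_suppress is registered above as a writable module parameter (mode 0644), saying N here still allows stall checking to be enabled after boot, presumably via something like:

	echo 0 > /sys/module/rcutree/parameters/rcu_cpu_stall_suppress

(the sysfs path assumes the built-in rcutree module name).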
config RCU_CPU_STALL_VERBOSE
bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5b7d462..0ccbcdf 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@ struct radix_tree_node {
unsigned int height; /* Height from the bottom */
unsigned int count;
struct rcu_head rcu_head;
- void *slots[RADIX_TREE_MAP_SIZE];
+ void __rcu *slots[RADIX_TREE_MAP_SIZE];
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
diff --git a/mm/slab.c b/mm/slab.c
index fcae981..88435fc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2330,8 +2330,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
- && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
- cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
+ && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
+ cachep->obj_offset += PAGE_SIZE - size;
size = PAGE_SIZE;
}
#endif
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 8c8632d..957c924 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -38,7 +38,7 @@ static DEFINE_SPINLOCK(nf_nat_lock);
static struct nf_conntrack_l3proto *l3proto __read_mostly;
#define MAX_IP_NAT_PROTO 256
-static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
+static const struct nf_nat_protocol __rcu *nf_nat_protos[MAX_IP_NAT_PROTO]
__read_mostly;
static inline const struct nf_nat_protocol *
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 78b505d..fdaec7d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -27,7 +27,7 @@
static DEFINE_MUTEX(afinfo_mutex);
-const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
+const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
int nf_register_afinfo(const struct nf_afinfo *afinfo)
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index cdcc764..5702de3 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -26,10 +26,10 @@
static DEFINE_MUTEX(nf_ct_ecache_mutex);
-struct nf_ct_event_notifier *nf_conntrack_event_cb __read_mostly;
+struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
-struct nf_exp_event_notifier *nf_expect_event_cb __read_mostly;
+struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
EXPORT_SYMBOL_GPL(nf_expect_event_cb);
/* deliver cached events and clear cache entry - must be called with locally
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 7dcf7a4..1d9bdae 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -16,7 +16,7 @@
#include
#include
-static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
+static struct nf_ct_ext_type __rcu *nf_ct_ext_types[NF_CT_EXT_NUM];
static DEFINE_MUTEX(nf_ct_ext_type_mutex);
void __nf_ct_ext_destroy(struct nf_conn *ct)
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 5886ba1..ed6d929 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -28,8 +28,8 @@
#include
#include
-static struct nf_conntrack_l4proto **nf_ct_protos[PF_MAX] __read_mostly;
-struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX] __read_mostly;
+static struct nf_conntrack_l4proto __rcu **nf_ct_protos[PF_MAX] __read_mostly;
+struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX] __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_l3protos);
static DEFINE_MUTEX(nf_ct_proto_mutex);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 7df37fd..b07393e 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -16,7 +16,7 @@
#define NF_LOG_PREFIXLEN 128
#define NFLOGGER_NAME_LEN 64
-static const struct nf_logger *nf_loggers[NFPROTO_NUMPROTO] __read_mostly;
+static const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO] __read_mostly;
static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly;
static DEFINE_MUTEX(nf_log_mutex);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 78b3cf9..74aebed 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -18,7 +18,7 @@
* long term mutex. The handler must provide an outfn() to accept packets
* for queueing and must reinject all packets it receives, no matter what.
*/
-static const struct nf_queue_handler *queue_handler[NFPROTO_NUMPROTO] __read_mostly;
+static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;
static DEFINE_MUTEX(queue_handler_mutex);