
isci: retire scic_sds_ and scic_ prefixes

The distinction between scic_sds_, scic_, and sci_ is no longer relevant,
so just unify the prefixes on sci_.  The distinction between isci_ and
sci_ is historically significant, and useful for comparing the old
'core' to the current Linux driver. 'sci_' represents the former core as
well as the routines that are closer to the hardware and protocol than
their 'isci_' brethren. sci == SAS controller interface.

Also unwind the 'sds1' out of the parameter structs.
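
For illustration, a representative before/after pair taken from the phy code in
this diff: the function prefix collapses to sci_ and the sds1 member drops out
of the parameter lookup.

    /* before */
    enum sci_status scic_sds_phy_start(struct isci_phy *iphy);
    struct sci_phy_user_params *phy_user =
            &ihost->user_parameters.sds1.phys[phy_idx];

    /* after */
    enum sci_status sci_phy_start(struct isci_phy *iphy);
    struct sci_phy_user_params *phy_user =
            &ihost->user_parameters.phys[phy_idx];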

Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Dan Williams 2011-06-30 19:14:33 -07:00
parent d9dcb4ba79
commit 89a7301f21
23 changed files with 1449 additions and 1943 deletions

File diff suppressed because it is too large


@ -69,12 +69,12 @@ struct scu_task_context;
/**
* struct scic_power_control -
* struct sci_power_control -
*
* This structure defines the fields for managing power control for direct
* attached disk devices.
*/
struct scic_power_control {
struct sci_power_control {
/**
* This field is set when the power control timer is running and cleared when
* it is not.
@ -99,18 +99,18 @@ struct scic_power_control {
/**
* This field is an array of phys that we are waiting on. The phys are direct
* mapped into requesters via struct scic_sds_phy.phy_index
* mapped into requesters via struct sci_phy.phy_index
*/
struct isci_phy *requesters[SCI_MAX_PHYS];
};
struct scic_sds_port_configuration_agent;
struct sci_port_configuration_agent;
typedef void (*port_config_fn)(struct isci_host *,
struct scic_sds_port_configuration_agent *,
struct sci_port_configuration_agent *,
struct isci_port *, struct isci_phy *);
struct scic_sds_port_configuration_agent {
struct sci_port_configuration_agent {
u16 phy_configured_mask;
u16 phy_ready_mask;
struct {
@ -149,13 +149,13 @@ struct isci_host {
/* XXX can we time this externally */
struct sci_timer timer;
/* XXX drop reference module params directly */
union scic_user_parameters user_parameters;
struct sci_user_parameters user_parameters;
/* XXX no need to be a union */
union scic_oem_parameters oem_parameters;
struct scic_sds_port_configuration_agent port_agent;
struct sci_oem_params oem_parameters;
struct sci_port_configuration_agent port_agent;
struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
struct scic_remote_node_table available_remote_nodes;
struct scic_power_control power_control;
struct sci_remote_node_table available_remote_nodes;
struct sci_power_control power_control;
u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
struct scu_task_context *task_context_table;
dma_addr_t task_context_dma;
@ -165,7 +165,7 @@ struct isci_host {
u32 logical_port_entries;
u32 remote_node_entries;
u32 task_context_entries;
struct scic_sds_unsolicited_frame_control uf_control;
struct sci_unsolicited_frame_control uf_control;
/* phy startup */
struct sci_timer phy_timer;
@ -206,10 +206,10 @@ struct isci_host {
};
/**
* enum scic_sds_controller_states - This enumeration depicts all the states
* enum sci_controller_states - This enumeration depicts all the states
* for the common controller state machine.
*/
enum scic_sds_controller_states {
enum sci_controller_states {
/**
* Simply the initial state for the base controller state machine.
*/
@ -360,14 +360,14 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
}
/**
* scic_sds_controller_get_protocol_engine_group() -
* sci_controller_get_protocol_engine_group() -
*
* This macro returns the protocol engine group for this controller object.
* Presently we only support protocol engine group 0 so just return that
*/
#define scic_sds_controller_get_protocol_engine_group(controller) 0
#define sci_controller_get_protocol_engine_group(controller) 0
/* see scic_controller_io_tag_allocate|free for how seq and tci are built */
/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
/* these are returned by the hardware, so sanitize them */
@ -375,7 +375,7 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
/* expander attached sata devices require 3 rnc slots */
static inline int scic_sds_remote_device_node_count(struct isci_remote_device *idev)
static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
{
struct domain_device *dev = idev->domain_dev;
@ -386,23 +386,23 @@ static inline int scic_sds_remote_device_node_count(struct isci_remote_device *i
}
/**
* scic_sds_controller_set_invalid_phy() -
* sci_controller_set_invalid_phy() -
*
* This macro will set the bit in the invalid phy mask for this controller
* object. This is used to control messages reported for invalid link up
* notifications.
*/
#define scic_sds_controller_set_invalid_phy(controller, phy) \
#define sci_controller_set_invalid_phy(controller, phy) \
((controller)->invalid_phy_mask |= (1 << (phy)->phy_index))
/**
* scic_sds_controller_clear_invalid_phy() -
* sci_controller_clear_invalid_phy() -
*
* This macro will clear the bit in the invalid phy mask for this controller
* object. This is used to control messages reported for invalid link up
* notifications.
*/
#define scic_sds_controller_clear_invalid_phy(controller, phy) \
#define sci_controller_clear_invalid_phy(controller, phy) \
((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
@ -460,56 +460,53 @@ static inline bool is_c0(void)
return isci_si_rev > ISCI_SI_REVB0;
}
void scic_sds_controller_post_request(struct isci_host *ihost,
void sci_controller_post_request(struct isci_host *ihost,
u32 request);
void scic_sds_controller_release_frame(struct isci_host *ihost,
void sci_controller_release_frame(struct isci_host *ihost,
u32 frame_index);
void scic_sds_controller_copy_sata_response(void *response_buffer,
void sci_controller_copy_sata_response(void *response_buffer,
void *frame_header,
void *frame_buffer);
enum sci_status scic_sds_controller_allocate_remote_node_context(struct isci_host *ihost,
enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 *node_id);
void scic_sds_controller_free_remote_node_context(
void sci_controller_free_remote_node_context(
struct isci_host *ihost,
struct isci_remote_device *idev,
u16 node_id);
union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
struct isci_host *ihost,
u16 node_id);
struct isci_request *scic_request_by_tag(struct isci_host *ihost,
struct isci_request *sci_request_by_tag(struct isci_host *ihost,
u16 io_tag);
void scic_sds_controller_power_control_queue_insert(
void sci_controller_power_control_queue_insert(
struct isci_host *ihost,
struct isci_phy *iphy);
void scic_sds_controller_power_control_queue_remove(
void sci_controller_power_control_queue_remove(
struct isci_host *ihost,
struct isci_phy *iphy);
void scic_sds_controller_link_up(
void sci_controller_link_up(
struct isci_host *ihost,
struct isci_port *iport,
struct isci_phy *iphy);
void scic_sds_controller_link_down(
void sci_controller_link_down(
struct isci_host *ihost,
struct isci_port *iport,
struct isci_phy *iphy);
void scic_sds_controller_remote_device_stopped(
void sci_controller_remote_device_stopped(
struct isci_host *ihost,
struct isci_remote_device *idev);
void scic_sds_controller_copy_task_context(
void sci_controller_copy_task_context(
struct isci_host *ihost,
struct isci_request *ireq);
void scic_sds_controller_register_setup(struct isci_host *ihost);
void sci_controller_register_setup(struct isci_host *ihost);
enum sci_status scic_controller_continue_io(struct isci_request *ireq);
enum sci_status sci_controller_continue_io(struct isci_request *ireq);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *);
u16 isci_alloc_tag(struct isci_host *ihost);
@ -536,33 +533,33 @@ void isci_host_remote_device_start_complete(
struct isci_remote_device *,
enum sci_status);
void scic_controller_disable_interrupts(
void sci_controller_disable_interrupts(
struct isci_host *ihost);
enum sci_status scic_controller_start_io(
enum sci_status sci_controller_start_io(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
enum sci_task_status scic_controller_start_task(
enum sci_task_status sci_controller_start_task(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
enum sci_status scic_controller_terminate_request(
enum sci_status sci_controller_terminate_request(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
enum sci_status scic_controller_complete_io(
enum sci_status sci_controller_complete_io(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
void scic_sds_port_configuration_agent_construct(
struct scic_sds_port_configuration_agent *port_agent);
void sci_port_configuration_agent_construct(
struct sci_port_configuration_agent *port_agent);
enum sci_status scic_sds_port_configuration_agent_initialize(
enum sci_status sci_port_configuration_agent_initialize(
struct isci_host *ihost,
struct scic_sds_port_configuration_agent *port_agent);
struct sci_port_configuration_agent *port_agent);
#endif
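
As a side note on the tag format referenced above: ISCI_TAG() packs a sequence
number into the top nibble and a task context index into the low bits of a
16-bit tag, and ISCI_TAG_TCI() recovers the index. A minimal userspace sketch of
that encoding (uint16_t stands in for the kernel's u16, and the
SCI_MAX_IO_REQUESTS value below is illustrative, not the driver's definition):

    #include <stdio.h>
    #include <stdint.h>

    #define SCI_MAX_IO_REQUESTS 256                 /* illustrative value for this sketch */
    #define ISCI_TAG(seq, tci)  (((uint16_t)(seq)) << 12 | (tci))
    #define ISCI_TAG_TCI(tag)   ((tag) & (SCI_MAX_IO_REQUESTS - 1))

    int main(void)
    {
            uint16_t tag = ISCI_TAG(0x3, 0x42);     /* sequence 3, task context index 0x42 */

            /* prints tag=0x3042 tci=0x42 seq=0x3: low bits hold the index, high nibble the sequence */
            printf("tag=0x%04x tci=0x%02x seq=0x%x\n", tag, ISCI_TAG_TCI(tag), tag >> 12);
            return 0;
    }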


@ -484,7 +484,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
orom = isci_request_oprom(pdev);
for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
if (scic_oem_parameters_validate(&orom->ctrl[i])) {
if (sci_oem_parameters_validate(&orom->ctrl[i])) {
dev_warn(&pdev->dev,
"[%d]: invalid oem parameters detected, falling back to firmware\n", i);
devm_kfree(&pdev->dev, orom);
@ -554,7 +554,7 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev)
for_each_isci_host(i, ihost, pdev) {
isci_unregister(ihost);
isci_host_deinit(ihost);
scic_controller_disable_interrupts(ihost);
sci_controller_disable_interrupts(ihost);
}
}


@ -304,7 +304,7 @@ enum sci_status {
* This member indicates that the operation failed, the failure is
* controller implementation specific, and the response data associated
* with the request is not valid. You can query for the controller
* specific error information via scic_controller_get_request_status()
* specific error information via sci_controller_get_request_status()
*/
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
@ -395,7 +395,7 @@ enum sci_status {
/**
* This value indicates that an unsupported PCI device ID has been
* specified. This indicates that attempts to invoke
* scic_library_allocate_controller() will fail.
* sci_library_allocate_controller() will fail.
*/
SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
@ -493,7 +493,7 @@ irqreturn_t isci_error_isr(int vec, void *data);
/*
* Each timer is associated with a cancellation flag that is set when
* del_timer() is called and checked in the timer callback function. This
* is needed since del_timer_sync() cannot be called with scic_lock held.
* is needed since del_timer_sync() cannot be called with sci_lock held.
* For deinit however, del_timer_sync() is used without holding the lock.
*/
struct sci_timer {
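
The comment above spells out the pattern: del_timer_sync() cannot be called
with sci_lock held, so each timer carries a cancellation flag that is set when
the timer is stopped and re-checked in the callback. A sketch of that shape
against the 2011-era timer API (the my_timer names and fields are hypothetical
stand-ins; the real struct sci_timer layout is not shown in this hunk):

    #include <linux/timer.h>
    #include <linux/types.h>

    struct my_timer {                       /* hypothetical stand-in, not the driver's sci_timer */
            struct timer_list timer;
            bool cancel;                    /* set under the lock before del_timer() */
    };

    static void my_timer_fn(unsigned long data)
    {
            struct my_timer *t = (struct my_timer *)data;

            /* a del_timer() that raced with an already-running callback set this flag */
            if (t->cancel)
                    return;
            /* ... timeout work goes here ... */
    }

    static void my_timer_stop(struct my_timer *t)
    {
            /* safe with the lock held; del_timer_sync() would not be */
            t->cancel = true;
            del_timer(&t->timer);
    }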


@ -67,25 +67,13 @@ enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
return iphy->max_negotiated_speed;
}
/*
* *****************************************************************************
* * SCIC SDS PHY Internal Methods
* ***************************************************************************** */
/**
* This method will initialize the phy transport layer registers
* @sci_phy:
* @transport_layer_registers
*
* enum sci_status
*/
static enum sci_status scic_sds_phy_transport_layer_initialization(
struct isci_phy *iphy,
struct scu_transport_layer_registers __iomem *transport_layer_registers)
static enum sci_status
sci_phy_transport_layer_initialization(struct isci_phy *iphy,
struct scu_transport_layer_registers __iomem *reg)
{
u32 tl_control;
iphy->transport_layer_registers = transport_layer_registers;
iphy->transport_layer_registers = reg;
writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
&iphy->transport_layer_registers->stp_rni);
@ -101,32 +89,23 @@ static enum sci_status scic_sds_phy_transport_layer_initialization(
return SCI_SUCCESS;
}
/**
* This method will initialize the phy link layer registers
* @sci_phy:
* @link_layer_registers:
*
* enum sci_status
*/
static enum sci_status
scic_sds_phy_link_layer_initialization(struct isci_phy *iphy,
struct scu_link_layer_registers __iomem *link_layer_registers)
sci_phy_link_layer_initialization(struct isci_phy *iphy,
struct scu_link_layer_registers __iomem *reg)
{
struct isci_host *ihost =
iphy->owning_port->owning_controller;
struct isci_host *ihost = iphy->owning_port->owning_controller;
int phy_idx = iphy->phy_index;
struct sci_phy_user_params *phy_user =
&ihost->user_parameters.sds1.phys[phy_idx];
struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
struct sci_phy_oem_params *phy_oem =
&ihost->oem_parameters.sds1.phys[phy_idx];
&ihost->oem_parameters.phys[phy_idx];
u32 phy_configuration;
struct scic_phy_cap phy_cap;
struct sci_phy_cap phy_cap;
u32 parity_check = 0;
u32 parity_count = 0;
u32 llctl, link_rate;
u32 clksm_value = 0;
iphy->link_layer_registers = link_layer_registers;
iphy->link_layer_registers = reg;
/* Set our IDENTIFY frame data */
#define SCI_END_DEVICE 0x01
@ -169,7 +148,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy,
phy_cap.gen3_no_ssc = 1;
phy_cap.gen2_no_ssc = 1;
phy_cap.gen1_no_ssc = 1;
if (ihost->oem_parameters.sds1.controller.do_enable_ssc == true) {
if (ihost->oem_parameters.controller.do_enable_ssc == true) {
phy_cap.gen3_ssc = 1;
phy_cap.gen2_ssc = 1;
phy_cap.gen1_ssc = 1;
@ -216,7 +195,7 @@ scic_sds_phy_link_layer_initialization(struct isci_phy *iphy,
&iphy->link_layer_registers->afe_lookup_table_control);
llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
(u8)ihost->user_parameters.sds1.no_outbound_task_timeout);
(u8)ihost->user_parameters.no_outbound_task_timeout);
switch(phy_user->max_speed_generation) {
case SCIC_SDS_PARM_GEN3_SPEED:
@ -289,7 +268,7 @@ done:
struct isci_port *phy_get_non_dummy_port(
struct isci_phy *iphy)
{
if (scic_sds_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT)
if (sci_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT)
return NULL;
return iphy->owning_port;
@ -302,7 +281,7 @@ struct isci_port *phy_get_non_dummy_port(
*
*
*/
void scic_sds_phy_set_port(
void sci_phy_set_port(
struct isci_phy *iphy,
struct isci_port *iport)
{
@ -310,33 +289,23 @@ void scic_sds_phy_set_port(
if (iphy->bcn_received_while_port_unassigned) {
iphy->bcn_received_while_port_unassigned = false;
scic_sds_port_broadcast_change_received(iphy->owning_port, iphy);
sci_port_broadcast_change_received(iphy->owning_port, iphy);
}
}
/**
* This method will initialize the constructed phy
* @sci_phy:
* @link_layer_registers:
*
* enum sci_status
*/
enum sci_status scic_sds_phy_initialize(
struct isci_phy *iphy,
struct scu_transport_layer_registers __iomem *transport_layer_registers,
struct scu_link_layer_registers __iomem *link_layer_registers)
enum sci_status sci_phy_initialize(struct isci_phy *iphy,
struct scu_transport_layer_registers __iomem *tl,
struct scu_link_layer_registers __iomem *ll)
{
/* Perfrom the initialization of the TL hardware */
scic_sds_phy_transport_layer_initialization(
iphy,
transport_layer_registers);
sci_phy_transport_layer_initialization(iphy, tl);
/* Perofrm the initialization of the PE hardware */
scic_sds_phy_link_layer_initialization(iphy, link_layer_registers);
sci_phy_link_layer_initialization(iphy, ll);
/*
* There is nothing that needs to be done in this state just
* transition to the stopped state. */
/* There is nothing that needs to be done in this state just
* transition to the stopped state
*/
sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
return SCI_SUCCESS;
@ -351,9 +320,7 @@ enum sci_status scic_sds_phy_initialize(
* This will either be the RNi for the device or an invalid RNi if there
* is no current device assigned to the phy.
*/
void scic_sds_phy_setup_transport(
struct isci_phy *iphy,
u32 device_id)
void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
{
u32 tl_control;
@ -368,15 +335,7 @@ void scic_sds_phy_setup_transport(
writel(tl_control, &iphy->transport_layer_registers->control);
}
/**
*
* @sci_phy: The phy object to be suspended.
*
* This function will perform the register reads/writes to suspend the SCU
* hardware protocol engine. none
*/
static void scic_sds_phy_suspend(
struct isci_phy *iphy)
static void sci_phy_suspend(struct isci_phy *iphy)
{
u32 scu_sas_pcfg_value;
@ -386,12 +345,10 @@ static void scic_sds_phy_suspend(
writel(scu_sas_pcfg_value,
&iphy->link_layer_registers->phy_configuration);
scic_sds_phy_setup_transport(
iphy,
SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}
void scic_sds_phy_resume(struct isci_phy *iphy)
void sci_phy_resume(struct isci_phy *iphy)
{
u32 scu_sas_pcfg_value;
@ -402,34 +359,28 @@ void scic_sds_phy_resume(struct isci_phy *iphy)
&iphy->link_layer_registers->phy_configuration);
}
void scic_sds_phy_get_sas_address(struct isci_phy *iphy,
struct sci_sas_address *sas_address)
void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
{
sas_address->high = readl(&iphy->link_layer_registers->source_sas_address_high);
sas_address->low = readl(&iphy->link_layer_registers->source_sas_address_low);
sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
}
void scic_sds_phy_get_attached_sas_address(struct isci_phy *iphy,
struct sci_sas_address *sas_address)
void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
{
struct sas_identify_frame *iaf;
iaf = &iphy->frame_rcvd.iaf;
memcpy(sas_address, iaf->sas_addr, SAS_ADDR_SIZE);
memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
}
void scic_sds_phy_get_protocols(struct isci_phy *iphy,
struct scic_phy_proto *protocols)
void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
{
protocols->all =
(u16)(readl(&iphy->
link_layer_registers->transmit_identification) &
0x0000FFFF);
proto->all = readl(&iphy->link_layer_registers->transmit_identification);
}
enum sci_status scic_sds_phy_start(struct isci_phy *iphy)
enum sci_status sci_phy_start(struct isci_phy *iphy)
{
enum scic_sds_phy_states state = iphy->sm.current_state_id;
enum sci_phy_states state = iphy->sm.current_state_id;
if (state != SCI_PHY_STOPPED) {
dev_dbg(sciphy_to_dev(iphy),
@ -441,9 +392,9 @@ enum sci_status scic_sds_phy_start(struct isci_phy *iphy)
return SCI_SUCCESS;
}
enum sci_status scic_sds_phy_stop(struct isci_phy *iphy)
enum sci_status sci_phy_stop(struct isci_phy *iphy)
{
enum scic_sds_phy_states state = iphy->sm.current_state_id;
enum sci_phy_states state = iphy->sm.current_state_id;
switch (state) {
case SCI_PHY_SUB_INITIAL:
@ -467,9 +418,9 @@ enum sci_status scic_sds_phy_stop(struct isci_phy *iphy)
return SCI_SUCCESS;
}
enum sci_status scic_sds_phy_reset(struct isci_phy *iphy)
enum sci_status sci_phy_reset(struct isci_phy *iphy)
{
enum scic_sds_phy_states state = iphy->sm.current_state_id;
enum sci_phy_states state = iphy->sm.current_state_id;
if (state != SCI_PHY_READY) {
dev_dbg(sciphy_to_dev(iphy),
@ -481,9 +432,9 @@ enum sci_status scic_sds_phy_reset(struct isci_phy *iphy)
return SCI_SUCCESS;
}
enum sci_status scic_sds_phy_consume_power_handler(struct isci_phy *iphy)
enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
{
enum scic_sds_phy_states state = iphy->sm.current_state_id;
enum sci_phy_states state = iphy->sm.current_state_id;
switch (state) {
case SCI_PHY_SUB_AWAIT_SAS_POWER: {
@ -528,55 +479,37 @@ enum sci_status scic_sds_phy_consume_power_handler(struct isci_phy *iphy)
}
}
/*
* *****************************************************************************
* * SCIC SDS PHY HELPER FUNCTIONS
* ***************************************************************************** */
/**
*
* @sci_phy: The phy object that received SAS PHY DETECTED.
*
* This method continues the link training for the phy as if it were a SAS PHY
* instead of a SATA PHY. This is done because the completion queue had a SAS
* PHY DETECTED event when the state machine was expecting a SATA PHY event.
* none
*/
static void scic_sds_phy_start_sas_link_training(
struct isci_phy *iphy)
static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
{
/* continue the link training for the phy as if it were a SAS PHY
* instead of a SATA PHY. This is done because the completion queue had a SAS
* PHY DETECTED event when the state machine was expecting a SATA PHY event.
*/
u32 phy_control;
phy_control =
readl(&iphy->link_layer_registers->phy_configuration);
phy_control = readl(&iphy->link_layer_registers->phy_configuration);
phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
writel(phy_control,
&iphy->link_layer_registers->phy_configuration);
&iphy->link_layer_registers->phy_configuration);
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
}
/**
*
* @sci_phy: The phy object that received a SATA SPINUP HOLD event
*
* This method continues the link training for the phy as if it were a SATA PHY
* instead of a SAS PHY. This is done because the completion queue had a SATA
* SPINUP HOLD event when the state machine was expecting a SAS PHY event. none
*/
static void scic_sds_phy_start_sata_link_training(
struct isci_phy *iphy)
static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
{
/* This method continues the link training for the phy as if it were a SATA PHY
* instead of a SAS PHY. This is done because the completion queue had a SATA
* SPINUP HOLD event when the state machine was expecting a SAS PHY event. none
*/
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
}
/**
* scic_sds_phy_complete_link_training - perform processing common to
* sci_phy_complete_link_training - perform processing common to
* all protocols upon completion of link training.
* @sci_phy: This parameter specifies the phy object for which link training
* has completed.
@ -586,30 +519,28 @@ static void scic_sds_phy_start_sata_link_training(
* sub-state machine.
*
*/
static void scic_sds_phy_complete_link_training(
struct isci_phy *iphy,
enum sas_linkrate max_link_rate,
u32 next_state)
static void sci_phy_complete_link_training(struct isci_phy *iphy,
enum sas_linkrate max_link_rate,
u32 next_state)
{
iphy->max_negotiated_speed = max_link_rate;
sci_change_state(&iphy->sm, next_state);
}
enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy,
u32 event_code)
enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
{
enum scic_sds_phy_states state = iphy->sm.current_state_id;
enum sci_phy_states state = iphy->sm.current_state_id;
switch (state) {
case SCI_PHY_SUB_AWAIT_OSSP_EN:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_SAS_PHY_DETECTED:
scic_sds_phy_start_sas_link_training(iphy);
sci_phy_start_sas_link_training(iphy);
iphy->is_in_link_training = true;
break;
case SCU_EVENT_SATA_SPINUP_HOLD:
scic_sds_phy_start_sata_link_training(iphy);
sci_phy_start_sata_link_training(iphy);
iphy->is_in_link_training = true;
break;
default:
@ -630,30 +561,24 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy,
break;
case SCU_EVENT_SAS_15:
case SCU_EVENT_SAS_15_SSC:
scic_sds_phy_complete_link_training(
iphy,
SAS_LINK_RATE_1_5_GBPS,
SCI_PHY_SUB_AWAIT_IAF_UF);
sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
SCI_PHY_SUB_AWAIT_IAF_UF);
break;
case SCU_EVENT_SAS_30:
case SCU_EVENT_SAS_30_SSC:
scic_sds_phy_complete_link_training(
iphy,
SAS_LINK_RATE_3_0_GBPS,
SCI_PHY_SUB_AWAIT_IAF_UF);
sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
SCI_PHY_SUB_AWAIT_IAF_UF);
break;
case SCU_EVENT_SAS_60:
case SCU_EVENT_SAS_60_SSC:
scic_sds_phy_complete_link_training(
iphy,
SAS_LINK_RATE_6_0_GBPS,
SCI_PHY_SUB_AWAIT_IAF_UF);
sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
SCI_PHY_SUB_AWAIT_IAF_UF);
break;
case SCU_EVENT_SATA_SPINUP_HOLD:
/*
* We were doing SAS PHY link training and received a SATA PHY event
* continue OOB/SN as if this were a SATA PHY */
scic_sds_phy_start_sata_link_training(iphy);
sci_phy_start_sata_link_training(iphy);
break;
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
@ -673,14 +598,14 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy,
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_SAS_PHY_DETECTED:
/* Backup the state machine */
scic_sds_phy_start_sas_link_training(iphy);
sci_phy_start_sas_link_training(iphy);
break;
case SCU_EVENT_SATA_SPINUP_HOLD:
/* We were doing SAS PHY link training and received a
* SATA PHY event continue OOB/SN as if this were a
* SATA PHY
*/
scic_sds_phy_start_sata_link_training(iphy);
sci_phy_start_sata_link_training(iphy);
break;
case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
case SCU_EVENT_LINK_FAILURE:
@ -727,7 +652,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy,
/* There has been a change in the phy type before OOB/SN for the
* SATA finished start down the SAS link traning path.
*/
scic_sds_phy_start_sas_link_training(iphy);
sci_phy_start_sas_link_training(iphy);
break;
default:
@ -760,7 +685,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy,
/* There has been a change in the phy type before OOB/SN for the
* SATA finished start down the SAS link traning path.
*/
scic_sds_phy_start_sas_link_training(iphy);
sci_phy_start_sas_link_training(iphy);
break;
default:
dev_warn(sciphy_to_dev(iphy),
@ -781,24 +706,18 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy,
break;
case SCU_EVENT_SATA_15:
case SCU_EVENT_SATA_15_SSC:
scic_sds_phy_complete_link_training(
iphy,
SAS_LINK_RATE_1_5_GBPS,
SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_SATA_30:
case SCU_EVENT_SATA_30_SSC:
scic_sds_phy_complete_link_training(
iphy,
SAS_LINK_RATE_3_0_GBPS,
SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_SATA_60:
case SCU_EVENT_SATA_60_SSC:
scic_sds_phy_complete_link_training(
iphy,
SAS_LINK_RATE_6_0_GBPS,
SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
@ -808,7 +727,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy,
/*
* There has been a change in the phy type before OOB/SN for the
* SATA finished start down the SAS link traning path. */
scic_sds_phy_start_sas_link_training(iphy);
sci_phy_start_sas_link_training(iphy);
break;
default:
dev_warn(sciphy_to_dev(iphy),
@ -851,7 +770,7 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy,
case SCU_EVENT_BROADCAST_CHANGE:
/* Broadcast change received. Notify the port. */
if (phy_get_non_dummy_port(iphy) != NULL)
scic_sds_port_broadcast_change_received(iphy->owning_port, iphy);
sci_port_broadcast_change_received(iphy->owning_port, iphy);
else
iphy->bcn_received_while_port_unassigned = true;
break;
@ -886,10 +805,9 @@ enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy,
}
}
enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy,
u32 frame_index)
enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
{
enum scic_sds_phy_states state = iphy->sm.current_state_id;
enum sci_phy_states state = iphy->sm.current_state_id;
struct isci_host *ihost = iphy->owning_port->owning_controller;
enum sci_status result;
unsigned long flags;
@ -899,9 +817,9 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy,
u32 *frame_words;
struct sas_identify_frame iaf;
result = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_words);
result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_words);
if (result != SCI_SUCCESS)
return result;
@ -933,15 +851,15 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy,
"unexpected frame id %x\n",
__func__, frame_index);
scic_sds_controller_release_frame(ihost, frame_index);
sci_controller_release_frame(ihost, frame_index);
return result;
}
case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
struct dev_to_host_fis *frame_header;
u32 *fis_frame_data;
result = scic_sds_unsolicited_frame_control_get_header(
&(scic_sds_phy_get_controller(iphy)->uf_control),
result = sci_unsolicited_frame_control_get_header(
&(sci_phy_get_controller(iphy)->uf_control),
frame_index,
(void **)&frame_header);
@ -950,14 +868,14 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy,
if ((frame_header->fis_type == FIS_REGD2H) &&
!(frame_header->status & ATA_BUSY)) {
scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&fis_frame_data);
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&fis_frame_data);
spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
scic_sds_controller_copy_sata_response(&iphy->frame_rcvd.fis,
frame_header,
fis_frame_data);
sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
frame_header,
fis_frame_data);
spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
/* got IAF we can now go to the await spinup semaphore state */
@ -971,7 +889,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy,
__func__, frame_index);
/* Regardless of the result we are done with this frame with it */
scic_sds_controller_release_frame(ihost, frame_index);
sci_controller_release_frame(ihost, frame_index);
return result;
}
@ -983,7 +901,7 @@ enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy,
}
static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
@ -991,71 +909,71 @@ static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_m
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
}
static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
struct isci_host *ihost = iphy->owning_port->owning_controller;
scic_sds_controller_power_control_queue_insert(ihost, iphy);
sci_controller_power_control_queue_insert(ihost, iphy);
}
static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
struct isci_host *ihost = iphy->owning_port->owning_controller;
scic_sds_controller_power_control_queue_remove(ihost, iphy);
sci_controller_power_control_queue_remove(ihost, iphy);
}
static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
struct isci_host *ihost = iphy->owning_port->owning_controller;
scic_sds_controller_power_control_queue_insert(ihost, iphy);
sci_controller_power_control_queue_insert(ihost, iphy);
}
static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
struct isci_host *ihost = iphy->owning_port->owning_controller;
scic_sds_controller_power_control_queue_remove(ihost, iphy);
sci_controller_power_control_queue_remove(ihost, iphy);
}
static void scic_sds_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
}
static void scic_sds_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_del_timer(&iphy->sata_timer);
}
static void scic_sds_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
}
static void scic_sds_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_del_timer(&iphy->sata_timer);
}
static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
if (scic_sds_port_link_detected(iphy->owning_port, iphy)) {
if (sci_port_link_detected(iphy->owning_port, iphy)) {
/*
* Clear the PE suspend condition so we can actually
@ -1063,7 +981,7 @@ static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_bas
* The hardware will not respond to the XRDY until the PE
* suspend condition is cleared.
*/
scic_sds_phy_resume(iphy);
sci_phy_resume(iphy);
sci_mod_timer(&iphy->sata_timer,
SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
@ -1071,14 +989,14 @@ static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_bas
iphy->is_in_link_training = false;
}
static void scic_sds_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_del_timer(&iphy->sata_timer);
}
static void scic_sds_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
@ -1169,7 +1087,7 @@ static void scu_link_layer_tx_hard_reset(
&iphy->link_layer_registers->phy_configuration);
}
static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm)
static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
@ -1182,12 +1100,12 @@ static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm)
scu_link_layer_stop_protocol_engine(iphy);
if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
scic_sds_controller_link_down(scic_sds_phy_get_controller(iphy),
sci_controller_link_down(sci_phy_get_controller(iphy),
phy_get_non_dummy_port(iphy),
iphy);
}
static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm)
static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
@ -1199,31 +1117,31 @@ static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm)
iphy->bcn_received_while_port_unassigned = false;
if (iphy->sm.previous_state_id == SCI_PHY_READY)
scic_sds_controller_link_down(scic_sds_phy_get_controller(iphy),
sci_controller_link_down(sci_phy_get_controller(iphy),
phy_get_non_dummy_port(iphy),
iphy);
sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
}
static void scic_sds_phy_ready_state_enter(struct sci_base_state_machine *sm)
static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
scic_sds_controller_link_up(scic_sds_phy_get_controller(iphy),
sci_controller_link_up(sci_phy_get_controller(iphy),
phy_get_non_dummy_port(iphy),
iphy);
}
static void scic_sds_phy_ready_state_exit(struct sci_base_state_machine *sm)
static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
scic_sds_phy_suspend(iphy);
sci_phy_suspend(iphy);
}
static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm)
static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
@ -1231,7 +1149,7 @@ static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm
* the resetting state we don't notify the user regarding link up and
* link down notifications
*/
scic_sds_port_deactivate_phy(iphy->owning_port, iphy, false);
sci_port_deactivate_phy(iphy->owning_port, iphy, false);
if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
scu_link_layer_tx_hard_reset(iphy);
@ -1243,57 +1161,57 @@ static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm
}
}
static const struct sci_base_state scic_sds_phy_state_table[] = {
static const struct sci_base_state sci_phy_state_table[] = {
[SCI_PHY_INITIAL] = { },
[SCI_PHY_STOPPED] = {
.enter_state = scic_sds_phy_stopped_state_enter,
.enter_state = sci_phy_stopped_state_enter,
},
[SCI_PHY_STARTING] = {
.enter_state = scic_sds_phy_starting_state_enter,
.enter_state = sci_phy_starting_state_enter,
},
[SCI_PHY_SUB_INITIAL] = {
.enter_state = scic_sds_phy_starting_initial_substate_enter,
.enter_state = sci_phy_starting_initial_substate_enter,
},
[SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
[SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
[SCI_PHY_SUB_AWAIT_IAF_UF] = { },
[SCI_PHY_SUB_AWAIT_SAS_POWER] = {
.enter_state = scic_sds_phy_starting_await_sas_power_substate_enter,
.exit_state = scic_sds_phy_starting_await_sas_power_substate_exit,
.enter_state = sci_phy_starting_await_sas_power_substate_enter,
.exit_state = sci_phy_starting_await_sas_power_substate_exit,
},
[SCI_PHY_SUB_AWAIT_SATA_POWER] = {
.enter_state = scic_sds_phy_starting_await_sata_power_substate_enter,
.exit_state = scic_sds_phy_starting_await_sata_power_substate_exit
.enter_state = sci_phy_starting_await_sata_power_substate_enter,
.exit_state = sci_phy_starting_await_sata_power_substate_exit
},
[SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
.enter_state = scic_sds_phy_starting_await_sata_phy_substate_enter,
.exit_state = scic_sds_phy_starting_await_sata_phy_substate_exit
.enter_state = sci_phy_starting_await_sata_phy_substate_enter,
.exit_state = sci_phy_starting_await_sata_phy_substate_exit
},
[SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
.enter_state = scic_sds_phy_starting_await_sata_speed_substate_enter,
.exit_state = scic_sds_phy_starting_await_sata_speed_substate_exit
.enter_state = sci_phy_starting_await_sata_speed_substate_enter,
.exit_state = sci_phy_starting_await_sata_speed_substate_exit
},
[SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
.enter_state = scic_sds_phy_starting_await_sig_fis_uf_substate_enter,
.exit_state = scic_sds_phy_starting_await_sig_fis_uf_substate_exit
.enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
.exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit
},
[SCI_PHY_SUB_FINAL] = {
.enter_state = scic_sds_phy_starting_final_substate_enter,
.enter_state = sci_phy_starting_final_substate_enter,
},
[SCI_PHY_READY] = {
.enter_state = scic_sds_phy_ready_state_enter,
.exit_state = scic_sds_phy_ready_state_exit,
.enter_state = sci_phy_ready_state_enter,
.exit_state = sci_phy_ready_state_exit,
},
[SCI_PHY_RESETTING] = {
.enter_state = scic_sds_phy_resetting_state_enter,
.enter_state = sci_phy_resetting_state_enter,
},
[SCI_PHY_FINAL] = { },
};
void scic_sds_phy_construct(struct isci_phy *iphy,
void sci_phy_construct(struct isci_phy *iphy,
struct isci_port *iport, u8 phy_index)
{
sci_init_sm(&iphy->sm, scic_sds_phy_state_table, SCI_PHY_INITIAL);
sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
/* Copy the rest of the input data to our locals */
iphy->owning_port = iport;
@ -1309,14 +1227,13 @@ void scic_sds_phy_construct(struct isci_phy *iphy,
void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
{
union scic_oem_parameters oem;
struct sci_oem_params *oem = &ihost->oem_parameters;
u64 sci_sas_addr;
__be64 sas_addr;
scic_oem_parameters_get(ihost, &oem);
sci_sas_addr = oem.sds1.phys[index].sas_address.high;
sci_sas_addr = oem->phys[index].sas_address.high;
sci_sas_addr <<= 32;
sci_sas_addr |= oem.sds1.phys[index].sas_address.low;
sci_sas_addr |= oem->phys[index].sas_address.low;
sas_addr = cpu_to_be64(sci_sas_addr);
memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
@ -1365,14 +1282,14 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
switch (func) {
case PHY_FUNC_DISABLE:
spin_lock_irqsave(&ihost->scic_lock, flags);
scic_sds_phy_stop(iphy);
sci_phy_stop(iphy);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
break;
case PHY_FUNC_LINK_RESET:
spin_lock_irqsave(&ihost->scic_lock, flags);
scic_sds_phy_stop(iphy);
scic_sds_phy_start(iphy);
sci_phy_stop(iphy);
sci_phy_start(iphy);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
break;


@ -76,7 +76,7 @@
*/
#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
enum scic_sds_phy_protocol {
enum sci_phy_protocol {
SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
SCIC_SDS_PHY_PROTOCOL_SAS,
SCIC_SDS_PHY_PROTOCOL_SATA,
@ -95,7 +95,7 @@ struct isci_phy {
struct sci_base_state_machine sm;
struct isci_port *owning_port;
enum sas_linkrate max_negotiated_speed;
enum scic_sds_phy_protocol protocol;
enum sci_phy_protocol protocol;
u8 phy_index;
bool bcn_received_while_port_unassigned;
bool is_in_link_training;
@ -118,7 +118,7 @@ static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
return iphy;
}
struct scic_phy_cap {
struct sci_phy_cap {
union {
struct {
/*
@ -147,7 +147,7 @@ struct scic_phy_cap {
} __packed;
/* this data structure reflects the link layer transmit identification reg */
struct scic_phy_proto {
struct sci_phy_proto {
union {
struct {
u16 _r_a:1;
@ -167,12 +167,12 @@ struct scic_phy_proto {
/**
* struct scic_phy_properties - This structure defines the properties common to
* struct sci_phy_properties - This structure defines the properties common to
* all phys that can be retrieved.
*
*
*/
struct scic_phy_properties {
struct sci_phy_properties {
/**
* This field specifies the port that currently contains the
* supplied phy. This field may be set to NULL
@ -194,12 +194,12 @@ struct scic_phy_properties {
};
/**
* struct scic_sas_phy_properties - This structure defines the properties,
* struct sci_sas_phy_properties - This structure defines the properties,
* specific to a SAS phy, that can be retrieved.
*
*
*/
struct scic_sas_phy_properties {
struct sci_sas_phy_properties {
/**
* This field delineates the Identify Address Frame received
* from the remote end point.
@ -210,17 +210,17 @@ struct scic_sas_phy_properties {
* This field delineates the Phy capabilities structure received
* from the remote end point.
*/
struct scic_phy_cap rcvd_cap;
struct sci_phy_cap rcvd_cap;
};
/**
* struct scic_sata_phy_properties - This structure defines the properties,
* struct sci_sata_phy_properties - This structure defines the properties,
* specific to a SATA phy, that can be retrieved.
*
*
*/
struct scic_sata_phy_properties {
struct sci_sata_phy_properties {
/**
* This field delineates the signature FIS received from the
* attached target.
@ -236,12 +236,12 @@ struct scic_sata_phy_properties {
};
/**
* enum scic_phy_counter_id - This enumeration depicts the various pieces of
* enum sci_phy_counter_id - This enumeration depicts the various pieces of
* optional information that can be retrieved for a specific phy.
*
*
*/
enum scic_phy_counter_id {
enum sci_phy_counter_id {
/**
* This PHY information field tracks the number of frames received.
*/
@ -344,7 +344,7 @@ enum scic_phy_counter_id {
SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
};
enum scic_sds_phy_states {
enum sci_phy_states {
/**
* Simply the initial state for the base domain state machine.
*/
@ -441,77 +441,77 @@ enum scic_sds_phy_states {
};
/**
* scic_sds_phy_get_index() -
* sci_phy_get_index() -
*
* This macro returns the phy index for the specified phy
*/
#define scic_sds_phy_get_index(phy) \
#define sci_phy_get_index(phy) \
((phy)->phy_index)
/**
* scic_sds_phy_get_controller() - This macro returns the controller for this
* sci_phy_get_controller() - This macro returns the controller for this
* phy
*
*
*/
#define scic_sds_phy_get_controller(phy) \
(scic_sds_port_get_controller((phy)->owning_port))
#define sci_phy_get_controller(phy) \
(sci_port_get_controller((phy)->owning_port))
void scic_sds_phy_construct(
void sci_phy_construct(
struct isci_phy *iphy,
struct isci_port *iport,
u8 phy_index);
struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
void scic_sds_phy_set_port(
void sci_phy_set_port(
struct isci_phy *iphy,
struct isci_port *iport);
enum sci_status scic_sds_phy_initialize(
enum sci_status sci_phy_initialize(
struct isci_phy *iphy,
struct scu_transport_layer_registers __iomem *transport_layer_registers,
struct scu_link_layer_registers __iomem *link_layer_registers);
enum sci_status scic_sds_phy_start(
enum sci_status sci_phy_start(
struct isci_phy *iphy);
enum sci_status scic_sds_phy_stop(
enum sci_status sci_phy_stop(
struct isci_phy *iphy);
enum sci_status scic_sds_phy_reset(
enum sci_status sci_phy_reset(
struct isci_phy *iphy);
void scic_sds_phy_resume(
void sci_phy_resume(
struct isci_phy *iphy);
void scic_sds_phy_setup_transport(
void sci_phy_setup_transport(
struct isci_phy *iphy,
u32 device_id);
enum sci_status scic_sds_phy_event_handler(
enum sci_status sci_phy_event_handler(
struct isci_phy *iphy,
u32 event_code);
enum sci_status scic_sds_phy_frame_handler(
enum sci_status sci_phy_frame_handler(
struct isci_phy *iphy,
u32 frame_index);
enum sci_status scic_sds_phy_consume_power_handler(
enum sci_status sci_phy_consume_power_handler(
struct isci_phy *iphy);
void scic_sds_phy_get_sas_address(
void sci_phy_get_sas_address(
struct isci_phy *iphy,
struct sci_sas_address *sas_address);
void scic_sds_phy_get_attached_sas_address(
void sci_phy_get_attached_sas_address(
struct isci_phy *iphy,
struct sci_sas_address *sas_address);
struct scic_phy_proto;
void scic_sds_phy_get_protocols(
struct sci_phy_proto;
void sci_phy_get_protocols(
struct isci_phy *iphy,
struct scic_phy_proto *protocols);
struct sci_phy_proto *protocols);
enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
struct isci_host;
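
A note on the state-table pattern visible in the phy.c hunks above:
sci_phy_state_table is an array of optional enter/exit hooks indexed by state,
sci_init_sm() seeds the machine, and sci_change_state() moves it between
states. A small standalone sketch of that shape (the struct layouts and the
exit-then-enter ordering in sci_change_state here are assumptions for
illustration, not the driver's actual definitions):

    #include <stdio.h>

    struct sci_base_state_machine;

    struct sci_base_state {                         /* hypothetical mirror of the driver's table entry */
            void (*enter_state)(struct sci_base_state_machine *sm);
            void (*exit_state)(struct sci_base_state_machine *sm);
    };

    struct sci_base_state_machine {
            const struct sci_base_state *table;
            unsigned int current_state_id;
    };

    enum { PHY_STOPPED, PHY_STARTING, PHY_READY, PHY_MAX_STATES };

    static void starting_enter(struct sci_base_state_machine *sm) { printf("enter STARTING\n"); }
    static void ready_enter(struct sci_base_state_machine *sm)    { printf("enter READY\n"); }
    static void ready_exit(struct sci_base_state_machine *sm)     { printf("exit READY\n"); }

    static const struct sci_base_state phy_state_table[PHY_MAX_STATES] = {
            [PHY_STOPPED]  = { },
            [PHY_STARTING] = { .enter_state = starting_enter },
            [PHY_READY]    = { .enter_state = ready_enter, .exit_state = ready_exit },
    };

    /* assumed semantics: run the old state's exit hook, then the new state's enter hook */
    static void sci_change_state(struct sci_base_state_machine *sm, unsigned int next)
    {
            if (sm->table[sm->current_state_id].exit_state)
                    sm->table[sm->current_state_id].exit_state(sm);
            sm->current_state_id = next;
            if (sm->table[next].enter_state)
                    sm->table[next].enter_state(sm);
    }

    int main(void)
    {
            struct sci_base_state_machine sm = {
                    .table = phy_state_table,
                    .current_state_id = PHY_STOPPED,
            };

            sci_change_state(&sm, PHY_STARTING);    /* prints "enter STARTING" */
            sci_change_state(&sm, PHY_READY);       /* prints "enter READY" */
            sci_change_state(&sm, PHY_STOPPED);     /* prints "exit READY" */
            return 0;
    }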

File diff suppressed because it is too large


@ -123,7 +123,7 @@ struct isci_port {
struct scu_viit_entry __iomem *viit_registers;
};
enum scic_port_not_ready_reason_code {
enum sci_port_not_ready_reason_code {
SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
@ -132,25 +132,25 @@ enum scic_port_not_ready_reason_code {
SCIC_PORT_NOT_READY_REASON_CODE_MAX
};
struct scic_port_end_point_properties {
struct sci_port_end_point_properties {
struct sci_sas_address sas_address;
struct scic_phy_proto protocols;
struct sci_phy_proto protocols;
};
struct scic_port_properties {
struct sci_port_properties {
u32 index;
struct scic_port_end_point_properties local;
struct scic_port_end_point_properties remote;
struct sci_port_end_point_properties local;
struct sci_port_end_point_properties remote;
u32 phy_mask;
};
/**
* enum scic_sds_port_states - This enumeration depicts all the states for the
* enum sci_port_states - This enumeration depicts all the states for the
* common port state machine.
*
*
*/
enum scic_sds_port_states {
enum sci_port_states {
/**
* This state indicates that the port has successfully been stopped.
* In this state no new IO operations are permitted.
@ -211,23 +211,23 @@ enum scic_sds_port_states {
};
/**
* scic_sds_port_get_controller() -
* sci_port_get_controller() -
*
* Helper macro to get the owning controller of this port
*/
#define scic_sds_port_get_controller(this_port) \
#define sci_port_get_controller(this_port) \
((this_port)->owning_controller)
/**
* scic_sds_port_get_index() -
* sci_port_get_index() -
*
* This macro returns the physical port index for this port object
*/
#define scic_sds_port_get_index(this_port) \
#define sci_port_get_index(this_port) \
((this_port)->physical_port_index)
static inline void scic_sds_port_decrement_request_count(struct isci_port *iport)
static inline void sci_port_decrement_request_count(struct isci_port *iport)
{
if (WARN_ONCE(iport->started_request_count == 0,
"%s: tried to decrement started_request_count past 0!?",
@ -237,79 +237,73 @@ static inline void scic_sds_port_decrement_request_count(struct isci_port *iport
iport->started_request_count--;
}
#define scic_sds_port_active_phy(port, phy) \
#define sci_port_active_phy(port, phy) \
(((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
void scic_sds_port_construct(
void sci_port_construct(
struct isci_port *iport,
u8 port_index,
struct isci_host *ihost);
enum sci_status scic_sds_port_initialize(
struct isci_port *iport,
void __iomem *port_task_scheduler_registers,
void __iomem *port_configuration_regsiter,
void __iomem *viit_registers);
enum sci_status sci_port_start(struct isci_port *iport);
enum sci_status sci_port_stop(struct isci_port *iport);
enum sci_status scic_sds_port_start(struct isci_port *iport);
enum sci_status scic_sds_port_stop(struct isci_port *iport);
enum sci_status scic_sds_port_add_phy(
enum sci_status sci_port_add_phy(
struct isci_port *iport,
struct isci_phy *iphy);
enum sci_status scic_sds_port_remove_phy(
enum sci_status sci_port_remove_phy(
struct isci_port *iport,
struct isci_phy *iphy);
void scic_sds_port_setup_transports(
void sci_port_setup_transports(
struct isci_port *iport,
u32 device_id);
void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
void scic_sds_port_deactivate_phy(
void sci_port_deactivate_phy(
struct isci_port *iport,
struct isci_phy *iphy,
bool do_notify_user);
bool scic_sds_port_link_detected(
bool sci_port_link_detected(
struct isci_port *iport,
struct isci_phy *iphy);
enum sci_status scic_sds_port_link_up(struct isci_port *iport,
enum sci_status sci_port_link_up(struct isci_port *iport,
struct isci_phy *iphy);
enum sci_status scic_sds_port_link_down(struct isci_port *iport,
enum sci_status sci_port_link_down(struct isci_port *iport,
struct isci_phy *iphy);
struct isci_request;
struct isci_remote_device;
enum sci_status scic_sds_port_start_io(
enum sci_status sci_port_start_io(
struct isci_port *iport,
struct isci_remote_device *idev,
struct isci_request *ireq);
enum sci_status scic_sds_port_complete_io(
enum sci_status sci_port_complete_io(
struct isci_port *iport,
struct isci_remote_device *idev,
struct isci_request *ireq);
enum sas_linkrate scic_sds_port_get_max_allowed_speed(
enum sas_linkrate sci_port_get_max_allowed_speed(
struct isci_port *iport);
void scic_sds_port_broadcast_change_received(
void sci_port_broadcast_change_received(
struct isci_port *iport,
struct isci_phy *iphy);
bool scic_sds_port_is_valid_phy_assignment(
bool sci_port_is_valid_phy_assignment(
struct isci_port *iport,
u32 phy_index);
void scic_sds_port_get_sas_address(
void sci_port_get_sas_address(
struct isci_port *iport,
struct sci_sas_address *sas_address);
void scic_sds_port_get_attached_sas_address(
void sci_port_get_attached_sas_address(
struct isci_port *iport,
struct sci_sas_address *sas_address);


@ -112,7 +112,7 @@ static s32 sci_sas_address_compare(
* port. port address if the port can be found to match the phy.
* NULL if there is no matching port for the phy.
*/
static struct isci_port *scic_sds_port_configuration_agent_find_port(
static struct isci_port *sci_port_configuration_agent_find_port(
struct isci_host *ihost,
struct isci_phy *iphy)
{
@ -127,14 +127,14 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port(
* more phys match the sent and received SAS address as this phy in which
* case it should participate in the same port.
*/
scic_sds_phy_get_sas_address(iphy, &phy_sas_address);
scic_sds_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
sci_phy_get_sas_address(iphy, &phy_sas_address);
sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
for (i = 0; i < ihost->logical_port_entries; i++) {
struct isci_port *iport = &ihost->ports[i];
scic_sds_port_get_sas_address(iport, &port_sas_address);
scic_sds_port_get_attached_sas_address(iport, &port_attached_device_address);
sci_port_get_sas_address(iport, &port_sas_address);
sci_port_get_attached_sas_address(iport, &port_attached_device_address);
if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
@ -156,9 +156,9 @@ static struct isci_port *scic_sds_port_configuration_agent_find_port(
* this port configuration agent. SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION
* the port configuration is not valid for this port configuration agent.
*/
static enum sci_status scic_sds_port_configuration_agent_validate_ports(
static enum sci_status sci_port_configuration_agent_validate_ports(
struct isci_host *ihost,
struct scic_sds_port_configuration_agent *port_agent)
struct sci_port_configuration_agent *port_agent)
{
struct sci_sas_address first_address;
struct sci_sas_address second_address;
@ -194,8 +194,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports(
* PE0 and PE3 can never have the same SAS Address unless they
* are part of the same x4 wide port and we have already checked
* for this condition. */
scic_sds_phy_get_sas_address(&ihost->phys[0], &first_address);
scic_sds_phy_get_sas_address(&ihost->phys[3], &second_address);
sci_phy_get_sas_address(&ihost->phys[0], &first_address);
sci_phy_get_sas_address(&ihost->phys[3], &second_address);
if (sci_sas_address_compare(first_address, second_address) == 0) {
return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
@ -207,8 +207,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports(
* part of the same port. */
if (port_agent->phy_valid_port_range[0].min_index == 0 &&
port_agent->phy_valid_port_range[1].min_index == 1) {
scic_sds_phy_get_sas_address(&ihost->phys[0], &first_address);
scic_sds_phy_get_sas_address(&ihost->phys[2], &second_address);
sci_phy_get_sas_address(&ihost->phys[0], &first_address);
sci_phy_get_sas_address(&ihost->phys[2], &second_address);
if (sci_sas_address_compare(first_address, second_address) == 0) {
return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
@ -221,8 +221,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports(
* part of the same port. */
if (port_agent->phy_valid_port_range[2].min_index == 2 &&
port_agent->phy_valid_port_range[3].min_index == 3) {
scic_sds_phy_get_sas_address(&ihost->phys[1], &first_address);
scic_sds_phy_get_sas_address(&ihost->phys[3], &second_address);
sci_phy_get_sas_address(&ihost->phys[1], &first_address);
sci_phy_get_sas_address(&ihost->phys[3], &second_address);
if (sci_sas_address_compare(first_address, second_address) == 0) {
return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
@ -239,8 +239,8 @@ static enum sci_status scic_sds_port_configuration_agent_validate_ports(
/* verify all of the phys in the same port are using the same SAS address */
static enum sci_status
scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
struct scic_sds_port_configuration_agent *port_agent)
sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent)
{
u32 phy_mask;
u32 assigned_phy_mask;
@ -254,7 +254,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
sas_address.low = 0;
for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
phy_mask = ihost->oem_parameters.sds1.ports[port_index].phy_mask;
phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
if (!phy_mask)
continue;
@ -269,7 +269,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
if ((phy_mask & (1 << phy_index)) == 0)
continue;
scic_sds_phy_get_sas_address(&ihost->phys[phy_index],
sci_phy_get_sas_address(&ihost->phys[phy_index],
&sas_address);
/*
@ -294,7 +294,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
while (phy_index < SCI_MAX_PHYS) {
if ((phy_mask & (1 << phy_index)) == 0)
continue;
scic_sds_phy_get_sas_address(&ihost->phys[phy_index],
sci_phy_get_sas_address(&ihost->phys[phy_index],
&phy_assigned_address);
if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
@ -307,7 +307,7 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
port_agent->phy_valid_port_range[phy_index].min_index = port_index;
port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
scic_sds_port_add_phy(&ihost->ports[port_index],
sci_port_add_phy(&ihost->ports[port_index],
&ihost->phys[phy_index]);
assigned_phy_mask |= (1 << phy_index);
@ -316,14 +316,14 @@ scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
phy_index++;
}
return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent);
return sci_port_configuration_agent_validate_ports(ihost, port_agent);
}
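
For orientation, the pairwise checks in sci_port_configuration_agent_validate_ports() reduce to one rule: phys that cannot belong to the same wide port must not report the same SAS address. A minimal sketch of that comparison, using only the accessors named in this diff (the helper itself is hypothetical and not part of this commit):

static bool phys_report_same_sas_address(struct isci_host *ihost, u8 a, u8 b)
{
	struct sci_sas_address addr_a, addr_b;

	/* Read the SAS address programmed into each phy and compare them;
	 * validate_ports() rejects the OEM settings when, for example, phys
	 * 0 and 3 match without already being members of the same x4 wide
	 * port.
	 */
	sci_phy_get_sas_address(&ihost->phys[a], &addr_a);
	sci_phy_get_sas_address(&ihost->phys[b], &addr_b);

	return sci_sas_address_compare(addr_a, addr_b) == 0;
}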
static void mpc_agent_timeout(unsigned long data)
{
u8 index;
struct sci_timer *tmr = (struct sci_timer *)data;
struct scic_sds_port_configuration_agent *port_agent;
struct sci_port_configuration_agent *port_agent;
struct isci_host *ihost;
unsigned long flags;
u16 configure_phy_mask;
@ -355,8 +355,8 @@ done:
spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
static void scic_sds_mpc_agent_link_up(struct isci_host *ihost,
struct scic_sds_port_configuration_agent *port_agent,
static void sci_mpc_agent_link_up(struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent,
struct isci_port *iport,
struct isci_phy *iphy)
{
@ -367,10 +367,10 @@ static void scic_sds_mpc_agent_link_up(struct isci_host *ihost,
if (!iport)
return;
port_agent->phy_ready_mask |= (1 << scic_sds_phy_get_index(iphy));
scic_sds_port_link_up(iport, iphy);
if ((iport->active_phy_mask & (1 << scic_sds_phy_get_index(iphy))))
port_agent->phy_configured_mask |= (1 << scic_sds_phy_get_index(iphy));
port_agent->phy_ready_mask |= (1 << sci_phy_get_index(iphy));
sci_port_link_up(iport, iphy);
if ((iport->active_phy_mask & (1 << sci_phy_get_index(iphy))))
port_agent->phy_configured_mask |= (1 << sci_phy_get_index(iphy));
}
/**
@ -390,9 +390,9 @@ static void scic_sds_mpc_agent_link_up(struct isci_host *ihost,
 * not associated with a port, there is no action taken. Is it possible to get a
 * link down notification from a phy that has no associated port?
*/
static void scic_sds_mpc_agent_link_down(
static void sci_mpc_agent_link_down(
struct isci_host *ihost,
struct scic_sds_port_configuration_agent *port_agent,
struct sci_port_configuration_agent *port_agent,
struct isci_port *iport,
struct isci_phy *iphy)
{
@ -405,9 +405,9 @@ static void scic_sds_mpc_agent_link_down(
* state.
*/
port_agent->phy_ready_mask &=
~(1 << scic_sds_phy_get_index(iphy));
~(1 << sci_phy_get_index(iphy));
port_agent->phy_configured_mask &=
~(1 << scic_sds_phy_get_index(iphy));
~(1 << sci_phy_get_index(iphy));
/*
* Check to see if there are more phys waiting to be
@ -424,7 +424,7 @@ static void scic_sds_mpc_agent_link_down(
SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
}
scic_sds_port_link_down(iport, iphy);
sci_port_link_down(iport, iphy);
}
}
@ -432,8 +432,8 @@ static void scic_sds_mpc_agent_link_down(
* configuration mode.
*/
static enum sci_status
scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost,
struct scic_sds_port_configuration_agent *port_agent)
sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent)
{
u8 phy_index;
u8 port_index;
@ -446,11 +446,11 @@ scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost,
port_index = phy_index;
/* Get the assigned SAS Address for the first PHY on the controller. */
scic_sds_phy_get_sas_address(&ihost->phys[phy_index],
sci_phy_get_sas_address(&ihost->phys[phy_index],
&sas_address);
while (++phy_index < SCI_MAX_PHYS) {
scic_sds_phy_get_sas_address(&ihost->phys[phy_index],
sci_phy_get_sas_address(&ihost->phys[phy_index],
&phy_assigned_address);
/* Verify each of the SAS address are all the same for every PHY */
@ -465,11 +465,11 @@ scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost,
}
}
return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent);
return sci_port_configuration_agent_validate_ports(ihost, port_agent);
}
static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost,
struct scic_sds_port_configuration_agent *port_agent,
static void sci_apc_agent_configure_ports(struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent,
struct isci_phy *iphy,
bool start_timer)
{
@ -478,10 +478,10 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost,
struct isci_port *iport;
enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
iport = scic_sds_port_configuration_agent_find_port(ihost, iphy);
iport = sci_port_configuration_agent_find_port(ihost, iphy);
if (iport) {
if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index))
if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
apc_activity = SCIC_SDS_APC_ADD_PHY;
else
apc_activity = SCIC_SDS_APC_SKIP_PHY;
@ -499,7 +499,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost,
iport = &ihost->ports[port_index];
/* First we must make sure that this PHY can be added to this Port. */
if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
/*
* Port contains a PHY with a greater PHY ID than the current
* PHY that has gone link up. This phy can not be part of any
@ -559,7 +559,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost,
switch (apc_activity) {
case SCIC_SDS_APC_ADD_PHY:
status = scic_sds_port_add_phy(iport, iphy);
status = sci_port_add_phy(iport, iphy);
if (status == SCI_SUCCESS) {
port_agent->phy_configured_mask |= (1 << iphy->phy_index);
@ -588,7 +588,7 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost,
}
/**
* scic_sds_apc_agent_link_up - handle apc link up events
* sci_apc_agent_link_up - handle apc link up events
* @scic: This is the controller object that receives the link up
* notification.
* @sci_port: This is the port object associated with the phy. If the is no
@ -599,8 +599,8 @@ static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost,
* notifications. Is it possible to get a link down notification from a phy
 * that has no associated port?
*/
static void scic_sds_apc_agent_link_up(struct isci_host *ihost,
struct scic_sds_port_configuration_agent *port_agent,
static void sci_apc_agent_link_up(struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent,
struct isci_port *iport,
struct isci_phy *iphy)
{
@ -609,7 +609,7 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost,
if (!iport) {
/* the phy is not part of this port */
port_agent->phy_ready_mask |= 1 << phy_index;
scic_sds_apc_agent_configure_ports(ihost, port_agent, iphy, true);
sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
} else {
/* the phy is already part of the port */
u32 port_state = iport->sm.current_state_id;
@ -620,7 +620,7 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost,
*/
BUG_ON(port_state != SCI_PORT_RESETTING);
port_agent->phy_ready_mask |= 1 << phy_index;
scic_sds_port_link_up(iport, iphy);
sci_port_link_up(iport, iphy);
}
}
@ -637,20 +637,20 @@ static void scic_sds_apc_agent_link_up(struct isci_host *ihost,
 * possible to get a link down notification from a phy that has no associated
* port?
*/
static void scic_sds_apc_agent_link_down(
static void sci_apc_agent_link_down(
struct isci_host *ihost,
struct scic_sds_port_configuration_agent *port_agent,
struct sci_port_configuration_agent *port_agent,
struct isci_port *iport,
struct isci_phy *iphy)
{
port_agent->phy_ready_mask &= ~(1 << scic_sds_phy_get_index(iphy));
port_agent->phy_ready_mask &= ~(1 << sci_phy_get_index(iphy));
if (!iport)
return;
if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
enum sci_status status;
status = scic_sds_port_remove_phy(iport, iphy);
status = sci_port_remove_phy(iport, iphy);
if (status == SCI_SUCCESS)
port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
@ -662,7 +662,7 @@ static void apc_agent_timeout(unsigned long data)
{
u32 index;
struct sci_timer *tmr = (struct sci_timer *)data;
struct scic_sds_port_configuration_agent *port_agent;
struct sci_port_configuration_agent *port_agent;
struct isci_host *ihost;
unsigned long flags;
u16 configure_phy_mask;
@ -686,7 +686,7 @@ static void apc_agent_timeout(unsigned long data)
if ((configure_phy_mask & (1 << index)) == 0)
continue;
scic_sds_apc_agent_configure_ports(ihost, port_agent,
sci_apc_agent_configure_ports(ihost, port_agent,
&ihost->phys[index], false);
}
@ -706,8 +706,8 @@ done:
* call is universal for both manual port configuration and automatic port
* configuration modes.
*/
void scic_sds_port_configuration_agent_construct(
struct scic_sds_port_configuration_agent *port_agent)
void sci_port_configuration_agent_construct(
struct sci_port_configuration_agent *port_agent)
{
u32 index;
@ -725,29 +725,29 @@ void scic_sds_port_configuration_agent_construct(
}
}
enum sci_status scic_sds_port_configuration_agent_initialize(
enum sci_status sci_port_configuration_agent_initialize(
struct isci_host *ihost,
struct scic_sds_port_configuration_agent *port_agent)
struct sci_port_configuration_agent *port_agent)
{
enum sci_status status;
enum scic_port_configuration_mode mode;
enum sci_port_configuration_mode mode;
mode = ihost->oem_parameters.sds1.controller.mode_type;
mode = ihost->oem_parameters.controller.mode_type;
if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
status = scic_sds_mpc_agent_validate_phy_configuration(
status = sci_mpc_agent_validate_phy_configuration(
ihost, port_agent);
port_agent->link_up_handler = scic_sds_mpc_agent_link_up;
port_agent->link_down_handler = scic_sds_mpc_agent_link_down;
port_agent->link_up_handler = sci_mpc_agent_link_up;
port_agent->link_down_handler = sci_mpc_agent_link_down;
sci_init_timer(&port_agent->timer, mpc_agent_timeout);
} else {
status = scic_sds_apc_agent_validate_phy_configuration(
status = sci_apc_agent_validate_phy_configuration(
ihost, port_agent);
port_agent->link_up_handler = scic_sds_apc_agent_link_up;
port_agent->link_down_handler = scic_sds_apc_agent_link_down;
port_agent->link_up_handler = sci_apc_agent_link_up;
port_agent->link_down_handler = sci_apc_agent_link_down;
sci_init_timer(&port_agent->timer, apc_agent_timeout);
}

View File

@ -111,25 +111,15 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
return rom;
}
/**
* isci_parse_oem_parameters() - This method will take OEM parameters
* from the module init parameters and copy them to oem_params. This will
* only copy values that are not set to the module parameter default values
* @oem_parameters: This parameter specifies the controller default OEM
* parameters. It is expected that this has been initialized to the default
* parameters for the controller
*
*
*/
enum sci_status isci_parse_oem_parameters(union scic_oem_parameters *oem_params,
enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
struct isci_orom *orom, int scu_index)
{
/* check for valid inputs */
if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
scu_index > orom->hdr.num_elements || !oem_params)
scu_index > orom->hdr.num_elements || !oem)
return -EINVAL;
oem_params->sds1 = orom->ctrl[scu_index];
*oem = orom->ctrl[scu_index];
return 0;
}
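
With the union gone, a caller now just passes a struct sci_oem_params by address. A usage sketch (hypothetical caller; orom and scu_index are assumed locals, error handling abbreviated):

struct sci_oem_params oem;

/* Copy the OEM block for controller 'scu_index' out of the OROM; the
 * helper rejects an out-of-range index or a NULL destination.
 */
if (isci_parse_oem_parameters(&oem, orom, scu_index) != 0)
	return -EINVAL;
/* 'oem' is now an independent copy of orom->ctrl[scu_index]. */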

View File

@ -74,7 +74,7 @@
#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
/* parameters that can be set by module parameters */
struct scic_sds_user_parameters {
struct sci_user_parameters {
struct sci_phy_user_params {
/**
* This field specifies the NOTIFY (ENABLE SPIN UP) primitive
@ -147,30 +147,16 @@ struct scic_sds_user_parameters {
};
/* XXX kill this union */
union scic_user_parameters {
/**
* This field specifies the user parameters specific to the
* Storage Controller Unit (SCU) Driver Standard (SDS) version
* 1.
*/
struct scic_sds_user_parameters sds1;
};
#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
struct scic_sds_oem_params;
int scic_oem_parameters_validate(struct scic_sds_oem_params *oem);
union scic_oem_parameters;
void scic_oem_parameters_get(struct isci_host *ihost,
union scic_oem_parameters *oem);
struct sci_oem_params;
int sci_oem_parameters_validate(struct sci_oem_params *oem);
struct isci_orom;
struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
enum sci_status isci_parse_oem_parameters(union scic_oem_parameters *oem,
enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
struct isci_orom *orom, int scu_index);
struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
@ -214,7 +200,7 @@ struct isci_oem_hdr {
* A PORT_PHY mask that assigns just a single PHY to a port and no other PHYs
* being assigned is sufficient to declare manual PORT configuration.
*/
enum scic_port_configuration_mode {
enum sci_port_configuration_mode {
SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
};
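
The driver itself reads the mode straight from the OEM block (oem_parameters.controller.mode_type, as in sci_port_configuration_agent_initialize() above). The sketch below is only a hypothetical illustration of the rule in the comment above, namely that an explicit PORT_PHY assignment amounts to asking for manual port configuration:

/* Hypothetical helper, not part of the driver: infer the configuration
 * mode from the OEM per-port phy masks.  Any explicit PORT_PHY
 * assignment is treated as a request for manual port configuration.
 */
static enum sci_port_configuration_mode
oem_phy_masks_to_mode(const struct sci_oem_params *oem)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask)
			return SCIC_PORT_MANUAL_CONFIGURATION_MODE;

	return SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
}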
@ -230,7 +216,7 @@ struct sci_bios_oem_param_block_hdr {
uint8_t reserved[8];
} __attribute__ ((packed));
struct scic_sds_oem_params {
struct sci_oem_params {
struct {
uint8_t mode_type;
uint8_t max_concurrent_dev_spin_up;
@ -255,19 +241,9 @@ struct scic_sds_oem_params {
} phys[SCI_MAX_PHYS];
} __attribute__ ((packed));
/* XXX kill this union */
union scic_oem_parameters {
/**
* This field specifies the OEM parameters specific to the
* Storage Controller Unit (SCU) Driver Standard (SDS) version
* 1.
*/
struct scic_sds_oem_params sds1;
};
struct isci_orom {
struct sci_bios_oem_param_block_hdr hdr;
struct scic_sds_oem_params ctrl[SCI_MAX_CONTROLLERS];
struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
} __attribute__ ((packed));
#endif

View File

@ -68,7 +68,7 @@
* @isci_host: This parameter specifies the isci host object.
* @isci_device: This parameter specifies the remote device
*
* scic_lock is held on entrance to this function.
* sci_lock is held on entrance to this function.
*/
static void isci_remote_device_not_ready(struct isci_host *ihost,
struct isci_remote_device *idev, u32 reason)
@ -92,7 +92,7 @@ static void isci_remote_device_not_ready(struct isci_host *ihost,
"%s: isci_device = %p request = %p\n",
__func__, idev, ireq);
scic_controller_terminate_request(ihost,
sci_controller_terminate_request(ihost,
idev,
ireq);
}
@ -133,7 +133,7 @@ static void rnc_destruct_done(void *_dev)
sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}
static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_remote_device *idev)
static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
{
struct isci_host *ihost = idev->owning_port->owning_controller;
enum sci_status status = SCI_SUCCESS;
@ -147,7 +147,7 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_rem
ireq->target_device != idev)
continue;
s = scic_controller_terminate_request(ihost, idev, ireq);
s = sci_controller_terminate_request(ihost, idev, ireq);
if (s != SCI_SUCCESS)
status = s;
}
@ -155,11 +155,11 @@ static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_rem
return status;
}
enum sci_status scic_remote_device_stop(struct isci_remote_device *idev,
enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
u32 timeout)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
switch (state) {
case SCI_DEV_INITIAL:
@ -174,7 +174,7 @@ enum sci_status scic_remote_device_stop(struct isci_remote_device *idev,
case SCI_DEV_STARTING:
/* device not started so there had better be no requests */
BUG_ON(idev->started_request_count != 0);
scic_sds_remote_node_context_destruct(&idev->rnc,
sci_remote_node_context_destruct(&idev->rnc,
rnc_destruct_done, idev);
/* Transition to the stopping state and wait for the
* remote node to complete being posted and invalidated.
@ -191,28 +191,28 @@ enum sci_status scic_remote_device_stop(struct isci_remote_device *idev,
case SCI_SMP_DEV_CMD:
sci_change_state(sm, SCI_DEV_STOPPING);
if (idev->started_request_count == 0) {
scic_sds_remote_node_context_destruct(&idev->rnc,
sci_remote_node_context_destruct(&idev->rnc,
rnc_destruct_done, idev);
return SCI_SUCCESS;
} else
return scic_sds_remote_device_terminate_requests(idev);
return sci_remote_device_terminate_requests(idev);
break;
case SCI_DEV_STOPPING:
/* All requests should have been terminated, but if there is an
* attempt to stop a device already in the stopping state, then
* try again to terminate.
*/
return scic_sds_remote_device_terminate_requests(idev);
return sci_remote_device_terminate_requests(idev);
case SCI_DEV_RESETTING:
sci_change_state(sm, SCI_DEV_STOPPING);
return SCI_SUCCESS;
}
}
enum sci_status scic_remote_device_reset(struct isci_remote_device *idev)
enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
switch (state) {
case SCI_DEV_INITIAL:
@ -239,10 +239,10 @@ enum sci_status scic_remote_device_reset(struct isci_remote_device *idev)
}
}
enum sci_status scic_remote_device_reset_complete(struct isci_remote_device *idev)
enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
if (state != SCI_DEV_RESETTING) {
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
@ -254,11 +254,11 @@ enum sci_status scic_remote_device_reset_complete(struct isci_remote_device *ide
return SCI_SUCCESS;
}
enum sci_status scic_sds_remote_device_suspend(struct isci_remote_device *idev,
enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
u32 suspend_type)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
if (state != SCI_STP_DEV_CMD) {
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
@ -266,15 +266,15 @@ enum sci_status scic_sds_remote_device_suspend(struct isci_remote_device *idev,
return SCI_FAILURE_INVALID_STATE;
}
return scic_sds_remote_node_context_suspend(&idev->rnc,
return sci_remote_node_context_suspend(&idev->rnc,
suspend_type, NULL, NULL);
}
enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *idev,
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
u32 frame_index)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
struct isci_host *ihost = idev->owning_port->owning_controller;
enum sci_status status;
@ -289,7 +289,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
/* Return the frame back to the controller */
scic_sds_controller_release_frame(ihost, frame_index);
sci_controller_release_frame(ihost, frame_index);
return SCI_FAILURE_INVALID_STATE;
case SCI_DEV_READY:
case SCI_STP_DEV_NCQ_ERROR:
@ -302,7 +302,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *
void *frame_header;
ssize_t word_cnt;
status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
&frame_header);
if (status != SCI_SUCCESS)
@ -311,22 +311,22 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *
word_cnt = sizeof(hdr) / sizeof(u32);
sci_swab32_cpy(&hdr, frame_header, word_cnt);
ireq = scic_request_by_tag(ihost, be16_to_cpu(hdr.tag));
ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
if (ireq && ireq->target_device == idev) {
/* The IO request is now in charge of releasing the frame */
status = scic_sds_io_request_frame_handler(ireq, frame_index);
status = sci_io_request_frame_handler(ireq, frame_index);
} else {
/* We could not map this tag to a valid IO
* request Just toss the frame and continue
*/
scic_sds_controller_release_frame(ihost, frame_index);
sci_controller_release_frame(ihost, frame_index);
}
break;
}
case SCI_STP_DEV_NCQ: {
struct dev_to_host_fis *hdr;
status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&hdr);
if (status != SCI_SUCCESS)
@ -349,7 +349,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *
} else
status = SCI_FAILURE;
scic_sds_controller_release_frame(ihost, frame_index);
sci_controller_release_frame(ihost, frame_index);
break;
}
case SCI_STP_DEV_CMD:
@ -358,7 +358,7 @@ enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *
* in this state. All unsolicited frames are forwarded to the io request
* object.
*/
status = scic_sds_io_request_frame_handler(idev->working_request, frame_index);
status = sci_io_request_frame_handler(idev->working_request, frame_index);
break;
}
@ -369,7 +369,7 @@ static bool is_remote_device_ready(struct isci_remote_device *idev)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
switch (state) {
case SCI_DEV_READY:
@ -386,25 +386,25 @@ static bool is_remote_device_ready(struct isci_remote_device *idev)
}
}
enum sci_status scic_sds_remote_device_event_handler(struct isci_remote_device *idev,
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
u32 event_code)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
enum sci_status status;
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TYPE_RNC_OPS_MISC:
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
status = scic_sds_remote_node_context_event_handler(&idev->rnc, event_code);
status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
break;
case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
status = SCI_SUCCESS;
/* Suspend the associated RNC */
scic_sds_remote_node_context_suspend(&idev->rnc,
sci_remote_node_context_suspend(&idev->rnc,
SCI_SOFTWARE_SUSPENSION,
NULL, NULL);
@ -439,13 +439,13 @@ enum sci_status scic_sds_remote_device_event_handler(struct isci_remote_device *
*/
if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
status = scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL);
status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}
return status;
}
static void scic_sds_remote_device_start_request(struct isci_remote_device *idev,
static void sci_remote_device_start_request(struct isci_remote_device *idev,
struct isci_request *ireq,
enum sci_status status)
{
@ -453,19 +453,19 @@ static void scic_sds_remote_device_start_request(struct isci_remote_device *idev
/* cleanup requests that failed after starting on the port */
if (status != SCI_SUCCESS)
scic_sds_port_complete_io(iport, idev, ireq);
sci_port_complete_io(iport, idev, ireq);
else {
kref_get(&idev->kref);
scic_sds_remote_device_increment_request_count(idev);
sci_remote_device_increment_request_count(idev);
}
}
enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost,
enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
struct isci_port *iport = idev->owning_port;
enum sci_status status;
@ -488,15 +488,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost,
* successful it will start the request for the port object then
* increment its own request count.
*/
status = scic_sds_port_start_io(iport, idev, ireq);
status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq);
status = sci_remote_node_context_start_io(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
status = scic_sds_request_start(ireq);
status = sci_request_start(ireq);
break;
case SCI_STP_DEV_IDLE: {
/* handle the start io operation for a sata device that is in
@ -507,18 +507,18 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost,
* If this is a softreset we may want to have a different
* substate.
*/
enum scic_sds_remote_device_states new_state;
enum sci_remote_device_states new_state;
struct sas_task *task = isci_request_access_task(ireq);
status = scic_sds_port_start_io(iport, idev, ireq);
status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq);
status = sci_remote_node_context_start_io(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
status = scic_sds_request_start(ireq);
status = sci_request_start(ireq);
if (status != SCI_SUCCESS)
break;
@ -535,15 +535,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost,
struct sas_task *task = isci_request_access_task(ireq);
if (task->ata_task.use_ncq) {
status = scic_sds_port_start_io(iport, idev, ireq);
status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq);
status = sci_remote_node_context_start_io(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
status = scic_sds_request_start(ireq);
status = sci_request_start(ireq);
} else
return SCI_FAILURE_INVALID_STATE;
break;
@ -551,15 +551,15 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost,
case SCI_STP_DEV_AWAIT_RESET:
return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
case SCI_SMP_DEV_IDLE:
status = scic_sds_port_start_io(iport, idev, ireq);
status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq);
status = sci_remote_node_context_start_io(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
status = scic_sds_request_start(ireq);
status = sci_request_start(ireq);
if (status != SCI_SUCCESS)
break;
@ -574,7 +574,7 @@ enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost,
return SCI_FAILURE_INVALID_STATE;
}
scic_sds_remote_device_start_request(idev, ireq, status);
sci_remote_device_start_request(idev, ireq, status);
return status;
}
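
Each ready-state branch above repeats the same three-step sequence. A condensed sketch (hypothetical helper, using the function names from this diff) of that ordering and its early-exit behaviour:

static enum sci_status start_io_sequence(struct isci_port *iport,
					 struct isci_remote_device *idev,
					 struct isci_request *ireq)
{
	enum sci_status status;

	/* Claim a slot on the port first ... */
	status = sci_port_start_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	/* ... then program the remote node context ... */
	status = sci_remote_node_context_start_io(&idev->rnc, ireq);
	if (status != SCI_SUCCESS)
		return status;

	/* ... and only then start the request itself. */
	return sci_request_start(ireq);
}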
@ -584,24 +584,24 @@ static enum sci_status common_complete_io(struct isci_port *iport,
{
enum sci_status status;
status = scic_sds_request_complete(ireq);
status = sci_request_complete(ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_port_complete_io(iport, idev, ireq);
status = sci_port_complete_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
scic_sds_remote_device_decrement_request_count(idev);
sci_remote_device_decrement_request_count(idev);
return status;
}
enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost,
enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
struct isci_port *iport = idev->owning_port;
enum sci_status status;
@ -636,7 +636,7 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost,
* status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE".
*/
sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
} else if (scic_sds_remote_device_get_request_count(idev) == 0)
} else if (sci_remote_device_get_request_count(idev) == 0)
sci_change_state(sm, SCI_STP_DEV_IDLE);
break;
case SCI_SMP_DEV_CMD:
@ -650,8 +650,8 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost,
if (status != SCI_SUCCESS)
break;
if (scic_sds_remote_device_get_request_count(idev) == 0)
scic_sds_remote_node_context_destruct(&idev->rnc,
if (sci_remote_device_get_request_count(idev) == 0)
sci_remote_node_context_destruct(&idev->rnc,
rnc_destruct_done,
idev);
break;
@ -668,21 +668,21 @@ enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost,
return status;
}
static void scic_sds_remote_device_continue_request(void *dev)
static void sci_remote_device_continue_request(void *dev)
{
struct isci_remote_device *idev = dev;
/* we need to check if this request is still valid to continue. */
if (idev->working_request)
scic_controller_continue_io(idev->working_request);
sci_controller_continue_io(idev->working_request);
}
enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost,
enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
struct isci_port *iport = idev->owning_port;
enum sci_status status;
@ -705,15 +705,15 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost,
case SCI_STP_DEV_NCQ:
case SCI_STP_DEV_NCQ_ERROR:
case SCI_STP_DEV_AWAIT_RESET:
status = scic_sds_port_start_io(iport, idev, ireq);
status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq);
status = sci_remote_node_context_start_task(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
goto out;
status = scic_sds_request_start(ireq);
status = sci_request_start(ireq);
if (status != SCI_SUCCESS)
goto out;
@ -731,32 +731,32 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost,
* the correct action when the remote node context is suspended
* and later resumed.
*/
scic_sds_remote_node_context_suspend(&idev->rnc,
sci_remote_node_context_suspend(&idev->rnc,
SCI_SOFTWARE_SUSPENSION, NULL, NULL);
scic_sds_remote_node_context_resume(&idev->rnc,
scic_sds_remote_device_continue_request,
sci_remote_node_context_resume(&idev->rnc,
sci_remote_device_continue_request,
idev);
out:
scic_sds_remote_device_start_request(idev, ireq, status);
sci_remote_device_start_request(idev, ireq, status);
/* We need to let the controller start request handler know that
* it can't post TC yet. We will provide a callback function to
* post TC when RNC gets resumed.
*/
return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
case SCI_DEV_READY:
status = scic_sds_port_start_io(iport, idev, ireq);
status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq);
status = sci_remote_node_context_start_task(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
status = scic_sds_request_start(ireq);
status = sci_request_start(ireq);
break;
}
scic_sds_remote_device_start_request(idev, ireq, status);
sci_remote_device_start_request(idev, ireq, status);
return status;
}
@ -769,16 +769,16 @@ enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost,
 * This method takes the request and builds an appropriate SCU context for the
* request and then requests the controller to post the request. none
*/
void scic_sds_remote_device_post_request(
void sci_remote_device_post_request(
struct isci_remote_device *idev,
u32 request)
{
u32 context;
context = scic_sds_remote_device_build_command_context(idev, request);
context = sci_remote_device_build_command_context(idev, request);
scic_sds_controller_post_request(
scic_sds_remote_device_get_controller(idev),
sci_controller_post_request(
sci_remote_device_get_controller(idev),
context
);
}
@ -798,7 +798,7 @@ static void remote_device_resume_done(void *_dev)
sci_change_state(&idev->sm, SCI_DEV_READY);
}
static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
struct isci_remote_device *idev = _dev;
struct isci_host *ihost = idev->owning_port->owning_controller;
@ -810,7 +810,7 @@ static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handl
isci_remote_device_ready(ihost, idev);
}
static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
@ -819,7 +819,7 @@ static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_mac
}
/**
* scic_remote_device_destruct() - free remote node context and destruct
* sci_remote_device_destruct() - free remote node context and destruct
* @remote_device: This parameter specifies the remote device to be destructed.
*
* Remote device objects are a limited resource. As such, they must be
@ -831,10 +831,10 @@ static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_mac
 * device isn't valid (e.g. it's already been destroyed, the handle isn't
* valid, etc.).
*/
static enum sci_status scic_remote_device_destruct(struct isci_remote_device *idev)
static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
struct isci_host *ihost;
if (state != SCI_DEV_STOPPED) {
@ -844,7 +844,7 @@ static enum sci_status scic_remote_device_destruct(struct isci_remote_device *id
}
ihost = idev->owning_port->owning_controller;
scic_sds_controller_free_remote_node_context(ihost, idev,
sci_controller_free_remote_node_context(ihost, idev,
idev->rnc.remote_node_index);
idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
sci_change_state(sm, SCI_DEV_FINAL);
@ -869,12 +869,12 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_
* io requests in process */
BUG_ON(!list_empty(&idev->reqs_in_process));
scic_remote_device_destruct(idev);
sci_remote_device_destruct(idev);
list_del_init(&idev->node);
isci_put_device(idev);
}
static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct isci_host *ihost = idev->owning_port->owning_controller;
@ -887,19 +887,19 @@ static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_mac
if (prev_state == SCI_DEV_STOPPING)
isci_remote_device_deconstruct(ihost, idev);
scic_sds_controller_remote_device_stopped(ihost, idev);
sci_controller_remote_device_stopped(ihost, idev);
}
static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct isci_host *ihost = scic_sds_remote_device_get_controller(idev);
struct isci_host *ihost = sci_remote_device_get_controller(idev);
isci_remote_device_not_ready(ihost, idev,
SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
}
static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct isci_host *ihost = idev->owning_port->owning_controller;
@ -913,7 +913,7 @@ static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machi
isci_remote_device_ready(ihost, idev);
}
static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct domain_device *dev = idev->domain_dev;
@ -926,42 +926,42 @@ static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machin
}
}
static void scic_sds_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
scic_sds_remote_node_context_suspend(
sci_remote_node_context_suspend(
&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
}
static void scic_sds_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL);
sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}
static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
idev->working_request = NULL;
if (scic_sds_remote_node_context_is_ready(&idev->rnc)) {
if (sci_remote_node_context_is_ready(&idev->rnc)) {
/*
* Since the RNC is ready, it's alright to finish completion
* processing (e.g. signal the remote device is ready). */
scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
} else {
scic_sds_remote_node_context_resume(&idev->rnc,
scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler,
sci_remote_node_context_resume(&idev->rnc,
sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
idev);
}
}
static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct isci_host *ihost = scic_sds_remote_device_get_controller(idev);
struct isci_host *ihost = sci_remote_device_get_controller(idev);
BUG_ON(idev->working_request == NULL);
@ -969,28 +969,28 @@ static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_
SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
}
static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct isci_host *ihost = scic_sds_remote_device_get_controller(idev);
struct isci_host *ihost = sci_remote_device_get_controller(idev);
if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
isci_remote_device_not_ready(ihost, idev,
idev->not_ready_reason);
}
static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct isci_host *ihost = scic_sds_remote_device_get_controller(idev);
struct isci_host *ihost = sci_remote_device_get_controller(idev);
isci_remote_device_ready(ihost, idev);
}
static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct isci_host *ihost = scic_sds_remote_device_get_controller(idev);
struct isci_host *ihost = sci_remote_device_get_controller(idev);
BUG_ON(idev->working_request == NULL);
@ -998,83 +998,83 @@ static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_
SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
}
static void scic_sds_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
idev->working_request = NULL;
}
static const struct sci_base_state scic_sds_remote_device_state_table[] = {
static const struct sci_base_state sci_remote_device_state_table[] = {
[SCI_DEV_INITIAL] = {
.enter_state = scic_sds_remote_device_initial_state_enter,
.enter_state = sci_remote_device_initial_state_enter,
},
[SCI_DEV_STOPPED] = {
.enter_state = scic_sds_remote_device_stopped_state_enter,
.enter_state = sci_remote_device_stopped_state_enter,
},
[SCI_DEV_STARTING] = {
.enter_state = scic_sds_remote_device_starting_state_enter,
.enter_state = sci_remote_device_starting_state_enter,
},
[SCI_DEV_READY] = {
.enter_state = scic_sds_remote_device_ready_state_enter,
.exit_state = scic_sds_remote_device_ready_state_exit
.enter_state = sci_remote_device_ready_state_enter,
.exit_state = sci_remote_device_ready_state_exit
},
[SCI_STP_DEV_IDLE] = {
.enter_state = scic_sds_stp_remote_device_ready_idle_substate_enter,
.enter_state = sci_stp_remote_device_ready_idle_substate_enter,
},
[SCI_STP_DEV_CMD] = {
.enter_state = scic_sds_stp_remote_device_ready_cmd_substate_enter,
.enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
},
[SCI_STP_DEV_NCQ] = { },
[SCI_STP_DEV_NCQ_ERROR] = {
.enter_state = scic_sds_stp_remote_device_ready_ncq_error_substate_enter,
.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
},
[SCI_STP_DEV_AWAIT_RESET] = { },
[SCI_SMP_DEV_IDLE] = {
.enter_state = scic_sds_smp_remote_device_ready_idle_substate_enter,
.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
},
[SCI_SMP_DEV_CMD] = {
.enter_state = scic_sds_smp_remote_device_ready_cmd_substate_enter,
.exit_state = scic_sds_smp_remote_device_ready_cmd_substate_exit,
.enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
.exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
},
[SCI_DEV_STOPPING] = { },
[SCI_DEV_FAILED] = { },
[SCI_DEV_RESETTING] = {
.enter_state = scic_sds_remote_device_resetting_state_enter,
.exit_state = scic_sds_remote_device_resetting_state_exit
.enter_state = sci_remote_device_resetting_state_enter,
.exit_state = sci_remote_device_resetting_state_exit
},
[SCI_DEV_FINAL] = { },
};
/**
* scic_remote_device_construct() - common construction
* sci_remote_device_construct() - common construction
* @sci_port: SAS/SATA port through which this device is accessed.
* @sci_dev: remote device to construct
*
* This routine just performs benign initialization and does not
* allocate the remote_node_context which is left to
* scic_remote_device_[de]a_construct(). scic_remote_device_destruct()
* sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
* frees the remote_node_context(s) for the device.
*/
static void scic_remote_device_construct(struct isci_port *iport,
static void sci_remote_device_construct(struct isci_port *iport,
struct isci_remote_device *idev)
{
idev->owning_port = iport;
idev->started_request_count = 0;
sci_init_sm(&idev->sm, scic_sds_remote_device_state_table, SCI_DEV_INITIAL);
sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
scic_sds_remote_node_context_construct(&idev->rnc,
sci_remote_node_context_construct(&idev->rnc,
SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}
/**
* scic_remote_device_da_construct() - construct direct attached device.
* sci_remote_device_da_construct() - construct direct attached device.
*
* The information (e.g. IAF, Signature FIS, etc.) necessary to build
* the device is known to the SCI Core since it is contained in the
* scic_phy object. Remote node context(s) is/are a global resource
* allocated by this routine, freed by scic_remote_device_destruct().
* sci_phy object. Remote node context(s) is/are a global resource
* allocated by this routine, freed by sci_remote_device_destruct().
*
* Returns:
* SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
@ -1082,20 +1082,20 @@ static void scic_remote_device_construct(struct isci_port *iport,
* sata-only controller instance.
* SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
*/
static enum sci_status scic_remote_device_da_construct(struct isci_port *iport,
static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
struct isci_remote_device *idev)
{
enum sci_status status;
struct domain_device *dev = idev->domain_dev;
scic_remote_device_construct(iport, idev);
sci_remote_device_construct(iport, idev);
/*
* This information is required to determine how many remote node context
* entries will be needed to store the remote node.
*/
idev->is_direct_attached = true;
status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller,
status = sci_controller_allocate_remote_node_context(iport->owning_controller,
idev,
&idev->rnc.remote_node_index);
@ -1108,7 +1108,7 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport,
else
return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
idev->connection_rate = scic_sds_port_get_max_allowed_speed(iport);
idev->connection_rate = sci_port_get_max_allowed_speed(iport);
/* / @todo Should I assign the port width by reading all of the phys on the port? */
idev->device_port_width = 1;
@ -1117,10 +1117,10 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport,
}
/**
* scic_remote_device_ea_construct() - construct expander attached device
* sci_remote_device_ea_construct() - construct expander attached device
*
* Remote node context(s) is/are a global resource allocated by this
* routine, freed by scic_remote_device_destruct().
* routine, freed by sci_remote_device_destruct().
*
* Returns:
* SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
@ -1128,15 +1128,15 @@ static enum sci_status scic_remote_device_da_construct(struct isci_port *iport,
* sata-only controller instance.
* SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
*/
static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport,
static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
struct isci_remote_device *idev)
{
struct domain_device *dev = idev->domain_dev;
enum sci_status status;
scic_remote_device_construct(iport, idev);
sci_remote_device_construct(iport, idev);
status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller,
status = sci_controller_allocate_remote_node_context(iport->owning_controller,
idev,
&idev->rnc.remote_node_index);
if (status != SCI_SUCCESS)
@ -1155,7 +1155,7 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport,
* connection the logical link rate is that same as the
* physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
* one another, so this code works for both situations. */
idev->connection_rate = min_t(u16, scic_sds_port_get_max_allowed_speed(iport),
idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
dev->linkrate);
/* / @todo Should I assign the port width by reading all of the phys on the port? */
@ -1165,7 +1165,7 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport,
}
/**
* scic_remote_device_start() - This method will start the supplied remote
* sci_remote_device_start() - This method will start the supplied remote
* device. This method enables normal IO requests to flow through to the
* remote device.
* @remote_device: This parameter specifies the device to be started.
@ -1177,11 +1177,11 @@ static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport,
* SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
* the device when there have been no phys added to it.
*/
static enum sci_status scic_remote_device_start(struct isci_remote_device *idev,
static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
u32 timeout)
{
struct sci_base_state_machine *sm = &idev->sm;
enum scic_sds_remote_device_states state = sm->current_state_id;
enum sci_remote_device_states state = sm->current_state_id;
enum sci_status status;
if (state != SCI_DEV_STOPPED) {
@ -1190,7 +1190,7 @@ static enum sci_status scic_remote_device_start(struct isci_remote_device *idev,
return SCI_FAILURE_INVALID_STATE;
}
status = scic_sds_remote_node_context_resume(&idev->rnc,
status = sci_remote_node_context_resume(&idev->rnc,
remote_device_resume_done,
idev);
if (status != SCI_SUCCESS)
@ -1209,9 +1209,9 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
enum sci_status status;
if (dev->parent && dev_is_expander(dev->parent))
status = scic_remote_device_ea_construct(iport, idev);
status = sci_remote_device_ea_construct(iport, idev);
else
status = scic_remote_device_da_construct(iport, idev);
status = sci_remote_device_da_construct(iport, idev);
if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
@ -1221,7 +1221,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
}
/* start the device. */
status = scic_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
if (status != SCI_SUCCESS)
dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
@ -1322,7 +1322,7 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
set_bit(IDEV_STOP_PENDING, &idev->flags);
spin_lock_irqsave(&ihost->scic_lock, flags);
status = scic_remote_device_stop(idev, 50);
status = sci_remote_device_stop(idev, 50);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
/* Wait for the stop complete callback. */

View File

@ -61,7 +61,7 @@
#include "remote_node_context.h"
#include "port.h"
enum scic_remote_device_not_ready_reason_code {
enum sci_remote_device_not_ready_reason_code {
SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
@ -97,7 +97,7 @@ struct isci_remote_device {
enum sas_linkrate connection_rate;
bool is_direct_attached;
struct isci_port *owning_port;
struct scic_sds_remote_node_context rnc;
struct sci_remote_node_context rnc;
/* XXX unify with device reference counting and delete */
u32 started_request_count;
struct isci_request *working_request;
@ -106,7 +106,7 @@ struct isci_remote_device {
#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
/* device reference routines must be called under scic_lock */
/* device reference routines must be called under sci_lock */
static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
{
struct isci_remote_device *idev = dev->lldd_dev;
@ -137,7 +137,7 @@ bool isci_device_is_reset_pending(struct isci_host *ihost,
void isci_device_clear_reset_pending(struct isci_host *ihost,
struct isci_remote_device *idev);
/**
* scic_remote_device_stop() - This method will stop both transmission and
* sci_remote_device_stop() - This method will stop both transmission and
* reception of link activity for the supplied remote device. This method
* disables normal IO requests from flowing through to the remote device.
* @remote_device: This parameter specifies the device to be stopped.
@ -148,12 +148,12 @@ void isci_device_clear_reset_pending(struct isci_host *ihost,
* This value is returned if the transmission and reception for the device was
* successfully stopped.
*/
enum sci_status scic_remote_device_stop(
enum sci_status sci_remote_device_stop(
struct isci_remote_device *idev,
u32 timeout);
/**
* scic_remote_device_reset() - This method will reset the device making it
* sci_remote_device_reset() - This method will reset the device making it
* ready for operation. This method must be called anytime the device is
* reset either through a SMP phy control or a port hard reset request.
* @remote_device: This parameter specifies the device to be reset.
@ -164,11 +164,11 @@ enum sci_status scic_remote_device_stop(
* was accepted. SCI_SUCCESS This value is returned if the device reset is
* started.
*/
enum sci_status scic_remote_device_reset(
enum sci_status sci_remote_device_reset(
struct isci_remote_device *idev);
/**
* scic_remote_device_reset_complete() - This method informs the device object
* sci_remote_device_reset_complete() - This method informs the device object
* that the reset operation is complete and the device can resume operation
* again.
* @remote_device: This parameter specifies the device which is to be informed
@ -177,18 +177,16 @@ enum sci_status scic_remote_device_reset(
 * An indication that the device is resuming operation. SCI_SUCCESS: the device
 * is resuming operation.
*/
enum sci_status scic_remote_device_reset_complete(
enum sci_status sci_remote_device_reset_complete(
struct isci_remote_device *idev);
#define scic_remote_device_is_atapi(device_handle) false
/**
* enum scic_sds_remote_device_states - This enumeration depicts all the states
* enum sci_remote_device_states - This enumeration depicts all the states
* for the common remote device state machine.
*
*
*/
enum scic_sds_remote_device_states {
enum sci_remote_device_states {
/**
* Simply the initial state for the base remote device state machine.
*/
@ -293,7 +291,7 @@ enum scic_sds_remote_device_states {
SCI_DEV_FINAL,
};
static inline struct isci_remote_device *rnc_to_dev(struct scic_sds_remote_node_context *rnc)
static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
{
struct isci_remote_device *idev;
@ -308,122 +306,120 @@ static inline bool dev_is_expander(struct domain_device *dev)
}
/**
* scic_sds_remote_device_increment_request_count() -
* sci_remote_device_increment_request_count() -
*
 * This macro increments the request count for this device
*/
#define scic_sds_remote_device_increment_request_count(idev) \
#define sci_remote_device_increment_request_count(idev) \
((idev)->started_request_count++)
/**
* scic_sds_remote_device_decrement_request_count() -
* sci_remote_device_decrement_request_count() -
*
* This macro decrements the request count for this device. This count will
 * never decrement past 0.
*/
#define scic_sds_remote_device_decrement_request_count(idev) \
#define sci_remote_device_decrement_request_count(idev) \
((idev)->started_request_count > 0 ? \
(idev)->started_request_count-- : 0)
/**
* scic_sds_remote_device_get_request_count() -
* sci_remote_device_get_request_count() -
*
* This is a helper macro to return the current device request count.
*/
#define scic_sds_remote_device_get_request_count(idev) \
#define sci_remote_device_get_request_count(idev) \
((idev)->started_request_count)
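
Taken together these three macros are the whole request-count bookkeeping. A usage sketch (hypothetical, mirroring how complete_io drives the STP idle transition above):

sci_remote_device_increment_request_count(idev);   /* request started */
/* ... request runs to completion ... */
sci_remote_device_decrement_request_count(idev);   /* never drops below 0 */
if (sci_remote_device_get_request_count(idev) == 0)
	sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);  /* device is idle again */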
/**
* scic_sds_remote_device_get_controller() -
* sci_remote_device_get_controller() -
*
* This macro returns the controller object that contains this device object
*/
#define scic_sds_remote_device_get_controller(idev) \
scic_sds_port_get_controller(scic_sds_remote_device_get_port(idev))
#define sci_remote_device_get_controller(idev) \
sci_port_get_controller(sci_remote_device_get_port(idev))
/**
* scic_sds_remote_device_get_port() -
* sci_remote_device_get_port() -
*
* This macro returns the owning port of this device
*/
#define scic_sds_remote_device_get_port(idev) \
#define sci_remote_device_get_port(idev) \
((idev)->owning_port)
/**
* scic_sds_remote_device_get_controller_peg() -
* sci_remote_device_get_controller_peg() -
*
* This macro returns the controllers protocol engine group
*/
#define scic_sds_remote_device_get_controller_peg(idev) \
#define sci_remote_device_get_controller_peg(idev) \
(\
scic_sds_controller_get_protocol_engine_group(\
scic_sds_port_get_controller(\
scic_sds_remote_device_get_port(idev) \
sci_controller_get_protocol_engine_group(\
sci_port_get_controller(\
sci_remote_device_get_port(idev) \
) \
) \
)
/**
* scic_sds_remote_device_get_index() -
* sci_remote_device_get_index() -
*
* This macro returns the remote node index for this device object
*/
#define scic_sds_remote_device_get_index(idev) \
#define sci_remote_device_get_index(idev) \
((idev)->rnc.remote_node_index)
/**
* scic_sds_remote_device_build_command_context() -
* sci_remote_device_build_command_context() -
*
* This macro builds a remote device context for the SCU post request operation
*/
#define scic_sds_remote_device_build_command_context(device, command) \
#define sci_remote_device_build_command_context(device, command) \
((command) \
| (scic_sds_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \
| (sci_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \
| ((device)->owning_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) \
| (scic_sds_remote_device_get_index((device))) \
| (sci_remote_device_get_index((device))) \
)
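sci_remote_device_build_command_context() just ORs the command code with the protocol engine group, the owning port's physical index, and the remote node index, each at its own bit offset. A standalone sketch of that packing (the shift values below are demo placeholders, not the real SCU_CONTEXT_COMMAND_* constants):

#include <stdio.h>

#define DEMO_PEG_SHIFT		15	/* placeholder, assumed for the demo */
#define DEMO_LOGICAL_PORT_SHIFT	12	/* placeholder, assumed for the demo */

static unsigned int demo_build_command_context(unsigned int command,
					       unsigned int peg,
					       unsigned int port_index,
					       unsigned int remote_node_index)
{
	/* Same shape as the macro above: command | peg | port | RNi. */
	return command |
	       (peg << DEMO_PEG_SHIFT) |
	       (port_index << DEMO_LOGICAL_PORT_SHIFT) |
	       remote_node_index;
}

int main(void)
{
	/* command 0x10, PEG 0, port 1, RNi 5 -- arbitrary demo values */
	printf("context = 0x%x\n", demo_build_command_context(0x10, 0, 1, 5));
	return 0;
}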
/**
* scic_sds_remote_device_set_working_request() -
* sci_remote_device_set_working_request() -
*
 * This macro makes the working request assignment for the remote device
* object. To clear the working request use this macro with a NULL request
* object.
*/
#define scic_sds_remote_device_set_working_request(device, request) \
#define sci_remote_device_set_working_request(device, request) \
((device)->working_request = (request))
enum sci_status scic_sds_remote_device_frame_handler(
enum sci_status sci_remote_device_frame_handler(
struct isci_remote_device *idev,
u32 frame_index);
enum sci_status scic_sds_remote_device_event_handler(
enum sci_status sci_remote_device_event_handler(
struct isci_remote_device *idev,
u32 event_code);
enum sci_status scic_sds_remote_device_start_io(
enum sci_status sci_remote_device_start_io(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
enum sci_status scic_sds_remote_device_start_task(
enum sci_status sci_remote_device_start_task(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
enum sci_status scic_sds_remote_device_complete_io(
enum sci_status sci_remote_device_complete_io(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
enum sci_status scic_sds_remote_device_suspend(
enum sci_status sci_remote_device_suspend(
struct isci_remote_device *idev,
u32 suspend_type);
void scic_sds_remote_device_post_request(
void sci_remote_device_post_request(
struct isci_remote_device *idev,
u32 request);
#define scic_sds_remote_device_is_atapi(idev) false
#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */


@ -81,8 +81,8 @@
 * otherwise it will return false: true if the remote node context is in
 * the ready state, false if it is not.
*/
bool scic_sds_remote_node_context_is_ready(
struct scic_sds_remote_node_context *sci_rnc)
bool sci_remote_node_context_is_ready(
struct sci_remote_node_context *sci_rnc)
{
u32 current_state = sci_rnc->sm.current_state_id;
@ -93,15 +93,16 @@ bool scic_sds_remote_node_context_is_ready(
return false;
}
/**
*
* @sci_dev: The remote device to use to construct the RNC buffer.
* @rnc: The buffer into which the remote device data will be copied.
*
 * This method will construct the RNC buffer for this remote device object.
*/
static void scic_sds_remote_node_context_construct_buffer(
struct scic_sds_remote_node_context *sci_rnc)
static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
if (id < ihost->remote_node_entries &&
ihost->device_table[id])
return &ihost->remote_node_context_table[id];
return NULL;
}
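The new sci_rnc_by_id() helper returns NULL when the index is out of range or no device occupies that remote node slot, so a caller that cannot guarantee a live device should check the result. A hedged sketch of that pattern (example_mark_valid() is hypothetical, not part of the driver):

static void example_mark_valid(struct isci_host *ihost, u16 rni)
{
	union scu_remote_node_context *rnc_buffer = sci_rnc_by_id(ihost, rni);

	if (!rnc_buffer)	/* out of range, or no device at this RNi */
		return;
	rnc_buffer->ssp.is_valid = true;
}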
static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
struct domain_device *dev = idev->domain_dev;
@ -110,11 +111,11 @@ static void scic_sds_remote_node_context_construct_buffer(
struct isci_host *ihost;
__le64 sas_addr;
ihost = scic_sds_remote_device_get_controller(idev);
rnc = scic_sds_controller_get_remote_node_context_buffer(ihost, rni);
ihost = sci_remote_device_get_controller(idev);
rnc = sci_rnc_by_id(ihost, rni);
memset(rnc, 0, sizeof(union scu_remote_node_context)
* scic_sds_remote_device_node_count(idev));
* sci_remote_device_node_count(idev));
rnc->ssp.remote_node_index = rni;
rnc->ssp.remote_node_port_width = idev->device_port_width;
@ -135,14 +136,14 @@ static void scic_sds_remote_node_context_construct_buffer(
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
rnc->ssp.connection_occupancy_timeout =
ihost->user_parameters.sds1.stp_max_occupancy_timeout;
ihost->user_parameters.stp_max_occupancy_timeout;
rnc->ssp.connection_inactivity_timeout =
ihost->user_parameters.sds1.stp_inactivity_timeout;
ihost->user_parameters.stp_inactivity_timeout;
} else {
rnc->ssp.connection_occupancy_timeout =
ihost->user_parameters.sds1.ssp_max_occupancy_timeout;
ihost->user_parameters.ssp_max_occupancy_timeout;
rnc->ssp.connection_inactivity_timeout =
ihost->user_parameters.sds1.ssp_inactivity_timeout;
ihost->user_parameters.ssp_inactivity_timeout;
}
rnc->ssp.initial_arbitration_wait_time = 0;
@ -164,8 +165,8 @@ static void scic_sds_remote_node_context_construct_buffer(
* to its ready state. If the remote node context is already setup to
 * transition to its final state then this function does nothing.
*/
static void scic_sds_remote_node_context_setup_to_resume(
struct scic_sds_remote_node_context *sci_rnc,
static void sci_remote_node_context_setup_to_resume(
struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
void *callback_parameter)
{
@ -176,8 +177,8 @@ static void scic_sds_remote_node_context_setup_to_resume(
}
}
static void scic_sds_remote_node_context_setup_to_destory(
struct scic_sds_remote_node_context *sci_rnc,
static void sci_remote_node_context_setup_to_destory(
struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
void *callback_parameter)
{
@ -192,8 +193,8 @@ static void scic_sds_remote_node_context_setup_to_destory(
* This method just calls the user callback function and then resets the
* callback.
*/
static void scic_sds_remote_node_context_notify_user(
struct scic_sds_remote_node_context *rnc)
static void sci_remote_node_context_notify_user(
struct sci_remote_node_context *rnc)
{
if (rnc->user_callback != NULL) {
(*rnc->user_callback)(rnc->user_cookie);
@ -203,99 +204,80 @@ static void scic_sds_remote_node_context_notify_user(
}
}
static void scic_sds_remote_node_context_continue_state_transitions(struct scic_sds_remote_node_context *rnc)
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
scic_sds_remote_node_context_resume(rnc, rnc->user_callback,
sci_remote_node_context_resume(rnc, rnc->user_callback,
rnc->user_cookie);
}
/**
*
* @sci_rnc: The remote node context object that is to be validated.
*
* This method will mark the rnc buffer as being valid and post the request to
 * the hardware.
*/
static void scic_sds_remote_node_context_validate_context_buffer(
struct scic_sds_remote_node_context *sci_rnc)
static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
union scu_remote_node_context *rnc_buffer;
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
struct domain_device *dev = idev->domain_dev;
union scu_remote_node_context *rnc_buffer;
struct isci_host *ihost = idev->owning_port->owning_controller;
rnc_buffer = scic_sds_controller_get_remote_node_context_buffer(
scic_sds_remote_device_get_controller(idev),
sci_rnc->remote_node_index
);
rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
rnc_buffer->ssp.is_valid = true;
if (!idev->is_direct_attached &&
(dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
scic_sds_remote_device_post_request(idev,
SCU_CONTEXT_COMMAND_POST_RNC_96);
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
} else {
scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
if (idev->is_direct_attached) {
scic_sds_port_setup_transports(idev->owning_port,
sci_rnc->remote_node_index);
}
if (idev->is_direct_attached)
sci_port_setup_transports(idev->owning_port,
sci_rnc->remote_node_index);
}
}
/**
*
* @sci_rnc: The remote node context object that is to be invalidated.
*
 * This method will update the RNC buffer and post the invalidate request.
*/
static void scic_sds_remote_node_context_invalidate_context_buffer(
struct scic_sds_remote_node_context *sci_rnc)
static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
union scu_remote_node_context *rnc_buffer;
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
struct isci_host *ihost = idev->owning_port->owning_controller;
rnc_buffer = scic_sds_controller_get_remote_node_context_buffer(
scic_sds_remote_device_get_controller(rnc_to_dev(sci_rnc)),
sci_rnc->remote_node_index);
rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
rnc_buffer->ssp.is_valid = false;
scic_sds_remote_device_post_request(rnc_to_dev(sci_rnc),
SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
sci_remote_device_post_request(rnc_to_dev(sci_rnc),
SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
}
static void scic_sds_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
/* Check to see if we have gotten back to the initial state because
* someone requested to destroy the remote node context object.
*/
if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
scic_sds_remote_node_context_notify_user(rnc);
sci_remote_node_context_notify_user(rnc);
}
}
static void scic_sds_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
struct scic_sds_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
scic_sds_remote_node_context_validate_context_buffer(sci_rnc);
sci_remote_node_context_validate_context_buffer(sci_rnc);
}
static void scic_sds_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
scic_sds_remote_node_context_invalidate_context_buffer(rnc);
sci_remote_node_context_invalidate_context_buffer(rnc);
}
static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct isci_remote_device *idev;
struct domain_device *dev;
@ -310,73 +292,73 @@ static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_st
*/
if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
idev->is_direct_attached)
scic_sds_port_setup_transports(idev->owning_port,
sci_port_setup_transports(idev->owning_port,
rnc->remote_node_index);
scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}
static void scic_sds_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
if (rnc->user_callback)
scic_sds_remote_node_context_notify_user(rnc);
sci_remote_node_context_notify_user(rnc);
}
static void scic_sds_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
scic_sds_remote_node_context_continue_state_transitions(rnc);
sci_remote_node_context_continue_state_transitions(rnc);
}
static void scic_sds_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
scic_sds_remote_node_context_continue_state_transitions(rnc);
sci_remote_node_context_continue_state_transitions(rnc);
}
static const struct sci_base_state scic_sds_remote_node_context_state_table[] = {
static const struct sci_base_state sci_remote_node_context_state_table[] = {
[SCI_RNC_INITIAL] = {
.enter_state = scic_sds_remote_node_context_initial_state_enter,
.enter_state = sci_remote_node_context_initial_state_enter,
},
[SCI_RNC_POSTING] = {
.enter_state = scic_sds_remote_node_context_posting_state_enter,
.enter_state = sci_remote_node_context_posting_state_enter,
},
[SCI_RNC_INVALIDATING] = {
.enter_state = scic_sds_remote_node_context_invalidating_state_enter,
.enter_state = sci_remote_node_context_invalidating_state_enter,
},
[SCI_RNC_RESUMING] = {
.enter_state = scic_sds_remote_node_context_resuming_state_enter,
.enter_state = sci_remote_node_context_resuming_state_enter,
},
[SCI_RNC_READY] = {
.enter_state = scic_sds_remote_node_context_ready_state_enter,
.enter_state = sci_remote_node_context_ready_state_enter,
},
[SCI_RNC_TX_SUSPENDED] = {
.enter_state = scic_sds_remote_node_context_tx_suspended_state_enter,
.enter_state = sci_remote_node_context_tx_suspended_state_enter,
},
[SCI_RNC_TX_RX_SUSPENDED] = {
.enter_state = scic_sds_remote_node_context_tx_rx_suspended_state_enter,
.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
},
[SCI_RNC_AWAIT_SUSPENSION] = { },
};
void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc,
void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
u16 remote_node_index)
{
memset(rnc, 0, sizeof(struct scic_sds_remote_node_context));
memset(rnc, 0, sizeof(struct sci_remote_node_context));
rnc->remote_node_index = remote_node_index;
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
sci_init_sm(&rnc->sm, scic_sds_remote_node_context_state_table, SCI_RNC_INITIAL);
sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}
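Construction only zeroes the structure, records the remote node index, and arms the state machine in SCI_RNC_INITIAL. A sketch of how a remote device would construct its embedded RNC (assuming the idev->rnc embedding used elsewhere in this file; example_device_construct_rnc() is hypothetical):

static void example_device_construct_rnc(struct isci_remote_device *idev,
					 u16 remote_node_index)
{
	/* Zeroes the RNC, records the RNi, and leaves the state machine
	 * in SCI_RNC_INITIAL until a later resume drives it toward READY.
	 */
	sci_remote_node_context_construct(&idev->rnc, remote_node_index);
}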
enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
u32 event_code)
{
enum scis_sds_remote_node_context_states state;
@ -476,7 +458,7 @@ enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remot
}
enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p)
{
@ -485,7 +467,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod
state = sci_rnc->sm.current_state_id;
switch (state) {
case SCI_RNC_INVALIDATING:
scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
return SCI_SUCCESS;
case SCI_RNC_POSTING:
case SCI_RNC_RESUMING:
@ -493,7 +475,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod
case SCI_RNC_TX_SUSPENDED:
case SCI_RNC_TX_RX_SUSPENDED:
case SCI_RNC_AWAIT_SUSPENSION:
scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
return SCI_SUCCESS;
case SCI_RNC_INITIAL:
@ -511,7 +493,7 @@ enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_nod
}
}
enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
u32 suspend_type,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p)
@ -530,7 +512,7 @@ enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node
sci_rnc->suspension_code = suspend_type;
if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
scic_sds_remote_device_post_request(rnc_to_dev(sci_rnc),
sci_remote_device_post_request(rnc_to_dev(sci_rnc),
SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
}
@ -538,7 +520,7 @@ enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node
return SCI_SUCCESS;
}
enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p)
{
@ -550,8 +532,8 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_
if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
return SCI_FAILURE_INVALID_STATE;
scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
scic_sds_remote_node_context_construct_buffer(sci_rnc);
sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
sci_remote_node_context_construct_buffer(sci_rnc);
sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
return SCI_SUCCESS;
case SCI_RNC_POSTING:
@ -567,7 +549,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
struct domain_device *dev = idev->domain_dev;
scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
/* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
@ -584,11 +566,11 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_
return SCI_SUCCESS;
}
case SCI_RNC_TX_RX_SUSPENDED:
scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
return SCI_FAILURE_INVALID_STATE;
case SCI_RNC_AWAIT_SUSPENSION:
scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
return SCI_SUCCESS;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
@ -597,7 +579,7 @@ enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_
}
}
enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq)
{
enum scis_sds_remote_node_context_states state;
@ -622,7 +604,7 @@ enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_nod
return SCI_FAILURE_INVALID_STATE;
}
enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq)
{
enum scis_sds_remote_node_context_states state;
@ -635,7 +617,7 @@ enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_n
return SCI_SUCCESS;
case SCI_RNC_TX_SUSPENDED:
case SCI_RNC_TX_RX_SUSPENDED:
scic_sds_remote_node_context_resume(sci_rnc, NULL, NULL);
sci_remote_node_context_resume(sci_rnc, NULL, NULL);
return SCI_SUCCESS;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),


@ -80,7 +80,7 @@
struct isci_request;
struct isci_remote_device;
struct scic_sds_remote_node_context;
struct sci_remote_node_context;
typedef void (*scics_sds_remote_node_context_callback)(void *);
@ -147,19 +147,19 @@ enum scis_sds_remote_node_context_states {
* This enumeration is used to define the end destination state for the remote
* node context.
*/
enum scic_sds_remote_node_context_destination_state {
enum sci_remote_node_context_destination_state {
SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
};
/**
* struct scic_sds_remote_node_context - This structure contains the data
* struct sci_remote_node_context - This structure contains the data
* associated with the remote node context object. The remote node context
 * (RNC) object models the remote device information necessary to manage
* the silicon RNC.
*/
struct scic_sds_remote_node_context {
struct sci_remote_node_context {
/**
* This field indicates the remote node index (RNI) associated with
* this RNC.
@ -177,7 +177,7 @@ struct scic_sds_remote_node_context {
* state. This can cause an automatic resume on receiving a suspension
* notification.
*/
enum scic_sds_remote_node_context_destination_state destination_state;
enum sci_remote_node_context_destination_state destination_state;
/**
* This field contains the callback function that the user requested to be
@ -197,31 +197,31 @@ struct scic_sds_remote_node_context {
struct sci_base_state_machine sm;
};
void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc,
void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
u16 remote_node_index);
bool scic_sds_remote_node_context_is_ready(
struct scic_sds_remote_node_context *sci_rnc);
bool sci_remote_node_context_is_ready(
struct sci_remote_node_context *sci_rnc);
#define scic_sds_remote_node_context_get_remote_node_index(rcn) \
#define sci_remote_node_context_get_remote_node_index(rcn) \
((rnc)->remote_node_index)
enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
u32 event_code);
enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
void *callback_parameter);
enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
u32 suspend_type,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p);
enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p);
enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq);
enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc,
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq);
#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */


@ -74,8 +74,8 @@
 * just bit position. The return value is the absolute bit position for an
 * available group.
*/
static u32 scic_sds_remote_node_table_get_group_index(
struct scic_remote_node_table *remote_node_table,
static u32 sci_remote_node_table_get_group_index(
struct sci_remote_node_table *remote_node_table,
u32 group_table_index)
{
u32 dword_index;
@ -108,8 +108,8 @@ static u32 scic_sds_remote_node_table_get_group_index(
* This method will clear the group index entry in the specified group index
 * table.
*/
static void scic_sds_remote_node_table_clear_group_index(
struct scic_remote_node_table *remote_node_table,
static void sci_remote_node_table_clear_group_index(
struct sci_remote_node_table *remote_node_table,
u32 group_table_index,
u32 group_index)
{
@ -138,8 +138,8 @@ static void scic_sds_remote_node_table_clear_group_index(
 * This method will set the group index bit entry in the specified group index
 * table.
*/
static void scic_sds_remote_node_table_set_group_index(
struct scic_remote_node_table *remote_node_table,
static void sci_remote_node_table_set_group_index(
struct sci_remote_node_table *remote_node_table,
u32 group_table_index,
u32 group_index)
{
@ -167,8 +167,8 @@ static void scic_sds_remote_node_table_set_group_index(
 * This method will set the remote node to available in the remote node
 * allocation table.
*/
static void scic_sds_remote_node_table_set_node_index(
struct scic_remote_node_table *remote_node_table,
static void sci_remote_node_table_set_node_index(
struct sci_remote_node_table *remote_node_table,
u32 remote_node_index)
{
u32 dword_location;
@ -200,8 +200,8 @@ static void scic_sds_remote_node_table_set_node_index(
* This method clears the remote node index from the table of available remote
 * nodes.
*/
static void scic_sds_remote_node_table_clear_node_index(
struct scic_remote_node_table *remote_node_table,
static void sci_remote_node_table_clear_node_index(
struct sci_remote_node_table *remote_node_table,
u32 remote_node_index)
{
u32 dword_location;
@ -231,8 +231,8 @@ static void scic_sds_remote_node_table_clear_node_index(
*
 * This method clears the entire table slot at the specified slot index.
*/
static void scic_sds_remote_node_table_clear_group(
struct scic_remote_node_table *remote_node_table,
static void sci_remote_node_table_clear_group(
struct sci_remote_node_table *remote_node_table,
u32 group_index)
{
u32 dword_location;
@ -258,8 +258,8 @@ static void scic_sds_remote_node_table_clear_group(
*
 * This method sets an entire remote node group in the remote node table.
*/
static void scic_sds_remote_node_table_set_group(
struct scic_remote_node_table *remote_node_table,
static void sci_remote_node_table_set_group(
struct sci_remote_node_table *remote_node_table,
u32 group_index)
{
u32 dword_location;
@ -288,8 +288,8 @@ static void scic_sds_remote_node_table_set_group(
 * This method will return the group value (the bit values) for the specified
 * remote node group index.
*/
static u8 scic_sds_remote_node_table_get_group_value(
struct scic_remote_node_table *remote_node_table,
static u8 sci_remote_node_table_get_group_value(
struct sci_remote_node_table *remote_node_table,
u32 group_index)
{
u32 dword_location;
@ -313,8 +313,8 @@ static u8 scic_sds_remote_node_table_get_group_value(
*
 * This method will initialize the remote node table for use.
*/
void scic_sds_remote_node_table_initialize(
struct scic_remote_node_table *remote_node_table,
void sci_remote_node_table_initialize(
struct sci_remote_node_table *remote_node_table,
u32 remote_node_entries)
{
u32 index;
@ -342,7 +342,7 @@ void scic_sds_remote_node_table_initialize(
/* Initialize each full DWORD to a FULL SET of remote nodes */
for (index = 0; index < remote_node_entries; index++) {
scic_sds_remote_node_table_set_node_index(remote_node_table, index);
sci_remote_node_table_set_node_index(remote_node_table, index);
}
remote_node_table->group_array_size = (u16)
@ -353,14 +353,14 @@ void scic_sds_remote_node_table_initialize(
/*
* These are all guaranteed to be full slot values so fill them in the
* available sets of 3 remote nodes */
scic_sds_remote_node_table_set_group_index(remote_node_table, 2, index);
sci_remote_node_table_set_group_index(remote_node_table, 2, index);
}
/* Now fill in any remainders that we may find */
if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
scic_sds_remote_node_table_set_group_index(remote_node_table, 1, index);
sci_remote_node_table_set_group_index(remote_node_table, 1, index);
} else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
scic_sds_remote_node_table_set_group_index(remote_node_table, 0, index);
sci_remote_node_table_set_group_index(remote_node_table, 0, index);
}
}
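Initialization marks every remote node index available, files each complete group of three into group table 2, and drops any trailing partial group into table 1 (two free slots) or table 0 (one free slot). A standalone sketch of that remainder math:

#include <stdio.h>

#define NODES_PER_GROUP 3	/* SCU_STP_REMOTE_NODE_COUNT */

int main(void)
{
	unsigned int remote_node_entries = 32;	/* arbitrary demo value */
	unsigned int full_groups = remote_node_entries / NODES_PER_GROUP;
	unsigned int remainder = remote_node_entries % NODES_PER_GROUP;

	printf("%u full group(s) land in group table 2\n", full_groups);
	if (remainder == 2)
		printf("trailing group with 2 free slots lands in table 1\n");
	else if (remainder == 1)
		printf("trailing group with 1 free slot lands in table 0\n");
	else
		printf("no partial trailing group\n");
	return 0;
}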
@ -379,8 +379,8 @@ void scic_sds_remote_node_table_initialize(
 * updated. The return value is the RNi, or an invalid remote node context
 * if an RNi cannot be found.
*/
static u16 scic_sds_remote_node_table_allocate_single_remote_node(
struct scic_remote_node_table *remote_node_table,
static u16 sci_remote_node_table_allocate_single_remote_node(
struct sci_remote_node_table *remote_node_table,
u32 group_table_index)
{
u8 index;
@ -388,12 +388,12 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node(
u32 group_index;
u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
group_index = scic_sds_remote_node_table_get_group_index(
group_index = sci_remote_node_table_get_group_index(
remote_node_table, group_table_index);
/* We could not find an available slot in the table selector 0 */
if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
group_value = scic_sds_remote_node_table_get_group_value(
group_value = sci_remote_node_table_get_group_value(
remote_node_table, group_index);
for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
@ -402,16 +402,16 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node(
remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
+ index);
scic_sds_remote_node_table_clear_group_index(
sci_remote_node_table_clear_group_index(
remote_node_table, group_table_index, group_index
);
scic_sds_remote_node_table_clear_node_index(
sci_remote_node_table_clear_node_index(
remote_node_table, remote_node_index
);
if (group_table_index > 0) {
scic_sds_remote_node_table_set_group_index(
sci_remote_node_table_set_group_index(
remote_node_table, group_table_index - 1, group_index
);
}
@ -436,24 +436,24 @@ static u16 scic_sds_remote_node_table_allocate_single_remote_node(
* The remote node index that represents three consecutive remote node entries
* or an invalid remote node context if none can be found.
*/
static u16 scic_sds_remote_node_table_allocate_triple_remote_node(
struct scic_remote_node_table *remote_node_table,
static u16 sci_remote_node_table_allocate_triple_remote_node(
struct sci_remote_node_table *remote_node_table,
u32 group_table_index)
{
u32 group_index;
u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
group_index = scic_sds_remote_node_table_get_group_index(
group_index = sci_remote_node_table_get_group_index(
remote_node_table, group_table_index);
if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
scic_sds_remote_node_table_clear_group_index(
sci_remote_node_table_clear_group_index(
remote_node_table, group_table_index, group_index
);
scic_sds_remote_node_table_clear_group(
sci_remote_node_table_clear_group(
remote_node_table, group_index
);
}
@ -473,31 +473,31 @@ static u16 scic_sds_remote_node_table_allocate_triple_remote_node(
 * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). The return
 * value is the remote node index that was allocated, or an invalid remote
 * node context if none is available.
*/
u16 scic_sds_remote_node_table_allocate_remote_node(
struct scic_remote_node_table *remote_node_table,
u16 sci_remote_node_table_allocate_remote_node(
struct sci_remote_node_table *remote_node_table,
u32 remote_node_count)
{
u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
remote_node_index =
scic_sds_remote_node_table_allocate_single_remote_node(
sci_remote_node_table_allocate_single_remote_node(
remote_node_table, 0);
if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
remote_node_index =
scic_sds_remote_node_table_allocate_single_remote_node(
sci_remote_node_table_allocate_single_remote_node(
remote_node_table, 1);
}
if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
remote_node_index =
scic_sds_remote_node_table_allocate_single_remote_node(
sci_remote_node_table_allocate_single_remote_node(
remote_node_table, 2);
}
} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
remote_node_index =
scic_sds_remote_node_table_allocate_triple_remote_node(
sci_remote_node_table_allocate_triple_remote_node(
remote_node_table, 2);
}
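Single-node (SSP/SATA) allocations try group table 0 first, then 1, then 2, so fragmented groups are consumed before a full group is broken up; triple (STP) allocations come only from table 2. A standalone sketch of the fallback order (try_alloc_from_table() is a stand-in, not a driver function):

#include <stdio.h>

#define INVALID_RNI 0xffff

/* Pretend tables 0 and 1 are empty and table 2 has one free group. */
static int demo_free_groups[3] = { 0, 0, 1 };

static unsigned int try_alloc_from_table(int table)
{
	if (demo_free_groups[table] > 0) {
		demo_free_groups[table]--;
		return (unsigned int)(table * 100);	/* fake RNi for the demo */
	}
	return INVALID_RNI;
}

int main(void)
{
	unsigned int rni = try_alloc_from_table(0);

	if (rni == INVALID_RNI)
		rni = try_alloc_from_table(1);
	if (rni == INVALID_RNI)
		rni = try_alloc_from_table(2);	/* last resort: split a full group */
	printf("allocated fake RNi %u\n", rni);
	return 0;
}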
@ -511,8 +511,8 @@ u16 scic_sds_remote_node_table_allocate_remote_node(
* This method will free a single remote node index back to the remote node
* table. This routine will update the remote node groups
*/
static void scic_sds_remote_node_table_release_single_remote_node(
struct scic_remote_node_table *remote_node_table,
static void sci_remote_node_table_release_single_remote_node(
struct sci_remote_node_table *remote_node_table,
u16 remote_node_index)
{
u32 group_index;
@ -520,7 +520,7 @@ static void scic_sds_remote_node_table_release_single_remote_node(
group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
group_value = scic_sds_remote_node_table_get_group_value(remote_node_table, group_index);
group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
/*
* Assert that we are not trying to add an entry to a slot that is already
@ -531,22 +531,22 @@ static void scic_sds_remote_node_table_release_single_remote_node(
/*
* There are no entries in this slot so it must be added to the single
* slot table. */
scic_sds_remote_node_table_set_group_index(remote_node_table, 0, group_index);
sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
} else if ((group_value & (group_value - 1)) == 0) {
/*
* There is only one entry in this slot so it must be moved from the
* single slot table to the dual slot table */
scic_sds_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
scic_sds_remote_node_table_set_group_index(remote_node_table, 1, group_index);
sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
} else {
/*
* There are two entries in the slot so it must be moved from the dual
 * slot table to the triple slot table. */
scic_sds_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
scic_sds_remote_node_table_set_group_index(remote_node_table, 2, group_index);
sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
}
scic_sds_remote_node_table_set_node_index(remote_node_table, remote_node_index);
sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
}
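A group always lives in the group table whose index is one less than its number of free slots, so releasing a single node either adds the group to table 0 or promotes it from 0 to 1 or from 1 to 2. A standalone sketch of that promotion rule:

#include <stdio.h>

static int table_for_free_slots(int free_slots)
{
	return free_slots - 1;	/* 1 free -> table 0, 2 -> table 1, 3 -> table 2 */
}

int main(void)
{
	int free_before;

	for (free_before = 0; free_before < 3; free_before++) {
		int free_after = free_before + 1;

		printf("group with %d free slot(s): ", free_before);
		if (free_before == 0)
			printf("add to table %d\n",
			       table_for_free_slots(free_after));
		else
			printf("move from table %d to table %d\n",
			       table_for_free_slots(free_before),
			       table_for_free_slots(free_after));
	}
	return 0;
}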
/**
@ -557,19 +557,19 @@ static void scic_sds_remote_node_table_release_single_remote_node(
* This method will release a group of three consecutive remote nodes back to
* the free remote nodes.
*/
static void scic_sds_remote_node_table_release_triple_remote_node(
struct scic_remote_node_table *remote_node_table,
static void sci_remote_node_table_release_triple_remote_node(
struct sci_remote_node_table *remote_node_table,
u16 remote_node_index)
{
u32 group_index;
group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
scic_sds_remote_node_table_set_group_index(
sci_remote_node_table_set_group_index(
remote_node_table, 2, group_index
);
scic_sds_remote_node_table_set_group(remote_node_table, group_index);
sci_remote_node_table_set_group(remote_node_table, group_index);
}
/**
@ -582,16 +582,16 @@ static void scic_sds_remote_node_table_release_triple_remote_node(
* This method will release the remote node index back into the remote node
* table free pool.
*/
void scic_sds_remote_node_table_release_remote_node_index(
struct scic_remote_node_table *remote_node_table,
void sci_remote_node_table_release_remote_node_index(
struct sci_remote_node_table *remote_node_table,
u32 remote_node_count,
u16 remote_node_index)
{
if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
scic_sds_remote_node_table_release_single_remote_node(
sci_remote_node_table_release_single_remote_node(
remote_node_table, remote_node_index);
} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
scic_sds_remote_node_table_release_triple_remote_node(
sci_remote_node_table_release_triple_remote_node(
remote_node_table, remote_node_index);
}
}


@ -130,11 +130,11 @@
#define SCU_SATA_REMOTE_NODE_COUNT 1
/**
* struct scic_remote_node_table -
* struct sci_remote_node_table -
*
*
*/
struct scic_remote_node_table {
struct sci_remote_node_table {
/**
* This field contains the array size in dwords
*/
@ -172,16 +172,16 @@ struct scic_remote_node_table {
/* --------------------------------------------------------------------------- */
void scic_sds_remote_node_table_initialize(
struct scic_remote_node_table *remote_node_table,
void sci_remote_node_table_initialize(
struct sci_remote_node_table *remote_node_table,
u32 remote_node_entries);
u16 scic_sds_remote_node_table_allocate_remote_node(
struct scic_remote_node_table *remote_node_table,
u16 sci_remote_node_table_allocate_remote_node(
struct sci_remote_node_table *remote_node_table,
u32 remote_node_count);
void scic_sds_remote_node_table_release_remote_node_index(
struct scic_remote_node_table *remote_node_table,
void sci_remote_node_table_release_remote_node_index(
struct sci_remote_node_table *remote_node_table,
u32 remote_node_count,
u16 remote_node_index);

File diff suppressed because it is too large


@ -301,75 +301,75 @@ enum sci_base_request_states {
};
/**
* scic_sds_request_get_controller() -
* sci_request_get_controller() -
*
* This macro will return the controller for this io request object
*/
#define scic_sds_request_get_controller(ireq) \
#define sci_request_get_controller(ireq) \
((ireq)->owning_controller)
/**
* scic_sds_request_get_device() -
* sci_request_get_device() -
*
* This macro will return the device for this io request object
*/
#define scic_sds_request_get_device(ireq) \
#define sci_request_get_device(ireq) \
((ireq)->target_device)
/**
* scic_sds_request_get_port() -
* sci_request_get_port() -
*
* This macro will return the port for this io request object
*/
#define scic_sds_request_get_port(ireq) \
scic_sds_remote_device_get_port(scic_sds_request_get_device(ireq))
#define sci_request_get_port(ireq) \
sci_remote_device_get_port(sci_request_get_device(ireq))
/**
* scic_sds_request_get_post_context() -
* sci_request_get_post_context() -
*
* This macro returns the constructed post context result for the io request.
*/
#define scic_sds_request_get_post_context(ireq) \
#define sci_request_get_post_context(ireq) \
((ireq)->post_context)
/**
* scic_sds_request_get_task_context() -
* sci_request_get_task_context() -
*
* This is a helper macro to return the os handle for this request object.
*/
#define scic_sds_request_get_task_context(request) \
#define sci_request_get_task_context(request) \
((request)->task_context_buffer)
/**
* scic_sds_request_set_status() -
* sci_request_set_status() -
*
* This macro will set the scu hardware status and sci request completion
* status for an io request.
*/
#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
#define sci_request_set_status(request, scu_status_code, sci_status_code) \
{ \
(request)->scu_status = (scu_status_code); \
(request)->sci_status = (sci_status_code); \
}
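sci_request_set_status() records the raw SCU hardware status and the translated sci status on the request in one step. A hedged sketch of a completion path using it (example_complete_good() is hypothetical; SCU_TASK_DONE_GOOD/SCI_SUCCESS are assumed here to be the usual "completed cleanly" pairing):

static void example_complete_good(struct isci_request *ireq)
{
	/* Record both the raw hardware status and the translated status. */
	sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
}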
enum sci_status scic_sds_request_start(struct isci_request *ireq);
enum sci_status scic_sds_io_request_terminate(struct isci_request *ireq);
enum sci_status sci_request_start(struct isci_request *ireq);
enum sci_status sci_io_request_terminate(struct isci_request *ireq);
enum sci_status
scic_sds_io_request_event_handler(struct isci_request *ireq,
sci_io_request_event_handler(struct isci_request *ireq,
u32 event_code);
enum sci_status
scic_sds_io_request_frame_handler(struct isci_request *ireq,
sci_io_request_frame_handler(struct isci_request *ireq,
u32 frame_index);
enum sci_status
scic_sds_task_request_terminate(struct isci_request *ireq);
sci_task_request_terminate(struct isci_request *ireq);
extern enum sci_status
scic_sds_request_complete(struct isci_request *ireq);
sci_request_complete(struct isci_request *ireq);
extern enum sci_status
scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 code);
sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
/* XXX open code in caller */
static inline dma_addr_t
scic_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
{
char *requested_addr = (char *)virt_addr;
@ -500,17 +500,17 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
void isci_terminate_pending_requests(struct isci_host *ihost,
struct isci_remote_device *idev);
enum sci_status
scic_task_request_construct(struct isci_host *ihost,
sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 io_tag,
struct isci_request *ireq);
enum sci_status
scic_task_request_construct_ssp(struct isci_request *ireq);
sci_task_request_construct_ssp(struct isci_request *ireq);
enum sci_status
scic_task_request_construct_sata(struct isci_request *ireq);
sci_task_request_construct_sata(struct isci_request *ireq);
void
scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct isci_request *ireq);
sci_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag);
void sci_smp_request_copy_response(struct isci_request *ireq);
static inline int isci_task_is_ncq_recovery(struct sas_task *task)
{


@ -116,7 +116,7 @@ void isci_sata_set_ncq_tag(
struct isci_request *request = task->lldd_task;
register_fis->sector_count = qc->tag << 3;
scic_stp_io_request_set_ncq_tag(request, qc->tag);
sci_stp_io_request_set_ncq_tag(request, qc->tag);
}
/**
@ -187,7 +187,7 @@ enum sci_status isci_sata_management_task_request_build(struct isci_request *ire
/* core builds the protocol specific request
* based on the h2d fis.
*/
status = scic_task_request_construct_sata(ireq);
status = sci_task_request_construct_sata(ireq);
return status;
}


@ -257,12 +257,12 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
return NULL;
/* let the core do it's construct. */
status = scic_task_request_construct(ihost, idev, tag,
status = sci_task_request_construct(ihost, idev, tag,
ireq);
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
"%s: scic_task_request_construct failed - "
"%s: sci_task_request_construct failed - "
"status = 0x%x\n",
__func__,
status);
@ -272,7 +272,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
/* XXX convert to get this from task->tproto like other drivers */
if (dev->dev_type == SAS_END_DEV) {
isci_tmf->proto = SAS_PROTOCOL_SSP;
status = scic_task_request_construct_ssp(ireq);
status = sci_task_request_construct_ssp(ireq);
if (status != SCI_SUCCESS)
return NULL;
}
@ -332,7 +332,7 @@ int isci_task_execute_tmf(struct isci_host *ihost,
spin_lock_irqsave(&ihost->scic_lock, flags);
/* start the TMF io. */
status = scic_controller_start_task(ihost, idev, ireq);
status = sci_controller_start_task(ihost, idev, ireq);
if (status != SCI_TASK_SUCCESS) {
dev_warn(&ihost->pdev->dev,
@ -364,7 +364,7 @@ int isci_task_execute_tmf(struct isci_host *ihost,
if (tmf->cb_state_func != NULL)
tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
scic_controller_terminate_request(ihost,
sci_controller_terminate_request(ihost,
idev,
ireq);
@ -556,7 +556,7 @@ static void isci_terminate_request_core(struct isci_host *ihost,
if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
was_terminated = true;
needs_cleanup_handling = true;
status = scic_controller_terminate_request(ihost,
status = sci_controller_terminate_request(ihost,
idev,
isci_request);
}
@ -569,7 +569,7 @@ static void isci_terminate_request_core(struct isci_host *ihost,
*/
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev,
"%s: scic_controller_terminate_request"
"%s: sci_controller_terminate_request"
" returned = 0x%x\n",
__func__, status);
@ -1251,7 +1251,7 @@ isci_task_request_complete(struct isci_host *ihost,
/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
tmf_complete = tmf->complete;
scic_controller_complete_io(ihost, ireq->target_device, ireq);
sci_controller_complete_io(ihost, ireq->target_device, ireq);
/* set the 'terminated' flag handle to make sure it cannot be terminated
* or completed again.
*/
@ -1514,12 +1514,12 @@ static int isci_reset_device(struct isci_host *ihost,
dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
spin_lock_irqsave(&ihost->scic_lock, flags);
status = scic_remote_device_reset(idev);
status = sci_remote_device_reset(idev);
if (status != SCI_SUCCESS) {
spin_unlock_irqrestore(&ihost->scic_lock, flags);
dev_warn(&ihost->pdev->dev,
"%s: scic_remote_device_reset(%p) returned %d!\n",
"%s: sci_remote_device_reset(%p) returned %d!\n",
__func__, idev, status);
return TMF_RESP_FUNC_FAILED;
@ -1540,7 +1540,7 @@ static int isci_reset_device(struct isci_host *ihost,
/* Since all pending TCs have been cleaned, resume the RNC. */
spin_lock_irqsave(&ihost->scic_lock, flags);
status = scic_remote_device_reset_complete(idev);
status = sci_remote_device_reset_complete(idev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
/* If this is a device on an expander, bring the phy back up. */
@ -1560,7 +1560,7 @@ static int isci_reset_device(struct isci_host *ihost,
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
"%s: scic_remote_device_reset_complete(%p) "
"%s: sci_remote_device_reset_complete(%p) "
"returned %d!\n", __func__, idev, status);
}


@ -57,10 +57,10 @@
#include "unsolicited_frame_control.h"
#include "registers.h"
int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost)
int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
{
struct scic_sds_unsolicited_frame_control *uf_control = &ihost->uf_control;
struct scic_sds_unsolicited_frame *uf;
struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
struct sci_unsolicited_frame *uf;
u32 buf_len, header_len, i;
dma_addr_t dma;
size_t size;
@ -139,23 +139,14 @@ int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost)
return 0;
}
/**
* This method returns the frame header for the specified frame index.
* @uf_control:
* @frame_index:
* @frame_header:
*
* enum sci_status
*/
enum sci_status scic_sds_unsolicited_frame_control_get_header(
struct scic_sds_unsolicited_frame_control *uf_control,
u32 frame_index,
void **frame_header)
enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
u32 frame_index,
void **frame_header)
{
if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
/*
* Skip the first word in the frame since this is a control word used
* by the hardware. */
/* Skip the first word in the frame since this is a control word used
* by the hardware.
*/
*frame_header = &uf_control->buffers.array[frame_index].header->data;
return SCI_SUCCESS;
@ -164,18 +155,9 @@ enum sci_status scic_sds_unsolicited_frame_control_get_header(
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
}
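Both accessors validate the frame index and hand back a pointer into the preallocated header or buffer arrays. A hedged sketch of a caller that checks the returned status (example_peek_frame() is hypothetical, not part of the driver):

static enum sci_status example_peek_frame(struct isci_host *ihost,
					  u32 frame_index)
{
	void *frame_header;
	enum sci_status status;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  &frame_header);
	if (status != SCI_SUCCESS)
		return status;	/* frame_index was out of range */

	/* ... decode the frame header here ... */
	return SCI_SUCCESS;
}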
/**
* This method returns the frame buffer for the specified frame index.
* @uf_control:
* @frame_index:
* @frame_buffer:
*
* enum sci_status
*/
enum sci_status scic_sds_unsolicited_frame_control_get_buffer(
struct scic_sds_unsolicited_frame_control *uf_control,
u32 frame_index,
void **frame_buffer)
enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
u32 frame_index,
void **frame_buffer)
{
if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
*frame_buffer = uf_control->buffers.array[frame_index].buffer;
@ -186,19 +168,8 @@ enum sci_status scic_sds_unsolicited_frame_control_get_buffer(
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
}
/**
* This method releases the frame once this is done the frame is available for
* re-use by the hardware. The data contained in the frame header and frame
* buffer is no longer valid.
* @uf_control: This parameter specifies the UF control object
* @frame_index: This parameter specifies the frame index to attempt to release.
*
* This method returns an indication to the caller as to whether the
* unsolicited frame get pointer should be updated.
*/
bool scic_sds_unsolicited_frame_control_release_frame(
struct scic_sds_unsolicited_frame_control *uf_control,
u32 frame_index)
bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
u32 frame_index)
{
u32 frame_get;
u32 frame_cycle;


@ -92,12 +92,12 @@ enum unsolicited_frame_state {
};
/**
* struct scic_sds_unsolicited_frame -
* struct sci_unsolicited_frame -
*
 * This is the unsolicited frame data structure; it acts as the container for
 * the current frame state, frame header, and frame buffer.
*/
struct scic_sds_unsolicited_frame {
struct sci_unsolicited_frame {
/**
* This field contains the current frame state
*/
@ -116,11 +116,11 @@ struct scic_sds_unsolicited_frame {
};
/**
* struct scic_sds_uf_header_array -
* struct sci_uf_header_array -
*
* This structure contains all of the unsolicited frame header information.
*/
struct scic_sds_uf_header_array {
struct sci_uf_header_array {
/**
* This field is represents a virtual pointer to the start
* address of the UF address table. The table contains
@ -137,19 +137,19 @@ struct scic_sds_uf_header_array {
};
/**
* struct scic_sds_uf_buffer_array -
* struct sci_uf_buffer_array -
*
* This structure contains all of the unsolicited frame buffer (actual payload)
* information.
*/
struct scic_sds_uf_buffer_array {
struct sci_uf_buffer_array {
/**
 * This field is the unsolicited frame data; it is used to manage
 * the data for the unsolicited frame requests. It also represents
* the virtual address location that corresponds to the
* physical_address field.
*/
struct scic_sds_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
/**
* This field specifies the physical address location for the UF
@ -159,13 +159,13 @@ struct scic_sds_uf_buffer_array {
};
/**
* struct scic_sds_uf_address_table_array -
* struct sci_uf_address_table_array -
*
* This object maintains all of the unsolicited frame address table specific
* data. The address table is a collection of 64-bit pointers that point to
* 1KB buffers into which the silicon will DMA unsolicited frames.
*/
struct scic_sds_uf_address_table_array {
struct sci_uf_address_table_array {
/**
* This field represents a virtual pointer that refers to the
* starting address of the UF address table.
@ -182,11 +182,11 @@ struct scic_sds_uf_address_table_array {
};
/**
* struct scic_sds_unsolicited_frame_control -
* struct sci_unsolicited_frame_control -
*
* This object contains all of the data necessary to handle unsolicited frames.
*/
struct scic_sds_unsolicited_frame_control {
struct sci_unsolicited_frame_control {
/**
* This field is the software copy of the unsolicited frame queue
* get pointer. The controller object writes this value to the
@ -198,38 +198,38 @@ struct scic_sds_unsolicited_frame_control {
* This field contains all of the unsolicited frame header
* specific fields.
*/
struct scic_sds_uf_header_array headers;
struct sci_uf_header_array headers;
/**
* This field contains all of the unsolicited frame buffer
* specific fields.
*/
struct scic_sds_uf_buffer_array buffers;
struct sci_uf_buffer_array buffers;
/**
* This field contains all of the unsolicited frame address table
* specific fields.
*/
struct scic_sds_uf_address_table_array address_table;
struct sci_uf_address_table_array address_table;
};
struct isci_host;
int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost);
int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
enum sci_status scic_sds_unsolicited_frame_control_get_header(
struct scic_sds_unsolicited_frame_control *uf_control,
enum sci_status sci_unsolicited_frame_control_get_header(
struct sci_unsolicited_frame_control *uf_control,
u32 frame_index,
void **frame_header);
enum sci_status scic_sds_unsolicited_frame_control_get_buffer(
struct scic_sds_unsolicited_frame_control *uf_control,
enum sci_status sci_unsolicited_frame_control_get_buffer(
struct sci_unsolicited_frame_control *uf_control,
u32 frame_index,
void **frame_buffer);
bool scic_sds_unsolicited_frame_control_release_frame(
struct scic_sds_unsolicited_frame_control *uf_control,
bool sci_unsolicited_frame_control_release_frame(
struct sci_unsolicited_frame_control *uf_control,
u32 frame_index);
#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */