4 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
6 * Author: Arnd Bergmann <arndb@de.ibm.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #include <linux/kref.h>
26 #include <linux/mutex.h>
27 #include <linux/spinlock.h>
31 #include <asm/spu_csa.h>
32 #include <asm/spu_info.h>
/* The magic number for our file system */
SPUFS_MAGIC = 0x23c9b64e, /* NOTE(review): presumably reported as the superblock magic -- confirm */
struct spu_context_ops;
/* Fields of the per-context state (struct spu_context). */
struct spu *spu; /* pointer to a physical SPU */
struct spu_state csa; /* SPU context save area. */
spinlock_t mmio_lock; /* protects mmio access */
struct address_space *local_store; /* local store mapping. */
struct address_space *mfc; /* 'mfc' area mappings. */
struct address_space *cntl; /* 'control' area mappings. */
struct address_space *signal1; /* 'signal1' area mappings. */
struct address_space *signal2; /* 'signal2' area mappings. */
struct address_space *mss; /* 'mss' area mappings. */
struct address_space *psmap; /* 'psmap' area mappings. */
u64 object_id; /* user space pointer for oprofile */
enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; /* loaded on an SPU vs. held in csa */
struct mutex state_mutex; /* guards 'state'; taken via spu_acquire()/spu_release() */
struct semaphore run_sema; /* NOTE(review): presumably serializes spufs_run_spu() -- confirm */
struct mm_struct *owner; /* NOTE(review): presumably the owning task's mm -- confirm */
/* Wait queues -- presumably woken from the spufs_*_callback() irq
 * handlers declared near the end of this header; confirm. */
wait_queue_head_t ibox_wq;
wait_queue_head_t wbox_wq;
wait_queue_head_t stop_wq;
wait_queue_head_t mfc_wq;
/* fasync (SIGIO) notification state for the corresponding files. */
struct fasync_struct *ibox_fasync;
struct fasync_struct *wbox_fasync;
struct fasync_struct *mfc_fasync;
struct spu_context_ops *ops; /* backend vtable: spu_hw_ops or spu_backing_ops (see below) */
struct work_struct reap_work;
unsigned long event_return; /* NOTE(review): presumably event bits reported to userspace -- confirm */
struct list_head gang_list; /* membership link; see spu_gang_add_ctx()/spu_gang_remove_ctx() */
struct spu_gang *gang; /* gang this context belongs to, if any */
/* scheduler fields */
struct list_head list;
/*
 * One MFC DMA command as handed to ops->send_mfc_command().
 * NOTE(review): field layout presumably mirrors the hardware MFC
 * command-parameter format -- confirm against the CBE documentation.
 */
struct mfc_dma_command {
int32_t pad; /* reserved */
uint32_t lsa; /* local storage address */
uint64_t ea; /* effective address */
uint16_t size; /* transfer size */
uint16_t tag; /* command tag */
uint16_t class; /* class ID */
uint16_t cmd; /* command opcode */
/* SPU context query/set operations. */
/*
 * Backend vtable for accessing a context's mailboxes, signals, npc,
 * run control and MFC.  Two implementations are declared below:
 * spu_hw_ops and spu_backing_ops (names suggest direct-hardware vs.
 * saved-state access -- confirm).
 */
struct spu_context_ops {
int (*mbox_read) (struct spu_context * ctx, u32 * data);
u32(*mbox_stat_read) (struct spu_context * ctx);
unsigned int (*mbox_stat_poll)(struct spu_context *ctx,
unsigned int events);
int (*ibox_read) (struct spu_context * ctx, u32 * data);
int (*wbox_write) (struct spu_context * ctx, u32 data);
u32(*signal1_read) (struct spu_context * ctx);
void (*signal1_write) (struct spu_context * ctx, u32 data);
u32(*signal2_read) (struct spu_context * ctx);
void (*signal2_write) (struct spu_context * ctx, u32 data);
void (*signal1_type_set) (struct spu_context * ctx, u64 val);
u64(*signal1_type_get) (struct spu_context * ctx);
void (*signal2_type_set) (struct spu_context * ctx, u64 val);
u64(*signal2_type_get) (struct spu_context * ctx);
u32(*npc_read) (struct spu_context * ctx);
void (*npc_write) (struct spu_context * ctx, u32 data);
u32(*status_read) (struct spu_context * ctx);
char*(*get_ls) (struct spu_context * ctx);
u32 (*runcntl_read) (struct spu_context * ctx);
void (*runcntl_write) (struct spu_context * ctx, u32 data);
void (*master_start) (struct spu_context * ctx);
void (*master_stop) (struct spu_context * ctx);
int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
u32 (*get_mfc_free_elements)(struct spu_context *ctx);
int (*send_mfc_command)(struct spu_context * ctx,
struct mfc_dma_command * cmd);
void (*dma_info_read) (struct spu_context * ctx,
struct spu_dma_info * info);
void (*proxydma_info_read) (struct spu_context * ctx,
struct spu_proxydma_info * info);
/* The two implementations of the ops table above. */
extern struct spu_context_ops spu_hw_ops;
extern struct spu_context_ops spu_backing_ops;
/*
 * spufs-private inode: embeds the VFS inode together with pointers to
 * the context and gang the file refers to.
 */
struct spufs_inode_info {
struct spu_context *i_ctx;
struct spu_gang *i_gang;
struct inode vfs_inode;
/* Map a VFS inode back to its containing spufs_inode_info. */
#define SPUFS_I(inode) \
container_of(inode, struct spufs_inode_info, vfs_inode)
/* Directory content tables for scheduled and non-scheduled contexts. */
extern struct tree_descr spufs_dir_contents[];
extern struct tree_descr spufs_dir_nosched_contents[];
/* system call implementation */
long spufs_run_spu(struct file *file,
struct spu_context *ctx, u32 *npc, u32 *status);
long spufs_create(struct nameidata *nd,
unsigned int flags, mode_t mode);
extern const struct file_operations spufs_context_fops;
/* gang management */
struct spu_gang *alloc_spu_gang(void);
struct spu_gang *get_spu_gang(struct spu_gang *gang);
int put_spu_gang(struct spu_gang *gang);
void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
/* context management */
/* Take the context's state_mutex; paired with spu_release(). */
static inline void spu_acquire(struct spu_context *ctx)
mutex_lock(&ctx->state_mutex);
/* Drop the state_mutex taken by spu_acquire(). */
static inline void spu_release(struct spu_context *ctx)
mutex_unlock(&ctx->state_mutex);
struct spu_context * alloc_spu_context(struct spu_gang *gang);
/* kref release function -- NOTE(review): presumably reached via put_spu_context(); confirm */
void destroy_spu_context(struct kref *kref);
struct spu_context * get_spu_context(struct spu_context *ctx);
int put_spu_context(struct spu_context *ctx);
void spu_unmap_mappings(struct spu_context *ctx);
void spu_forget(struct spu_context *ctx);
int spu_acquire_runnable(struct spu_context *ctx);
void spu_acquire_saved(struct spu_context *ctx);
int spu_acquire_exclusive(struct spu_context *ctx);
/* scheduler interface */
int spu_activate(struct spu_context *ctx, u64 flags);
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
int __init spu_sched_init(void);
void __exit spu_sched_exit(void);
extern char *isolated_loader;
/*
 * Same as wait_event_interruptible(), except that here
 * we need to call spu_release(ctx) before sleeping, and
 * then spu_acquire(ctx) when awoken.
 */
#define spufs_wait(wq, condition) \
DEFINE_WAIT(__wait); \
prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \
if (!signal_pending(current)) { \
__ret = -ERESTARTSYS; \
finish_wait(&(wq), &__wait); \
/* Mailbox helpers used by the file operations. */
size_t spu_wbox_write(struct spu_context *ctx, u32 data);
size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
/* irq callback funcs. */
void spufs_ibox_callback(struct spu *spu);
void spufs_wbox_callback(struct spu *spu);
void spufs_stop_callback(struct spu *spu);
void spufs_mfc_callback(struct spu *spu);
void spufs_dma_callback(struct spu *spu, int type);
extern struct spu_coredump_calls spufs_coredump_calls;
/*
 * One entry of the core-dump table: reads a context attribute either
 * as a byte stream (read) or as a single 64-bit value (get).
 */
struct spufs_coredump_reader {
ssize_t (*read)(struct spu_context *ctx,
char __user *buffer, size_t size, loff_t *pos);
u64 (*get)(void *data);
/* Table of readers and its length, used when dumping SPU state. */
extern struct spufs_coredump_reader spufs_coredump_read[];
extern int spufs_coredump_num_notes;