* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6: (116 commits)
V4L/DVB (13698): pms: replace asm/uaccess.h with linux/uaccess.h
V4L/DVB (13690): radio/si470x: #include <sched.h>
V4L/DVB (13688): au8522: modify the attributes of local filter coefficients
V4L/DVB (13687): cx231xx: use NULL when pointer is needed
V4L/DVB: Davinci VPFE Capture: remove unused #include <linux/version.h>
V4L/DVB (13685): Correct code taking the size of a pointer
V4L/DVB (13684): Fix some cut-and-paste noise in dib0090.h
V4L/DVB (13683): sanio-ms: clean up init, exit and id_table
V4L/DVB (13682): dib8000: make some constants static
V4L/DVB: lgs8gxx: Use shifts rather than multiply/divide when possible
V4L/DVB (13680b): DocBook/media: create links for included sources
V4L/DVB (13680a): DocBook/media: copy images after building HTML
V4L/DVB (13678): Add support for yet more DvbWorld, TeVii and Prof USB devices
V4L/DVB (13676): configurable IRQ mode on NetUP Dual DVB-S2 CI; IRQ from CAM processing (CI interface works faster)
V4L/DVB (13674): stv090x: Add DiSEqC envelope mode
V4L/DVB (13673): lnbp21: Implement 22 kHz tone control
V4L/DVB (13671): sh_mobile_ceu_camera: Remove frame size page alignment
V4L/DVB (13670): soc-camera: Add mt9t112 camera driver
V4L/DVB (13669): tw9910: Add sync polarity support
V4L/DVB (13668): tw9910: remove cropping
...
DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
- procfs-guide.xml writing_usb_driver.xml networking.xml \
+ writing_usb_driver.xml networking.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
DOCPROC = $(objtree)/scripts/basic/docproc
XMLTOFLAGS = -m $(srctree)/Documentation/DocBook/stylesheet.xsl
-#XMLTOFLAGS += --skip-validation
+XMLTOFLAGS += --skip-validation
###
# DOCPROC is used for two purposes:
# Changes in kernel-doc force a rebuild of all documentation
$(BOOKS): $(KERNELDOC)
-###
-# procfs guide uses a .c file as example code.
-# This requires an explicit dependency
-C-procfs-example = procfs_example.xml
-C-procfs-example2 = $(addprefix $(obj)/,$(C-procfs-example))
-$(obj)/procfs-guide.xml: $(C-procfs-example2)
-
-# List of programs to build
-##oops, this is a kernel module::hostprogs-y := procfs_example
-obj-m += procfs_example.o
-
# Tell kbuild to always build the programs
always := $(hostprogs-y)
$(patsubst %.xml, %.pdf, $(DOCBOOKS)) \
$(patsubst %.xml, %.html, $(DOCBOOKS)) \
$(patsubst %.xml, %.9, $(DOCBOOKS)) \
- $(C-procfs-example) $(index)
+ $(index)
clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
-<!ENTITY procfsexample SYSTEM "procfs_example.xml">
-]>
-
-<book id="LKProcfsGuide">
- <bookinfo>
- <title>Linux Kernel Procfs Guide</title>
-
- <authorgroup>
- <author>
- <firstname>Erik</firstname>
- <othername>(J.A.K.)</othername>
- <surname>Mouw</surname>
- <affiliation>
- <address>
- <email>mouw@nl.linux.org</email>
- </address>
- </affiliation>
- </author>
- <othercredit>
- <contrib>
- This software and documentation were written while working on the
- LART computing board
- (<ulink url="http://www.lartmaker.nl/">http://www.lartmaker.nl/</ulink>),
- which was sponsored by the Delft University of Technology projects
- Mobile Multi-media Communications and Ubiquitous Communications.
- </contrib>
- </othercredit>
- </authorgroup>
-
- <revhistory>
- <revision>
- <revnumber>1.0</revnumber>
- <date>May 30, 2001</date>
- <revremark>Initial revision posted to linux-kernel</revremark>
- </revision>
- <revision>
- <revnumber>1.1</revnumber>
- <date>June 3, 2001</date>
- <revremark>Revised after comments from linux-kernel</revremark>
- </revision>
- </revhistory>
-
- <copyright>
- <year>2001</year>
- <holder>Erik Mouw</holder>
- </copyright>
-
-
- <legalnotice>
- <para>
- This documentation is free software; you can redistribute it
- and/or modify it under the terms of the GNU General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later
- version.
- </para>
-
- <para>
- This documentation is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- PURPOSE. See the GNU General Public License for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
-
- <para>
- For more details see the file COPYING in the source
- distribution of Linux.
- </para>
- </legalnotice>
- </bookinfo>
-
-
-
-
- <toc>
- </toc>
-
-
-
-
- <preface id="Preface">
- <title>Preface</title>
-
- <para>
- This guide describes the use of the procfs file system from
- within the Linux kernel. The idea to write this guide came up on
- the #kernelnewbies IRC channel (see <ulink
- url="http://www.kernelnewbies.org/">http://www.kernelnewbies.org/</ulink>),
- when Jeff Garzik explained the use of procfs and forwarded me a
- message Alexander Viro wrote to the linux-kernel mailing list. I
- agreed to write it up nicely, so here it is.
- </para>
-
- <para>
- I'd like to thank Jeff Garzik
- <email>jgarzik@pobox.com</email> and Alexander Viro
- <email>viro@parcelfarce.linux.theplanet.co.uk</email> for their input,
- Tim Waugh <email>twaugh@redhat.com</email> for his <ulink
- url="http://people.redhat.com/twaugh/docbook/selfdocbook/">Selfdocbook</ulink>,
- and Marc Joosen <email>marcj@historia.et.tudelft.nl</email> for
- proofreading.
- </para>
-
- <para>
- Erik
- </para>
- </preface>
-
-
-
-
- <chapter id="intro">
- <title>Introduction</title>
-
- <para>
- The <filename class="directory">/proc</filename> file system
- (procfs) is a special file system in the linux kernel. It's a
- virtual file system: it is not associated with a block device
- but exists only in memory. The files in the procfs are there to
- allow userland programs access to certain information from the
- kernel (like process information in <filename
- class="directory">/proc/[0-9]+/</filename>), but also for debug
- purposes (like <filename>/proc/ksyms</filename>).
- </para>
-
- <para>
- This guide describes the use of the procfs file system from
- within the Linux kernel. It starts by introducing all relevant
- functions to manage the files within the file system. After that
- it shows how to communicate with userland, and some tips and
- tricks will be pointed out. Finally a complete example will be
- shown.
- </para>
-
- <para>
- Note that the files in <filename
- class="directory">/proc/sys</filename> are sysctl files: they
- don't belong to procfs and are governed by a completely
- different API described in the Kernel API book.
- </para>
- </chapter>
-
-
-
-
- <chapter id="managing">
- <title>Managing procfs entries</title>
-
- <para>
- This chapter describes the functions that various kernel
- components use to populate the procfs with files, symlinks,
- device nodes, and directories.
- </para>
-
- <para>
- A minor note before we start: if you want to use any of the
- procfs functions, be sure to include the correct header file!
- This should be one of the first lines in your code:
- </para>
-
- <programlisting>
-#include <linux/proc_fs.h>
- </programlisting>
-
-
-
-
- <sect1 id="regularfile">
- <title>Creating a regular file</title>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>struct proc_dir_entry* <function>create_proc_entry</function></funcdef>
- <paramdef>const char* <parameter>name</parameter></paramdef>
- <paramdef>mode_t <parameter>mode</parameter></paramdef>
- <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- This function creates a regular file with the name
- <parameter>name</parameter>, file mode
- <parameter>mode</parameter> in the directory
- <parameter>parent</parameter>. To create a file in the root of
- the procfs, use <constant>NULL</constant> as
- <parameter>parent</parameter> parameter. When successful, the
- function will return a pointer to the freshly created
- <structname>struct proc_dir_entry</structname>; otherwise it
- will return <constant>NULL</constant>. <xref
- linkend="userland"/> describes how to do something useful with
- regular files.
- </para>
-
- <para>
- Note that it is specifically supported that you can pass a
- path that spans multiple directories. For example
- <function>create_proc_entry</function>(<parameter>"drivers/via0/info"</parameter>)
- will create the <filename class="directory">via0</filename>
- directory if necessary, with standard
- <constant>0755</constant> permissions.
- </para>
-
- <para>
- If you only want to be able to read the file, the function
- <function>create_proc_read_entry</function> described in <xref
- linkend="convenience"/> may be used to create and initialise
- the procfs entry in one single call.
- </para>
- </sect1>
-
-
-
-
- <sect1 id="Creating_a_symlink">
- <title>Creating a symlink</title>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>struct proc_dir_entry*
- <function>proc_symlink</function></funcdef> <paramdef>const
- char* <parameter>name</parameter></paramdef>
- <paramdef>struct proc_dir_entry*
- <parameter>parent</parameter></paramdef> <paramdef>const
- char* <parameter>dest</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- This creates a symlink in the procfs directory
- <parameter>parent</parameter> that points from
- <parameter>name</parameter> to
- <parameter>dest</parameter>. This translates in userland to
- <literal>ln -s</literal> <parameter>dest</parameter>
- <parameter>name</parameter>.
- </para>
- </sect1>
-
- <sect1 id="Creating_a_directory">
- <title>Creating a directory</title>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>struct proc_dir_entry* <function>proc_mkdir</function></funcdef>
- <paramdef>const char* <parameter>name</parameter></paramdef>
- <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- Create a directory <parameter>name</parameter> in the procfs
- directory <parameter>parent</parameter>.
- </para>
- </sect1>
-
-
-
-
- <sect1 id="Removing_an_entry">
- <title>Removing an entry</title>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>void <function>remove_proc_entry</function></funcdef>
- <paramdef>const char* <parameter>name</parameter></paramdef>
- <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- Removes the entry <parameter>name</parameter> in the directory
- <parameter>parent</parameter> from the procfs. Entries are
- removed by their <emphasis>name</emphasis>, not by the
- <structname>struct proc_dir_entry</structname> returned by the
- various create functions. Note that this function doesn't
- recursively remove entries.
- </para>
-
- <para>
- Be sure to free the <structfield>data</structfield> entry from
- the <structname>struct proc_dir_entry</structname> before
- <function>remove_proc_entry</function> is called (that is: if
- there was some <structfield>data</structfield> allocated, of
- course). See <xref linkend="usingdata"/> for more information
- on using the <structfield>data</structfield> entry.
- </para>
- </sect1>
- </chapter>
-
-
-
-
- <chapter id="userland">
- <title>Communicating with userland</title>
-
- <para>
- Instead of reading (or writing) information directly from
- kernel memory, procfs works with <emphasis>call back
- functions</emphasis> for files: functions that are called when
- a specific file is being read or written. Such functions have
- to be initialised after the procfs file is created by setting
- the <structfield>read_proc</structfield> and/or
- <structfield>write_proc</structfield> fields in the
- <structname>struct proc_dir_entry*</structname> that the
- function <function>create_proc_entry</function> returned:
- </para>
-
- <programlisting>
-struct proc_dir_entry* entry;
-
-entry->read_proc = read_proc_foo;
-entry->write_proc = write_proc_foo;
- </programlisting>
-
- <para>
- If you only want to use the
- <structfield>read_proc</structfield>, the function
- <function>create_proc_read_entry</function> described in <xref
- linkend="convenience"/> may be used to create and initialise the
- procfs entry in one single call.
- </para>
-
-
-
- <sect1 id="Reading_data">
- <title>Reading data</title>
-
- <para>
- The read function is a call back function that allows userland
- processes to read data from the kernel. The read function
- should have the following format:
- </para>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>read_func</function></funcdef>
- <paramdef>char* <parameter>buffer</parameter></paramdef>
- <paramdef>char** <parameter>start</parameter></paramdef>
- <paramdef>off_t <parameter>off</parameter></paramdef>
- <paramdef>int <parameter>count</parameter></paramdef>
- <paramdef>int* <parameter>peof</parameter></paramdef>
- <paramdef>void* <parameter>data</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- The read function should write its information into the
- <parameter>buffer</parameter>, which will be exactly
- <literal>PAGE_SIZE</literal> bytes long.
- </para>
-
- <para>
- The parameter
- <parameter>peof</parameter> should be used to signal that the
- end of the file has been reached by writing
- <literal>1</literal> to the memory location
- <parameter>peof</parameter> points to.
- </para>
-
- <para>
- The <parameter>data</parameter>
- parameter can be used to create a single call back function for
- several files, see <xref linkend="usingdata"/>.
- </para>
-
- <para>
- The rest of the parameters and the return value are described
- by a comment in <filename>fs/proc/generic.c</filename> as follows:
- </para>
-
- <blockquote>
- <para>
- You have three ways to return data:
- </para>
- <orderedlist>
- <listitem>
- <para>
- Leave <literal>*start = NULL</literal>. (This is the default.)
- Put the data of the requested offset at that
- offset within the buffer. Return the number (<literal>n</literal>)
- of bytes there are from the beginning of the
- buffer up to the last byte of data. If the
- number of supplied bytes (<literal>= n - offset</literal>) is
- greater than zero and you didn't signal eof
- and the reader is prepared to take more data
- you will be called again with the requested
- offset advanced by the number of bytes
- absorbed. This interface is useful for files
- no larger than the buffer.
- </para>
- </listitem>
- <listitem>
- <para>
- Set <literal>*start</literal> to an unsigned long value less than
- the buffer address but greater than zero.
- Put the data of the requested offset at the
- beginning of the buffer. Return the number of
- bytes of data placed there. If this number is
- greater than zero and you didn't signal eof
- and the reader is prepared to take more data
- you will be called again with the requested
- offset advanced by <literal>*start</literal>. This interface is
- useful when you have a large file consisting
- of a series of blocks which you want to count
- and return as wholes.
- (Hack by Paul.Russell@rustcorp.com.au)
- </para>
- </listitem>
- <listitem>
- <para>
- Set <literal>*start</literal> to an address within the buffer.
- Put the data of the requested offset at <literal>*start</literal>.
- Return the number of bytes of data placed there.
- If this number is greater than zero and you
- didn't signal eof and the reader is prepared to
- take more data you will be called again with the
- requested offset advanced by the number of bytes
- absorbed.
- </para>
- </listitem>
- </orderedlist>
- </blockquote>
-
- <para>
- <xref linkend="example"/> shows how to use a read call back
- function.
- </para>
- </sect1>
-
-
-
-
- <sect1 id="Writing_data">
- <title>Writing data</title>
-
- <para>
- The write call back function allows a userland process to write
- data to the kernel, so it has some kind of control over the
- kernel. The write function should have the following format:
- </para>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>write_func</function></funcdef>
- <paramdef>struct file* <parameter>file</parameter></paramdef>
- <paramdef>const char* <parameter>buffer</parameter></paramdef>
- <paramdef>unsigned long <parameter>count</parameter></paramdef>
- <paramdef>void* <parameter>data</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- The write function should read <parameter>count</parameter>
- bytes at maximum from the <parameter>buffer</parameter>. Note
- that the <parameter>buffer</parameter> doesn't live in the
- kernel's memory space, so it should first be copied to kernel
- space with <function>copy_from_user</function>. The
- <parameter>file</parameter> parameter is usually
- ignored. <xref linkend="usingdata"/> shows how to use the
- <parameter>data</parameter> parameter.
- </para>
-
- <para>
- Again, <xref linkend="example"/> shows how to use this call back
- function.
- </para>
- </sect1>
-
-
-
-
- <sect1 id="usingdata">
- <title>A single call back for many files</title>
-
- <para>
- When a large number of almost identical files is used, it's
- quite inconvenient to use a separate call back function for
- each file. A better approach is to have a single call back
- function that distinguishes between the files by using the
- <structfield>data</structfield> field in <structname>struct
- proc_dir_entry</structname>. First of all, the
- <structfield>data</structfield> field has to be initialised:
- </para>
-
- <programlisting>
-struct proc_dir_entry* entry;
-struct my_file_data *file_data;
-
-file_data = kmalloc(sizeof(struct my_file_data), GFP_KERNEL);
-entry->data = file_data;
- </programlisting>
-
- <para>
- The <structfield>data</structfield> field is a <type>void
- *</type>, so it can be initialised with anything.
- </para>
-
- <para>
- Now that the <structfield>data</structfield> field is set, the
- <function>read_proc</function> and
- <function>write_proc</function> can use it to distinguish
- between files because they get it passed into their
- <parameter>data</parameter> parameter:
- </para>
-
- <programlisting>
-int foo_read_func(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len;
-
- if(data == file_data) {
- /* special case for this file */
- } else {
- /* normal processing */
- }
-
- return len;
-}
- </programlisting>
-
- <para>
- Be sure to free the <structfield>data</structfield> data field
- when removing the procfs entry.
- </para>
- </sect1>
- </chapter>
-
-
-
-
- <chapter id="tips">
- <title>Tips and tricks</title>
-
-
-
-
- <sect1 id="convenience">
- <title>Convenience functions</title>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>struct proc_dir_entry* <function>create_proc_read_entry</function></funcdef>
- <paramdef>const char* <parameter>name</parameter></paramdef>
- <paramdef>mode_t <parameter>mode</parameter></paramdef>
- <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
- <paramdef>read_proc_t* <parameter>read_proc</parameter></paramdef>
- <paramdef>void* <parameter>data</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- This function creates a regular file in exactly the same way
- as <function>create_proc_entry</function> from <xref
- linkend="regularfile"/> does, but also allows to set the read
- function <parameter>read_proc</parameter> in one call. This
- function can set the <parameter>data</parameter> as well, like
- explained in <xref linkend="usingdata"/>.
- </para>
- </sect1>
-
-
-
- <sect1 id="Modules">
- <title>Modules</title>
-
- <para>
- If procfs is being used from within a module, be sure to set
- the <structfield>owner</structfield> field in the
- <structname>struct proc_dir_entry</structname> to
- <constant>THIS_MODULE</constant>.
- </para>
-
- <programlisting>
-struct proc_dir_entry* entry;
-
-entry->owner = THIS_MODULE;
- </programlisting>
- </sect1>
-
-
-
-
- <sect1 id="Mode_and_ownership">
- <title>Mode and ownership</title>
-
- <para>
- Sometimes it is useful to change the mode and/or ownership of
- a procfs entry. Here is an example that shows how to achieve
- that:
- </para>
-
- <programlisting>
-struct proc_dir_entry* entry;
-
-entry->mode = S_IWUSR |S_IRUSR | S_IRGRP | S_IROTH;
-entry->uid = 0;
-entry->gid = 100;
- </programlisting>
-
- </sect1>
- </chapter>
-
-
-
-
- <chapter id="example">
- <title>Example</title>
-
- <!-- be careful with the example code: it shouldn't be wider than
- approx. 60 columns, or otherwise it won't fit properly on a page
- -->
-
-&procfsexample;
-
- </chapter>
-</book>
+++ /dev/null
-/*
- * procfs_example.c: an example proc interface
- *
- * Copyright (C) 2001, Erik Mouw (mouw@nl.linux.org)
- *
- * This file accompanies the procfs-guide in the Linux kernel
- * source. Its main use is to demonstrate the concepts and
- * functions described in the guide.
- *
- * This software has been developed while working on the LART
- * computing board (http://www.lartmaker.nl), which was sponsored
- * by the Delft University of Technology projects Mobile Multi-media
- * Communications and Ubiquitous Communications.
- *
- * This program is free software; you can redistribute
- * it and/or modify it under the terms of the GNU General
- * Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/jiffies.h>
-#include <asm/uaccess.h>
-
-
-#define MODULE_VERS "1.0"
-#define MODULE_NAME "procfs_example"
-
-#define FOOBAR_LEN 8
-
-struct fb_data_t {
- char name[FOOBAR_LEN + 1];
- char value[FOOBAR_LEN + 1];
-};
-
-
-static struct proc_dir_entry *example_dir, *foo_file,
- *bar_file, *jiffies_file, *symlink;
-
-
-struct fb_data_t foo_data, bar_data;
-
-
-static int proc_read_jiffies(char *page, char **start,
- off_t off, int count,
- int *eof, void *data)
-{
- int len;
-
- len = sprintf(page, "jiffies = %ld\n",
- jiffies);
-
- return len;
-}
-
-
-static int proc_read_foobar(char *page, char **start,
- off_t off, int count,
- int *eof, void *data)
-{
- int len;
- struct fb_data_t *fb_data = (struct fb_data_t *)data;
-
- /* DON'T DO THAT - buffer overruns are bad */
- len = sprintf(page, "%s = '%s'\n",
- fb_data->name, fb_data->value);
-
- return len;
-}
-
-
-static int proc_write_foobar(struct file *file,
- const char *buffer,
- unsigned long count,
- void *data)
-{
- int len;
- struct fb_data_t *fb_data = (struct fb_data_t *)data;
-
- if(count > FOOBAR_LEN)
- len = FOOBAR_LEN;
- else
- len = count;
-
- if(copy_from_user(fb_data->value, buffer, len))
- return -EFAULT;
-
- fb_data->value[len] = '\0';
-
- return len;
-}
-
-
-static int __init init_procfs_example(void)
-{
- int rv = 0;
-
- /* create directory */
- example_dir = proc_mkdir(MODULE_NAME, NULL);
- if(example_dir == NULL) {
- rv = -ENOMEM;
- goto out;
- }
- /* create jiffies using convenience function */
- jiffies_file = create_proc_read_entry("jiffies",
- 0444, example_dir,
- proc_read_jiffies,
- NULL);
- if(jiffies_file == NULL) {
- rv = -ENOMEM;
- goto no_jiffies;
- }
-
- /* create foo and bar files using same callback
- * functions
- */
- foo_file = create_proc_entry("foo", 0644, example_dir);
- if(foo_file == NULL) {
- rv = -ENOMEM;
- goto no_foo;
- }
-
- strcpy(foo_data.name, "foo");
- strcpy(foo_data.value, "foo");
- foo_file->data = &foo_data;
- foo_file->read_proc = proc_read_foobar;
- foo_file->write_proc = proc_write_foobar;
-
- bar_file = create_proc_entry("bar", 0644, example_dir);
- if(bar_file == NULL) {
- rv = -ENOMEM;
- goto no_bar;
- }
-
- strcpy(bar_data.name, "bar");
- strcpy(bar_data.value, "bar");
- bar_file->data = &bar_data;
- bar_file->read_proc = proc_read_foobar;
- bar_file->write_proc = proc_write_foobar;
-
- /* create symlink */
- symlink = proc_symlink("jiffies_too", example_dir,
- "jiffies");
- if(symlink == NULL) {
- rv = -ENOMEM;
- goto no_symlink;
- }
-
- /* everything OK */
- printk(KERN_INFO "%s %s initialised\n",
- MODULE_NAME, MODULE_VERS);
- return 0;
-
-no_symlink:
- remove_proc_entry("bar", example_dir);
-no_bar:
- remove_proc_entry("foo", example_dir);
-no_foo:
- remove_proc_entry("jiffies", example_dir);
-no_jiffies:
- remove_proc_entry(MODULE_NAME, NULL);
-out:
- return rv;
-}
-
-
-static void __exit cleanup_procfs_example(void)
-{
- remove_proc_entry("jiffies_too", example_dir);
- remove_proc_entry("bar", example_dir);
- remove_proc_entry("foo", example_dir);
- remove_proc_entry("jiffies", example_dir);
- remove_proc_entry(MODULE_NAME, NULL);
-
- printk(KERN_INFO "%s %s removed\n",
- MODULE_NAME, MODULE_VERS);
-}
-
-
-module_init(init_procfs_example);
-module_exit(cleanup_procfs_example);
-
-MODULE_AUTHOR("Erik Mouw");
-MODULE_DESCRIPTION("procfs examples");
-MODULE_LICENSE("GPL");
2: Passes allnoconfig, allmodconfig
3: Builds on multiple CPU architectures by using local cross-compile tools
- or something like PLM at OSDL.
+ or some other build farm.
4: ppc64 is a good architecture for cross-compilation checking because it
tends to use `unsigned long' for 64-bit quantities.
24: All memory barriers {e.g., barrier(), rmb(), wmb()} need a comment in the
source code that explains the logic of what they are doing and why (see
the sketch after this excerpt).
+
+25: If any ioctl's are added by the patch, then also update
+ Documentation/ioctl/ioctl-number.txt.
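Checklist item 24 above is easiest to see with a concrete case. A minimal
sketch of a commented write barrier; the ring structure and register names
are purely illustrative, not taken from any real driver:

#include <linux/io.h>
#include <linux/types.h>

struct demo_ring {
	void __iomem *tail_reg;		/* doorbell register (illustrative) */
};

static void demo_post_descriptor(struct demo_ring *ring, u32 tail)
{
	/*
	 * Make sure all descriptor writes are visible to the device
	 * before the doorbell write below tells it to start DMA.
	 * This "what and why" comment is what item 24 asks for.
	 */
	wmb();
	writel(tail, ring->tail_reg);
}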
VIA UniChrome Family(CLE266, PM800 / CN400 / CN300,
P4M800CE / P4M800Pro / CN700 / VN800,
CX700 / VX700, K8M890, P4M890,
- CN896 / P4M900, VX800)
+ CN896 / P4M900, VX800, VX855)
[Driver features]
------------------------
0 : No Dual Edge Panel (default)
1 : Dual Edge Panel
- viafb_video_dev:
- This option is used to specify video output devices(CRT, DVI, LCD) for
- duoview case.
- For example:
- To output video on DVI, we should use:
- modprobe viafb viafb_video_dev=DVI...
-
viafb_lcd_port:
This option is used to specify LCD output port,
available values are "DVP0" "DVP1" "DFP_HIGHLOW" "DFP_HIGH" "DFP_LOW".
and bpp, need to call VIAFB specified ioctl interface VIAFB_SET_DEVICE
instead of calling common ioctl function FBIOPUT_VSCREENINFO since
viafb doesn't support multi-head well, or it will cause a screen crash.
- 4. VX800 2D accelerator hasn't been supported in this driver yet. When
- using driver on VX800, the driver will disable the acceleration
- function as default.
[Configure viafb with "fbset" tool]
{
struct proc_dir_entry *entry;
- entry = create_proc_entry("sequence", 0, NULL);
- if (entry)
- entry->proc_fops = &ct_file_ops;
+ proc_create("sequence", 0, NULL, &ct_file_ops);
return 0;
}
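The hunk above replaces the old create_proc_entry()-then-assign-proc_fops
pattern with a single proc_create() call. A minimal self-contained sketch of
the same pattern, assuming a trivial single_open()-based ct_file_ops (the real
code behind this hunk, apparently the seq_file example, uses a full
seq_operations iterator instead):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int ct_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "counter output would go here\n");
	return 0;
}

static int ct_open(struct inode *inode, struct file *file)
{
	return single_open(file, ct_seq_show, NULL);
}

static const struct file_operations ct_file_ops = {
	.owner   = THIS_MODULE,
	.open    = ct_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init ct_init(void)
{
	/* Register /proc/sequence and its fops in one call; no more
	 * create_proc_entry() followed by entry->proc_fops = ... */
	if (!proc_create("sequence", 0, NULL, &ct_file_ops))
		return -ENOMEM;
	return 0;
}
module_init(ct_init);
MODULE_LICENSE("GPL");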
This file exists only if the pin can be configured as an
interrupt generating input pin.
+	"active_low" ... reads as either 0 (false) or 1 (true). Write
+		any nonzero value to invert the value attribute for
+		both reading and writing. poll(2) edge detection
+		configured via the edge attribute ("rising" or
+		"falling"), whether set up before or after this
+		write, will follow this setting.
+
GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
controller implementing GPIOs starting at #42) and have the following
read-only attributes:
int gpio_export_link(struct device *dev, const char *name,
unsigned gpio)
+ /* change the polarity of a GPIO node in sysfs */
+ int gpio_sysfs_set_active_low(unsigned gpio, int value);
After a kernel driver requests a GPIO, it may only be made available in
the sysfs interface by gpio_export(). The driver can control whether the
symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can
use this to provide the interface under their own device in sysfs with
a descriptive name.
+
+Drivers can use gpio_sysfs_set_active_low() to hide GPIO line polarity
+differences between boards from user space. This only affects the
+sysfs interface. Polarity change can be done both before and after
+gpio_export(), and previously enabled poll(2) support for either
+rising or falling edge will be reconfigured to follow this setting.
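
A minimal sketch of how board code might combine the new call with
gpio_export(); the GPIO number and the "status-led" label are hypothetical:

#include <linux/gpio.h>
#include <linux/init.h>

#define STATUS_LED_GPIO	42	/* hypothetical, board-specific line */

static int __init status_led_export(void)
{
	int err;

	err = gpio_request(STATUS_LED_GPIO, "status-led");
	if (err)
		return err;

	err = gpio_direction_output(STATUS_LED_GPIO, 0);
	if (err)
		goto err_free;

	/* This board wires the LED active-low; hide that from user
	 * space so writing 1 to .../value always means "LED on". */
	err = gpio_sysfs_set_active_low(STATUS_LED_GPIO, 1);
	if (err)
		goto err_free;

	/* export a read/write "value" but forbid direction changes */
	err = gpio_export(STATUS_LED_GPIO, false);
	if (err)
		goto err_free;

	return 0;

err_free:
	gpio_free(STATUS_LED_GPIO);
	return err;
}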
vmpoff= [KNL,S390] Perform z/VM CP command after power off.
Format: <command>
+ vt.cur_default= [VT] Default cursor shape.
+ Format: 0xCCBBAA, where AA, BB, and CC are the same as
+ the parameters of the <Esc>[?A;B;Cc escape sequence;
+ see VGA-softcursor.txt. Default: 2 = underline.
+
vt.default_blu= [VT]
Format: <blue0>,<blue1>,<blue2>,...,<blue15>
Change the default blue palette of the console.
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_ALPHA
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define ELF_CORE_COPY_TASK_REGS dump_task_regs
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
.version = MMC_CTLR_VERSION_2,
};
+static void da850_panel_power_ctrl(int val)
+{
+ /* lcd backlight */
+ gpio_set_value(DA850_LCD_BL_PIN, val);
+
+ /* lcd power */
+ gpio_set_value(DA850_LCD_PWR_PIN, val);
+}
+
static int da850_lcd_hw_init(void)
{
int status;
gpio_direction_output(DA850_LCD_BL_PIN, 0);
gpio_direction_output(DA850_LCD_PWR_PIN, 0);
- /* disable lcd backlight */
- gpio_set_value(DA850_LCD_BL_PIN, 0);
-
- /* disable lcd power */
- gpio_set_value(DA850_LCD_PWR_PIN, 0);
-
- /* enable lcd power */
- gpio_set_value(DA850_LCD_PWR_PIN, 1);
+ /* Switch off panel power and backlight */
+ da850_panel_power_ctrl(0);
- /* enable lcd backlight */
- gpio_set_value(DA850_LCD_BL_PIN, 1);
+ /* Switch on panel power and backlight */
+ da850_panel_power_ctrl(1);
return 0;
}
pr_warning("da850_evm_init: lcd initialization failed: %d\n",
ret);
+	sharp_lk043t1dg01_pdata.panel_power_ctrl = da850_panel_power_ctrl;
ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata);
if (ret)
pr_warning("da850_evm_init: lcdc registration failed: %d\n",
#endif
#define ELF_ARCH EM_AVR32
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
--- /dev/null
+/*
+ * Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef BFIN_LQ035Q1_H
+#define BFIN_LQ035Q1_H
+
+#define LQ035_RL (0 << 8) /* Right -> Left Scan */
+#define LQ035_LR (1 << 8) /* Left -> Right Scan */
+#define LQ035_TB (1 << 9) /* Top -> Bottom Scan */
+#define LQ035_BT (0 << 9) /* Bottom -> Top Scan */
+#define LQ035_BGR (1 << 11) /* Use BGR format */
+#define LQ035_RGB (0 << 11) /* Use RGB format */
+#define LQ035_NORM (1 << 13) /* Reversal */
+#define LQ035_REV (0 << 13) /* Reversal */
+
+struct bfin_lq035q1fb_disp_info {
+
+ unsigned mode;
+ /* GPIOs */
+ int use_bl;
+ unsigned gpio_bl;
+};
+
+#endif /* BFIN_LQ035Q1_H */
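For context, a board file would hand this structure to the framebuffer driver
as platform data. A hypothetical sketch (the device name, GPIO number, and
mode flags chosen here are assumptions, not taken from a real board):

#include <linux/platform_device.h>
#include <asm/bfin-lq035q1.h>	/* assumed location of the header above */

static struct bfin_lq035q1fb_disp_info bfin_lq035q1_info = {
	.mode	 = LQ035_NORM | LQ035_RGB | LQ035_LR | LQ035_TB,
	.use_bl	 = 1,	/* let the driver drive the backlight GPIO */
	.gpio_bl = 7,	/* hypothetical backlight GPIO number */
};

static struct platform_device bfin_lq035q1_device = {
	.name	= "bfin-lq035q1",	/* assumed driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &bfin_lq035q1_info,
	},
};

/* registered from the board's init code, e.g.:
 *	platform_device_register(&bfin_lq035q1_device);
 */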
_regs->p2 = _dynamic_addr; \
} while(0)
-#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC
#define ELF_EXEC_PAGESIZE 4096
#define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004
/* End of excerpt from {binutils}/include/elf/cris.h. */
-#define USE_ELF_CORE_DUMP
-
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
__kernel_frame0_ptr->gr29 = 0; \
} while(0)
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_FDPIC_CORE_EFLAGS EF_FRV_FDPIC
#define ELF_EXEC_PAGESIZE 16384
#define ELF_PLAT_INIT(_r) _r->er1 = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
-#define USE_ELF_CORE_DUMP 1
-
/* Override elfcore.h */
#define _LINUX_ELFCORE_H 1
typedef unsigned int elf_greg_t;
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
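
The one-byte correction above (repeated for powerpc and x86 further down)
fixes a fencepost error: a buffer whose last byte sits exactly at the mask
boundary is addressable, but the old test compared the first byte past the
buffer. A standalone demonstration of the two predicates:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* device with a 32-bit DMA mask */
	uint64_t addr = 0xfffff000ULL;	/* last 4 KiB page below 4 GiB */
	uint64_t size = 0x1000;		/* buffer ends exactly at 4 GiB */

	/* old test: compares one past the end, wrongly rejects */
	printf("old: %d\n", addr + size <= mask);	/* prints 0 */
	/* new test: compares the last byte's address, accepts */
	printf("new: %d\n", addr + size - 1 <= mask);	/* prints 1 */
	return 0;
}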
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_IA_64
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/bitmap.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
- int i, ps, ps_shift, entry, entries, mapsize, last_entry;
+ int ps, ps_shift, entry, entries, mapsize;
u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
map = tioca_kern->ca_pcigart_pagemap;
mapsize = tioca_kern->ca_pcigart_entries;
- entry = find_first_zero_bit(map, mapsize);
- while (entry < mapsize) {
- last_entry = find_next_bit(map, mapsize, entry);
-
- if (last_entry - entry >= entries)
- break;
-
- entry = find_next_zero_bit(map, mapsize, last_entry);
- }
-
- if (entry > mapsize) {
+ entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
+ if (entry >= mapsize) {
kfree(ca_dmamap);
goto map_return;
}
- for (i = 0; i < entries; i++)
- set_bit(entry + i, map);
+ bitmap_set(map, entry, entries);
bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
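
This hunk, and the sparc, sun4c, GART, Calgary, and AMD IOMMU hunks below,
all converge on the same lib/bitmap.c helpers. A minimal sketch of the
resulting allocate/free pattern over a caller-owned bitmap (names and locking
are illustrative):

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(map_lock);

/* Find and claim npages contiguous clear bits; returns the first
 * bit index, or -ENOMEM if no large-enough zero area exists. */
static long alloc_range(unsigned long *map, unsigned long mapsize,
			unsigned int npages)
{
	unsigned long entry;

	spin_lock(&map_lock);
	/* start the search at bit 0, with no alignment constraint */
	entry = bitmap_find_next_zero_area(map, mapsize, 0, npages, 0);
	if (entry >= mapsize) {
		spin_unlock(&map_lock);
		return -ENOMEM;
	}
	bitmap_set(map, entry, npages);		/* mark the range busy */
	spin_unlock(&map_lock);
	return entry;
}

static void free_range(unsigned long *map, unsigned long entry,
		       unsigned int npages)
{
	spin_lock(&map_lock);
	bitmap_clear(map, entry, npages);
	spin_unlock(&map_lock);
}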
*/
#define ELF_PLAT_INIT(_r, load_addr) (_r)->r0 = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
is actually used on ASV. */
#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
-#define USE_ELF_CORE_DUMP
#ifndef CONFIG_SUN3
#define ELF_EXEC_PAGESIZE 4096
#else
#define ELF_DATA ELFDATA2MSB
#endif
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \
dump_task_fpu(tsk, elf_fpregs)
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
_ur->a1 = 0; _ur->a0 = 0; _ur->d1 = 0; _ur->d0 = 0; \
} while (0)
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
such function. */
#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC)
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
extern void user_enable_block_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
+#define ARCH_HAS_USER_SINGLE_STEP_INFO
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
}
ppc_md.tce_free(tbl, entry, npages);
- iommu_area_free(tbl->it_map, free_entry, npages);
+ bitmap_clear(tbl->it_map, free_entry, npages);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
return 0;
}
+void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = TRAP_TRACE;
+ info->si_addr = (void __user *)regs->nip;
+}
+
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
siginfo_t info;
} while (0)
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
struct pt_regs;
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
*/
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC
#define ELF_EXEC_PAGESIZE PAGE_SIZE
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
-#define USE_ELF_CORE_DUMP
-
#define ELF_EXEC_PAGESIZE 4096
(x)->e_machine == EM_SPARC32PLUS)
#define compat_start_thread start_thread32
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
+#include <linux/bitmap.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
- iommu_area_free(arena->map, entry, npages);
+ bitmap_clear(arena->map, entry, npages);
}
int iommu_table_init(struct iommu *iommu, int tsbsize,
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
+#include <linux/bitmap.h>
#include <asm/hypervisor.h>
#include <asm/iommu.h>
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
struct iommu_arena *arena = &iommu->arena;
- unsigned long n, i, start, end, limit;
+ unsigned long n, start, end, limit;
int pass;
limit = arena->limit;
pass = 0;
again:
- n = find_next_zero_bit(arena->map, limit, start);
+ n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
end = n + npages;
if (unlikely(end >= limit)) {
if (likely(pass < 1)) {
return -1;
}
}
-
- for (i = n; i < end; i++) {
- if (test_bit(i, arena->map)) {
- start = i + 1;
- goto again;
- }
- }
-
- for (i = n; i < end; i++)
- __set_bit(i, arena->map);
+ bitmap_set(arena->map, n, npages);
arena->hint = end;
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
+#include <linux/bitmap.h>
#include <asm/sections.h>
#include <asm/page.h>
npages = (((unsigned long)vaddr & ~PAGE_MASK) +
size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
- scan = 0;
local_irq_save(flags);
- for (;;) {
- scan = find_next_zero_bit(sun4c_iobuffer_map,
- iobuffer_map_size, scan);
- if ((base = scan) + npages > iobuffer_map_size) goto abend;
- for (;;) {
- if (scan >= base + npages) goto found;
- if (test_bit(scan, sun4c_iobuffer_map)) break;
- scan++;
- }
- }
+ base = bitmap_find_next_zero_area(sun4c_iobuffer_map, iobuffer_map_size,
+ 0, npages, 0);
+ if (base >= iobuffer_map_size)
+ goto abend;
-found:
high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
high = SUN4C_REAL_PGDIR_ALIGN(high);
while (high > sun4c_iobuffer_high) {
PT_REGS_EAX(regs) = 0; \
} while (0)
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
#define ELF_CLASS ELFCLASS32
#endif
-#define USE_ELF_CORE_DUMP
-
#define R_386_NONE 0
#define R_386_32 1
#define R_386_PC32 2
clear_thread_flag(TIF_IA32);
#endif
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
#endif /* !CONFIG_X86_32 */
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
#endif
+#define ARCH_HAS_USER_SINGLE_STEP_INFO
+
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
};
};
-union uv_watchlist_u {
- u64 val;
- struct {
- u64 blade : 16,
- size : 32,
- filler : 16;
- };
-};
-
enum uv_memprotect {
UV_MEMPROT_RESTRICT_ACCESS,
UV_MEMPROT_ALLOW_AMO,
extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
extern s64 uv_bios_freq_base(u64, u64 *);
-extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
+extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
unsigned long *);
extern int uv_bios_mq_watchlist_free(int, int);
extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
+#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
+
#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
return uv_soc_phys_ram_to_gpa(__pa(v));
}
+/* Top two bits indicate the requested address is in MMR space. */
+static inline int
+uv_gpa_in_mmr_space(unsigned long gpa)
+{
+ return (gpa >> 62) == 0x3UL;
+}
+
+/* UV global physical address --> socket phys RAM */
+static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
+{
+ unsigned long paddr = gpa & uv_hub_info->gpa_mask;
+ unsigned long remap_base = uv_hub_info->lowmem_remap_base;
+ unsigned long remap_top = uv_hub_info->lowmem_remap_top;
+
+ if (paddr >= remap_base && paddr < remap_base + remap_top)
+ paddr -= remap_base;
+ return paddr;
+}
+
+
/* gnode -> pnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
}
/*
+ * Global MMR space addresses when referenced by the GRU. (GRU does
+ * NOT use socket addressing).
+ */
+static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
+{
+ return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
+}
+
+/*
* Access hub local MMRs. Faster than using global space but only local MMRs
* are accessible.
*/
}
}
+static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
+{
+ return (1UL << UVH_IPI_INT_SEND_SHFT) |
+ ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
+ (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
+ (vector << UVH_IPI_INT_VECTOR_SHFT);
+}
+
static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
{
unsigned long val;
if (vector == NMI_VECTOR)
dmode = dest_NMI;
- val = (1UL << UVH_IPI_INT_SEND_SHFT) |
- ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
- (dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
- (vector << UVH_IPI_INT_VECTOR_SHFT);
+ val = uv_hub_ipi_value(apicid, vector, dmode);
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
#include <linux/pci.h>
#include <linux/gfp.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
- iommu_area_free(range->bitmap, address, pages);
+ bitmap_clear(range->bitmap, address, pages);
}
}
int
-uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size,
+uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
unsigned long *intr_mmr_offset)
{
- union uv_watchlist_u size_blade;
u64 watchlist;
s64 ret;
- size_blade.size = mq_size;
- size_blade.blade = blade;
-
/*
* bios returns watchlist number or negative error number.
*/
ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
- size_blade.val, (u64)intr_mmr_offset,
+ mq_size, (u64)intr_mmr_offset,
(u64)&watchlist, 0);
if (ret < BIOS_STATUS_SUCCESS)
return ret;
#include <linux/string.h>
#include <linux/crash_dump.h>
#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/delay.h>
spin_lock_irqsave(&tbl->it_lock, flags);
- iommu_area_reserve(tbl->it_map, index, npages);
+ bitmap_set(tbl->it_map, index, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
spin_lock_irqsave(&tbl->it_lock, flags);
- iommu_area_free(tbl->it_map, entry, npages);
+ bitmap_clear(tbl->it_map, entry, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
unsigned long flags;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
- iommu_area_free(iommu_gart_bitmap, offset, size);
+ bitmap_clear(iommu_gart_bitmap, offset, size);
if (offset >= next_bit)
next_bit = offset + size;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
* Out of IOMMU space handling.
* Reserve some invalid pages at the beginning of the GART.
*/
- iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+ bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
iommu_size >> 20);
#endif
}
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
- int error_code, int si_code)
+static void fill_sigtrap_info(struct task_struct *tsk,
+ struct pt_regs *regs,
+ int error_code, int si_code,
+ struct siginfo *info)
{
- struct siginfo info;
-
tsk->thread.trap_no = 1;
tsk->thread.error_code = error_code;
- memset(&info, 0, sizeof(info));
- info.si_signo = SIGTRAP;
- info.si_code = si_code;
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = si_code;
+ info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
+}
- /* User-mode ip? */
- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
+void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs,
+ struct siginfo *info)
+{
+ fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
+}
+void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ int error_code, int si_code)
+{
+ struct siginfo info;
+
+ fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
/* Send us the fake SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
+ bool step;
+
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->ax);
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, 0);
-
/*
* If TIF_SYSCALL_EMU is set, we only get here because of
* TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
* We already reported this syscall instruction in
- * syscall_trace_enter(), so don't do any more now.
- */
- if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
- return;
-
- /*
- * If we are single-stepping, synthesize a trap to follow the
- * system call instruction.
+ * syscall_trace_enter().
*/
- if (test_thread_flag(TIF_SINGLESTEP) &&
- tracehook_consider_fatal_signal(current, SIGTRAP))
- send_sigtrap(current, regs, 0, TRAP_BRKPT);
+ step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
+ !test_thread_flag(TIF_SYSCALL_EMU);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
}
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_XTENSA
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
.unlocked_ioctl = efi_rtc_ioctl,
.open = efi_rtc_open,
.release = efi_rtc_close,
+ .llseek = no_llseek,
};
static struct miscdevice efi_rtc_dev= {
return SI_SM_IDLE;
case KCS_START_OP:
- if (state != KCS_IDLE) {
+ if (state != KCS_IDLE_STATE) {
start_error_recovery(kcs,
"State machine not idle at start");
break;
static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
+ out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL);
}
static DECLARE_WORK(moom_work, moom_callback);
int global_cursor_default = -1;
module_param(global_cursor_default, int, S_IRUGO | S_IWUSR);
+static int cur_default = CUR_DEFAULT;
+module_param(cur_default, int, S_IRUGO | S_IWUSR);
+
/*
* ignore_poke: don't unblank the screen when things are typed. This is
* mainly for the privacy of braille terminal users.
/* do not do set_leds here because this causes an endless tasklet loop
when the keyboard hasn't been initialized yet */
- vc->vc_cursor_type = CUR_DEFAULT;
+ vc->vc_cursor_type = cur_default;
vc->vc_complement_mask = vc->vc_s_complement_mask;
default_attr(vc);
if (vc->vc_par[0])
vc->vc_cursor_type = vc->vc_par[0] | (vc->vc_par[1] << 8) | (vc->vc_par[2] << 16);
else
- vc->vc_cursor_type = CUR_DEFAULT;
+ vc->vc_cursor_type = cur_default;
return;
}
break;
* Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
* http://download.intel.com/design/chipsets/datashts/318378.pdf
*
+ * The Intel 5100 has two independent channels. The EDAC core currently
+ * cannot reflect this configuration, so instead the chip-select
+ * rows for each respective channel are laid out one after another,
+ * the first half belonging to channel 0, the second half belonging
+ * to channel 1.
*/
#include <linux/module.h>
#include <linux/init.h>
/* device 16, func 1 */
#define I5100_MC 0x40 /* Memory Control Register */
+#define I5100_MC_SCRBEN_MASK (1 << 7)
+#define I5100_MC_SCRBDONE_MASK (1 << 4)
#define I5100_MS 0x44 /* Memory Status Register */
#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
/* bit field accessors */
+static inline u32 i5100_mc_scrben(u32 mc)
+{
+ return mc >> 7 & 1;
+}
+
static inline u32 i5100_mc_errdeten(u32 mc)
{
return mc >> 5 & 1;
}
+static inline u32 i5100_mc_scrbdone(u32 mc)
+{
+ return mc >> 4 & 1;
+}
+
static inline u16 i5100_spddata_rdo(u16 a)
{
return a >> 15 & 1;
}
/* some generic limits */
-#define I5100_MAX_RANKS_PER_CTLR 6
-#define I5100_MAX_CTLRS 2
+#define I5100_MAX_RANKS_PER_CHAN 6
+#define I5100_CHANNELS 2
#define I5100_MAX_RANKS_PER_DIMM 4
#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
-#define I5100_MAX_DIMM_SLOTS_PER_CTLR 4
+#define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
#define I5100_MAX_RANK_INTERLEAVE 4
#define I5100_MAX_DMIRS 5
+#define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
struct i5100_priv {
/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
- int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];
+ int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
/*
* mainboard chip select map -- maps i5100 chip selects to
* DIMM slot chip selects. In the case of only 4 ranks per
- * controller, the mapping is fairly obvious but not unique.
- * we map -1 -> NC and assume both controllers use the same
+ * channel, the mapping is fairly obvious but not unique.
+ * we map -1 -> NC and assume both channels use the same
* map...
*
*/
- int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];
+ int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
/* memory interleave range */
struct {
u64 limit;
unsigned way[2];
- } mir[I5100_MAX_CTLRS];
+ } mir[I5100_CHANNELS];
/* adjusted memory interleave range register */
- unsigned amir[I5100_MAX_CTLRS];
+ unsigned amir[I5100_CHANNELS];
/* dimm interleave range */
struct {
unsigned rank[I5100_MAX_RANK_INTERLEAVE];
u64 limit;
- } dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];
+ } dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
/* memory technology registers... */
struct {
unsigned numbank; /* 2 or 3 lines */
unsigned numrow; /* 13 .. 16 lines */
unsigned numcol; /* 11 .. 12 lines */
- } mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];
+ } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
u64 tolm; /* top of low memory in bytes */
- unsigned ranksperctlr; /* number of ranks per controller */
+ unsigned ranksperchan; /* number of ranks per channel */
struct pci_dev *mc; /* device 16 func 1 */
struct pci_dev *ch0mm; /* device 21 func 0 */
struct pci_dev *ch1mm; /* device 22 func 0 */
+
+ struct delayed_work i5100_scrubbing;
+ int scrub_enable;
};
-/* map a rank/ctlr to a slot number on the mainboard */
+/* map a rank/chan to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
- int ctlr, int rank)
+ int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
- const int numrank = priv->dimm_numrank[ctlr][i];
+ const int numrank = priv->dimm_numrank[chan][i];
for (j = 0; j < numrank; j++)
if (priv->dimm_csmap[i][j] == rank)
- return i * 2 + ctlr;
+ return i * 2 + chan;
}
return -1;
return "none";
}
-/* convert csrow index into a rank (per controller -- 0..5) */
+/* convert csrow index into a rank (per channel -- 0..5) */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
- return csrow % priv->ranksperctlr;
+ return csrow % priv->ranksperchan;
}
-/* convert csrow index into a controller (0..1) */
-static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
+/* convert csrow index into a channel (0..1) */
+static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
- return csrow / priv->ranksperctlr;
+ return csrow / priv->ranksperchan;
}
static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
- int ctlr, int rank)
+ int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
- return ctlr * priv->ranksperctlr + rank;
+ return chan * priv->ranksperchan + rank;
}
static void i5100_handle_ce(struct mem_ctl_info *mci,
- int ctlr,
+ int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+ const int csrow = i5100_rank_to_csrow(mci, chan, rank);
printk(KERN_ERR
- "CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+ "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- ctlr, bank, rank, syndrome, cas, ras,
+ chan, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ce_count++;
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
- int ctlr,
+ int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+ const int csrow = i5100_rank_to_csrow(mci, chan, rank);
printk(KERN_ERR
- "UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+ "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- ctlr, bank, rank, syndrome, cas, ras,
+ chan, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ue_count++;
mci->csrows[csrow].ue_count++;
}
-static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
+static void i5100_read_log(struct mem_ctl_info *mci, int chan,
u32 ferr, u32 nerr)
{
struct i5100_priv *priv = mci->pvt_info;
- struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
+ struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
u32 dw;
u32 dw2;
unsigned syndrome = 0;
else
msg = i5100_err_msg(nerr);
- i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+ i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
if (i5100_validlog_nrecmemvalid(dw)) {
else
msg = i5100_err_msg(nerr);
- i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+ i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
}
}
+/* The i5100 chipset will scrub the entire memory once, then
+ * set a done bit. Continuous scrubbing is achieved by enqueuing
+ * delayed work to a workqueue, checking every few minutes if
+ * the scrubbing has completed and if so reinitiating it.
+ */
+
+static void i5100_refresh_scrubbing(struct work_struct *work)
+{
+ struct delayed_work *i5100_scrubbing = container_of(work,
+ struct delayed_work,
+ work);
+ struct i5100_priv *priv = container_of(i5100_scrubbing,
+ struct i5100_priv,
+ i5100_scrubbing);
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ if (priv->scrub_enable) {
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ if (i5100_mc_scrbdone(dw)) {
+ dw |= I5100_MC_SCRBEN_MASK;
+ pci_write_config_dword(priv->mc, I5100_MC, dw);
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+ }
+
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ }
+}
+/*
+ * The bandwidth is based on experimentation, feel free to refine it.
+ */
+static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
+ u32 *bandwidth)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+ if (*bandwidth) {
+ priv->scrub_enable = 1;
+ dw |= I5100_MC_SCRBEN_MASK;
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ } else {
+ priv->scrub_enable = 0;
+ dw &= ~I5100_MC_SCRBEN_MASK;
+ cancel_delayed_work(&(priv->i5100_scrubbing));
+ }
+ pci_write_config_dword(priv->mc, I5100_MC, dw);
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ *bandwidth = 5900000 * i5100_mc_scrben(dw);
+
+ return 0;
+}
+
+static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
+ u32 *bandwidth)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ *bandwidth = 5900000 * i5100_mc_scrben(dw);
+
+ return 0;
+}
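
Both callbacks are reached through the EDAC core's per-controller sdram_scrub_rate attribute; a userspace sketch, assuming the usual EDAC sysfs layout of this era and memory controller 0:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/edac/mc/mc0/sdram_scrub_rate";
	unsigned long bw = 0;
	FILE *f;

	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "1\n");		/* any nonzero value enables scrubbing */
	fclose(f);

	f = fopen(path, "r");
	if (f && fscanf(f, "%lu", &bw) == 1)
		printf("scrub bandwidth: %lu\n", bw);	/* 5900000 while enabled */
	if (f)
		fclose(f);
	return 0;
}
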
+
static struct pci_dev *pci_get_device_func(unsigned vendor,
unsigned device,
unsigned func)
int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
- const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
- const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
+ const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
+ const unsigned chan = i5100_csrow_to_chan(mci, csrow);
unsigned addr_lines;
/* dimm present? */
- if (!priv->mtr[ctlr][ctlr_rank].present)
+ if (!priv->mtr[chan][chan_rank].present)
return 0ULL;
addr_lines =
I5100_DIMM_ADDR_LINES +
- priv->mtr[ctlr][ctlr_rank].numcol +
- priv->mtr[ctlr][ctlr_rank].numrow +
- priv->mtr[ctlr][ctlr_rank].numbank;
+ priv->mtr[chan][chan_rank].numcol +
+ priv->mtr[chan][chan_rank].numrow +
+ priv->mtr[chan][chan_rank].numbank;
return (unsigned long)
((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
int i;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
struct pci_dev *pdev = mms[i];
- for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
+ for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
const unsigned addr =
(j < 4) ? I5100_MTR_0 + j * 2 :
I5100_MTR_4 + (j - 4) * 2;
* fill dimm chip select map
*
* FIXME:
- * o only valid for 4 ranks per controller
* o not the only way to map chip selects to dimm slots
* o investigate if there is some way to obtain this map from the BIOS
*/
struct i5100_priv *priv = mci->pvt_info;
int i;
- WARN_ON(priv->ranksperctlr != 4);
-
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
}
/* only 2 chip selects per slot... */
- priv->dimm_csmap[0][0] = 0;
- priv->dimm_csmap[0][1] = 3;
- priv->dimm_csmap[1][0] = 1;
- priv->dimm_csmap[1][1] = 2;
- priv->dimm_csmap[2][0] = 2;
- priv->dimm_csmap[3][0] = 3;
+ if (priv->ranksperchan == 4) {
+ priv->dimm_csmap[0][0] = 0;
+ priv->dimm_csmap[0][1] = 3;
+ priv->dimm_csmap[1][0] = 1;
+ priv->dimm_csmap[1][1] = 2;
+ priv->dimm_csmap[2][0] = 2;
+ priv->dimm_csmap[3][0] = 3;
+ } else {
+ priv->dimm_csmap[0][0] = 0;
+ priv->dimm_csmap[0][1] = 1;
+ priv->dimm_csmap[1][0] = 2;
+ priv->dimm_csmap[1][1] = 3;
+ priv->dimm_csmap[2][0] = 4;
+ priv->dimm_csmap[2][1] = 5;
+ }
}
static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
- for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
+ for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
u8 rank;
if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
pci_read_config_word(pdev, I5100_AMIR_1, &w);
priv->amir[1] = w;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
for (j = 0; j < 5; j++) {
for (i = 0; i < mci->nr_csrows; i++) {
const unsigned long npages = i5100_npages(mci, i);
- const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
+ const unsigned chan = i5100_csrow_to_chan(mci, i);
const unsigned rank = i5100_csrow_to_rank(mci, i);
if (!npages)
mci->csrows[i].grain = 32;
mci->csrows[i].csrow_idx = i;
mci->csrows[i].dtype =
- (priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
+ (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
mci->csrows[i].ue_count = 0;
mci->csrows[i].ce_count = 0;
mci->csrows[i].mtype = MEM_RDDR2;
mci->csrows[i].channels[0].csrow = mci->csrows + i;
snprintf(mci->csrows[i].channels[0].label,
sizeof(mci->csrows[i].channels[0].label),
- "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));
+ "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
total_pages += npages;
}
pci_read_config_dword(pdev, I5100_MS, &dw);
ranksperch = !!(dw & (1 << 8)) * 2 + 4;
- if (ranksperch != 4) {
- /* FIXME: get 6 ranks / controller to work - need hw... */
- printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
- ret = -ENODEV;
- goto bail_pdev;
- }
-
/* enable error reporting... */
pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
mci->dev = &pdev->dev;
priv = mci->pvt_info;
- priv->ranksperctlr = ranksperch;
+ priv->ranksperchan = ranksperch;
priv->mc = pdev;
priv->ch0mm = ch0mm;
priv->ch1mm = ch1mm;
+ INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
+
+ /* If scrubbing was already enabled by the BIOS, start maintaining it */
+ pci_read_config_dword(pdev, I5100_MC, &dw);
+ if (i5100_mc_scrben(dw)) {
+ priv->scrub_enable = 1;
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ }
+
i5100_init_dimm_layout(pdev, mci);
i5100_init_interleaving(pdev, mci);
mci->ctl_page_to_phys = NULL;
mci->edac_check = i5100_check_error;
+ mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
+ mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
i5100_init_csrows(mci);
if (edac_mc_add_mc(mci)) {
ret = -ENODEV;
- goto bail_mc;
+ goto bail_scrub;
}
return ret;
-bail_mc:
+bail_scrub:
+ priv->scrub_enable = 0;
+ cancel_delayed_work_sync(&(priv->i5100_scrubbing));
edac_mc_free(mci);
bail_disable_ch1:
return;
priv = mci->pvt_info;
+
+ priv->scrub_enable = 0;
+ cancel_delayed_work_sync(&(priv->i5100_scrubbing));
+
pci_disable_device(pdev);
pci_disable_device(priv->ch0mm);
pci_disable_device(priv->ch1mm);
help
Say Y here to support Intel Moorestown platform GPIO.
+config GPIO_TIMBERDALE
+ bool "Support for timberdale GPIO IP"
+ depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
+ ---help---
+ Add support for the GPIO IP in the timberdale FPGA.
+
comment "SPI GPIO expanders:"
config GPIO_MAX7301
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
+obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */
#define FLAG_TRIG_FALL 5 /* trigger on falling edge */
#define FLAG_TRIG_RISE 6 /* trigger on rising edge */
+#define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */
#define PDESC_ID_SHIFT 16 /* add new flags before this one */
* * configures behavior of poll(2) on /value
* * available only if pin can generate IRQs on input
* * is read/write as "none", "falling", "rising", or "both"
+ * /active_low
+ * * configures polarity of /value
+ * * is read/write as zero/nonzero
+ * * also affects existing and subsequent "falling" and "rising"
+ * /edge configuration
*/
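
A userspace sketch of the attribute documented above; gpio23 is illustrative and the pin must already be exported:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/gpio/gpio23/active_low", "w");

	if (!f)
		return 1;
	fprintf(f, "1\n");	/* nonzero: reads/writes of value are inverted */
	fclose(f);

	/* from here on, .../gpio23/value returns 1 while the line is
	 * physically low, and "rising"/"falling" edges are swapped */
	return 0;
}
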
static ssize_t gpio_direction_show(struct device *dev,
return status ? : size;
}
-static const DEVICE_ATTR(direction, 0644,
+static /* const */ DEVICE_ATTR(direction, 0644,
gpio_direction_show, gpio_direction_store);
static ssize_t gpio_value_show(struct device *dev,
mutex_lock(&sysfs_lock);
- if (!test_bit(FLAG_EXPORT, &desc->flags))
+ if (!test_bit(FLAG_EXPORT, &desc->flags)) {
status = -EIO;
- else
- status = sprintf(buf, "%d\n", !!gpio_get_value_cansleep(gpio));
+ } else {
+ int value;
+
+ value = !!gpio_get_value_cansleep(gpio);
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+
+ status = sprintf(buf, "%d\n", value);
+ }
mutex_unlock(&sysfs_lock);
return status;
status = strict_strtol(buf, 0, &value);
if (status == 0) {
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
gpio_set_value_cansleep(gpio, value != 0);
status = size;
}
return status;
}
-static /*const*/ DEVICE_ATTR(value, 0644,
+static const DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
irq_flags = IRQF_SHARED;
if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
- irq_flags |= IRQF_TRIGGER_FALLING;
+ irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
- irq_flags |= IRQF_TRIGGER_RISING;
+ irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (!pdesc) {
pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL);
static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store);
+static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev,
+ int value)
+{
+ int status = 0;
+
+ if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
+ return 0;
+
+ if (value)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ else
+ clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
+
+ /* reconfigure poll(2) support if enabled on one edge only */
+ if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^
+ !!test_bit(FLAG_TRIG_FALL, &desc->flags))) {
+ unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK;
+
+ gpio_setup_irq(desc, dev, 0);
+ status = gpio_setup_irq(desc, dev, trigger_flags);
+ }
+
+ return status;
+}
+
+static ssize_t gpio_active_low_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else
+ status = sprintf(buf, "%d\n",
+ !!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
+
+ mutex_unlock(&sysfs_lock);
+
+ return status;
+}
+
+static ssize_t gpio_active_low_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags)) {
+ status = -EIO;
+ } else {
+ long value;
+
+ status = strict_strtol(buf, 0, &value);
+ if (status == 0)
+ status = sysfs_set_active_low(desc, dev, value != 0);
+ }
+
+ mutex_unlock(&sysfs_lock);
+
+ return status ? : size;
+}
+
+static const DEVICE_ATTR(active_low, 0644,
+ gpio_active_low_show, gpio_active_low_store);
+
static const struct attribute *gpio_attrs[] = {
- &dev_attr_direction.attr,
&dev_attr_value.attr,
+ &dev_attr_active_low.attr,
NULL,
};
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
desc, ioname ? ioname : "gpio%d", gpio);
if (!IS_ERR(dev)) {
- if (direction_may_change)
- status = sysfs_create_group(&dev->kobj,
+ status = sysfs_create_group(&dev->kobj,
&gpio_attr_group);
- else
+
+ if (!status && direction_may_change)
status = device_create_file(dev,
- &dev_attr_value);
+ &dev_attr_direction);
if (!status && gpio_to_irq(gpio) >= 0
&& (direction_may_change
}
EXPORT_SYMBOL_GPL(gpio_export_link);
+
+/**
+ * gpio_sysfs_set_active_low - set the polarity of gpio sysfs value
+ * @gpio: gpio to change
+ * @value: non-zero to use active low, i.e. inverted values
+ *
+ * Set the polarity of the /sys/class/gpio/gpioN/value sysfs attribute.
+ * The GPIO does not have to be exported yet. If poll(2) support has
+ * been enabled for either rising or falling edge, it will be
+ * reconfigured to follow the new polarity.
+ *
+ * Returns zero on success, else an error.
+ */
+int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ struct gpio_desc *desc;
+ struct device *dev = NULL;
+ int status = -EINVAL;
+
+ if (!gpio_is_valid(gpio))
+ goto done;
+
+ mutex_lock(&sysfs_lock);
+
+ desc = &gpio_desc[gpio];
+
+ if (test_bit(FLAG_EXPORT, &desc->flags)) {
+ dev = class_find_device(&gpio_class, NULL, desc, match_export);
+ if (dev == NULL) {
+ status = -ENODEV;
+ goto unlock;
+ }
+ }
+
+ status = sysfs_set_active_low(desc, dev, value);
+
+unlock:
+ mutex_unlock(&sysfs_lock);
+
+done:
+ if (status)
+ pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
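
A minimal kernel-side sketch of the helper just exported; GPIO number 23 and the label are illustrative:

static int demo_export_active_low(void)
{
	int err;

	err = gpio_request(23, "demo-led-n");	/* line is wired active-low */
	if (err)
		return err;

	err = gpio_direction_output(23, 0);
	if (!err)
		err = gpio_export(23, false);	/* direction fixed in sysfs */
	if (!err)
		/* sysfs "value" now reports the logical, not physical, level */
		err = gpio_sysfs_set_active_low(23, 1);

	if (err)
		gpio_free(23);		/* gpio_free() also unexports the pin */
	return err;
}
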
+
/**
* gpio_unexport - reverse effect of gpio_export()
* @gpio: gpio to make unavailable
}
desc_set_label(desc, NULL);
module_put(desc->chip->owner);
+ clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
clear_bit(FLAG_REQUESTED, &desc->flags);
} else
WARN_ON(extra_checks);
void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
- if (gpio < 0 || gpio > lnw->chip.ngpio)
+ if (gpio >= lnw->chip.ngpio)
return -EINVAL;
spin_lock_irqsave(&lnw->lock, flags);
if (type & IRQ_TYPE_EDGE_RISING)
--- /dev/null
+/*
+ * timbgpio.c timberdale FPGA GPIO driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA GPIO
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/timb_gpio.h>
+#include <linux/interrupt.h>
+
+#define DRIVER_NAME "timb-gpio"
+
+#define TGPIOVAL 0x00
+#define TGPIODIR 0x04
+#define TGPIO_IER 0x08
+#define TGPIO_ISR 0x0c
+#define TGPIO_IPR 0x10
+#define TGPIO_ICR 0x14
+#define TGPIO_FLR 0x18
+#define TGPIO_LVR 0x1c
+
+struct timbgpio {
+ void __iomem *membase;
+ spinlock_t lock; /* mutual exclusion */
+ struct gpio_chip gpio;
+ int irq_base;
+};
+
+static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
+ unsigned offset, bool enabled)
+{
+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
+ u32 reg;
+
+ spin_lock(&tgpio->lock);
+ reg = ioread32(tgpio->membase + offset);
+
+ if (enabled)
+ reg |= (1 << index);
+ else
+ reg &= ~(1 << index);
+
+ iowrite32(reg, tgpio->membase + offset);
+ spin_unlock(&tgpio->lock);
+
+ return 0;
+}
+
+static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ return timbgpio_update_bit(gpio, nr, TGPIODIR, true);
+}
+
+static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
+ u32 value;
+
+ value = ioread32(tgpio->membase + TGPIOVAL);
+ return (value & (1 << nr)) ? 1 : 0;
+}
+
+static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
+ unsigned nr, int val)
+{
+ return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
+}
+
+static void timbgpio_gpio_set(struct gpio_chip *gpio,
+ unsigned nr, int val)
+{
+ timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
+}
+
+static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
+
+ if (tgpio->irq_base <= 0)
+ return -EINVAL;
+
+ return tgpio->irq_base + offset;
+}
+
+/*
+ * GPIO IRQ
+ */
+static void timbgpio_irq_disable(unsigned irq)
+{
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+
+ timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
+}
+
+static void timbgpio_irq_enable(unsigned irq)
+{
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+
+ timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
+}
+
+static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+{
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+ unsigned long flags;
+ u32 lvr, flr;
+
+ if (offset < 0 || offset >= tgpio->gpio.ngpio)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tgpio->lock, flags);
+
+ lvr = ioread32(tgpio->membase + TGPIO_LVR);
+ flr = ioread32(tgpio->membase + TGPIO_FLR);
+
+ if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+ flr &= ~(1 << offset);
+ if (trigger & IRQ_TYPE_LEVEL_HIGH)
+ lvr |= 1 << offset;
+ else
+ lvr &= ~(1 << offset);
+ } else if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+ /* both-edge triggering is not supported */
+ spin_unlock_irqrestore(&tgpio->lock, flags);
+ return -EINVAL;
+ } else {
+ flr |= 1 << offset;
+ /* opposite of what the datasheet says, but it matches
+ * the observed hardware behaviour
+ */
+ if (trigger & IRQ_TYPE_EDGE_FALLING)
+ lvr |= 1 << offset;
+ else
+ lvr &= ~(1 << offset);
+ }
+
+ iowrite32(lvr, tgpio->membase + TGPIO_LVR);
+ iowrite32(flr, tgpio->membase + TGPIO_FLR);
+ iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
+
+ return 0;
+}
+
+static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct timbgpio *tgpio = get_irq_data(irq);
+ unsigned long ipr;
+ int offset;
+
+ desc->chip->ack(irq);
+ ipr = ioread32(tgpio->membase + TGPIO_IPR);
+ iowrite32(ipr, tgpio->membase + TGPIO_ICR);
+
+ for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
+ generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
+}
+
+static struct irq_chip timbgpio_irqchip = {
+ .name = "GPIO",
+ .enable = timbgpio_irq_enable,
+ .disable = timbgpio_irq_disable,
+ .set_type = timbgpio_irq_type,
+};
+
+static int __devinit timbgpio_probe(struct platform_device *pdev)
+{
+ int err, i;
+ struct gpio_chip *gc;
+ struct timbgpio *tgpio;
+ struct resource *iomem;
+ struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
+ int irq = platform_get_irq(pdev, 0);
+
+ if (!pdata || pdata->nr_pins > 32) {
+ err = -EINVAL;
+ goto err_mem;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ err = -EINVAL;
+ goto err_mem;
+ }
+
+ tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
+ if (!tgpio) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+ tgpio->irq_base = pdata->irq_base;
+
+ spin_lock_init(&tgpio->lock);
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ DRIVER_NAME)) {
+ err = -EBUSY;
+ goto err_request;
+ }
+
+ tgpio->membase = ioremap(iomem->start, resource_size(iomem));
+ if (!tgpio->membase) {
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ gc = &tgpio->gpio;
+
+ gc->label = dev_name(&pdev->dev);
+ gc->owner = THIS_MODULE;
+ gc->dev = &pdev->dev;
+ gc->direction_input = timbgpio_gpio_direction_input;
+ gc->get = timbgpio_gpio_get;
+ gc->direction_output = timbgpio_gpio_direction_output;
+ gc->set = timbgpio_gpio_set;
+ gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
+ gc->dbg_show = NULL;
+ gc->base = pdata->gpio_base;
+ gc->ngpio = pdata->nr_pins;
+ gc->can_sleep = 0;
+
+ err = gpiochip_add(gc);
+ if (err)
+ goto err_chipadd;
+
+ platform_set_drvdata(pdev, tgpio);
+
+ /* make sure to disable interrupts */
+ iowrite32(0x0, tgpio->membase + TGPIO_IER);
+
+ if (irq < 0 || tgpio->irq_base <= 0)
+ return 0;
+
+ for (i = 0; i < pdata->nr_pins; i++) {
+ set_irq_chip_and_handler_name(tgpio->irq_base + i,
+ &timbgpio_irqchip, handle_simple_irq, "mux");
+ set_irq_chip_data(tgpio->irq_base + i, tgpio);
+#ifdef CONFIG_ARM
+ set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
+#endif
+ }
+
+ set_irq_data(irq, tgpio);
+ set_irq_chained_handler(irq, timbgpio_irq);
+
+ return 0;
+
+err_chipadd:
+ iounmap(tgpio->membase);
+err_ioremap:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_request:
+ kfree(tgpio);
+err_mem:
+ printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err);
+
+ return err;
+}
+
+static int __devexit timbgpio_remove(struct platform_device *pdev)
+{
+ int err;
+ struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timbgpio *tgpio = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq >= 0 && tgpio->irq_base > 0) {
+ int i;
+ for (i = 0; i < pdata->nr_pins; i++) {
+ set_irq_chip(tgpio->irq_base + i, NULL);
+ set_irq_chip_data(tgpio->irq_base + i, NULL);
+ }
+
+ set_irq_handler(irq, NULL);
+ set_irq_data(irq, NULL);
+ }
+
+ err = gpiochip_remove(&tgpio->gpio);
+ if (err)
+ printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n");
+
+ iounmap(tgpio->membase);
+ release_mem_region(iomem->start, resource_size(iomem));
+ kfree(tgpio);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver timbgpio_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = timbgpio_probe,
+ .remove = timbgpio_remove,
+};
+
+/*--------------------------------------------------------------------------*/
+
+static int __init timbgpio_init(void)
+{
+ return platform_driver_register(&timbgpio_platform_driver);
+}
+
+static void __exit timbgpio_exit(void)
+{
+ platform_driver_unregister(&timbgpio_platform_driver);
+}
+
+module_init(timbgpio_init);
+module_exit(timbgpio_exit);
+
+MODULE_DESCRIPTION("Timberdale GPIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mocean Laboratories");
+MODULE_ALIAS("platform:"DRIVER_NAME);
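
The driver binds by name, so a board file or MFD cell would instantiate it roughly as below; the resource window, pin count and GPIO/IRQ bases are illustrative:

#include <linux/platform_device.h>
#include <linux/timb_gpio.h>

static struct timbgpio_platform_data demo_timbgpio_pdata = {
	.gpio_base	= 0,	/* first GPIO number to claim */
	.nr_pins	= 16,	/* must be <= 32, see timbgpio_probe() */
	.irq_base	= 200,	/* <= 0 disables the IRQ support */
};

static struct resource demo_timbgpio_resources[] = {
	{
		.start	= 0x1000,	/* offset of the GPIO IP in the FPGA */
		.end	= 0x101f,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device demo_timbgpio_device = {
	.name		= "timb-gpio",	/* matches DRIVER_NAME above */
	.id		= -1,
	.resource	= demo_timbgpio_resources,
	.num_resources	= ARRAY_SIZE(demo_timbgpio_resources),
	.dev.platform_data = &demo_timbgpio_pdata,
};
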
+
int free_user_cbr;
};
+/*
+ * Statistics kept for each context.
+ */
+struct gru_gseg_statistics {
+ unsigned long fmm_tlbmiss;
+ unsigned long upm_tlbmiss;
+ unsigned long tlbdropin;
+ unsigned long context_stolen;
+ unsigned long reserved[10];
+};
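
These counters are copied out to userspace by the ioctl handler shown further down (the request carries a gseg address in and the stats block out); a hedged userspace sketch, where the GRU_GET_GSEG_STATISTICS ioctl name and the exact request layout are assumptions based on that handler:

#include <sys/ioctl.h>
#include "gru.h"		/* struct gru_gseg_statistics */

/* Assumed request layout: gseg address in, statistics out. */
struct demo_gseg_statistics_req {
	unsigned long gseg;
	struct gru_gseg_statistics stats;
};

static long demo_tlb_dropins(int gru_fd, void *gseg)
{
	struct demo_gseg_statistics_req req = {
		.gseg = (unsigned long)gseg,
	};

	if (ioctl(gru_fd, GRU_GET_GSEG_STATISTICS, &req) < 0)
		return -1;
	/* contexts that were never used report all-zero counters */
	return (long)req.stats.tlbdropin;
}
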
+
/* Flags for GRU options on the gru_create_context() call */
/* Select one of the follow 4 options to specify how TLB misses are handled */
#define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */
#include <asm/intrinsics.h>
#define __flush_cache(p) ia64_fc((unsigned long)p)
-/* Use volatile on IA64 to ensure ordering via st4.rel */
+/* Use volatile on IA64 to ensure ordering via st8.rel */
-#define gru_ordered_store_int(p, v) \
+#define gru_ordered_store_ulong(p, v) \
do { \
barrier(); \
- *((volatile int *)(p)) = v; /* force st.rel */ \
+ *((volatile unsigned long *)(p)) = v; /* force st.rel */ \
} while (0)
#elif defined(CONFIG_X86_64)
#define __flush_cache(p) clflush(p)
-#define gru_ordered_store_int(p, v) \
+#define gru_ordered_store_ulong(p, v) \
do { \
barrier(); \
- *(int *)p = v; \
+ *(unsigned long *)p = v; \
} while (0)
#else
#error "Unsupported architecture"
*/
struct gru_instruction {
/* DW 0 */
- unsigned int op32; /* icmd,xtype,iaa0,ima,opc */
- unsigned int tri0;
+ union {
+ unsigned long op64; /* icmd,xtype,iaa0,ima,opc,tri0 */
+ struct {
+ unsigned int op32;
+ unsigned int tri0;
+ };
+ };
unsigned long tri1_bufsize; /* DW 1 */
unsigned long baddr0; /* DW 2 */
unsigned long nelem; /* DW 3 */
unsigned long avalue; /* DW 7 */
};
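
The point of the new union is that a single 64-bit store to op64 sets op32 and tri0 together; on the little-endian targets here (x86_64, and ia64 as run by Linux) op32 is the low half. A self-contained sketch using a local mirror of the layout:

#include <assert.h>

union demo_dw0 {
	unsigned long op64;
	struct {
		unsigned int op32;	/* icmd, xtype, iaa0, ima, opc */
		unsigned int tri0;
	};
};

int main(void)
{
	union demo_dw0 dw = { .op64 = (0x1234UL << 32) | 0xabcdUL };

	assert(dw.op32 == 0xabcd);	/* low 32 bits of the doubleword */
	assert(dw.tri0 == 0x1234);	/* high 32 bits: tri0 */
	return 0;
}
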
-/* Some shifts and masks for the low 32 bits of a GRU command */
+/* Some shifts and masks for the low 64 bits of a GRU command */
#define GRU_CB_ICMD_SHFT 0
#define GRU_CB_ICMD_MASK 0x1
#define GRU_CB_XTYPE_SHFT 8
#define GRU_CB_OPC_MASK 0xff
#define GRU_CB_EXOPC_SHFT 24
#define GRU_CB_EXOPC_MASK 0xff
+#define GRU_IDEF2_SHFT 32
+#define GRU_IDEF2_MASK 0x3ffff
+#define GRU_ISTATUS_SHFT 56
+#define GRU_ISTATUS_MASK 0x3
/* GRU instruction opcodes (opc field) */
#define OP_NOP 0x00
#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16)
#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17)
#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18)
+#define CBE_CAUSE_FORCED_ERROR (1 << 19)
/* CBE cbrexecstatus bits */
#define CBR_EXS_ABORT_OCC_BIT 0
#define CBR_EXS_QUEUED_BIT 3
#define CBR_EXS_TLB_INVAL_BIT 4
#define CBR_EXS_EXCEPTION_BIT 5
+#define CBR_EXS_CB_INT_PENDING_BIT 6
#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
-#define CBR_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
+#define CBR_EXS_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
+#define CBR_EXS_CB_INT_PENDING (1 << CBR_EXS_CB_INT_PENDING_BIT)
/*
* Exceptions are retried for the following cases. If any OTHER bits are set
-/* Generate the low word of a GRU instruction */
+/* Generate the low doubleword of a GRU instruction */
-static inline unsigned int
-__opword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
+static inline unsigned long
+__opdword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
unsigned char iaa0, unsigned char iaa1,
- unsigned char ima)
+ unsigned long idef2, unsigned char ima)
{
return (1 << GRU_CB_ICMD_SHFT) |
+ ((unsigned long)CBS_ACTIVE << GRU_ISTATUS_SHFT) |
+ (idef2 << GRU_IDEF2_SHFT) |
(iaa0 << GRU_CB_IAA0_SHFT) |
(iaa1 << GRU_CB_IAA1_SHFT) |
(ima << GRU_CB_IMA_SHFT) |
}
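
The new shift/mask pairs also let fields be read back out of the doubleword that __opdword() builds; a sketch (these helpers are not part of the driver):

static inline int demo_dw0_istatus(unsigned long dw0)
{
	return (dw0 >> GRU_ISTATUS_SHFT) & GRU_ISTATUS_MASK;
}

static inline unsigned long demo_dw0_idef2(unsigned long dw0)
{
	return (dw0 >> GRU_IDEF2_SHFT) & GRU_IDEF2_MASK;
}

Immediately after gru_start_instruction(), demo_dw0_istatus() on DW 0 reads back as CBS_ACTIVE, which is exactly what __opdword() seeds it with.
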
/*
- * Store the lower 32 bits of the command including the "start" bit. Then
+ * Store the lower 64 bits of the command including the "start" bit. Then
* start the instruction executing.
*/
-static inline void gru_start_instruction(struct gru_instruction *ins, int op32)
+static inline void gru_start_instruction(struct gru_instruction *ins, unsigned long op64)
{
- gru_ordered_store_int(ins, op32);
+ gru_ordered_store_ulong(ins, op64);
+ mb();
gru_flush_cache(ins);
}
* - nelem and stride are in elements
* - tri0/tri1 is in bytes for the beginning of the data segment.
*/
+static inline void gru_vload_phys(void *cb, unsigned long gpa,
+ unsigned int tri0, int iaa, unsigned long hints)
+{
+ struct gru_instruction *ins = (struct gru_instruction *)cb;
+
+ ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
+ ins->nelem = 1;
+ ins->op1_stride = 1;
+ gru_start_instruction(ins, __opdword(OP_VLOAD, 0, XTYPE_DW, iaa, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
+}
+
+static inline void gru_vstore_phys(void *cb, unsigned long gpa,
+ unsigned int tri0, int iaa, unsigned long hints)
+{
+ struct gru_instruction *ins = (struct gru_instruction *)cb;
+
+ ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
+ ins->nelem = 1;
+ ins->op1_stride = 1;
+ gru_start_instruction(ins, __opdword(OP_VSTORE, 0, XTYPE_DW, iaa, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
+}
+
static inline void gru_vload(void *cb, unsigned long mem_addr,
unsigned int tri0, unsigned char xtype, unsigned long nelem,
unsigned long stride, unsigned long hints)
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->op1_stride = stride;
- gru_start_instruction(ins, __opword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
}
static inline void gru_vstore(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->op1_stride = stride;
- gru_start_instruction(ins, __opword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline void gru_ivload(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->tri1_bufsize = tri1;
- gru_start_instruction(ins, __opword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline void gru_ivstore(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->tri1_bufsize = tri1;
- gru_start_instruction(ins, __opword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline void gru_vset(void *cb, unsigned long mem_addr,
ins->op2_value_baddr1 = value;
ins->nelem = nelem;
ins->op1_stride = stride;
- gru_start_instruction(ins, __opword(OP_VSET, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VSET, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_ivset(void *cb, unsigned long mem_addr,
ins->op2_value_baddr1 = value;
ins->nelem = nelem;
ins->tri1_bufsize = tri1;
- gru_start_instruction(ins, __opword(OP_IVSET, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_IVSET, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_vflush(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->op1_stride = stride;
ins->nelem = nelem;
- gru_start_instruction(ins, __opword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_nop(void *cb, int hints)
{
struct gru_instruction *ins = (void *)cb;
- gru_start_instruction(ins, __opword(OP_NOP, 0, 0, 0, 0, CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_NOP, 0, 0, 0, 0, 0, CB_IMA(hints)));
}
ins->baddr0 = (long)src;
ins->op2_value_baddr1 = (long)dest;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->tri1_bufsize = bufsize;
- gru_start_instruction(ins, __opword(OP_BCOPY, 0, xtype, IAA_RAM,
- IAA_RAM, CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_BCOPY, 0, xtype, IAA_RAM,
+ IAA_RAM, tri0, CB_IMA(hints)));
}
static inline void gru_bstore(void *cb, const unsigned long src,
ins->baddr0 = (long)src;
ins->op2_value_baddr1 = (long)dest;
ins->nelem = nelem;
- ins->tri0 = tri0;
- gru_start_instruction(ins, __opword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
+ tri0, CB_IMA(hints)));
}
static inline void gru_gamir(void *cb, int exopc, unsigned long src,
struct gru_instruction *ins = (void *)cb;
ins->baddr0 = (long)src;
- gru_start_instruction(ins, __opword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
struct gru_instruction *ins = (void *)cb;
ins->baddr0 = (long)src;
- gru_start_instruction(ins, __opword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamer(void *cb, int exopc, unsigned long src,
ins->baddr0 = (long)src;
ins->op1_stride = operand1;
ins->op2_value_baddr1 = operand2;
- gru_start_instruction(ins, __opword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
ins->baddr0 = (long)src;
ins->op1_stride = operand1;
ins->op2_value_baddr1 = operand2;
- gru_start_instruction(ins, __opword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamxr(void *cb, unsigned long src,
ins->baddr0 = (long)src;
ins->nelem = 4;
- gru_start_instruction(ins, __opword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
- IAA_RAM, 0, CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
+ IAA_RAM, 0, 0, CB_IMA(hints)));
}
static inline void gru_mesq(void *cb, unsigned long queue,
ins->baddr0 = (long)queue;
ins->nelem = nelem;
- ins->tri0 = tri0;
- gru_start_instruction(ins, __opword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline unsigned long gru_get_amo_value(void *cb)
gru_wait_abort_proc(cb);
}
+/*
+ * Get a pointer to the start of a gseg
+ * p - Any valid pointer within the gseg
+ */
+static inline void *gru_get_gseg_pointer(void *p)
+{
+ return (void *)((unsigned long)p & ~(GRU_GSEG_PAGESIZE - 1));
+}
/*
* Get a pointer to a control block
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
+/* Return codes for vtop functions */
+#define VTOP_SUCCESS 0
+#define VTOP_INVALID -1
+#define VTOP_RETRY -2
+
+
/*
* Test if a physical address is a valid GRU GSEG address
*/
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- struct gru_thread_state *gts = NULL;
+ struct gru_thread_state *gts = ERR_PTR(-EINVAL);
down_write(&mm->mmap_sem);
vma = gru_find_vma(vaddr);
- if (vma)
- gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
- if (gts) {
- mutex_lock(&gts->ts_ctxlock);
- downgrade_write(&mm->mmap_sem);
- } else {
- up_write(&mm->mmap_sem);
- }
+ if (!vma)
+ goto err;
+ gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
+ if (IS_ERR(gts))
+ goto err;
+ mutex_lock(&gts->ts_ctxlock);
+ downgrade_write(&mm->mmap_sem);
+ return gts;
+
+err:
+ up_write(&mm->mmap_sem);
return gts;
}
* is necessary to prevent the user from seeing a stale cb.istatus that will
* change as soon as the TFH restart is complete. Races may cause an
* occasional failure to clear the cb.istatus, but that is ok.
- *
- * If the cb address is not valid (should not happen, but...), nothing
- * bad will happen.. The get_user()/put_user() will fail but there
- * are no bad side-effects.
*/
-static void gru_cb_set_istatus_active(unsigned long __user *cb)
+static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
- union {
- struct gru_instruction_bits bits;
- unsigned long dw;
- } u;
-
- if (cb) {
- get_user(u.dw, cb);
- u.bits.istatus = CBS_ACTIVE;
- put_user(u.dw, cb);
+ if (cbk) {
+ cbk->istatus = CBS_ACTIVE;
}
}
/*
- * Convert a interrupt IRQ to a pointer to the GRU GTS that caused the
- * interrupt. Interrupts are always sent to a cpu on the blade that contains the
- * GRU (except for headless blades which are not currently supported). A blade
- * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
- * number uniquely identifies the GRU chiplet on the local blade that caused the
- * interrupt. Always called in interrupt context.
- */
-static inline struct gru_state *irq_to_gru(int irq)
-{
- return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
-}
-
-/*
* Read & clear a TFM
*
* The GRU has an array of fault maps. A map is private to a cpu
{
struct page *page;
- /* ZZZ Need to handle HUGE pages */
- if (is_vm_hugetlb_page(vma))
- return -EFAULT;
+#ifdef CONFIG_HUGETLB_PAGE
+ *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
*pageshift = PAGE_SHIFT;
+#endif
if (get_user_pages
(current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
return -EFAULT;
return 0;
err:
- local_irq_enable();
return 1;
}
paddr = paddr & ~((1UL << ps) - 1);
*gpa = uv_soc_phys_ram_to_gpa(paddr);
*pageshift = ps;
- return 0;
+ return VTOP_SUCCESS;
inval:
- return -1;
+ return VTOP_INVALID;
upm:
- return -2;
+ return VTOP_RETRY;
+}
+
+
+/*
+ * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
+ * CBE cacheline so that the line will be written back to the home agent.
+ * Otherwise the line may be silently dropped. This has no impact
+ * except on performance.
+ */
+static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
+{
+ if (unlikely(cbe)) {
+ cbe->cbrexecstatus = 0; /* make CL dirty */
+ gru_flush_cache(cbe);
+ }
}
+/*
+ * Preload the TLB with entries that may be required. Currently, preloading
+ * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
+ * the end of the bcopy transfer, whichever is smaller.
+ */
+static void gru_preload_tlb(struct gru_state *gru,
+ struct gru_thread_state *gts, int atomic,
+ unsigned long fault_vaddr, int asid, int write,
+ unsigned char tlb_preload_count,
+ struct gru_tlb_fault_handle *tfh,
+ struct gru_control_block_extended *cbe)
+{
+ unsigned long vaddr = 0, gpa;
+ int ret, pageshift;
+
+ if (cbe->opccpy != OP_BCOPY)
+ return;
+
+ if (fault_vaddr == cbe->cbe_baddr0)
+ vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
+ else if (fault_vaddr == cbe->cbe_baddr1)
+ vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
+
+ fault_vaddr &= PAGE_MASK;
+ vaddr &= PAGE_MASK;
+ vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
+
+ while (vaddr > fault_vaddr) {
+ ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+ if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
+ GRU_PAGESIZE(pageshift)))
+ return;
+ gru_dbg(grudev,
+ "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
+ atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
+ vaddr, asid, write, pageshift, gpa);
+ vaddr -= PAGE_SIZE;
+ STAT(tlb_preload_page);
+ }
+}
/*
* Drop a TLB entry into the GRU. The fault is described by info in an TFH.
* < 0 = error code
*
*/
-static int gru_try_dropin(struct gru_thread_state *gts,
+static int gru_try_dropin(struct gru_state *gru,
+ struct gru_thread_state *gts,
struct gru_tlb_fault_handle *tfh,
- unsigned long __user *cb)
+ struct gru_instruction_bits *cbk)
{
- int pageshift = 0, asid, write, ret, atomic = !cb;
+ struct gru_control_block_extended *cbe = NULL;
+ unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
+ int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
unsigned long gpa = 0, vaddr = 0;
/*
*/
/*
+ * Prefetch the CBE if doing TLB preloading
+ */
+ if (unlikely(tlb_preload_count)) {
+ cbe = gru_tfh_to_cbe(tfh);
+ prefetchw(cbe);
+ }
+
+ /*
* Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
* Might be a hardware race OR a stupid user. Ignore FMM because FMM
* is a transient state.
*/
if (tfh->status != TFHSTATUS_EXCEPTION) {
gru_flush_cache(tfh);
+ sync_core();
if (tfh->status != TFHSTATUS_EXCEPTION)
goto failnoexception;
STAT(tfh_stale_on_fault);
}
if (tfh->state == TFHSTATE_IDLE)
goto failidle;
- if (tfh->state == TFHSTATE_MISS_FMM && cb)
+ if (tfh->state == TFHSTATE_MISS_FMM && cbk)
goto failfmm;
write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
vaddr = tfh->missvaddr;
asid = tfh->missasid;
+ indexway = tfh->indexway;
if (asid == 0)
goto failnoasid;
goto failactive;
ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
- if (ret == -1)
+ if (ret == VTOP_INVALID)
goto failinval;
- if (ret == -2)
+ if (ret == VTOP_RETRY)
goto failupm;
if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
- if (atomic || !gru_update_cch(gts, 0)) {
+ if (atomic || !gru_update_cch(gts)) {
gts->ts_force_cch_reload = 1;
goto failupm;
}
}
- gru_cb_set_istatus_active(cb);
+
+ if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
+ gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
+ gru_flush_cache_cbe(cbe);
+ }
+
+ gru_cb_set_istatus_active(cbk);
+ gts->ustats.tlbdropin++;
tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
GRU_PAGESIZE(pageshift));
- STAT(tlb_dropin);
gru_dbg(grudev,
- "%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
- ret ? "non-atomic" : "atomic", tfh, vaddr, asid,
- pageshift, gpa);
+ "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
+ " rw %d, ps %d, gpa 0x%lx\n",
+ atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
+ indexway, write, pageshift, gpa);
+ STAT(tlb_dropin);
return 0;
failnoasid:
/* No asid (delayed unload). */
STAT(tlb_dropin_fail_no_asid);
gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
- if (!cb)
+ if (!cbk)
tfh_user_polling_mode(tfh);
else
gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
return -EAGAIN;
failupm:
/* Atomic failure switch CBR to UPM */
tfh_user_polling_mode(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_upm);
gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
return 1;
failfmm:
/* FMM state on UPM call */
gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_fmm);
gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
failnoexception:
/* TFH status did not show exception pending */
gru_flush_cache(tfh);
- if (cb)
- gru_flush_cache(cb);
+ gru_flush_cache_cbe(cbe);
+ if (cbk)
+ gru_flush_cache(cbk);
STAT(tlb_dropin_fail_no_exception);
- gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state);
+ gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
+ tfh, tfh->status, tfh->state);
return 0;
failidle:
/* TFH state was idle - no miss pending */
gru_flush_cache(tfh);
- if (cb)
- gru_flush_cache(cb);
+ gru_flush_cache_cbe(cbe);
+ if (cbk)
+ gru_flush_cache(cbk);
STAT(tlb_dropin_fail_idle);
gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
failinval:
/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
tfh_exception(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_invalid);
gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
return -EFAULT;
failactive:
/* Range invalidate active. Switch to UPM iff atomic */
- if (!cb)
+ if (!cbk)
tfh_user_polling_mode(tfh);
else
gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_range_active);
gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
tfh, vaddr);
* Note that this is the interrupt handler that is registered with linux
* interrupt handlers.
*/
-irqreturn_t gru_intr(int irq, void *dev_id)
+static irqreturn_t gru_intr(int chiplet, int blade)
{
struct gru_state *gru;
struct gru_tlb_fault_map imap, dmap;
struct gru_thread_state *gts;
struct gru_tlb_fault_handle *tfh = NULL;
+ struct completion *cmp;
int cbrnum, ctxnum;
STAT(intr);
- gru = irq_to_gru(irq);
+ gru = &gru_base[blade]->bs_grus[chiplet];
if (!gru) {
- dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
- raw_smp_processor_id(), irq);
+ dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
+ raw_smp_processor_id(), chiplet);
return IRQ_NONE;
}
get_clear_fault_map(gru, &imap, &dmap);
+ gru_dbg(grudev,
+ "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
+ smp_processor_id(), chiplet, gru->gs_gid,
+ imap.fault_bits[0], imap.fault_bits[1],
+ dmap.fault_bits[0], dmap.fault_bits[1]);
for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
- complete(gru->gs_blade->bs_async_wq);
+ STAT(intr_cbr);
+ cmp = gru->gs_blade->bs_async_wq;
+ if (cmp)
+ complete(cmp);
gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
- gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
+ gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
}
for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
+ STAT(intr_tfh);
tfh = get_tfh_by_index(gru, cbrnum);
prefetchw(tfh); /* Helps on hdw, required for emulator */
ctxnum = tfh->ctxnum;
gts = gru->gs_gts[ctxnum];
+ /* Spurious interrupts can cause this. Ignore. */
+ if (!gts) {
+ STAT(intr_spurious);
+ continue;
+ }
+
/*
* This is running in interrupt context. Trylock the mmap_sem.
* If it fails, retry the fault in user context.
*/
+ gts->ustats.fmm_tlbmiss++;
if (!gts->ts_force_cch_reload &&
down_read_trylock(&gts->ts_mm->mmap_sem)) {
- gts->ustats.fmm_tlbdropin++;
- gru_try_dropin(gts, tfh, NULL);
+ gru_try_dropin(gru, gts, tfh, NULL);
up_read(&gts->ts_mm->mmap_sem);
} else {
tfh_user_polling_mode(tfh);
return IRQ_HANDLED;
}
+irqreturn_t gru0_intr(int irq, void *dev_id)
+{
+ return gru_intr(0, uv_numa_blade_id());
+}
+
+irqreturn_t gru1_intr(int irq, void *dev_id)
+{
+ return gru_intr(1, uv_numa_blade_id());
+}
+
+irqreturn_t gru_intr_mblade(int irq, void *dev_id)
+{
+ int blade;
+
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ gru_intr(0, blade);
+ gru_intr(1, blade);
+ }
+ return IRQ_HANDLED;
+}
+
static int gru_user_dropin(struct gru_thread_state *gts,
struct gru_tlb_fault_handle *tfh,
- unsigned long __user *cb)
+ void *cb)
{
struct gru_mm_struct *gms = gts->ts_gms;
int ret;
- gts->ustats.upm_tlbdropin++;
+ gts->ustats.upm_tlbmiss++;
while (1) {
wait_event(gms->ms_wait_queue,
atomic_read(&gms->ms_range_active) == 0);
prefetchw(tfh); /* Helps on hdw, required for emulator */
- ret = gru_try_dropin(gts, tfh, cb);
+ ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
if (ret <= 0)
return ret;
STAT(call_os_wait_queue);
{
struct gru_tlb_fault_handle *tfh;
struct gru_thread_state *gts;
- unsigned long __user *cbp;
+ void *cbk;
int ucbnum, cbrnum, ret = -EINVAL;
STAT(call_os);
- gru_dbg(grudev, "address 0x%lx\n", cb);
/* sanity check the cb pointer */
ucbnum = get_cb_number((void *)cb);
if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
return -EINVAL;
- cbp = (unsigned long *)cb;
gts = gru_find_lock_gts(cb);
if (!gts)
return -EINVAL;
+ gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
goto exit;
- /*
- * If force_unload is set, the UPM TLB fault is phony. The task
- * has migrated to another node and the GSEG must be moved. Just
- * unload the context. The task will page fault and assign a new
- * context.
- */
- if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
- gts->ts_blade != uv_numa_blade_id()) {
- STAT(call_os_offnode_reference);
- gts->ts_force_unload = 1;
- }
+ gru_check_context_placement(gts);
/*
* CCH may contain stale data if ts_force_cch_reload is set.
*/
if (gts->ts_gru && gts->ts_force_cch_reload) {
gts->ts_force_cch_reload = 0;
- gru_update_cch(gts, 0);
+ gru_update_cch(gts);
}
ret = -EAGAIN;
cbrnum = thread_cbr_number(gts, ucbnum);
- if (gts->ts_force_unload) {
- gru_unload_context(gts, 1);
- } else if (gts->ts_gru) {
+ if (gts->ts_gru) {
tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
- ret = gru_user_dropin(gts, tfh, cbp);
+ cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
+ gts->ts_ctxnum, ucbnum);
+ ret = gru_user_dropin(gts, tfh, cbk);
}
exit:
gru_unlock_gts(gts);
if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
return -EFAULT;
- gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
gts = gru_find_lock_gts(excdet.cb);
if (!gts)
return -EINVAL;
+ gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
ucbnum = get_cb_number((void *)excdet.cb);
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
ret = -EINVAL;
cbrnum = thread_cbr_number(gts, ucbnum);
cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
gru_flush_cache(cbe); /* CBE not coherent */
+ sync_core(); /* make sure we have current data */
excdet.opc = cbe->opccpy;
excdet.exopc = cbe->exopccpy;
excdet.ecause = cbe->ecause;
excdet.exceptdet1 = cbe->idef3upd;
excdet.cbrstate = cbe->cbrstate;
excdet.cbrexecstatus = cbe->cbrexecstatus;
- gru_flush_cache(cbe);
+ gru_flush_cache_cbe(cbe);
ret = 0;
} else {
ret = -EAGAIN;
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
+ /*
+ * The library creates arrays of contexts for threaded programs.
+ * If no gts exists in the array, the context has never been used & all
+ * statistics are implicitly 0.
+ */
gts = gru_find_lock_gts(req.gseg);
if (gts) {
memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
return -EFAULT;
gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
- gts = gru_alloc_locked_gts(req.gseg);
- if (!gts)
- return -EINVAL;
+ gts = gru_find_lock_gts(req.gseg);
+ if (!gts) {
+ gts = gru_alloc_locked_gts(req.gseg);
+ if (IS_ERR(gts))
+ return PTR_ERR(gts);
+ }
switch (req.op) {
+ case sco_blade_chiplet:
+ /* Select blade/chiplet for GRU context */
+ if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
+     (req.val1 >= 0 && !gru_base[req.val1]) ||
+     req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
+ ret = -EINVAL;
+ } else {
+ gts->ts_user_blade_id = req.val1;
+ gts->ts_user_chiplet_id = req.val0;
+ gru_check_context_placement(gts);
+ }
+ break;
case sco_gseg_owner:
/* Register the current task as the GSEG owner */
gts->ts_tgid_owner = current->tgid;
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
+#ifdef CONFIG_X86_64
+#include <asm/uv/uv_irq.h>
+#endif
#include <asm/uv/uv.h>
#include "gru.h"
#include "grulib.h"
struct gru_vma_data *vdata;
int ret = -EINVAL;
-
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
vdata->vd_dsr_au_count =
GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
+ vdata->vd_tlb_preload_count = req.tlb_preload_count;
ret = 0;
}
up_write(&current->mm->mmap_sem);
{
int err = -EBADRQC;
- gru_dbg(grudev, "file %p\n", file);
+ gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);
switch (req) {
case GRU_CREATE_CONTEXT:
* system.
*/
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
- void *vaddr, int nid, int bid, int grunum)
+ void *vaddr, int blade_id, int chiplet_id)
{
spin_lock_init(&gru->gs_lock);
spin_lock_init(&gru->gs_asid_lock);
gru->gs_gru_base_paddr = paddr;
gru->gs_gru_base_vaddr = vaddr;
- gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum;
- gru->gs_blade = gru_base[bid];
- gru->gs_blade_id = bid;
+ gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
+ gru->gs_blade = gru_base[blade_id];
+ gru->gs_blade_id = blade_id;
+ gru->gs_chiplet_id = chiplet_id;
gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
gru->gs_asid_limit = MAX_ASID;
gru_tgh_flush_init(gru);
if (gru->gs_gid >= gru_max_gids)
gru_max_gids = gru->gs_gid + 1;
- gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
- bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
+ gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
+ blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
gru->gs_gru_base_paddr);
}
max_user_cbrs = GRU_NUM_CB;
max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
- for_each_online_node(nid) {
- bid = uv_node_to_blade_id(nid);
- pnode = uv_node_to_pnode(nid);
- if (bid < 0 || gru_base[bid])
- continue;
- page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
+ for_each_possible_blade(bid) {
+ pnode = uv_blade_to_pnode(bid);
+ nid = uv_blade_to_memory_nid(bid); /* -1 if no memory on blade */
+ page = alloc_pages_node(nid, GFP_KERNEL, order);
if (!page)
goto fail;
gru_base[bid] = page_address(page);
chip++, gru++) {
paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
- gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
+ gru_init_chiplet(gru, paddr, vaddr, bid, chip);
n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
cbrs = max(cbrs, n);
n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
return 0;
fail:
- for (nid--; nid >= 0; nid--)
- free_pages((unsigned long)gru_base[nid], order);
+ for (bid--; bid >= 0; bid--)
+ free_pages((unsigned long)gru_base[bid], order);
return -ENOMEM;
}
-#ifdef CONFIG_IA64
+static void gru_free_tables(void)
+{
+ int bid;
+ int order = get_order(sizeof(struct gru_state) *
+ GRU_CHIPLETS_PER_BLADE);
-static int get_base_irq(void)
+ for (bid = 0; bid < GRU_MAX_BLADES; bid++)
+ free_pages((unsigned long)gru_base[bid], order);
+}
+
+static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
{
- return IRQ_GRU;
+ unsigned long mmr = 0;
+ int core;
+
+ /*
+ * We target the cores of a blade and not the hyperthreads themselves.
+ * There is a max of 8 cores per socket and 2 sockets per blade,
+ * making for a max total of 16 cores (i.e., 16 CPUs without
+ * hyperthreading and 32 CPUs with hyperthreading).
+ */
+ core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
+ if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
+ return 0;
+
+ if (chiplet == 0) {
+ mmr = UVH_GR0_TLB_INT0_CONFIG +
+ core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
+ } else if (chiplet == 1) {
+ mmr = UVH_GR1_TLB_INT0_CONFIG +
+ core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
+ } else {
+ BUG();
+ }
+
+ *corep = core;
+ return mmr;
}
-#elif defined CONFIG_X86_64
+#ifdef CONFIG_IA64
-static void noop(unsigned int irq)
+static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
+
+static void gru_noop(unsigned int irq)
{
}
-static struct irq_chip gru_chip = {
- .name = "gru",
- .mask = noop,
- .unmask = noop,
- .ack = noop,
+static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
+ [0 ... GRU_CHIPLETS_PER_BLADE - 1] = {
+ .mask = gru_noop,
+ .unmask = gru_noop,
+ .ack = gru_noop
+ }
};
-static int get_base_irq(void)
+static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
+ irq_handler_t irq_handler, int cpu, int blade)
+{
+ unsigned long mmr;
+ int irq = IRQ_GRU + chiplet;
+ int ret, core;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return 0;
+
+ if (gru_irq_count[chiplet] == 0) {
+ gru_chip[chiplet].name = irq_name;
+ ret = set_irq_chip(irq, &gru_chip[chiplet]);
+ if (ret) {
+ printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+
+ ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
+ if (ret) {
+ printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+ }
+ gru_irq_count[chiplet]++;
+
+ return 0;
+}
+
+static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
+{
+ unsigned long mmr;
+ int core, irq = IRQ_GRU + chiplet;
+
+ if (gru_irq_count[chiplet] == 0)
+ return;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return;
+
+ if (--gru_irq_count[chiplet] == 0)
+ free_irq(irq, NULL);
+}
+
+#elif defined CONFIG_X86_64
+
+static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
+ irq_handler_t irq_handler, int cpu, int blade)
+{
+ unsigned long mmr;
+ int irq, core;
+ int ret;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return 0;
+
+ irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
+ if (irq < 0) {
+ printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -irq);
+ return irq;
+ }
+
+ ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
+ if (ret) {
+ uv_teardown_irq(irq);
+ printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+ gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
+ return 0;
+}
+
+static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
- set_irq_chip(IRQ_GRU, &gru_chip);
- set_irq_chip(IRQ_GRU + 1, &gru_chip);
- return IRQ_GRU;
+ int irq, core;
+ unsigned long mmr;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr) {
+ irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
+ if (irq) {
+ free_irq(irq, NULL);
+ uv_teardown_irq(irq);
+ }
+ }
}
+
#endif
+static void gru_teardown_tlb_irqs(void)
+{
+ int blade;
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ blade = uv_cpu_to_blade_id(cpu);
+ gru_chiplet_teardown_tlb_irq(0, cpu, blade);
+ gru_chiplet_teardown_tlb_irq(1, cpu, blade);
+ }
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ gru_chiplet_teardown_tlb_irq(0, 0, blade);
+ gru_chiplet_teardown_tlb_irq(1, 0, blade);
+ }
+}
+
+static int gru_setup_tlb_irqs(void)
+{
+ int blade;
+ int cpu;
+ int ret;
+
+ for_each_online_cpu(cpu) {
+ blade = uv_cpu_to_blade_id(cpu);
+ ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
+ if (ret != 0)
+ goto exit1;
+
+ ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
+ if (ret != 0)
+ goto exit1;
+ }
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
+ if (ret != 0)
+ goto exit1;
+
+ ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
+ if (ret != 0)
+ goto exit1;
+ }
+
+ return 0;
+
+exit1:
+ gru_teardown_tlb_irqs();
+ return ret;
+}
+
/*
* gru_init
*
*/
static int __init gru_init(void)
{
- int ret, irq, chip;
- char id[10];
+ int ret;
if (!is_uv_system())
return 0;
gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
gru_start_paddr, gru_end_paddr);
- irq = get_base_irq();
- for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
- ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
- /* TODO: fix irq handling on x86. For now ignore failure because
- * interrupts are not required & not yet fully supported */
- if (ret) {
- printk(KERN_WARNING
- "!!!WARNING: GRU ignoring request failure!!!\n");
- ret = 0;
- }
- if (ret) {
- printk(KERN_ERR "%s: request_irq failed\n",
- GRU_DRIVER_ID_STR);
- goto exit1;
- }
- }
-
ret = misc_register(&gru_miscdev);
if (ret) {
printk(KERN_ERR "%s: misc_register failed\n",
GRU_DRIVER_ID_STR);
- goto exit1;
+ goto exit0;
}
ret = gru_proc_init();
if (ret) {
printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
- goto exit2;
+ goto exit1;
}
ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
if (ret) {
printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
- goto exit3;
+ goto exit2;
}
+
+ ret = gru_setup_tlb_irqs();
+ if (ret != 0)
+ goto exit3;
+
gru_kservices_init();
printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
return 0;
exit3:
- gru_proc_exit();
+ gru_free_tables();
exit2:
- misc_deregister(&gru_miscdev);
+ gru_proc_exit();
exit1:
- for (--chip; chip >= 0; chip--)
- free_irq(irq + chip, NULL);
+ misc_deregister(&gru_miscdev);
+exit0:
return ret;
}
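
The relabeled error exits above follow the standard kernel unwind ladder: each label undoes only the steps that had already succeeded, in reverse order, so a failure at step N falls through exactly N-1 teardown calls. A minimal sketch of the idiom (foo_setup/bar_setup are hypothetical, not part of this driver):

	static int __init example_init(void)
	{
		int ret;

		ret = foo_setup();	/* step 1 */
		if (ret)
			goto exit0;	/* nothing to undo yet */
		ret = bar_setup();	/* step 2 */
		if (ret)
			goto exit1;	/* undo step 1 only */
		return 0;
	exit1:
		foo_teardown();
	exit0:
		return ret;
	}
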
static void __exit gru_exit(void)
{
- int i, bid;
- int order = get_order(sizeof(struct gru_state) *
- GRU_CHIPLETS_PER_BLADE);
-
if (!is_uv_system())
return;
- for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
- free_irq(IRQ_GRU + i, NULL);
+ gru_teardown_tlb_irqs();
gru_kservices_exit();
- for (bid = 0; bid < GRU_MAX_BLADES; bid++)
- free_pages((unsigned long)gru_base[bid], order);
-
+ gru_free_tables();
misc_deregister(&gru_miscdev);
gru_proc_exit();
}
#ifdef CONFIG_IA64
#include <asm/processor.h>
#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
+#define CLKS2NSEC(c) ((c) *1000000000 / local_cpu_data->itc_freq)
#else
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz)
#endif
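
Both CLKS2NSEC() variants convert a cycle delta to nanoseconds; only the clock source differs (ITC frequency in Hz on ia64, tsc_khz on x86). As a sanity check on the x86 form: with a hypothetical tsc_khz of 2500000 (2.5 GHz), 5000 clks * 1000000 / 2500000 = 2000 ns, i.e. 0.4 ns per clock, as expected.
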
/* Extract the status field from a kernel handle */
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
+ unsigned long nsec;
+
+ nsec = CLKS2NSEC(clks);
atomic_long_inc(&mcs_op_statistics[op].count);
- atomic_long_add(clks, &mcs_op_statistics[op].total);
- if (mcs_op_statistics[op].max < clks)
- mcs_op_statistics[op].max = clks;
+ atomic_long_add(nsec, &mcs_op_statistics[op].total);
+ if (mcs_op_statistics[op].max < nsec)
+ mcs_op_statistics[op].max = nsec;
}
static void start_instruction(void *h)
{
unsigned long *w0 = h;
- wmb(); /* setting CMD bit must be last */
- *w0 = *w0 | 1;
+ wmb(); /* setting CMD/STATUS bits must be last */
+ *w0 = *w0 | 0x20001;
gru_flush_cache(h);
}
+static void report_instruction_timeout(void *h)
+{
+ unsigned long goff = GSEGPOFF((unsigned long)h);
+ char *id = "???";
+
+ if (TYPE_IS(CCH, goff))
+ id = "CCH";
+ else if (TYPE_IS(TGH, goff))
+ id = "TGH";
+ else if (TYPE_IS(TFH, goff))
+ id = "TFH";
+
+ panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
+}
+
static int wait_instruction_complete(void *h, enum mcs_op opc)
{
int status;
status = GET_MSEG_HANDLE_STATUS(h);
if (status != CCHSTATUS_ACTIVE)
break;
- if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time))
- panic("GRU %p is malfunctioning: start %ld, end %ld\n",
- h, start_time, (unsigned long)get_cycles());
+ if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
+ report_instruction_timeout(h);
+ start_time = get_cycles();
+ }
}
if (gru_options & OPT_STATS)
update_mcs_stats(opc, get_cycles() - start_time);
int cch_allocate(struct gru_context_configuration_handle *cch)
{
+ int ret;
+
cch->opc = CCHOP_ALLOCATE;
start_instruction(cch);
- return wait_instruction_complete(cch, cchop_allocate);
+ ret = wait_instruction_complete(cch, cchop_allocate);
+
+ /*
+ * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
+ * The GSEG memory does not exist until the ALLOCATE completes.
+ */
+ sync_core();
+ return ret;
}
int cch_start(struct gru_context_configuration_handle *cch)
int cch_deallocate(struct gru_context_configuration_handle *cch)
{
+ int ret;
+
cch->opc = CCHOP_DEALLOCATE;
start_instruction(cch);
- return wait_instruction_complete(cch, cchop_deallocate);
+ ret = wait_instruction_complete(cch, cchop_deallocate);
+
+ /*
+ * Stop speculation into the GSEG being unmapped by the previous
+ * DEALLOCATE.
+ */
+ sync_core();
+ return ret;
}
int cch_interrupt_sync(struct gru_context_configuration_handle
return wait_instruction_complete(tgh, tghop_invalidate);
}
-void tfh_write_only(struct gru_tlb_fault_handle *tfh,
- unsigned long pfn, unsigned long vaddr,
- int asid, int dirty, int pagesize)
+int tfh_write_only(struct gru_tlb_fault_handle *tfh,
+ unsigned long paddr, int gaa,
+ unsigned long vaddr, int asid, int dirty,
+ int pagesize)
{
tfh->fillasid = asid;
tfh->fillvaddr = vaddr;
- tfh->pfn = pfn;
+ tfh->pfn = paddr >> GRU_PADDR_SHIFT;
+ tfh->gaa = gaa;
tfh->dirty = dirty;
tfh->pagesize = pagesize;
tfh->opc = TFHOP_WRITE_ONLY;
start_instruction(tfh);
+ return wait_instruction_complete(tfh, tfhop_write_only);
}
void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
/* Convert an arbitrary handle address to the beginning of the GRU segment */
#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
+/* Test a valid handle address to determine the type */
+#define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \
+ GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \
+ (((h) & (GRU_HANDLE_STRIDE - 1)) == 0))
+
+
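
TYPE_IS() classifies a handle by its offset within the GRU: the offset must land inside the named handle array and be stride aligned. A hedged usage sketch, mirroring report_instruction_timeout() earlier in this patch:

	unsigned long goff = GSEGPOFF((unsigned long)h);

	if (TYPE_IS(TFH, goff))
		/* h is one of the GRU_NUM_TFH TLB fault handles */;
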
/* General addressing macros. */
static inline void *get_gseg_base_address(void *base, int ctxnum)
{
return vaddr + GRU_SIZE * (2 * pnode + chiplet);
}
+static inline struct gru_control_block_extended *gru_tfh_to_cbe(
+ struct gru_tlb_fault_handle *tfh)
+{
+ unsigned long cbe;
+
+ cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
+ return (struct gru_control_block_extended*)cbe;
+}
+
+
/*
TGHSTATE_RESTART_CTX,
};
+enum gru_tgh_cause {
+ TGHCAUSE_RR_ECC,
+ TGHCAUSE_TLB_ECC,
+ TGHCAUSE_LRU_ECC,
+ TGHCAUSE_PS_ECC,
+ TGHCAUSE_MUL_ERR,
+ TGHCAUSE_DATA_ERR,
+ TGHCAUSE_SW_FORCE
+};
+
+
/*
* TFH - TLB Global Handle
* Used for TLB dropins into the GRU TLB.
unsigned int cbrexecstatus:8;
};
+/* CBE fields for active BCOPY instructions */
+#define cbe_baddr0 idef1upd
+#define cbe_baddr1 idef3upd
+#define cbe_src_cl idef6cpy
+#define cbe_nelemcur idef5upd
+
enum gru_cbr_state {
CBRSTATE_INACTIVE,
CBRSTATE_IDLE,
int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
unsigned long vaddrmask, int asid, int pagesize, int global, int n,
unsigned short ctxbitmap);
-void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn,
- unsigned long vaddr, int asid, int dirty, int pagesize);
+int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
+ int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_restart(struct gru_tlb_fault_handle *tfh);
static int gru_dump_context_data(void *grubase,
struct gru_context_configuration_handle *cch,
- void __user *ubuf, int ctxnum, int dsrcnt)
+ void __user *ubuf, int ctxnum, int dsrcnt,
+ int flush_cbrs)
{
void *cb, *cbe, *tfh, *gseg;
int i, scr;
tfh = grubase + GRU_TFH_BASE;
for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
+ if (flush_cbrs)
+ gru_flush_cache(cb);
if (gru_user_copy_handle(&ubuf, cb))
goto fail;
if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
static int gru_dump_context(struct gru_state *gru, int ctxnum,
void __user *ubuf, void __user *ubufend, char data_opt,
- char lock_cch)
+ char lock_cch, char flush_cbrs)
{
struct gru_dump_context_header hdr;
struct gru_dump_context_header __user *uhdr = ubuf;
ret = -EFBIG;
else
ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
- dsrcnt);
-
+ dsrcnt, flush_cbrs);
}
if (cch_locked)
unlock_cch_handle(cch);
for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
if (req.ctxnum == ctxnum || req.ctxnum < 0) {
ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
- req.data_opt, req.lock_cch);
+ req.data_opt, req.lock_cch,
+ req.flush_cbrs);
if (ret < 0)
goto fail;
ubuf += ret;
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
+#include <asm/io_apic.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#define ASYNC_HAN_TO_BID(h) ((h) - 1)
#define ASYNC_BID_TO_HAN(b) ((b) + 1)
#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)]
-#define KCB_TO_GID(cb) ((cb - gru_start_vaddr) / \
- (GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
-#define KCB_TO_BS(cb) gru_base[KCB_TO_GID(cb)]
#define GRU_NUM_KERNEL_CBR 1
#define GRU_NUM_KERNEL_DSR_BYTES 256
up_read(&bs->bs_kgts_sema);
down_write(&bs->bs_kgts_sema);
- if (!bs->bs_kgts)
- bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
+ if (!bs->bs_kgts) {
+ bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
+ bs->bs_kgts->ts_user_blade_id = blade_id;
+ }
kgts = bs->bs_kgts;
if (!kgts->ts_gru) {
kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
GRU_NUM_KERNEL_DSR_BYTES * ncpus +
bs->bs_async_dsr_bytes);
- while (!gru_assign_gru_context(kgts, blade_id)) {
+ while (!gru_assign_gru_context(kgts)) {
msleep(1);
- gru_steal_context(kgts, blade_id);
+ gru_steal_context(kgts);
}
gru_load_context(kgts);
gru = bs->bs_kgts->ts_gru;
bs = gru_base[bid];
if (!bs)
continue;
+
+ /* Ignore busy contexts. Don't want to block here. */
if (down_write_trylock(&bs->bs_kgts_sema)) {
kgts = bs->bs_kgts;
if (kgts && kgts->ts_gru)
gru_unload_context(kgts, 0);
- kfree(kgts);
bs->bs_kgts = NULL;
up_write(&bs->bs_kgts_sema);
+ kfree(kgts);
} else {
ret++;
}
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
struct gru_blade_state *bs;
+ int bid;
STAT(lock_kernel_context);
- bs = gru_base[blade_id];
+again:
+ bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
+ bs = gru_base[bid];
+ /* Handle the case where migration occurred while waiting for the sema */
down_read(&bs->bs_kgts_sema);
+ if (blade_id < 0 && bid != uv_numa_blade_id()) {
+ up_read(&bs->bs_kgts_sema);
+ goto again;
+ }
if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
- gru_load_kernel_context(bs, blade_id);
+ gru_load_kernel_context(bs, bid);
return bs;
}
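
The again: loop closes a migration race: the cpu can move to another blade between reading uv_numa_blade_id() and acquiring bs_kgts_sema, so the blade id is re-read under the semaphore and the sequence retried on mismatch. The same recheck-after-sleep shape as a generic sketch (node_id()/table[] are hypothetical):

	retry:
		id = node_id();
		down_read(&table[id].sem);
		if (id != node_id()) {	/* migrated while blocked */
			up_read(&table[id].sem);
			goto retry;
		}
		/* proceed against table[id], which matches the running node */
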
BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
preempt_disable();
- bs = gru_lock_kernel_context(uv_numa_blade_id());
+ bs = gru_lock_kernel_context(-1);
lcpu = uv_blade_processor_id();
*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
struct control_block_extended_exc_detail *excdet)
{
struct gru_control_block_extended *cbe;
- struct gru_blade_state *bs;
- int cbrnum;
-
- bs = KCB_TO_BS(cb);
- cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
+ struct gru_thread_state *kgts = NULL;
+ unsigned long off;
+ int cbrnum, bid;
+
+ /*
+ * Locate kgts for cb. This algorithm is SLOW but
+ * this function is rarely called (i.e., almost never).
+ * Performance does not matter.
+ */
+ for_each_possible_blade(bid) {
+ if (!gru_base[bid])
+ break;
+ kgts = gru_base[bid]->bs_kgts;
+ if (!kgts || !kgts->ts_gru)
+ continue;
+ off = cb - kgts->ts_gru->gs_gru_base_vaddr;
+ if (off < GRU_SIZE)
+ break;
+ kgts = NULL;
+ }
+ BUG_ON(!kgts);
+ cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
cbe = get_cbe(GRUBASE(cb), cbrnum);
gru_flush_cache(cbe); /* CBE not coherent */
+ sync_core();
excdet->opc = cbe->opccpy;
excdet->exopc = cbe->exopccpy;
excdet->ecause = cbe->ecause;
if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
gru_get_cb_exception_detail(cb, &excdet);
snprintf(buf, size,
- "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
- "excdet0 0x%lx, excdet1 0x%x",
+ "GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
+ "excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
gen, excdet.opc, excdet.exopc, excdet.ecause,
excdet.exceptdet0, excdet.exceptdet1);
} else {
int ret;
ret = gen->istatus;
- if (ret != CBS_EXCEPTION)
- return ret;
- return gru_retry_exception(cb);
+ if (ret == CBS_EXCEPTION)
+ ret = gru_retry_exception(cb);
+ rmb();
+ return ret;
}
ret = gru_wait_idle_or_exception(gen);
if (ret == CBS_EXCEPTION)
ret = gru_retry_exception(cb);
-
+ rmb();
return ret;
}
mqd->mq = mq;
mqd->mq_gpa = uv_gpa(mq);
mqd->qlines = qlines;
- mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
+ mqd->interrupt_pnode = nasid >> 1;
mqd->interrupt_vector = vector;
mqd->interrupt_apicid = apicid;
return 0;
ret = MQE_UNEXPECTED_CB_ERR;
break;
case CBSS_PAGE_OVERFLOW:
+ STAT(mesq_noop_page_overflow);
+ /* fallthru */
default:
BUG();
}
}
/*
- * Send a cross-partition interrupt to the SSI that contains the target
- * message queue. Normally, the interrupt is automatically delivered by hardware
- * but some error conditions require explicit delivery.
- */
-static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
-{
- if (mqd->interrupt_vector)
- uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
- mqd->interrupt_vector);
-}
-
-/*
* Handle a PUT failure. Note: if message was a 2-line message, one of the
* lines might have successfully have been written. Before sending the
* message, "present" must be cleared in BOTH lines to prevent the receiver
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
void *mesg, int lines)
{
- unsigned long m;
+ unsigned long m, *val = mesg, gpa, save;
+ int ret;
m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
if (lines == 2) {
gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
if (gru_wait(cb) != CBS_IDLE)
return MQE_UNEXPECTED_CB_ERR;
- send_message_queue_interrupt(mqd);
+
+ if (!mqd->interrupt_vector)
+ return MQE_OK;
+
+ /*
+ * Send a cross-partition interrupt to the SSI that contains the target
+ * message queue. Normally, the interrupt is automatically delivered by
+ * hardware but some error conditions require explicit delivery.
+ * Use the GRU to deliver the interrupt. Otherwise partition failures
+ * could cause unrecovered errors.
+ */
+ gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
+ save = *val;
+ *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
+ dest_Fixed);
+ gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
+ ret = gru_wait(cb);
+ *val = save;
+ if (ret != CBS_IDLE)
+ return MQE_UNEXPECTED_CB_ERR;
return MQE_OK;
}
STAT(mesq_send_put_nacked);
ret = send_message_put_nacked(cb, mqd, mesg, lines);
break;
+ case CBSS_PAGE_OVERFLOW:
+ STAT(mesq_page_overflow);
+ /* fallthru */
default:
BUG();
}
int present = mhdr->present;
/* skip NOOP messages */
- STAT(mesq_receive);
while (present == MQS_NOOP) {
gru_free_message(mqd, mhdr);
mhdr = mq->next;
if (mhdr->lines == 2)
restore_present2(mhdr, mhdr->present2);
+ STAT(mesq_receive);
return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
/*
+ * Load a DW from a global GPA. The GPA can be a memory or MMR address.
+ */
+int gru_read_gpa(unsigned long *value, unsigned long gpa)
+{
+ void *cb;
+ void *dsr;
+ int ret, iaa;
+
+ STAT(read_gpa);
+ if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
+ return MQE_BUG_NO_RESOURCES;
+ iaa = gpa >> 62;
+ gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
+ ret = gru_wait(cb);
+ if (ret == CBS_IDLE)
+ *value = *(unsigned long *)dsr;
+ gru_free_cpu_resources(cb, dsr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gru_read_gpa);
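
Because the IAA is derived from the top two bits of the GPA (gpa >> 62), the same entry point serves both coherent memory and MMR space; xp_remote_mmr_read() later in this series relies on exactly that. A caller-side sketch (remote_mmr_gpa is a placeholder):

	unsigned long val;

	if (gru_read_gpa(&val, remote_mmr_gpa))
		return -EIO;	/* non-zero CBS status from gru_wait() */
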
+
+
+/*
* Copy a block of data using the GRU resources
*/
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
if (gru_wait(cb) != CBS_IDLE) {
- printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
+ printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
goto done;
}
if (*p != MAGIC) {
- printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
+ printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
goto done;
}
gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
if (gru_wait(cb) != CBS_IDLE) {
- printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
+ printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
goto done;
}
if (word0 != word1 || word1 != MAGIC) {
printk(KERN_DEBUG
- "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
- word1, MAGIC);
+ "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
+ smp_processor_id(), word1, MAGIC);
goto done;
}
ret = 0;
if (ret)
break;
}
- if (ret != MQE_QUEUE_FULL || i != 4)
+ if (ret != MQE_QUEUE_FULL || i != 4) {
+ printk(KERN_DEBUG "GRU:%d quicktest1: unexpect status %d, i %d\n",
+ smp_processor_id(), ret, i);
goto done;
+ }
for (i = 0; i < 6; i++) {
m = gru_get_next_message(&mqd);
break;
gru_free_message(&mqd, m);
}
- ret = (i == 4) ? 0 : -EIO;
+ if (i != 4) {
+ printk(KERN_DEBUG "GRU:%d quicktest2: bad message, i %d, m %p, m8 %d\n",
+ smp_processor_id(), i, m, m ? m[8] : -1);
+ goto done;
+ }
+ ret = 0;
done:
kfree(p);
int ret = 0;
unsigned long *buf;
void *cb0, *cb;
+ struct gru_control_block_status *gen;
int i, k, istatus, bytes;
bytes = numcb * 4 * 8;
XTYPE_DW, 4, 1, IMA_INTERRUPT);
ret = 0;
- for (k = 0; k < numcb; k++) {
+ k = numcb;
+ do {
gru_wait_async_cbr(han);
for (i = 0; i < numcb; i++) {
cb = cb0 + i * GRU_HANDLE_STRIDE;
istatus = gru_check_status(cb);
- if (istatus == CBS_ACTIVE)
- continue;
- if (istatus == CBS_EXCEPTION)
- ret = -EFAULT;
- else if (buf[i] || buf[i + 1] || buf[i + 2] ||
- buf[i + 3])
- ret = -EIO;
+ if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
+ break;
}
- }
+ if (i == numcb)
+ continue;
+ if (istatus != CBS_IDLE) {
+ printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
+ ret = -EFAULT;
+ } else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
+ buf[4 * i + 3]) {
+ printk(KERN_DEBUG "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
+ smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
+ ret = -EIO;
+ }
+ k--;
+ gen = cb;
+ gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
+ } while (k);
BUG_ON(cmp.done);
gru_unlock_async_resource(han);
return ret;
}
+#define BUFSIZE 200
+static int quicktest3(unsigned long arg)
+{
+ char buf1[BUFSIZE], buf2[BUFSIZE];
+ int ret = 0;
+
+ memset(buf2, 0, sizeof(buf2));
+ memset(buf1, get_cycles() & 255, sizeof(buf1));
+ gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
+ if (memcmp(buf1, buf2, BUFSIZE)) {
+ printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
+ ret = -EIO;
+ }
+ return ret;
+}
+
/*
* Debugging only. User hook for various kernel tests
* of driver & gru.
case 2:
ret = quicktest2(arg);
break;
+ case 3:
+ ret = quicktest3(arg);
+ break;
case 99:
ret = gru_free_kernel_contexts();
break;
/*
+ * Read a GRU global GPA. Source can be located in a remote partition.
+ *
+ * Input:
+ * value memory address where MMR value is returned
+ * gpa source numalink physical address of GPA
+ *
+ * Output:
+ * 0 OK
+ * >0 error
+ */
+int gru_read_gpa(unsigned long *value, unsigned long gpa);
+
+
+/*
* Copy data using the GRU. Source or destination can be located in a remote
* partition.
*
#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th))
#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1)))
-/*
- * Statictics kept on a per-GTS basis.
- */
-struct gts_statistics {
- unsigned long fmm_tlbdropin;
- unsigned long upm_tlbdropin;
- unsigned long context_stolen;
-};
-
struct gru_get_gseg_statistics_req {
- unsigned long gseg;
- struct gts_statistics stats;
+ unsigned long gseg;
+ struct gru_gseg_statistics stats;
};
/*
unsigned int control_blocks;
unsigned int maximum_thread_count;
unsigned int options;
+ unsigned char tlb_preload_count;
};
/*
/*
* Structure used to set context options
*/
-enum {sco_gseg_owner, sco_cch_req_slice};
+enum {sco_gseg_owner, sco_cch_req_slice, sco_blade_chiplet};
struct gru_set_context_option_req {
unsigned long gseg;
int op;
- unsigned long val1;
+ int val0;
+ long val1;
};
/*
int ctxnum;
char data_opt;
char lock_cch;
+ char flush_cbrs;
+ char fill[10];
pid_t pid;
void *buf;
size_t buflen;
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
+#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
/*
* Select a gru fault map to be used by the current cpu. Note that
* multiple cpus may be using the same map.
- * ZZZ should "shift" be used?? Depends on HT cpu numbering
* ZZZ should be inline but did not work on emulator
*/
int gru_cpu_fault_map_id(void)
{
+#ifdef CONFIG_IA64
return uv_blade_processor_id() % GRU_NUM_TFM;
+#else
+ int cpu = smp_processor_id();
+ int id, core;
+
+ core = uv_cpu_core_number(cpu);
+ id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
+ return id;
+#endif
}
/*--------- ASID Management -------------------------------------------
void gts_drop(struct gru_thread_state *gts)
{
if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
- gru_drop_mmu_notifier(gts->ts_gms);
+ if (gts->ts_gms)
+ gru_drop_mmu_notifier(gts->ts_gms);
kfree(gts);
STAT(gts_free);
}
* Allocate a thread state structure.
*/
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
- int cbr_au_count, int dsr_au_count, int options, int tsid)
+ int cbr_au_count, int dsr_au_count,
+ unsigned char tlb_preload_count, int options, int tsid)
{
struct gru_thread_state *gts;
+ struct gru_mm_struct *gms;
int bytes;
bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
bytes += sizeof(struct gru_thread_state);
gts = kmalloc(bytes, GFP_KERNEL);
if (!gts)
- return NULL;
+ return ERR_PTR(-ENOMEM);
STAT(gts_alloc);
memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
mutex_init(&gts->ts_ctxlock);
gts->ts_cbr_au_count = cbr_au_count;
gts->ts_dsr_au_count = dsr_au_count;
+ gts->ts_tlb_preload_count = tlb_preload_count;
gts->ts_user_options = options;
+ gts->ts_user_blade_id = -1;
+ gts->ts_user_chiplet_id = -1;
gts->ts_tsid = tsid;
gts->ts_ctxnum = NULLCTX;
gts->ts_tlb_int_select = -1;
if (vma) {
gts->ts_mm = current->mm;
gts->ts_vma = vma;
- gts->ts_gms = gru_register_mmu_notifier();
- if (!gts->ts_gms)
+ gms = gru_register_mmu_notifier();
+ if (IS_ERR(gms))
goto err;
+ gts->ts_gms = gms;
}
gru_dbg(grudev, "alloc gts %p\n", gts);
err:
gts_drop(gts);
- return NULL;
+ return ERR_CAST(gms);
}
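
gru_alloc_gts() now propagates the reason for failure via the <linux/err.h> encoding rather than returning bare NULL. The caller-side pattern is the usual one (argument names abbreviated):

	gts = gru_alloc_gts(vma, cbrs, dsrs, 0, opts, tsid);
	if (IS_ERR(gts))
		return PTR_ERR(gts);	/* -ENOMEM, or the notifier's error */
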
/*
if (!vdata)
return NULL;
+ STAT(vdata_alloc);
INIT_LIST_HEAD(&vdata->vd_head);
spin_lock_init(&vdata->vd_lock);
gru_dbg(grudev, "alloc vdata %p\n", vdata);
struct gru_vma_data *vdata = vma->vm_private_data;
struct gru_thread_state *gts, *ngts;
- gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
+ gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
+ vdata->vd_dsr_au_count,
+ vdata->vd_tlb_preload_count,
vdata->vd_user_options, tsid);
- if (!gts)
- return NULL;
+ if (IS_ERR(gts))
+ return gts;
spin_lock(&vdata->vd_lock);
ngts = gru_find_current_gts_nolock(vdata, tsid);
memset(cbe + i * GRU_HANDLE_STRIDE, 0,
GRU_CACHE_LINE_BYTES);
}
+ /* Flush CBE to hide race in context restart */
+ mb();
+ gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
cb += GRU_HANDLE_STRIDE;
}
cb = gseg + GRU_CB_BASE;
cbe = grubase + GRU_CBE_BASE;
length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
+
+ /* CBEs may not be coherent. Flush them from cache */
+ for_each_cbr_in_allocation_map(i, &cbrmap, scr)
+ gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
+ mb(); /* Let the CL flush complete */
+
gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
- gru_dbg(grudev, "gts %p\n", gts);
+ gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
+ gts, gts->ts_cbr_map, gts->ts_dsr_map);
lock_cch_handle(cch);
if (cch_interrupt_sync(cch))
BUG();
if (cch_deallocate(cch))
BUG();
- gts->ts_force_unload = 0; /* ts_force_unload locked by CCH lock */
unlock_cch_handle(cch);
gru_free_gru_context(gts);
struct gru_context_configuration_handle *cch;
int i, err, asid, ctxnum = gts->ts_ctxnum;
- gru_dbg(grudev, "gts %p\n", gts);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
-
lock_cch_handle(cch);
cch->tfm_fault_bit_enable =
(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
cch->unmap_enable = 1;
cch->tfm_done_bit_enable = 1;
cch->cb_int_enable = 1;
+ cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */
} else {
cch->unmap_enable = 0;
cch->tfm_done_bit_enable = 0;
if (cch_start(cch))
BUG();
unlock_cch_handle(cch);
+
+ gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
+ gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
+ (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
}
/*
* Update fields in an active CCH:
* - retarget interrupts on local blade
* - update sizeavail mask
- * - force a delayed context unload by clearing the CCH asids. This
- * forces TLB misses for new GRU instructions. The context is unloaded
- * when the next TLB miss occurs.
*/
-int gru_update_cch(struct gru_thread_state *gts, int force_unload)
+int gru_update_cch(struct gru_thread_state *gts)
{
struct gru_context_configuration_handle *cch;
struct gru_state *gru = gts->ts_gru;
goto exit;
if (cch_interrupt(cch))
BUG();
- if (!force_unload) {
- for (i = 0; i < 8; i++)
- cch->sizeavail[i] = gts->ts_sizeavail;
- gts->ts_tlb_int_select = gru_cpu_fault_map_id();
- cch->tlb_int_select = gru_cpu_fault_map_id();
- cch->tfm_fault_bit_enable =
- (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
- || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
- } else {
- for (i = 0; i < 8; i++)
- cch->asid[i] = 0;
- cch->tfm_fault_bit_enable = 0;
- cch->tlb_int_enable = 0;
- gts->ts_force_unload = 1;
- }
+ for (i = 0; i < 8; i++)
+ cch->sizeavail[i] = gts->ts_sizeavail;
+ gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+ cch->tlb_int_select = gru_cpu_fault_map_id();
+ cch->tfm_fault_bit_enable =
+ (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
+ || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
if (cch_start(cch))
BUG();
ret = 1;
gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
gru_cpu_fault_map_id());
- return gru_update_cch(gts, 0);
+ return gru_update_cch(gts);
+}
+
+/*
+ * Check if a GRU context is allowed to use a specific chiplet. By default
+ * a context is assigned to any blade-local chiplet. However, users can
+ * override this.
+ * Returns 1 if assignment allowed, 0 otherwise
+ */
+static int gru_check_chiplet_assignment(struct gru_state *gru,
+ struct gru_thread_state *gts)
+{
+ int blade_id;
+ int chiplet_id;
+
+ blade_id = gts->ts_user_blade_id;
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
+
+ chiplet_id = gts->ts_user_chiplet_id;
+ return gru->gs_blade_id == blade_id &&
+ (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
+}
+
+/*
+ * Unload the gru context if it is not assigned to the correct blade or
+ * chiplet. Misassignment can occur if the process migrates to a different
+ * blade or if the user changes the selected blade/chiplet.
+ */
+void gru_check_context_placement(struct gru_thread_state *gts)
+{
+ struct gru_state *gru;
+
+ /*
+ * If the current task is the context owner, verify that the
+ * context is correctly placed. This test is skipped for non-owner
+ * references. Pthread apps use non-owner references to the CBRs.
+ */
+ gru = gts->ts_gru;
+ if (!gru || gts->ts_tgid_owner != current->tgid)
+ return;
+
+ if (!gru_check_chiplet_assignment(gru, gts)) {
+ STAT(check_context_unload);
+ gru_unload_context(gts, 1);
+ } else if (gru_retarget_intr(gts)) {
+ STAT(check_context_retarget_intr);
+ }
}
}
}
-void gru_steal_context(struct gru_thread_state *gts, int blade_id)
+void gru_steal_context(struct gru_thread_state *gts)
{
struct gru_blade_state *blade;
struct gru_state *gru, *gru0;
struct gru_thread_state *ngts = NULL;
int ctxnum, ctxnum0, flag = 0, cbr, dsr;
+ int blade_id;
+ blade_id = gts->ts_user_blade_id;
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
cbr = gts->ts_cbr_au_count;
dsr = gts->ts_dsr_au_count;
gru = blade->bs_lru_gru;
if (ctxnum == 0)
gru = next_gru(blade, gru);
+ blade->bs_lru_gru = gru;
+ blade->bs_lru_ctxnum = ctxnum;
ctxnum0 = ctxnum;
gru0 = gru;
while (1) {
- if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
- break;
- spin_lock(&gru->gs_lock);
- for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
- if (flag && gru == gru0 && ctxnum == ctxnum0)
+ if (gru_check_chiplet_assignment(gru, gts)) {
+ if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
break;
- ngts = gru->gs_gts[ctxnum];
- /*
- * We are grabbing locks out of order, so trylock is
- * needed. GTSs are usually not locked, so the odds of
- * success are high. If trylock fails, try to steal a
- * different GSEG.
- */
- if (ngts && is_gts_stealable(ngts, blade))
+ spin_lock(&gru->gs_lock);
+ for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
+ if (flag && gru == gru0 && ctxnum == ctxnum0)
+ break;
+ ngts = gru->gs_gts[ctxnum];
+ /*
+ * We are grabbing locks out of order, so trylock is
+ * needed. GTSs are usually not locked, so the odds of
+ * success are high. If trylock fails, try to steal a
+ * different GSEG.
+ */
+ if (ngts && is_gts_stealable(ngts, blade))
+ break;
+ ngts = NULL;
+ }
+ spin_unlock(&gru->gs_lock);
+ if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
break;
- ngts = NULL;
- flag = 1;
}
- spin_unlock(&gru->gs_lock);
- if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
+ if (flag && gru == gru0)
break;
+ flag = 1;
ctxnum = 0;
gru = next_gru(blade, gru);
}
- blade->bs_lru_gru = gru;
- blade->bs_lru_ctxnum = ctxnum;
spin_unlock(&blade->bs_lock);
if (ngts) {
}
/*
+ * Assign a gru context.
+ */
+static int gru_assign_context_number(struct gru_state *gru)
+{
+ int ctxnum;
+
+ ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
+ __set_bit(ctxnum, &gru->gs_context_map);
+ return ctxnum;
+}
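
find_first_zero_bit() plus __set_bit() under gs_lock is the entire allocator; callers have already confirmed a free CCH via check_gru_resources(), so the unchecked result is safe. The matching release is a single __clear_bit() on gs_context_map under the same lock (a sketch of the inverse, not the driver's literal free path):

	/* caller holds gru->gs_lock */
	__clear_bit(ctxnum, &gru->gs_context_map);
	gru->gs_gts[ctxnum] = NULL;
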
+
+/*
* Scan the GRUs on the local blade & assign a GRU context.
*/
-struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
- int blade)
+struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
struct gru_state *gru, *grux;
int i, max_active_contexts;
+ int blade_id = gts->ts_user_blade_id;
-
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
again:
gru = NULL;
max_active_contexts = GRU_NUM_CCH;
- for_each_gru_on_blade(grux, blade, i) {
+ for_each_gru_on_blade(grux, blade_id, i) {
+ if (!gru_check_chiplet_assignment(grux, gts))
+ continue;
if (check_gru_resources(grux, gts->ts_cbr_au_count,
gts->ts_dsr_au_count,
max_active_contexts)) {
reserve_gru_resources(gru, gts);
gts->ts_gru = gru;
gts->ts_blade = gru->gs_blade_id;
- gts->ts_ctxnum =
- find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
- BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
+ gts->ts_ctxnum = gru_assign_context_number(gru);
atomic_inc(&gts->ts_refcnt);
gru->gs_gts[gts->ts_ctxnum] = gts;
- __set_bit(gts->ts_ctxnum, &gru->gs_context_map);
spin_unlock(&gru->gs_lock);
STAT(assign_context);
{
struct gru_thread_state *gts;
unsigned long paddr, vaddr;
- int blade_id;
vaddr = (unsigned long)vmf->virtual_address;
gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
again:
mutex_lock(&gts->ts_ctxlock);
preempt_disable();
- blade_id = uv_numa_blade_id();
- if (gts->ts_gru) {
- if (gts->ts_gru->gs_blade_id != blade_id) {
- STAT(migrated_nopfn_unload);
- gru_unload_context(gts, 1);
- } else {
- if (gru_retarget_intr(gts))
- STAT(migrated_nopfn_retarget);
- }
- }
+ gru_check_context_placement(gts);
if (!gts->ts_gru) {
STAT(load_user_context);
- if (!gru_assign_gru_context(gts, blade_id)) {
+ if (!gru_assign_gru_context(gts)) {
preempt_enable();
mutex_unlock(&gts->ts_ctxlock);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
- blade_id = uv_numa_blade_id();
if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
- gru_steal_context(gts, blade_id);
+ gru_steal_context(gts);
goto again;
}
gru_load_context(gts);
{
unsigned long val = atomic_long_read(v);
- if (val)
- seq_printf(s, "%16lu %s\n", val, id);
+ seq_printf(s, "%16lu %s\n", val, id);
}
static int statistics_show(struct seq_file *s, void *p)
printstat(s, vdata_free);
printstat(s, gts_alloc);
printstat(s, gts_free);
- printstat(s, vdata_double_alloc);
+ printstat(s, gms_alloc);
+ printstat(s, gms_free);
printstat(s, gts_double_allocate);
printstat(s, assign_context);
printstat(s, assign_context_failed);
printstat(s, steal_kernel_context);
printstat(s, steal_context_failed);
printstat(s, nopfn);
- printstat(s, break_cow);
printstat(s, asid_new);
printstat(s, asid_next);
printstat(s, asid_wrap);
printstat(s, asid_reuse);
printstat(s, intr);
+ printstat(s, intr_cbr);
+ printstat(s, intr_tfh);
+ printstat(s, intr_spurious);
printstat(s, intr_mm_lock_failed);
printstat(s, call_os);
- printstat(s, call_os_offnode_reference);
- printstat(s, call_os_check_for_bug);
printstat(s, call_os_wait_queue);
printstat(s, user_flush_tlb);
printstat(s, user_unload_context);
printstat(s, user_exception);
printstat(s, set_context_option);
- printstat(s, migrate_check);
- printstat(s, migrated_retarget);
- printstat(s, migrated_unload);
- printstat(s, migrated_unload_delay);
- printstat(s, migrated_nopfn_retarget);
- printstat(s, migrated_nopfn_unload);
+ printstat(s, check_context_retarget_intr);
+ printstat(s, check_context_unload);
printstat(s, tlb_dropin);
+ printstat(s, tlb_preload_page);
printstat(s, tlb_dropin_fail_no_asid);
printstat(s, tlb_dropin_fail_upm);
printstat(s, tlb_dropin_fail_invalid);
printstat(s, tlb_dropin_fail_idle);
printstat(s, tlb_dropin_fail_fmm);
printstat(s, tlb_dropin_fail_no_exception);
- printstat(s, tlb_dropin_fail_no_exception_war);
printstat(s, tfh_stale_on_fault);
printstat(s, mmu_invalidate_range);
printstat(s, mmu_invalidate_page);
- printstat(s, mmu_clear_flush_young);
printstat(s, flush_tlb);
printstat(s, flush_tlb_gru);
printstat(s, flush_tlb_gru_tgh);
printstat(s, flush_tlb_gru_zero_asid);
printstat(s, copy_gpa);
+ printstat(s, read_gpa);
printstat(s, mesq_receive);
printstat(s, mesq_receive_none);
printstat(s, mesq_send);
printstat(s, mesq_send_qlimit_reached);
printstat(s, mesq_send_amo_nacked);
printstat(s, mesq_send_put_nacked);
- printstat(s, mesq_qf_not_full);
printstat(s, mesq_qf_locked);
printstat(s, mesq_qf_noop_not_full);
printstat(s, mesq_qf_switch_head_failed);
printstat(s, mesq_noop_qlimit_reached);
printstat(s, mesq_noop_amo_nacked);
printstat(s, mesq_noop_put_nacked);
+ printstat(s, mesq_noop_page_overflow);
return 0;
}
int op;
unsigned long total, count, max;
static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
- "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
+ "cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
+ "tfh_write_restart", "tgh_invalidate"};
+ seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
for (op = 0; op < mcsop_last; op++) {
count = atomic_long_read(&mcs_op_statistics[op].count);
total = atomic_long_read(&mcs_op_statistics[op].total);
static int options_show(struct seq_file *s, void *p)
{
+ seq_printf(s, "#bitmask: 1=trace, 2=statistics\n");
seq_printf(s, "0x%lx\n", gru_options);
return 0;
}
const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
if (gid == 0)
- seq_printf(file, "#%5s%5s%6s%9s%6s%8s%8s\n", "gid", "bid",
- "ctx#", "pid", "cbrs", "dsbytes", "mode");
+ seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
+ "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
if (gru)
for (i = 0; i < GRU_NUM_CCH; i++) {
ts = gru->gs_gts[i];
if (!ts)
continue;
- seq_printf(file, " %5d%5d%6d%9d%6d%8d%8s\n",
+ seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n",
gru->gs_gid, gru->gs_blade_id, i,
- ts->ts_tgid_owner,
+ is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
+ is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
ts->ts_cbr_au_count * GRU_DSR_AU_BYTES,
mode[ts->ts_user_options &
for (p = proc_files; p->name; p++)
if (p->entry)
remove_proc_entry(p->name, proc_gru);
- remove_proc_entry("gru", NULL);
+ remove_proc_entry("gru", proc_gru->parent);
}
}
#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)
#define GRU_DRIVER_ID_STR "SGI GRU Device Driver"
-#define GRU_DRIVER_VERSION_STR "0.80"
+#define GRU_DRIVER_VERSION_STR "0.85"
/*
* GRU statistics.
atomic_long_t vdata_free;
atomic_long_t gts_alloc;
atomic_long_t gts_free;
- atomic_long_t vdata_double_alloc;
+ atomic_long_t gms_alloc;
+ atomic_long_t gms_free;
atomic_long_t gts_double_allocate;
atomic_long_t assign_context;
atomic_long_t assign_context_failed;
atomic_long_t steal_kernel_context;
atomic_long_t steal_context_failed;
atomic_long_t nopfn;
- atomic_long_t break_cow;
atomic_long_t asid_new;
atomic_long_t asid_next;
atomic_long_t asid_wrap;
atomic_long_t asid_reuse;
atomic_long_t intr;
+ atomic_long_t intr_cbr;
+ atomic_long_t intr_tfh;
+ atomic_long_t intr_spurious;
atomic_long_t intr_mm_lock_failed;
atomic_long_t call_os;
- atomic_long_t call_os_offnode_reference;
- atomic_long_t call_os_check_for_bug;
atomic_long_t call_os_wait_queue;
atomic_long_t user_flush_tlb;
atomic_long_t user_unload_context;
atomic_long_t user_exception;
atomic_long_t set_context_option;
- atomic_long_t migrate_check;
- atomic_long_t migrated_retarget;
- atomic_long_t migrated_unload;
- atomic_long_t migrated_unload_delay;
- atomic_long_t migrated_nopfn_retarget;
- atomic_long_t migrated_nopfn_unload;
+ atomic_long_t check_context_retarget_intr;
+ atomic_long_t check_context_unload;
atomic_long_t tlb_dropin;
+ atomic_long_t tlb_preload_page;
atomic_long_t tlb_dropin_fail_no_asid;
atomic_long_t tlb_dropin_fail_upm;
atomic_long_t tlb_dropin_fail_invalid;
atomic_long_t tlb_dropin_fail_idle;
atomic_long_t tlb_dropin_fail_fmm;
atomic_long_t tlb_dropin_fail_no_exception;
- atomic_long_t tlb_dropin_fail_no_exception_war;
atomic_long_t tfh_stale_on_fault;
atomic_long_t mmu_invalidate_range;
atomic_long_t mmu_invalidate_page;
- atomic_long_t mmu_clear_flush_young;
atomic_long_t flush_tlb;
atomic_long_t flush_tlb_gru;
atomic_long_t flush_tlb_gru_tgh;
atomic_long_t flush_tlb_gru_zero_asid;
atomic_long_t copy_gpa;
+ atomic_long_t read_gpa;
atomic_long_t mesq_receive;
atomic_long_t mesq_receive_none;
atomic_long_t mesq_send_qlimit_reached;
atomic_long_t mesq_send_amo_nacked;
atomic_long_t mesq_send_put_nacked;
- atomic_long_t mesq_qf_not_full;
+ atomic_long_t mesq_page_overflow;
atomic_long_t mesq_qf_locked;
atomic_long_t mesq_qf_noop_not_full;
atomic_long_t mesq_qf_switch_head_failed;
atomic_long_t mesq_noop_qlimit_reached;
atomic_long_t mesq_noop_amo_nacked;
atomic_long_t mesq_noop_put_nacked;
+ atomic_long_t mesq_noop_page_overflow;
};
enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
- cchop_deallocate, tghop_invalidate, mcsop_last};
+ cchop_deallocate, tfhop_write_only, tfhop_write_restart,
+ tghop_invalidate, mcsop_last};
struct mcs_op_statistic {
atomic_long_t count;
extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
-#define OPT_DPRINT 1
-#define OPT_STATS 2
+#define OPT_DPRINT 1
+#define OPT_STATS 2
#define IRQ_GRU 110 /* Starting IRQ number for interrupts */
#define gru_dbg(dev, fmt, x...) \
do { \
if (gru_options & OPT_DPRINT) \
- dev_dbg(dev, "%s: " fmt, __func__, x); \
+ printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\
} while (0)
#else
#define gru_dbg(x...)
#define ASID_INC 8 /* number of regions */
/* Generate a GRU asid value from a GRU base asid & a virtual address. */
-#if defined CONFIG_IA64
#define VADDR_HI_BIT 64
-#elif defined CONFIG_X86_64
-#define VADDR_HI_BIT 48
-#else
-#error "Unsupported architecture"
-#endif
#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
#define GRUASID(asid, addr) ((asid) + GRUREGION(addr))
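
With VADDR_HI_BIT fixed at 64, GRUREGION() reduces to ((addr) >> 61) & 3, i.e. two of the top three address bits select the region. For example, addr = 0xe000000000000000 gives (7 & 3) = 3, so GRUASID(asid, addr) evaluates to asid + 3.
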
long vd_user_options;/* misc user option flags */
int vd_cbr_au_count;
int vd_dsr_au_count;
+ unsigned char vd_tlb_preload_count;
};
/*
struct gru_state *ts_gru; /* GRU where the context is
loaded */
struct gru_mm_struct *ts_gms; /* asid & ioproc struct */
+ unsigned char ts_tlb_preload_count; /* TLB preload pages */
unsigned long ts_cbr_map; /* map of allocated CBRs */
unsigned long ts_dsr_map; /* map of allocated DATA
resources */
long ts_user_options;/* misc user option flags */
pid_t ts_tgid_owner; /* task that is using the
context - for migration */
+ short ts_user_blade_id;/* user selected blade */
+ char ts_user_chiplet_id;/* user selected chiplet */
unsigned short ts_sizeavail; /* Pagesizes in use */
int ts_tsid; /* thread that owns the
structure */
char ts_blade; /* If >= 0, migrate context if
ref from different blade */
char ts_force_cch_reload;
- char ts_force_unload;/* force context to be unloaded
- after migration */
char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
allocated CB */
int ts_data_valid; /* Indicates if ts_gdata has
valid data */
- struct gts_statistics ustats; /* User statistics */
+ struct gru_gseg_statistics ustats; /* User statistics */
unsigned long ts_gdata[0]; /* save area for GRU data (CB,
DS, CBE) */
};
gru segments (64) */
unsigned short gs_gid; /* unique GRU number */
unsigned short gs_blade_id; /* blade of GRU */
+ unsigned char gs_chiplet_id; /* blade chiplet of GRU */
unsigned char gs_tgh_local_shift; /* used to pick TGH for
local flush */
unsigned char gs_tgh_first_remote; /* starting TGH# for
in use */
struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using
the context */
+ int gs_irq[GRU_NUM_TFM]; /* Interrupt irqs */
};
/*
return !gts->ts_mm;
}
+/*
+ * The following are for Nehalem-EX. A more general scheme is needed for
+ * future processors.
+ */
+#define UV_MAX_INT_CORES 8
+#define uv_cpu_socket_number(p) ((cpu_physical_id(p) >> 5) & 1)
+#define uv_cpu_ht_number(p) (cpu_physical_id(p) & 1)
+#define uv_cpu_core_number(p) (((cpu_physical_id(p) >> 2) & 4) | \
+ ((cpu_physical_id(p) >> 1) & 3))
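
These macros carve the APIC id into HT in bit 0, a split core field in bits 4..1, and the socket in bit 5. Worked example: cpu_physical_id = 0x35 (0b110101) decodes as socket = (0x35 >> 5) & 1 = 1, core = ((0x35 >> 2) & 4) | ((0x35 >> 1) & 3) = 4 | 2 = 6, ht = 0x35 & 1 = 1.
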
/*-----------------------------------------------------------------------------
* Function prototypes & externs
*/
*vma, int tsid);
extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
*vma, int tsid);
-extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
- int blade);
+extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
extern void gru_load_context(struct gru_thread_state *gts);
-extern void gru_steal_context(struct gru_thread_state *gts, int blade_id);
+extern void gru_steal_context(struct gru_thread_state *gts);
extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
-extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
+extern int gru_update_cch(struct gru_thread_state *gts);
extern void gts_drop(struct gru_thread_state *gts);
extern void gru_tgh_flush_init(struct gru_state *gru);
extern int gru_kservices_init(void);
extern void gru_kservices_exit(void);
+extern irqreturn_t gru0_intr(int irq, void *dev_id);
+extern irqreturn_t gru1_intr(int irq, void *dev_id);
+extern irqreturn_t gru_intr_mblade(int irq, void *dev_id);
extern int gru_dump_chiplet_request(unsigned long arg);
extern long gru_get_gseg_statistics(unsigned long arg);
-extern irqreturn_t gru_intr(int irq, void *dev_id);
extern int gru_handle_user_call_os(unsigned long address);
extern int gru_user_flush_tlb(unsigned long arg);
extern int gru_user_unload_context(unsigned long arg);
extern int gru_get_exception_detail(unsigned long arg);
extern int gru_set_context_option(unsigned long address);
+extern void gru_check_context_placement(struct gru_thread_state *gts);
extern int gru_cpu_fault_map_id(void);
extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
extern void gru_flush_all_tlb(struct gru_state *gru);
extern void gru_proc_exit(void);
extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
- int cbr_au_count, int dsr_au_count, int options, int tsid);
+ int cbr_au_count, int dsr_au_count,
+ unsigned char tlb_preload_count, int options, int tsid);
extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
int cbr_au_count, char *cbmap);
extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
STAT(flush_tlb_gru_tgh);
asid = GRUASID(asid, start);
gru_dbg(grudev,
- " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n",
- gid, asid, num, asids->mt_ctxbitmap);
+ " FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
+ gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
tgh = get_lock_tgh_handle(gru);
tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
num - 1, asids->mt_ctxbitmap);
{
struct gru_mm_struct *gms;
struct mmu_notifier *mn;
+ int err;
mn = mmu_find_ops(current->mm, &gru_mmuops);
if (mn) {
} else {
gms = kzalloc(sizeof(*gms), GFP_KERNEL);
if (gms) {
+ STAT(gms_alloc);
spin_lock_init(&gms->ms_asid_lock);
gms->ms_notifier.ops = &gru_mmuops;
atomic_set(&gms->ms_refcnt, 1);
init_waitqueue_head(&gms->ms_wait_queue);
- __mmu_notifier_register(&gms->ms_notifier, current->mm);
+ err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
+ if (err)
+ goto error;
}
}
gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
atomic_read(&gms->ms_refcnt));
return gms;
+error:
+ kfree(gms);
+ return ERR_PTR(err);
}
void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
if (!gms->ms_released)
mmu_notifier_unregister(&gms->ms_notifier, current->mm);
kfree(gms);
+ STAT(gms_free);
}
}
extern u8 xp_region_size;
extern unsigned long (*xp_pa) (void *);
+extern unsigned long (*xp_socket_pa) (unsigned long);
extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
size_t);
extern int (*xp_cpu_to_nasid) (int);
unsigned long (*xp_pa) (void *addr);
EXPORT_SYMBOL_GPL(xp_pa);
+unsigned long (*xp_socket_pa) (unsigned long gpa);
+EXPORT_SYMBOL_GPL(xp_socket_pa);
+
enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
const unsigned long src_gpa, size_t len);
EXPORT_SYMBOL_GPL(xp_remote_memcpy);
}
/*
+ * Convert a global physical to a socket physical address.
+ */
+static unsigned long
+xp_socket_pa_sn2(unsigned long gpa)
+{
+ return gpa;
+}
+
+/*
* Wrapper for bte_copy().
*
* dst_pa - physical address of the destination of the transfer.
xp_region_size = sn_region_size;
xp_pa = xp_pa_sn2;
+ xp_socket_pa = xp_socket_pa_sn2;
xp_remote_memcpy = xp_remote_memcpy_sn2;
xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
xp_expand_memprotect = xp_expand_memprotect_sn2;
return uv_gpa(addr);
}
+/*
+ * Convert a global physical to socket physical address.
+ */
+static unsigned long
+xp_socket_pa_uv(unsigned long gpa)
+{
+ return uv_gpa_to_soc_phys_ram(gpa);
+}
+
+static enum xp_retval
+xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
+ size_t len)
+{
+ int ret;
+ unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
+
+ BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
+ BUG_ON(len != 8);
+
+ ret = gru_read_gpa(dst_va, src_gpa);
+ if (ret == 0)
+ return xpSuccess;
+
+ dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
+ "len=%ld\n", dst_gpa, src_gpa, len);
+ return xpGruCopyError;
+}
+
+
static enum xp_retval
xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
size_t len)
{
int ret;
+ if (uv_gpa_in_mmr_space(src_gpa))
+ return xp_remote_mmr_read(dst_gpa, src_gpa, len);
+
ret = gru_copy_gpa(dst_gpa, src_gpa, len);
if (ret == 0)
return xpSuccess;
xp_region_size = sn_region_size;
xp_pa = xp_pa_uv;
+ xp_socket_pa = xp_socket_pa_uv;
xp_remote_memcpy = xp_remote_memcpy_uv;
xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
xp_expand_memprotect = xp_expand_memprotect_uv;
#include <linux/device.h>
#include <linux/hardirq.h>
#include "xpc.h"
+#include <asm/uv/uv_hub.h>
/* XPC is exiting flag */
int xpc_exiting;
break;
/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
- if (L1_CACHE_ALIGN(len) > buf_len) {
- kfree(buf_base);
+ if (is_shub())
+ len = L1_CACHE_ALIGN(len);
+
+ if (len > buf_len) {
+ if (buf_base != NULL)
+ kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
&buf_base);
}
}
- ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
+ ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
if (ret != xpSuccess) {
dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
break;
dev_err(xpc_part, "SAL failed to locate the reserved page\n");
return -ESRCH;
}
- rp = (struct xpc_rsvd_page *)__va(rp_pa);
+ rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
if (rp->SAL_version < 3) {
/* SAL_versions < 3 had a SAL_partid defined as a u8 */
{
int ret;
-#if defined CONFIG_X86_64
- ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
- mq->order, &mq->mmr_offset);
- if (ret < 0) {
- dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
- "ret=%d\n", ret);
- return ret;
- }
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
- ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
+#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+
+ ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
ret);
return -EBUSY;
}
+#elif defined CONFIG_X86_64
+ ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
+ mq->order, &mq->mmr_offset);
+ if (ret < 0) {
+ dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+ "ret=%d\n", ret);
+ return ret;
+ }
#else
#error not a supported configuration
#endif
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
- ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+ ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
- ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+ ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
enum xp_retval xp_ret;
int ret;
int nid;
+ int nasid;
int pg_order;
struct page *page;
struct xpc_gru_mq_uv *mq;
goto out_5;
}
+ nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
+
mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
- nid, mmr_value->vector, mmr_value->dest);
+ nasid, mmr_value->vector, mmr_value->dest);
if (ret != 0) {
dev_err(xpc_part, "gru_create_message_queue() returned "
"error=%d\n", ret);
head->first = first->next;
if (head->first == NULL)
head->last = NULL;
+
+ head->n_entries--;
+ BUG_ON(head->n_entries < 0);
+
+ first->next = NULL;
}
- head->n_entries--;
- BUG_ON(head->n_entries < 0);
spin_unlock_irqrestore(&head->lock, irq_flags);
- first->next = NULL;
return first;
}
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
- while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
+ while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
+ (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
dev_dbg(xpc_part, "waiting to make first contact with "
"partition %d\n", XPC_PARTID(part));
msg_slot = ch_uv->recv_msg_slots +
(msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
- BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
BUG_ON(msg_slot->hdr.size != 0);
memcpy(msg_slot, msg, msg->hdr.size);
sizeof(struct xpc_notify_mq_msghdr_uv));
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
-
- msg->hdr.msg_slot_number += ch->remote_nentries;
}
static struct xpc_arch_operations xpc_arch_ops_uv = {
mlx4_bitmap_free_range(bitmap, obj, 1);
}
-static unsigned long find_aligned_range(unsigned long *bitmap,
- u32 start, u32 nbits,
- int len, int align)
-{
- unsigned long end, i;
-
-again:
- start = ALIGN(start, align);
-
- while ((start < nbits) && test_bit(start, bitmap))
- start += align;
-
- if (start >= nbits)
- return -1;
-
- end = start+len;
- if (end > nbits)
- return -1;
-
- for (i = start + 1; i < end; i++) {
- if (test_bit(i, bitmap)) {
- start = i + 1;
- goto again;
- }
- }
-
- return start;
-}
-
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
u32 obj, i;
spin_lock(&bitmap->lock);
- obj = find_aligned_range(bitmap->table, bitmap->last,
- bitmap->max, cnt, align);
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ bitmap->last, cnt, align - 1);
if (obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
- obj = find_aligned_range(bitmap->table, 0, bitmap->max,
- cnt, align);
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ 0, cnt, align - 1);
}
if (obj < bitmap->max) {
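
The hand-rolled find_aligned_range() is replaced by the generic helper from lib/bitmap.c; note the final parameter is an alignment *mask*, hence the align - 1 at the call sites. For reference, its signature in kernels of this vintage:

	unsigned long bitmap_find_next_zero_area(unsigned long *map,
						 unsigned long size,
						 unsigned long start,
						 unsigned int nr,
						 unsigned long align_mask);
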
*val = automatic;
else if (!strncmp(s, "none", 4))
*val = none;
- else if (nofifo && !strncmp(s, "nofifo", 4))
+ else if (nofifo && !strncmp(s, "nofifo", 6))
*val = nofifo;
else {
char *ep;
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/pnp.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <asm/uaccess.h>
static struct proc_dir_entry *proc_pnp = NULL;
static struct proc_dir_entry *proc_pnp_boot = NULL;
-static int proc_read_pnpconfig(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int pnpconfig_proc_show(struct seq_file *m, void *v)
{
struct pnp_isa_config_struc pnps;
if (pnp_bios_isapnp_config(&pnps))
return -EIO;
- return snprintf(buf, count,
- "structure_revision %d\n"
- "number_of_CSNs %d\n"
- "ISA_read_data_port 0x%x\n",
- pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
+ seq_printf(m, "structure_revision %d\n"
+ "number_of_CSNs %d\n"
+ "ISA_read_data_port 0x%x\n",
+ pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
+ return 0;
}
-static int proc_read_escdinfo(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int pnpconfig_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pnpconfig_proc_show, NULL);
+}
+
+static const struct file_operations pnpconfig_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnpconfig_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int escd_info_proc_show(struct seq_file *m, void *v)
{
struct escd_info_struc escd;
if (pnp_bios_escd_info(&escd))
return -EIO;
- return snprintf(buf, count,
- "min_ESCD_write_size %d\n"
+ seq_printf(m, "min_ESCD_write_size %d\n"
"ESCD_size %d\n"
"NVRAM_base 0x%x\n",
escd.min_escd_write_size,
escd.escd_size, escd.nv_storage_base);
+ return 0;
}
+static int escd_info_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, escd_info_proc_show, NULL);
+}
+
+static const struct file_operations escd_info_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = escd_info_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
#define MAX_SANE_ESCD_SIZE (32*1024)
-static int proc_read_escd(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int escd_proc_show(struct seq_file *m, void *v)
{
struct escd_info_struc escd;
char *tmpbuf;
- int escd_size, escd_left_to_read, n;
+ int escd_size;
if (pnp_bios_escd_info(&escd))
return -EIO;
/* sanity check */
if (escd.escd_size > MAX_SANE_ESCD_SIZE) {
printk(KERN_ERR
- "PnPBIOS: proc_read_escd: ESCD size reported by BIOS escd_info call is too great\n");
+ "PnPBIOS: %s: ESCD size reported by BIOS escd_info call is too great\n", __func__);
return -EFBIG;
}
/* sanity check */
if (escd_size > MAX_SANE_ESCD_SIZE) {
- printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by"
- " BIOS read_escd call is too great\n");
+ printk(KERN_ERR "PnPBIOS: %s: ESCD size reported by"
+ " BIOS read_escd call is too great\n", __func__);
kfree(tmpbuf);
return -EFBIG;
}
- escd_left_to_read = escd_size - pos;
- if (escd_left_to_read < 0)
- escd_left_to_read = 0;
- if (escd_left_to_read == 0)
- *eof = 1;
- n = min(count, escd_left_to_read);
- memcpy(buf, tmpbuf + pos, n);
+ seq_write(m, tmpbuf, escd_size);
kfree(tmpbuf);
- *start = buf;
- return n;
+ return 0;
}
-static int proc_read_legacyres(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int escd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, escd_proc_show, NULL);
+}
+
+static const struct file_operations escd_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = escd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pnp_legacyres_proc_show(struct seq_file *m, void *v)
{
- /* Assume that the following won't overflow the buffer */
- if (pnp_bios_get_stat_res(buf))
+ void *buf;
+
+ buf = kmalloc(65536, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (pnp_bios_get_stat_res(buf)) {
+ kfree(buf);
return -EIO;
+ }
+
+ seq_write(m, buf, 65536);
+ kfree(buf);
+ return 0;
+}
- return count; // FIXME: Return actual length
+static int pnp_legacyres_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pnp_legacyres_proc_show, NULL);
}
-static int proc_read_devices(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static const struct file_operations pnp_legacyres_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnp_legacyres_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pnp_devices_proc_show(struct seq_file *m, void *v)
{
struct pnp_bios_node *node;
u8 nodenum;
- char *p = buf;
-
- if (pos >= 0xff)
- return 0;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -ENOMEM;
- for (nodenum = pos; nodenum < 0xff;) {
+ for (nodenum = 0; nodenum < 0xff;) {
u8 thisnodenum = nodenum;
- /* 26 = the number of characters per line sprintf'ed */
- if ((p - buf + 26) > count)
- break;
+
if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node))
break;
- p += sprintf(p, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
+ seq_printf(m, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
node->handle, node->eisa_id,
node->type_code[0], node->type_code[1],
node->type_code[2], node->flags);
"PnPBIOS: proc_read_devices:",
(unsigned int)nodenum,
(unsigned int)thisnodenum);
- *eof = 1;
break;
}
}
kfree(node);
- if (nodenum == 0xff)
- *eof = 1;
- *start = (char *)((off_t) nodenum - pos);
- return p - buf;
+ return 0;
+}
+
+static int pnp_devices_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pnp_devices_proc_show, NULL);
}
-static int proc_read_node(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static const struct file_operations pnp_devices_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnp_devices_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pnpbios_proc_show(struct seq_file *m, void *v)
{
+ void *data = m->private;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
return -EIO;
}
len = node->size - sizeof(struct pnp_bios_node);
- memcpy(buf, node->data, len);
+ seq_write(m, node->data, len);
kfree(node);
- return len;
+ return 0;
+}
+
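+/*
+ * The per-node proc entries encode the node handle in the entry's data
+ * pointer: the low byte is the node number and bit 8 marks the "boot"
+ * (static) configuration variant.
+ */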
+static int pnpbios_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pnpbios_proc_show, PDE(inode)->data);
}
-static int proc_write_node(struct file *file, const char __user * buf,
- unsigned long count, void *data)
+static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
{
+ void *data = PDE(file->f_path.dentry->d_inode)->data;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
return ret;
}
+static const struct file_operations pnpbios_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnpbios_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pnpbios_proc_write,
+};
+
int pnpbios_interface_attach_device(struct pnp_bios_node *node)
{
char name[3];
- struct proc_dir_entry *ent;
sprintf(name, "%02x", node->handle);
if (!proc_pnp)
return -EIO;
if (!pnpbios_dont_use_current_config) {
- ent = create_proc_entry(name, 0, proc_pnp);
- if (ent) {
- ent->read_proc = proc_read_node;
- ent->write_proc = proc_write_node;
- ent->data = (void *)(long)(node->handle);
- }
+ proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_fops,
+ (void *)(long)(node->handle));
}
if (!proc_pnp_boot)
return -EIO;
- ent = create_proc_entry(name, 0, proc_pnp_boot);
- if (ent) {
- ent->read_proc = proc_read_node;
- ent->write_proc = proc_write_node;
- ent->data = (void *)(long)(node->handle + 0x100);
+ if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_fops,
+ (void *)(long)(node->handle + 0x100)))
return 0;
- }
-
return -EIO;
}
proc_pnp_boot = proc_mkdir("boot", proc_pnp);
if (!proc_pnp_boot)
return -EIO;
- create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL);
- create_proc_read_entry("configuration_info", 0, proc_pnp,
- proc_read_pnpconfig, NULL);
- create_proc_read_entry("escd_info", 0, proc_pnp, proc_read_escdinfo,
- NULL);
- create_proc_read_entry("escd", S_IRUSR, proc_pnp, proc_read_escd, NULL);
- create_proc_read_entry("legacy_device_resources", 0, proc_pnp,
- proc_read_legacyres, NULL);
+ proc_create("devices", 0, proc_pnp, &pnp_devices_proc_fops);
+ proc_create("configuration_info", 0, proc_pnp, &pnpconfig_proc_fops);
+ proc_create("escd_info", 0, proc_pnp, &escd_info_proc_fops);
+ proc_create("escd", S_IRUSR, proc_pnp, &escd_proc_fops);
+ proc_create("legacy_device_resources", 0, proc_pnp, &pnp_legacyres_proc_fops);
return 0;
}
If you say Y here you will get support for the
watchdog timer in the ST M41T60 and M41T80 RTC chip series.
+config RTC_DRV_BQ32K
+ tristate "TI BQ32000"
+ help
+ If you say Y here you will get support for the TI
+ BQ32000 I2C RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-bq32k.
+
config RTC_DRV_DM355EVM
tristate "TI DaVinci DM355 EVM RTC"
depends on MFD_DM355EVM_MSP
Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
support. This chip contains a battery- and capacitor-backed RTC.
+config RTC_DRV_NUC900
+ tristate "NUC910/NUC920 RTC driver"
+ depends on RTC_CLASS && ARCH_W90X900
+ help
+ If you say yes here you get support for the RTC subsystem of the
+ NUC910/NUC920 used in embedded systems.
comment "on-CPU RTC drivers"
config RTC_DRV_OMAP
tristate "TI OMAP1"
- depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730
+ depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
help
- Say "yes" here to support the real time clock on TI OMAP1 chips.
- This driver can also be built as a module called rtc-omap.
+ Say "yes" here to support the real time clock on TI OMAP1 and
+ DA8xx/OMAP-L13x chips. This driver can also be built as a
+ module called rtc-omap.
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
If you say Y here you will get support for the RTC found on
the PCAP2 ASIC used on some Motorola phones.
+config RTC_DRV_MC13783
+ depends on MFD_MC13783
+ tristate "Freescale MC13783 RTC"
+ help
+ This enables support for the Freescale MC13783 PMIC RTC.
+
endif # RTC_CLASS
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o
obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
+obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
+obj-$(CONFIG_RTC_DRV_MC13783) += rtc-mc13783.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
+obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
goto out_iounmap;
}
+ platform_set_drvdata(pdev, rtc);
+
rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
&at32_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc)) {
goto out_free_irq;
}
- platform_set_drvdata(pdev, rtc);
device_init_wakeup(&pdev->dev, 1);
dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
return 0;
out_free_irq:
+ platform_set_drvdata(pdev, NULL);
free_irq(irq, rtc);
out_iounmap:
iounmap(rtc->regs);
--- /dev/null
+/*
+ * Driver for TI BQ32000 RTC.
+ *
+ * Copyright (C) 2009 Semihalf.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/bcd.h>
+
+#define BQ32K_SECONDS 0x00 /* Seconds register address */
+#define BQ32K_SECONDS_MASK 0x7F /* Mask over seconds value */
+#define BQ32K_STOP 0x80 /* Oscillator Stop flag */
+
+#define BQ32K_MINUTES 0x01 /* Minutes register address */
+#define BQ32K_MINUTES_MASK 0x7F /* Mask over minutes value */
+#define BQ32K_OF 0x80 /* Oscillator Failure flag */
+
+#define BQ32K_HOURS_MASK 0x3F /* Mask over hours value */
+#define BQ32K_CENT 0x40 /* Century flag */
+#define BQ32K_CENT_EN 0x80 /* Century flag enable bit */
+
+struct bq32k_regs {
+ uint8_t seconds;
+ uint8_t minutes;
+ uint8_t cent_hours;
+ uint8_t day;
+ uint8_t date;
+ uint8_t month;
+ uint8_t years;
+};
+
+static struct i2c_driver bq32k_driver;
+
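+/*
+ * The BQ32000 registers sit at sequential I2C offsets: a read is a
+ * two-message transfer (write the register offset, then read len bytes
+ * back), and a write sends the offset followed by the payload in a
+ * single message.
+ */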
+static int bq32k_read(struct device *dev, void *data, uint8_t off, uint8_t len)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &off,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data,
+ }
+ };
+
+ if (i2c_transfer(client->adapter, msgs, 2) == 2)
+ return 0;
+
+ return -EIO;
+}
+
+static int bq32k_write(struct device *dev, void *data, uint8_t off, uint8_t len)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ uint8_t buffer[len + 1];
+
+ buffer[0] = off;
+ memcpy(&buffer[1], data, len);
+
+ if (i2c_master_send(client, buffer, len + 1) == len + 1)
+ return 0;
+
+ return -EIO;
+}
+
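+/*
+ * Time is kept in BCD. The chip counts years 00-99 plus a century flag,
+ * while tm_year is years since 1900, hence the +100 when BQ32K_CENT is
+ * set (i.e. dates from 2000 onwards).
+ */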
+static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct bq32k_regs regs;
+ int error;
+
+ error = bq32k_read(dev, &regs, 0, sizeof(regs));
+ if (error)
+ return error;
+
+ tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
+ tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK);
+ tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
+ tm->tm_mday = bcd2bin(regs.date);
+ tm->tm_wday = bcd2bin(regs.day) - 1;
+ tm->tm_mon = bcd2bin(regs.month) - 1;
+ tm->tm_year = bcd2bin(regs.years) +
+ ((regs.cent_hours & BQ32K_CENT) ? 100 : 0);
+
+ return rtc_valid_tm(tm);
+}
+
+static int bq32k_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct bq32k_regs regs;
+
+ regs.seconds = bin2bcd(tm->tm_sec);
+ regs.minutes = bin2bcd(tm->tm_min);
+ regs.cent_hours = bin2bcd(tm->tm_hour) | BQ32K_CENT_EN;
+ regs.day = bin2bcd(tm->tm_wday + 1);
+ regs.date = bin2bcd(tm->tm_mday);
+ regs.month = bin2bcd(tm->tm_mon + 1);
+
+ if (tm->tm_year >= 100) {
+ regs.cent_hours |= BQ32K_CENT;
+ regs.years = bin2bcd(tm->tm_year - 100);
+ } else
+ regs.years = bin2bcd(tm->tm_year);
+
+ return bq32k_write(dev, &regs, 0, sizeof(regs));
+}
+
+static const struct rtc_class_ops bq32k_rtc_ops = {
+ .read_time = bq32k_rtc_read_time,
+ .set_time = bq32k_rtc_set_time,
+};
+
+static int bq32k_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct rtc_device *rtc;
+ uint8_t reg;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ /* Check Oscillator Stop flag */
+ error = bq32k_read(dev, &reg, BQ32K_SECONDS, 1);
+ if (!error && (reg & BQ32K_STOP)) {
+ dev_warn(dev, "Oscillator was halted. Restarting...\n");
+ reg &= ~BQ32K_STOP;
+ error = bq32k_write(dev, &reg, BQ32K_SECONDS, 1);
+ }
+ if (error)
+ return error;
+
+ /* Check Oscillator Failure flag */
+ error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
+ if (!error && (reg & BQ32K_OF)) {
+ dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
+ reg &= ~BQ32K_OF;
+ error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1);
+ }
+ if (error)
+ return error;
+
+ rtc = rtc_device_register(bq32k_driver.driver.name, &client->dev,
+ &bq32k_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ i2c_set_clientdata(client, rtc);
+
+ return 0;
+}
+
+static int __devexit bq32k_remove(struct i2c_client *client)
+{
+ struct rtc_device *rtc = i2c_get_clientdata(client);
+
+ rtc_device_unregister(rtc);
+ return 0;
+}
+
+static const struct i2c_device_id bq32k_id[] = {
+ { "bq32000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bq32k_id);
+
+static struct i2c_driver bq32k_driver = {
+ .driver = {
+ .name = "bq32k",
+ .owner = THIS_MODULE,
+ },
+ .probe = bq32k_probe,
+ .remove = __devexit_p(bq32k_remove),
+ .id_table = bq32k_id,
+};
+
+static __init int bq32k_init(void)
+{
+ return i2c_add_driver(&bq32k_driver);
+}
+module_init(bq32k_init);
+
+static __exit void bq32k_exit(void)
+{
+ i2c_del_driver(&bq32k_driver);
+}
+module_exit(bq32k_exit);
+
+MODULE_AUTHOR("Semihalf, Piotr Ziecik <kosmo@semihalf.com>");
+MODULE_DESCRIPTION("TI BQ32000 I2C RTC driver");
+MODULE_LICENSE("GPL");
goto out_free;
}
+ platform_set_drvdata(pdev, p);
+
p->rtc = rtc_device_register("bq4802", &pdev->dev,
&bq4802_ops, THIS_MODULE);
if (IS_ERR(p->rtc)) {
goto out_iounmap;
}
- platform_set_drvdata(pdev, p);
err = 0;
out:
return err;
return 0;
}
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-
-static int
-cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
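+/*
+ * The RTC core now calls dedicated alarm_irq_enable/update_irq_enable
+ * hooks, so the RTC_AIE_ON/OFF and RTC_UIE_ON/OFF cases no longer need
+ * to be multiplexed through an ioctl handler.
+ */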
+static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned long flags;
- switch (cmd) {
- case RTC_AIE_OFF:
- case RTC_AIE_ON:
- case RTC_UIE_OFF:
- case RTC_UIE_ON:
- if (!is_valid_irq(cmos->irq))
- return -EINVAL;
- break;
- /* PIE ON/OFF is handled by cmos_irq_set_state() */
- default:
- return -ENOIOCTLCMD;
- }
+ if (!is_valid_irq(cmos->irq))
+ return -EINVAL;
spin_lock_irqsave(&rtc_lock, flags);
- switch (cmd) {
- case RTC_AIE_OFF: /* alarm off */
- cmos_irq_disable(cmos, RTC_AIE);
- break;
- case RTC_AIE_ON: /* alarm on */
+
+ if (enabled)
cmos_irq_enable(cmos, RTC_AIE);
- break;
- case RTC_UIE_OFF: /* update off */
- cmos_irq_disable(cmos, RTC_UIE);
- break;
- case RTC_UIE_ON: /* update on */
- cmos_irq_enable(cmos, RTC_UIE);
- break;
- }
+ else
+ cmos_irq_disable(cmos, RTC_AIE);
+
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
-#else
-#define cmos_rtc_ioctl NULL
-#endif
+static int cmos_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ if (!is_valid_irq(cmos->irq))
+ return -EINVAL;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ if (enabled)
+ cmos_irq_enable(cmos, RTC_UIE);
+ else
+ cmos_irq_disable(cmos, RTC_UIE);
+
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return 0;
+}
#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
#endif
static const struct rtc_class_ops cmos_rtc_ops = {
- .ioctl = cmos_rtc_ioctl,
- .read_time = cmos_read_time,
- .set_time = cmos_set_time,
- .read_alarm = cmos_read_alarm,
- .set_alarm = cmos_set_alarm,
- .proc = cmos_procfs,
- .irq_set_freq = cmos_irq_set_freq,
- .irq_set_state = cmos_irq_set_state,
+ .read_time = cmos_read_time,
+ .set_time = cmos_set_time,
+ .read_alarm = cmos_read_alarm,
+ .set_alarm = cmos_set_alarm,
+ .proc = cmos_procfs,
+ .irq_set_freq = cmos_irq_set_freq,
+ .irq_set_state = cmos_irq_set_state,
+ .alarm_irq_enable = cmos_alarm_irq_enable,
+ .update_irq_enable = cmos_update_irq_enable,
};
/*----------------------------------------------------------------*/
mask = RTC_IRQMASK;
tmp &= ~mask;
CMOS_WRITE(tmp, RTC_CONTROL);
- hpet_mask_rtc_irq_bit(mask);
+ /* shut down hpet emulation - we don't need it for alarm */
+ hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
cmos_checkintr(cmos, tmp);
}
spin_unlock_irq(&rtc_lock);
#ifdef RTC_SET_CHARGE
case RTC_SET_CHARGE:
{
- struct ds1302_rtc *rtc = dev_get_drvdata(dev);
int tcs_val;
if (copy_from_user(&tcs_val, (int __user *)arg, sizeof(int)))
static int __devinit ds1305_probe(struct spi_device *spi)
{
struct ds1305 *ds1305;
- struct rtc_device *rtc;
int status;
u8 addr, value;
struct ds1305_platform_data *pdata = spi->dev.platform_data;
dev_dbg(&spi->dev, "AM/PM\n");
/* register RTC ... from here on, ds1305->ctrl needs locking */
- rtc = rtc_device_register("ds1305", &spi->dev,
+ ds1305->rtc = rtc_device_register("ds1305", &spi->dev,
&ds1305_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- status = PTR_ERR(rtc);
+ if (IS_ERR(ds1305->rtc)) {
+ status = PTR_ERR(ds1305->rtc);
dev_dbg(&spi->dev, "register rtc --> %d\n", status);
goto fail0;
}
- ds1305->rtc = rtc;
/* Maybe set up alarm IRQ; be ready to handle it triggering right
* away. NOTE that we don't share this. The signal is active low,
if (spi->irq) {
INIT_WORK(&ds1305->work, ds1305_work);
status = request_irq(spi->irq, ds1305_irq,
- 0, dev_name(&rtc->dev), ds1305);
+ 0, dev_name(&ds1305->rtc->dev), ds1305);
if (status < 0) {
dev_dbg(&spi->dev, "request_irq %d --> %d\n",
spi->irq, status);
fail2:
free_irq(spi->irq, ds1305);
fail1:
- rtc_device_unregister(rtc);
+ rtc_device_unregister(ds1305->rtc);
fail0:
kfree(ds1305);
return status;
static int __devexit ds1305_remove(struct spi_device *spi)
{
- struct ds1305 *ds1305 = spi_get_drvdata(spi);
+ struct ds1305 *ds1305 = spi_get_drvdata(spi);
sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
}
if (want_irq) {
- err = request_irq(client->irq, ds1307_irq, 0,
+ err = request_irq(client->irq, ds1307_irq, IRQF_SHARED,
ds1307->rtc->name, client);
if (err) {
dev_err(&client->dev,
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr; /* virtual base address */
- unsigned long baseaddr; /* physical base address */
int size; /* amount of memory mapped */
int irq;
unsigned int irqen;
int alrm_min;
int alrm_hour;
int alrm_mday;
+ spinlock_t lock;
};
static DEFINE_SPINLOCK(ds1511_lock);
{
unsigned long flags;
- spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ spin_lock_irqsave(&pdata->lock, flags);
rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
0x80 : bin2bcd(pdata->alrm_mday) & 0x3f,
RTC_ALARM_DATE);
RTC_ALARM_SEC);
rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
rtc_read(RTC_CMD1); /* clear interrupts */
- spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+ spin_unlock_irqrestore(&pdata->lock, flags);
}
static int
{
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- unsigned long events = RTC_IRQF;
+ unsigned long events = 0;
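+ /*
+ * pdata->lock serialises us against ds1511_rtc_update_alarm(); it also
+ * closes the registration race, since pdata->rtc may still be NULL if
+ * the (shared) interrupt fires before rtc_device_register() returns.
+ */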
+ spin_lock(&pdata->lock);
/*
* read and clear interrupt
*/
- if (!(rtc_read(RTC_CMD1) & DS1511_IRQF)) {
- return IRQ_NONE;
- }
- if (rtc_read(RTC_ALARM_SEC) & 0x80) {
- events |= RTC_UF;
- } else {
- events |= RTC_AF;
- }
- rtc_update_irq(pdata->rtc, 1, events);
- return IRQ_HANDLED;
+ if (rtc_read(RTC_CMD1) & DS1511_IRQF) {
+ events = RTC_IRQF;
+ if (rtc_read(RTC_ALARM_SEC) & 0x80)
+ events |= RTC_UF;
+ else
+ events |= RTC_AF;
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
+ }
+ spin_unlock(&pdata->lock);
+ return events ? IRQ_HANDLED : IRQ_NONE;
}
- static int
-ds1511_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- if (pdata->irq <= 0) {
- return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
- }
- switch (cmd) {
- case RTC_AIE_OFF:
- pdata->irqen &= ~RTC_AF;
- ds1511_rtc_update_alarm(pdata);
- break;
- case RTC_AIE_ON:
+ if (pdata->irq <= 0)
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_AF;
- ds1511_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_OFF:
- pdata->irqen &= ~RTC_UF;
- ds1511_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_ON:
+ else
+ pdata->irqen &= ~RTC_AF;
+ ds1511_rtc_update_alarm(pdata);
+ return 0;
+}
+
+static int ds1511_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq <= 0)
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_UF;
- ds1511_rtc_update_alarm(pdata);
- break;
- default:
- return -ENOIOCTLCMD;
- }
+ else
+ pdata->irqen &= ~RTC_UF;
+ ds1511_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops ds1511_rtc_ops = {
- .read_time = ds1511_rtc_read_time,
- .set_time = ds1511_rtc_set_time,
- .read_alarm = ds1511_rtc_read_alarm,
- .set_alarm = ds1511_rtc_set_alarm,
- .ioctl = ds1511_rtc_ioctl,
+ .read_time = ds1511_rtc_read_time,
+ .set_time = ds1511_rtc_set_time,
+ .read_alarm = ds1511_rtc_read_alarm,
+ .set_alarm = ds1511_rtc_set_alarm,
+ .alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
+ .update_irq_enable = ds1511_rtc_update_irq_enable,
};
static ssize_t
{
struct rtc_device *rtc;
struct resource *res;
- struct rtc_plat_data *pdata = NULL;
+ struct rtc_plat_data *pdata;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
return -ENODEV;
}
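+ /*
+ * Switch to managed (devm_*) resources: they are released automatically
+ * when probe fails or the device is removed, which lets the hand-rolled
+ * unwind path below be dropped.
+ */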
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
pdata->size = res->end - res->start + 1;
- if (!request_mem_region(res->start, pdata->size, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- pdata->size = pdata->size;
- ds1511_base = ioremap(pdata->baseaddr, pdata->size);
- if (!ds1511_base) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
+ pdev->name))
+ return -EBUSY;
+ ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size);
+ if (!ds1511_base)
+ return -ENOMEM;
pdata->ioaddr = ds1511_base;
pdata->irq = platform_get_irq(pdev, 0);
dev_warn(&pdev->dev, "voltage-low detected.\n");
}
+ spin_lock_init(&pdata->lock);
+ platform_set_drvdata(pdev, pdata);
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
*/
if (pdata->irq > 0) {
rtc_read(RTC_CMD1);
- if (request_irq(pdata->irq, ds1511_interrupt,
+ if (devm_request_irq(&pdev->dev, pdata->irq, ds1511_interrupt,
IRQF_DISABLED | IRQF_SHARED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
rtc = rtc_device_register(pdev->name, &pdev->dev, &ds1511_rtc_ops,
THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
pdata->rtc = rtc;
- platform_set_drvdata(pdev, pdata);
+
ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (ret) {
- goto out;
- }
- return 0;
- out:
- if (pdata->rtc) {
+ if (ret)
rtc_device_unregister(pdata->rtc);
- }
- if (pdata->irq > 0) {
- free_irq(pdata->irq, pdev);
- }
- if (ds1511_base) {
- iounmap(ds1511_base);
- ds1511_base = NULL;
- }
- if (pdata->baseaddr) {
- release_mem_region(pdata->baseaddr, pdata->size);
- }
-
- kfree(pdata);
return ret;
}
sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
rtc_device_unregister(pdata->rtc);
- pdata->rtc = NULL;
if (pdata->irq > 0) {
/*
* disable the alarm interrupt
*/
rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
rtc_read(RTC_CMD1);
- free_irq(pdata->irq, pdev);
}
- iounmap(pdata->ioaddr);
- ds1511_base = NULL;
- release_mem_region(pdata->baseaddr, pdata->size);
- kfree(pdata);
return 0;
}
#include <linux/platform_device.h>
#include <linux/io.h>
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
#define RTC_REG_SIZE 0x2000
#define RTC_OFFSET 0x1ff0
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
- resource_size_t baseaddr;
unsigned long last_jiffies;
int irq;
unsigned int irqen;
int alrm_min;
int alrm_hour;
int alrm_mday;
+ spinlock_t lock;
};
static int ds1553_rtc_set_time(struct device *dev, struct rtc_time *tm)
void __iomem *ioaddr = pdata->ioaddr;
unsigned long flags;
- spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ spin_lock_irqsave(&pdata->lock, flags);
writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
0x80 : bin2bcd(pdata->alrm_mday),
ioaddr + RTC_DATE_ALARM);
ioaddr + RTC_SECONDS_ALARM);
writeb(pdata->irqen ? RTC_INTS_AE : 0, ioaddr + RTC_INTERRUPTS);
readb(ioaddr + RTC_FLAGS); /* clear interrupts */
- spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+ spin_unlock_irqrestore(&pdata->lock, flags);
}
static int ds1553_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
- unsigned long events = RTC_IRQF;
+ unsigned long events = 0;
+ spin_lock(&pdata->lock);
/* read and clear interrupt */
- if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF))
- return IRQ_NONE;
- if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
- events |= RTC_UF;
- else
- events |= RTC_AF;
- rtc_update_irq(pdata->rtc, 1, events);
- return IRQ_HANDLED;
+ if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF) {
+ events = RTC_IRQF;
+ if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
+ events |= RTC_UF;
+ else
+ events |= RTC_AF;
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
+ }
+ spin_unlock(&pdata->lock);
+ return events ? IRQ_HANDLED : IRQ_NONE;
}
-static int ds1553_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int ds1553_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0)
- return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
- switch (cmd) {
- case RTC_AIE_OFF:
- pdata->irqen &= ~RTC_AF;
- ds1553_rtc_update_alarm(pdata);
- break;
- case RTC_AIE_ON:
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_AF;
- ds1553_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_OFF:
- pdata->irqen &= ~RTC_UF;
- ds1553_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_ON:
+ else
+ pdata->irqen &= ~RTC_AF;
+ ds1553_rtc_update_alarm(pdata);
+ return 0;
+}
+
+static int ds1553_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq <= 0)
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_UF;
- ds1553_rtc_update_alarm(pdata);
- break;
- default:
- return -ENOIOCTLCMD;
- }
+ else
+ pdata->irqen &= ~RTC_UF;
+ ds1553_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops ds1553_rtc_ops = {
- .read_time = ds1553_rtc_read_time,
- .set_time = ds1553_rtc_set_time,
- .read_alarm = ds1553_rtc_read_alarm,
- .set_alarm = ds1553_rtc_set_alarm,
- .ioctl = ds1553_rtc_ioctl,
+ .read_time = ds1553_rtc_read_time,
+ .set_time = ds1553_rtc_set_time,
+ .read_alarm = ds1553_rtc_read_alarm,
+ .set_alarm = ds1553_rtc_set_alarm,
+ .alarm_irq_enable = ds1553_rtc_alarm_irq_enable,
+ .update_irq_enable = ds1553_rtc_update_irq_enable,
};
static ssize_t ds1553_nvram_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
struct rtc_device *rtc;
struct resource *res;
unsigned int cen, sec;
- struct rtc_plat_data *pdata = NULL;
- void __iomem *ioaddr = NULL;
+ struct rtc_plat_data *pdata;
+ void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
- if (!ioaddr) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
+ pdev->name))
+ return -EBUSY;
+
+ ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
+ if (!ioaddr)
+ return -ENOMEM;
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_BLF)
dev_warn(&pdev->dev, "voltage-low detected.\n");
+ spin_lock_init(&pdata->lock);
+ pdata->last_jiffies = jiffies;
+ platform_set_drvdata(pdev, pdata);
if (pdata->irq > 0) {
writeb(0, ioaddr + RTC_INTERRUPTS);
- if (request_irq(pdata->irq, ds1553_rtc_interrupt,
+ if (devm_request_irq(&pdev->dev, pdata->irq,
+ ds1553_rtc_interrupt,
IRQF_DISABLED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
pdata->irq = 0;
rtc = rtc_device_register(pdev->name, &pdev->dev,
&ds1553_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
pdata->rtc = rtc;
- pdata->last_jiffies = jiffies;
- platform_set_drvdata(pdev, pdata);
+
ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
if (ret)
- goto out;
- return 0;
- out:
- if (pdata->rtc)
- rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0)
- free_irq(pdata->irq, pdev);
- if (ioaddr)
- iounmap(ioaddr);
- if (pdata->baseaddr)
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
+ rtc_device_unregister(rtc);
return ret;
}
sysfs_remove_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0) {
+ if (pdata->irq > 0)
writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
- free_irq(pdata->irq, pdev);
- }
- iounmap(pdata->ioaddr);
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
return 0;
}
#include <linux/platform_device.h>
#include <linux/io.h>
-#define DRV_VERSION "0.3"
+#define DRV_VERSION "0.4"
#define RTC_SIZE 8
void __iomem *ioaddr_rtc;
size_t size_nvram;
size_t size;
- resource_size_t baseaddr;
unsigned long last_jiffies;
struct bin_attribute nvram_attr;
};
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr_nvram;
ssize_t count;
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr_nvram;
ssize_t count;
struct rtc_device *rtc;
struct resource *res;
unsigned int cen, sec;
- struct rtc_plat_data *pdata = NULL;
- void __iomem *ioaddr = NULL;
+ struct rtc_plat_data *pdata;
+ void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->size = res->end - res->start + 1;
- if (!request_mem_region(res->start, pdata->size, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- ioaddr = ioremap(pdata->baseaddr, pdata->size);
- if (!ioaddr) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
+ pdev->name))
+ return -EBUSY;
+ ioaddr = devm_ioremap(&pdev->dev, res->start, pdata->size);
+ if (!ioaddr)
+ return -ENOMEM;
+
pdata->ioaddr_nvram = ioaddr;
pdata->size_nvram = pdata->size - RTC_SIZE;
pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
if (!(readb(ioaddr + RTC_DAY) & RTC_BATT_FLAG))
dev_warn(&pdev->dev, "voltage-low detected.\n");
+ pdata->last_jiffies = jiffies;
+ platform_set_drvdata(pdev, pdata);
rtc = rtc_device_register(pdev->name, &pdev->dev,
&ds1742_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
pdata->rtc = rtc;
- pdata->last_jiffies = jiffies;
- platform_set_drvdata(pdev, pdata);
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
if (ret) {
dev_err(&pdev->dev, "creating nvram file in sysfs failed\n");
- goto out;
+ rtc_device_unregister(rtc);
}
-
- return 0;
- out:
- if (pdata->rtc)
- rtc_device_unregister(pdata->rtc);
- if (pdata->ioaddr_nvram)
- iounmap(pdata->ioaddr_nvram);
- if (pdata->baseaddr)
- release_mem_region(pdata->baseaddr, pdata->size);
- kfree(pdata);
return ret;
}
sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
rtc_device_unregister(pdata->rtc);
- iounmap(pdata->ioaddr_nvram);
- release_mem_region(pdata->baseaddr, pdata->size);
- kfree(pdata);
return 0;
}
static int __devinit m48t35_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
struct resource *res;
struct m48t35_priv *priv;
int ret = 0;
ret = -ENOMEM;
goto out;
}
+
spin_lock_init(&priv->lock);
- rtc = rtc_device_register("m48t35", &pdev->dev,
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->rtc = rtc_device_register("m48t35", &pdev->dev,
&m48t35_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
+ if (IS_ERR(priv->rtc)) {
+ ret = PTR_ERR(priv->rtc);
goto out;
}
- priv->rtc = rtc;
- platform_set_drvdata(pdev, priv);
+
return 0;
out:
- if (priv->rtc)
- rtc_device_unregister(priv->rtc);
if (priv->reg)
iounmap(priv->reg);
if (priv->baseaddr)
goto out;
}
+ spin_lock_init(&m48t59->lock);
+ platform_set_drvdata(pdev, m48t59);
+
m48t59->rtc = rtc_device_register(name, &pdev->dev, ops, THIS_MODULE);
if (IS_ERR(m48t59->rtc)) {
ret = PTR_ERR(m48t59->rtc);
m48t59_nvram_attr.size = pdata->offset;
ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
- if (ret)
+ if (ret) {
+ rtc_device_unregister(m48t59->rtc);
goto out;
+ }
- spin_lock_init(&m48t59->lock);
- platform_set_drvdata(pdev, m48t59);
return 0;
out:
- if (!IS_ERR(m48t59->rtc))
- rtc_device_unregister(m48t59->rtc);
if (m48t59->irq != NO_IRQ)
free_irq(m48t59->irq, &pdev->dev);
if (m48t59->ioaddr)
--- /dev/null
+/*
+ * Real Time Clock driver for Freescale MC13783 PMIC
+ *
+ * (C) 2009 Sascha Hauer, Pengutronix
+ * (C) 2009 Uwe Kleine-Koenig, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/mc13783.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+
+#define DRIVER_NAME "mc13783-rtc"
+
+#define MC13783_RTCTOD 20
+#define MC13783_RTCTODA 21
+#define MC13783_RTCDAY 22
+#define MC13783_RTCDAYA 23
+
+struct mc13783_rtc {
+ struct rtc_device *rtc;
+ struct mc13783 *mc13783;
+ int valid;
+};
+
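+/*
+ * Days and time-of-day live in separate registers, so RTCDAY is read
+ * twice around the RTCTOD read: if a midnight rollover slipped in
+ * between, the seconds value decides which day count to keep, and any
+ * other mismatch is reported as -EIO.
+ */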
+static int mc13783_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ unsigned int seconds, days1, days2;
+ unsigned long s1970;
+ int ret;
+
+ mc13783_lock(priv->mc13783);
+
+ if (!priv->valid) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days1);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCTOD, &seconds);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days2);
+out:
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ if (days2 == days1 + 1) {
+ if (seconds >= 86400 / 2)
+ days2 = days1;
+ else
+ days1 = days2;
+ }
+
+ if (days1 != days2)
+ return -EIO;
+
+ s1970 = days1 * 86400 + seconds;
+
+ rtc_time_to_tm(s1970, tm);
+
+ return rtc_valid_tm(tm);
+}
+
+static int mc13783_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ unsigned int seconds, days;
+ int ret;
+
+ seconds = secs % 86400;
+ days = secs / 86400;
+
+ mc13783_lock(priv->mc13783);
+
+ /*
+ * first write seconds=0 to prevent a day switch between writing days
+ * and seconds below
+ */
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, 0);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCDAY, days);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, seconds);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_ackirq(priv->mc13783, MC13783_IRQ_RTCRST);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_unmask(priv->mc13783, MC13783_IRQ_RTCRST);
+out:
+ priv->valid = !ret;
+
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static irqreturn_t mc13783_rtc_update_handler(int irq, void *dev)
+{
+ struct mc13783_rtc *priv = dev;
+ struct mc13783 *mc13783 = priv->mc13783;
+
+ dev_dbg(&priv->rtc->dev, "1HZ\n");
+
+ rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF);
+
+ mc13783_ackirq(mc13783, irq);
+
+ return IRQ_HANDLED;
+}
+
+static int mc13783_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ int ret = -ENODATA;
+
+ mc13783_lock(priv->mc13783);
+ if (!priv->valid)
+ goto out;
+
+ ret = (enabled ? mc13783_unmask : mc13783_mask)(priv->mc13783,
+ MC13783_IRQ_1HZ);
+out:
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static const struct rtc_class_ops mc13783_rtc_ops = {
+ .read_time = mc13783_rtc_read_time,
+ .set_mmss = mc13783_rtc_set_mmss,
+ .update_irq_enable = mc13783_rtc_update_irq_enable,
+};
+
+static irqreturn_t mc13783_rtc_reset_handler(int irq, void *dev)
+{
+ struct mc13783_rtc *priv = dev;
+ struct mc13783 *mc13783 = priv->mc13783;
+
+ dev_dbg(&priv->rtc->dev, "RTCRST\n");
+ priv->valid = 0;
+
+ mc13783_mask(mc13783, irq);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mc13783_rtc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct mc13783_rtc *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mc13783 = dev_get_drvdata(pdev->dev.parent);
+ platform_set_drvdata(pdev, priv);
+
+ priv->valid = 1;
+
+ mc13783_lock(priv->mc13783);
+
+ ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_RTCRST,
+ mc13783_rtc_reset_handler, DRIVER_NAME, priv);
+ if (ret)
+ goto err_reset_irq_request;
+
+ ret = mc13783_irq_request_nounmask(priv->mc13783, MC13783_IRQ_1HZ,
+ mc13783_rtc_update_handler, DRIVER_NAME, priv);
+ if (ret)
+ goto err_update_irq_request;
+
+ mc13783_unlock(priv->mc13783);
+
+ priv->rtc = rtc_device_register(pdev->name,
+ &pdev->dev, &mc13783_rtc_ops, THIS_MODULE);
+
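+ /*
+ * The error labels sit inside this block so that the IRQs requested
+ * above are freed in reverse order on any failure.
+ */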
+ if (IS_ERR(priv->rtc)) {
+ ret = PTR_ERR(priv->rtc);
+
+ mc13783_lock(priv->mc13783);
+
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
+err_update_irq_request:
+
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
+err_reset_irq_request:
+
+ mc13783_unlock(priv->mc13783);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
+ }
+
+ return ret;
+}
+
+static int __devexit mc13783_rtc_remove(struct platform_device *pdev)
+{
+ struct mc13783_rtc *priv = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(priv->rtc);
+
+ mc13783_lock(priv->mc13783);
+
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
+
+ mc13783_unlock(priv->mc13783);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static struct platform_driver mc13783_rtc_driver = {
+ .remove = __devexit_p(mc13783_rtc_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mc13783_rtc_init(void)
+{
+ return platform_driver_probe(&mc13783_rtc_driver, &mc13783_rtc_probe);
+}
+module_init(mc13783_rtc_init);
+
+static void __exit mc13783_rtc_exit(void)
+{
+ platform_driver_unregister(&mc13783_rtc_driver);
+}
+module_exit(mc13783_rtc_exit);
+
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("RTC driver for Freescale MC13783 PMIC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
#define RTC_MONTH_OFFS 8
#define RTC_YEAR_OFFS 16
+#define RTC_ALARM_TIME_REG_OFFS 8
+#define RTC_ALARM_DATE_REG_OFFS 0xc
+#define RTC_ALARM_VALID (1 << 7)
+
+#define RTC_ALARM_INTERRUPT_MASK_REG_OFFS 0x10
+#define RTC_ALARM_INTERRUPT_CASUE_REG_OFFS 0x14
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
+ int irq;
};
static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm)
return rtc_valid_tm(tm);
}
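+/*
+ * Each BCD alarm field carries RTC_ALARM_VALID in bit 7; set_alarm below
+ * only marks the fields the caller supplied (tm_* >= 0), so unset fields
+ * are presumably treated as wildcards by the match logic.
+ */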
+static int mv_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 rtc_time, rtc_date;
+ unsigned int year, month, day, hour, minute, second, wday;
+
+ rtc_time = readl(ioaddr + RTC_ALARM_TIME_REG_OFFS);
+ rtc_date = readl(ioaddr + RTC_ALARM_DATE_REG_OFFS);
+
+ second = rtc_time & 0x7f;
+ minute = (rtc_time >> RTC_MINUTES_OFFS) & 0x7f;
+ hour = (rtc_time >> RTC_HOURS_OFFS) & 0x3f; /* assume 24-hour mode */
+ wday = (rtc_time >> RTC_WDAY_OFFS) & 0x7;
+
+ day = rtc_date & 0x3f;
+ month = (rtc_date >> RTC_MONTH_OFFS) & 0x3f;
+ year = (rtc_date >> RTC_YEAR_OFFS) & 0xff;
+
+ alm->time.tm_sec = bcd2bin(second);
+ alm->time.tm_min = bcd2bin(minute);
+ alm->time.tm_hour = bcd2bin(hour);
+ alm->time.tm_mday = bcd2bin(day);
+ alm->time.tm_wday = bcd2bin(wday);
+ alm->time.tm_mon = bcd2bin(month) - 1;
+ /* hw counts from year 2000, but tm_year is relative to 1900 */
+ alm->time.tm_year = bcd2bin(year) + 100;
+
+ if (rtc_valid_tm(&alm->time) < 0) {
+ dev_err(dev, "retrieved alarm date/time is not valid.\n");
+ rtc_time_to_tm(0, &alm->time);
+ }
+
+ alm->enabled = !!readl(ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ return 0;
+}
+
+static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 rtc_reg = 0;
+
+ if (alm->time.tm_sec >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_sec))
+ << RTC_SECONDS_OFFS;
+ if (alm->time.tm_min >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_min))
+ << RTC_MINUTES_OFFS;
+ if (alm->time.tm_hour >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_hour))
+ << RTC_HOURS_OFFS;
+
+ writel(rtc_reg, ioaddr + RTC_ALARM_TIME_REG_OFFS);
+
+ if (alm->time.tm_mday >= 0)
+ rtc_reg = (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mday))
+ << RTC_MDAY_OFFS;
+ else
+ rtc_reg = 0;
+
+ if (alm->time.tm_mon >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mon + 1))
+ << RTC_MONTH_OFFS;
+
+ if (alm->time.tm_year >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_year % 100))
+ << RTC_YEAR_OFFS;
+
+ writel(rtc_reg, ioaddr + RTC_ALARM_DATE_REG_OFFS);
+ writel(0, ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS);
+ writel(alm->enabled ? 1 : 0,
+ ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+
+ return 0;
+}
+
+static int mv_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+
+ if (pdata->irq < 0)
+ return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ break;
+ case RTC_AIE_ON:
+ writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static irqreturn_t mv_rtc_interrupt(int irq, void *data)
+{
+ struct rtc_plat_data *pdata = data;
+ void __iomem *ioaddr = pdata->ioaddr;
+
+ /* alarm irq? */
+ if (!readl(ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS))
+ return IRQ_NONE;
+
+ /* clear interrupt */
+ writel(0, ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS);
+ rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
static const struct rtc_class_ops mv_rtc_ops = {
.read_time = mv_rtc_read_time,
.set_time = mv_rtc_set_time,
};
-static int __init mv_rtc_probe(struct platform_device *pdev)
+static const struct rtc_class_ops mv_rtc_alarm_ops = {
+ .read_time = mv_rtc_read_time,
+ .set_time = mv_rtc_set_time,
+ .read_alarm = mv_rtc_read_alarm,
+ .set_alarm = mv_rtc_set_alarm,
+ .ioctl = mv_rtc_ioctl,
+};
+
+static int __devinit mv_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct rtc_plat_data *pdata;
}
}
+ pdata->irq = platform_get_irq(pdev, 0);
+
platform_set_drvdata(pdev, pdata);
- pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
- &mv_rtc_ops, THIS_MODULE);
+
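+ /*
+ * Only advertise the alarm callbacks when the platform provides an
+ * IRQ; without one, fall back to the plain read/set operations.
+ */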
+ if (pdata->irq >= 0) {
+ device_init_wakeup(&pdev->dev, 1);
+ pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &mv_rtc_alarm_ops,
+ THIS_MODULE);
+ } else
+ pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &mv_rtc_ops, THIS_MODULE);
if (IS_ERR(pdata->rtc))
return PTR_ERR(pdata->rtc);
+ if (pdata->irq >= 0) {
+ writel(0, pdata->ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ if (devm_request_irq(&pdev->dev, pdata->irq, mv_rtc_interrupt,
+ IRQF_DISABLED | IRQF_SHARED,
+ pdev->name, pdata) < 0) {
+ dev_warn(&pdev->dev, "interrupt not available.\n");
+ pdata->irq = -1;
+ }
+ }
+
return 0;
}
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ if (pdata->irq >= 0)
+ device_init_wakeup(&pdev->dev, 0);
+
rtc_device_unregister(pdata->rtc);
return 0;
}
--- /dev/null
+/*
+ * Copyright (c) 2008-2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/bcd.h>
+
+/* RTC Control Registers */
+#define REG_RTC_INIR 0x00
+#define REG_RTC_AER 0x04
+#define REG_RTC_FCR 0x08
+#define REG_RTC_TLR 0x0C
+#define REG_RTC_CLR 0x10
+#define REG_RTC_TSSR 0x14
+#define REG_RTC_DWR 0x18
+#define REG_RTC_TAR 0x1C
+#define REG_RTC_CAR 0x20
+#define REG_RTC_LIR 0x24
+#define REG_RTC_RIER 0x28
+#define REG_RTC_RIIR 0x2C
+#define REG_RTC_TTR 0x30
+
+#define RTCSET 0x01
+#define AERRWENB 0x10000
+#define INIRRESET 0xa5eb1357
+#define AERPOWERON 0xA965
+#define AERPOWEROFF 0x0000
+#define LEAPYEAR 0x0001
+#define TICKENB 0x80
+#define TICKINTENB 0x0002
+#define ALARMINTENB 0x0001
+#define MODE24 0x0001
+
+struct nuc900_rtc {
+ int irq_num;
+ void __iomem *rtc_reg;
+ struct rtc_device *rtcdev;
+};
+
+struct nuc900_bcd_time {
+ int bcd_sec;
+ int bcd_min;
+ int bcd_hour;
+ int bcd_mday;
+ int bcd_mon;
+ int bcd_year;
+};
+
+static irqreturn_t nuc900_rtc_interrupt(int irq, void *_rtc)
+{
+ struct nuc900_rtc *rtc = _rtc;
+ unsigned long events = 0, rtc_irq;
+
+ rtc_irq = __raw_readl(rtc->rtc_reg + REG_RTC_RIIR);
+
+ if (rtc_irq & ALARMINTENB) {
+ rtc_irq &= ~ALARMINTENB;
+ __raw_writel(rtc_irq, rtc->rtc_reg + REG_RTC_RIIR);
+ events |= RTC_AF | RTC_IRQF;
+ }
+
+ if (rtc_irq & TICKINTENB) {
+ rtc_irq &= ~TICKINTENB;
+ __raw_writel(rtc_irq, rtc->rtc_reg + REG_RTC_RIIR);
+ events |= RTC_UF | RTC_IRQF;
+ }
+
+ rtc_update_irq(rtc->rtcdev, 1, events);
+
+ return IRQ_HANDLED;
+}
+
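+/*
+ * The time/calendar registers are write-protected: write the unlock
+ * pattern to INIR, request access through AER, then poll AER until the
+ * access-enable bit appears.
+ */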
+static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc)
+{
+ unsigned int i;
+ __raw_writel(INIRRESET, nuc900_rtc->rtc_reg + REG_RTC_INIR);
+
+ mdelay(10);
+
+ __raw_writel(AERPOWERON, nuc900_rtc->rtc_reg + REG_RTC_AER);
+
+ for (i = 0; i < 1000; i++) {
+ if (__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB)
+ return 0;
+ }
+
+ if ((__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB) == 0x0)
+ return ERR_PTR(-ENODEV);
+
+ return ERR_PTR(-EPERM);
+}
+
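+/*
+ * The calendar word packs BCD day/month/year (year counted from 2000,
+ * hence the +/-100 against tm_year's 1900 base); the time word packs
+ * BCD seconds/minutes/hours the same way.
+ */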
+static void nuc900_rtc_bcd2bin(unsigned int timereg,
+ unsigned int calreg, struct rtc_time *tm)
+{
+ tm->tm_mday = bcd2bin(calreg >> 0);
+ tm->tm_mon = bcd2bin(calreg >> 8);
+ tm->tm_year = bcd2bin(calreg >> 16) + 100;
+
+ tm->tm_sec = bcd2bin(timereg >> 0);
+ tm->tm_min = bcd2bin(timereg >> 8);
+ tm->tm_hour = bcd2bin(timereg >> 16);
+
+ rtc_valid_tm(tm);
+}
+
+static void nuc900_rtc_bin2bcd(struct rtc_time *settm,
+ struct nuc900_bcd_time *gettm)
+{
+ gettm->bcd_mday = bin2bcd(settm->tm_mday) << 0;
+ gettm->bcd_mon = bin2bcd(settm->tm_mon) << 8;
+ gettm->bcd_year = bin2bcd(settm->tm_year - 100) << 16;
+
+ gettm->bcd_sec = bin2bcd(settm->tm_sec) << 0;
+ gettm->bcd_min = bin2bcd(settm->tm_min) << 8;
+ gettm->bcd_hour = bin2bcd(settm->tm_hour) << 16;
+}
+
+static int nuc900_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+
+ if (enabled)
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
+ (TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
+ else
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
+ (~TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
+
+ return 0;
+}
+
+static int nuc900_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+
+ if (enabled)
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
+ (ALARMINTENB), rtc->rtc_reg + REG_RTC_RIER);
+ else
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
+ (~ALARMINTENB), rtc->rtc_reg + REG_RTC_RIER);
+
+ return 0;
+}
+
+static int nuc900_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ unsigned int timeval, clrval;
+
+ timeval = __raw_readl(rtc->rtc_reg + REG_RTC_TLR);
+ clrval = __raw_readl(rtc->rtc_reg + REG_RTC_CLR);
+
+ nuc900_rtc_bcd2bin(timeval, clrval, tm);
+
+ return 0;
+}
+
+static int nuc900_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ struct nuc900_bcd_time gettm;
+ unsigned long val;
+ int *err;
+
+ nuc900_rtc_bin2bcd(tm, &gettm);
+
+ err = check_rtc_access_enable(rtc);
+ if (IS_ERR(err))
+ return PTR_ERR(err);
+
+ val = gettm.bcd_mday | gettm.bcd_mon | gettm.bcd_year;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_CLR);
+
+ val = gettm.bcd_sec | gettm.bcd_min | gettm.bcd_hour;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_TLR);
+
+ return 0;
+}
+
+static int nuc900_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ unsigned int timeval, carval;
+
+ timeval = __raw_readl(rtc->rtc_reg + REG_RTC_TAR);
+ carval = __raw_readl(rtc->rtc_reg + REG_RTC_CAR);
+
+ nuc900_rtc_bcd2bin(timeval, carval, &alrm->time);
+
+ return 0;
+}
+
+static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ struct nuc900_bcd_time tm;
+ unsigned long val;
+ int *err;
+
+ nuc900_rtc_bin2bcd(&alrm->time, &tm);
+
+ err = check_rtc_access_enable(rtc);
+ if (IS_ERR(err))
+ return PTR_ERR(err);
+
+ val = tm.bcd_mday | tm.bcd_mon | tm.bcd_year;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_CAR);
+
+ val = tm.bcd_sec | tm.bcd_min | tm.bcd_hour;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_TAR);
+
+ return 0;
+}
+
+static struct rtc_class_ops nuc900_rtc_ops = {
+ .read_time = nuc900_rtc_read_time,
+ .set_time = nuc900_rtc_set_time,
+ .read_alarm = nuc900_rtc_read_alarm,
+ .set_alarm = nuc900_rtc_set_alarm,
+ .alarm_irq_enable = nuc900_alarm_irq_enable,
+ .update_irq_enable = nuc900_update_irq_enable,
+};
+
+static int __devinit nuc900_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct nuc900_rtc *nuc900_rtc;
+ int err = 0;
+
+ nuc900_rtc = kzalloc(sizeof(struct nuc900_rtc), GFP_KERNEL);
+ if (!nuc900_rtc) {
+ dev_err(&pdev->dev, "kzalloc nuc900_rtc failed\n");
+ return -ENOMEM;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "platform_get_resource failed\n");
+ err = -ENXIO;
+ goto fail1;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ pdev->name)) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ err = -EBUSY;
+ goto fail1;
+ }
+
+ nuc900_rtc->rtc_reg = ioremap(res->start, resource_size(res));
+ if (!nuc900_rtc->rtc_reg) {
+ dev_err(&pdev->dev, "ioremap rtc_reg failed\n");
+ err = -ENOMEM;
+ goto fail2;
+ }
+
+ nuc900_rtc->irq_num = platform_get_irq(pdev, 0);
+ if (request_irq(nuc900_rtc->irq_num, nuc900_rtc_interrupt,
+ IRQF_DISABLED, "nuc900rtc", nuc900_rtc)) {
+ dev_err(&pdev->dev, "NUC900 RTC request irq failed\n");
+ err = -EBUSY;
+ goto fail3;
+ }
+
+ nuc900_rtc->rtcdev = rtc_device_register(pdev->name, &pdev->dev,
+ &nuc900_rtc_ops, THIS_MODULE);
+ if (IS_ERR(nuc900_rtc->rtcdev)) {
+ dev_err(&pdev->dev, "rtc device register faild\n");
+ err = PTR_ERR(nuc900_rtc->rtcdev);
+ goto fail4;
+ }
+
+ platform_set_drvdata(pdev, nuc900_rtc);
+ __raw_writel(__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_TSSR) | MODE24,
+ nuc900_rtc->rtc_reg + REG_RTC_TSSR);
+
+ return 0;
+
+fail4: free_irq(nuc900_rtc->irq_num, nuc900_rtc);
+fail3: iounmap(nuc900_rtc->rtc_reg);
+fail2: release_mem_region(res->start, resource_size(res));
+fail1: kfree(nuc900_rtc);
+ return err;
+}
+
+static int __devexit nuc900_rtc_remove(struct platform_device *pdev)
+{
+ struct nuc900_rtc *nuc900_rtc = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ rtc_device_unregister(nuc900_rtc->rtcdev);
+ free_irq(nuc900_rtc->irq_num, nuc900_rtc);
+ iounmap(nuc900_rtc->rtc_reg);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(nuc900_rtc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver nuc900_rtc_driver = {
+ .remove = __devexit_p(nuc900_rtc_remove),
+ .driver = {
+ .name = "nuc900-rtc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nuc900_rtc_init(void)
+{
+ return platform_driver_probe(&nuc900_rtc_driver, nuc900_rtc_probe);
+}
+
+static void __exit nuc900_rtc_exit(void)
+{
+ platform_driver_unregister(&nuc900_rtc_driver);
+}
+
+module_init(nuc900_rtc_init);
+module_exit(nuc900_rtc_exit);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("nuc910/nuc920 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-rtc");
#define OMAP_RTC_INTERRUPTS_IT_ALARM (1<<3)
#define OMAP_RTC_INTERRUPTS_IT_TIMER (1<<2)
+static void __iomem *rtc_base;
-#define rtc_read(addr) omap_readb(OMAP_RTC_BASE + (addr))
-#define rtc_write(val, addr) omap_writeb(val, OMAP_RTC_BASE + (addr))
+#define rtc_read(addr) __raw_readb(rtc_base + (addr))
+#define rtc_write(val, addr) __raw_writeb(val, rtc_base + (addr))
/* we rely on the rtc framework to handle locking (rtc->ops_lock),
return -ENOENT;
}
- /* NOTE: using static mapping for RTC registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res && res->start != OMAP_RTC_BASE) {
- pr_debug("%s: RTC registers at %08x, expected %08x\n",
- pdev->name, (unsigned) res->start, OMAP_RTC_BASE);
+ if (!res) {
+ pr_debug("%s: RTC resource data missing\n", pdev->name);
return -ENOENT;
}
- if (res)
- mem = request_mem_region(res->start,
- res->end - res->start + 1,
- pdev->name);
- else
- mem = NULL;
+ mem = request_mem_region(res->start, resource_size(res), pdev->name);
if (!mem) {
pr_debug("%s: RTC registers at %08x are not free\n",
- pdev->name, OMAP_RTC_BASE);
+ pdev->name, res->start);
return -EBUSY;
}
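+ /*
+ * Map the registers from the platform resource instead of the fixed
+ * OMAP_RTC_BASE static mapping, so parts like DA8xx that place the RTC
+ * elsewhere can reuse this driver.
+ */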
+ rtc_base = ioremap(res->start, resource_size(res));
+ if (!rtc_base) {
+ pr_debug("%s: RTC registers can't be mapped\n", pdev->name);
+ goto fail;
+ }
+
rtc = rtc_device_register(pdev->name, &pdev->dev,
&omap_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
pr_debug("%s: can't register RTC device, err %ld\n",
pdev->name, PTR_ERR(rtc));
- goto fail;
+ goto fail0;
}
platform_set_drvdata(pdev, rtc);
dev_set_drvdata(&rtc->dev, mem);
dev_name(&rtc->dev), rtc)) {
pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n",
pdev->name, omap_rtc_timer);
- goto fail0;
+ goto fail1;
}
- if (request_irq(omap_rtc_alarm, rtc_irq, IRQF_DISABLED,
- dev_name(&rtc->dev), rtc)) {
+ if ((omap_rtc_timer != omap_rtc_alarm) &&
+ (request_irq(omap_rtc_alarm, rtc_irq, IRQF_DISABLED,
+ dev_name(&rtc->dev), rtc))) {
pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n",
pdev->name, omap_rtc_alarm);
- goto fail1;
+ goto fail2;
}
/* On boards with split power, RTC_ON_NOFF won't reset the RTC */
return 0;
-fail1:
+fail2:
free_irq(omap_rtc_timer, NULL);
-fail0:
+fail1:
rtc_device_unregister(rtc);
+fail0:
+ iounmap(rtc_base);
fail:
release_resource(mem);
return -EIO;
rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
free_irq(omap_rtc_timer, rtc);
- free_irq(omap_rtc_alarm, rtc);
+
+ if (omap_rtc_timer != omap_rtc_alarm)
+ free_irq(omap_rtc_alarm, rtc);
release_resource(dev_get_drvdata(&rtc->dev));
rtc_device_unregister(rtc);
struct pcf50633_rtc {
int alarm_enabled;
int second_enabled;
+ int alarm_pending;
struct pcf50633 *pcf;
struct rtc_device *rtc_dev;
rtc = dev_get_drvdata(dev);
alrm->enabled = rtc->alarm_enabled;
+ alrm->pending = rtc->alarm_pending;
ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSCA,
PCF50633_TI_EXTENT, &pcf_tm.time[0]);
/* Returns 0 on success */
ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA,
PCF50633_TI_EXTENT, &pcf_tm.time[0]);
+ if (!alrm->enabled)
+ rtc->alarm_pending = 0;
if (!alarm_masked || alrm->enabled)
pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
switch (irq) {
case PCF50633_IRQ_ALARM:
rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
+ rtc->alarm_pending = 1;
break;
case PCF50633_IRQ_SECOND:
rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
+ i2c_set_clientdata(client, pcf8563);
+
pcf8563->rtc = rtc_device_register(pcf8563_driver.driver.name,
&client->dev, &pcf8563_rtc_ops, THIS_MODULE);
goto exit_kfree;
}
- i2c_set_clientdata(client, pcf8563);
-
return 0;
exit_kfree:
if (!pcf8583)
return -ENOMEM;
+ i2c_set_clientdata(client, pcf8583);
+
pcf8583->rtc = rtc_device_register(pcf8583_driver.driver.name,
&client->dev, &pcf8583_rtc_ops, THIS_MODULE);
goto exit_kfree;
}
- i2c_set_clientdata(client, pcf8583);
return 0;
exit_kfree:
switch (cmd) {
case RTC_AIE_OFF:
- __raw_writel(1, ldata->base + RTC_MIS);
+ writel(1, ldata->base + RTC_MIS);
return 0;
case RTC_AIE_ON:
- __raw_writel(0, ldata->base + RTC_MIS);
+ writel(0, ldata->base + RTC_MIS);
return 0;
}
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- rtc_time_to_tm(__raw_readl(ldata->base + RTC_DR), tm);
+ rtc_time_to_tm(readl(ldata->base + RTC_DR), tm);
return 0;
}
struct pl031_local *ldata = dev_get_drvdata(dev);
rtc_tm_to_time(tm, &time);
- __raw_writel(time, ldata->base + RTC_LR);
+ writel(time, ldata->base + RTC_LR);
return 0;
}
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- rtc_time_to_tm(__raw_readl(ldata->base + RTC_MR), &alarm->time);
- alarm->pending = __raw_readl(ldata->base + RTC_RIS);
- alarm->enabled = __raw_readl(ldata->base + RTC_IMSC);
+ rtc_time_to_tm(readl(ldata->base + RTC_MR), &alarm->time);
+ alarm->pending = readl(ldata->base + RTC_RIS);
+ alarm->enabled = readl(ldata->base + RTC_IMSC);
return 0;
}
rtc_tm_to_time(&alarm->time, &time);
- __raw_writel(time, ldata->base + RTC_MR);
- __raw_writel(!alarm->enabled, ldata->base + RTC_MIS);
+ writel(time, ldata->base + RTC_MR);
+ writel(!alarm->enabled, ldata->base + RTC_MIS);
return 0;
}
static struct amba_id pl031_ids[] __initdata = {
{
- .id = 0x00041031,
- .mask = 0x000fffff, },
+ .id = 0x00041031,
+ .mask = 0x000fffff,
+ },
{0, 0},
};
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
- unsigned long baseaddr;
unsigned long last_jiffies;
int irq;
unsigned int irqen;
int alrm_min;
int alrm_hour;
int alrm_mday;
+ spinlock_t lock;
};
static int stk17ta8_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned long irqflags;
u8 flags;
- spin_lock_irqsave(&pdata->rtc->irq_lock, irqflags);
+ spin_lock_irqsave(&pdata->lock, irqflags);
flags = readb(ioaddr + RTC_FLAGS);
writeb(flags | RTC_WRITE, ioaddr + RTC_FLAGS);
writeb(pdata->irqen ? RTC_INTS_AIE : 0, ioaddr + RTC_INTERRUPTS);
readb(ioaddr + RTC_FLAGS); /* clear interrupts */
writeb(flags & ~RTC_WRITE, ioaddr + RTC_FLAGS);
- spin_unlock_irqrestore(&pdata->rtc->irq_lock, irqflags);
+ spin_unlock_irqrestore(&pdata->lock, irqflags);
}
static int stk17ta8_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
- unsigned long events = RTC_IRQF;
+ unsigned long events = 0;
+ spin_lock(&pdata->lock);
/* read and clear interrupt */
- if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF))
- return IRQ_NONE;
- if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
- events |= RTC_UF;
- else
- events |= RTC_AF;
- rtc_update_irq(pdata->rtc, 1, events);
- return IRQ_HANDLED;
+ if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF) {
+ events = RTC_IRQF;
+ if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
+ events |= RTC_UF;
+ else
+ events |= RTC_AF;
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
+ }
+ spin_unlock(&pdata->lock);
+ return events ? IRQ_HANDLED : IRQ_NONE;
}
-static int stk17ta8_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int stk17ta8_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0)
- return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
- switch (cmd) {
- case RTC_AIE_OFF:
- pdata->irqen &= ~RTC_AF;
- stk17ta8_rtc_update_alarm(pdata);
- break;
- case RTC_AIE_ON:
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_AF;
- stk17ta8_rtc_update_alarm(pdata);
- break;
- default:
- return -ENOIOCTLCMD;
- }
+ else
+ pdata->irqen &= ~RTC_AF;
+ stk17ta8_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops stk17ta8_rtc_ops = {
- .read_time = stk17ta8_rtc_read_time,
- .set_time = stk17ta8_rtc_set_time,
- .read_alarm = stk17ta8_rtc_read_alarm,
- .set_alarm = stk17ta8_rtc_set_alarm,
- .ioctl = stk17ta8_rtc_ioctl,
+ .read_time = stk17ta8_rtc_read_time,
+ .set_time = stk17ta8_rtc_set_time,
+ .read_alarm = stk17ta8_rtc_read_alarm,
+ .set_alarm = stk17ta8_rtc_set_alarm,
+ .alarm_irq_enable = stk17ta8_rtc_alarm_irq_enable,
};
static ssize_t stk17ta8_nvram_read(struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
struct resource *res;
unsigned int cal;
unsigned int flags;
struct rtc_plat_data *pdata;
- void __iomem *ioaddr = NULL;
+ void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
- if (!ioaddr) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
+ pdev->name))
+ return -EBUSY;
+ ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
+ if (!ioaddr)
+ return -ENOMEM;
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_PF)
dev_warn(&pdev->dev, "voltage-low detected.\n");
+ spin_lock_init(&pdata->lock);
+ pdata->last_jiffies = jiffies;
+ platform_set_drvdata(pdev, pdata);
if (pdata->irq > 0) {
writeb(0, ioaddr + RTC_INTERRUPTS);
- if (request_irq(pdata->irq, stk17ta8_rtc_interrupt,
+ if (devm_request_irq(&pdev->dev, pdata->irq,
+ stk17ta8_rtc_interrupt,
IRQF_DISABLED | IRQF_SHARED,
pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
}
}
- rtc = rtc_device_register(pdev->name, &pdev->dev,
+ pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
&stk17ta8_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
- pdata->rtc = rtc;
- pdata->last_jiffies = jiffies;
- platform_set_drvdata(pdev, pdata);
+ if (IS_ERR(pdata->rtc))
+ return PTR_ERR(pdata->rtc);
+
ret = sysfs_create_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
if (ret)
- goto out;
- return 0;
- out:
- if (pdata->rtc)
rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0)
- free_irq(pdata->irq, pdev);
- if (ioaddr)
- iounmap(ioaddr);
- if (pdata->baseaddr)
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
return ret;
}
sysfs_remove_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0) {
+ if (pdata->irq > 0)
writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
- free_irq(pdata->irq, pdev);
- }
- iounmap(pdata->ioaddr);
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
return 0;
}
struct tx4939rtc_plat_data {
struct rtc_device *rtc;
struct tx4939_rtc_reg __iomem *rtcreg;
+ spinlock_t lock;
};
static struct tx4939rtc_plat_data *get_tx4939rtc_plat_data(struct device *dev)
buf[3] = secs >> 8;
buf[4] = secs >> 16;
buf[5] = secs >> 24;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
__raw_writel(0, &rtcreg->adr);
for (i = 0; i < 6; i++)
__raw_writel(buf[i], &rtcreg->dat);
ret = tx4939_rtc_cmd(rtcreg,
TX4939_RTCCTL_COMMAND_SETTIME |
(__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
unsigned long sec;
unsigned char buf[6];
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
ret = tx4939_rtc_cmd(rtcreg,
TX4939_RTCCTL_COMMAND_GETTIME |
(__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
if (ret) {
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
__raw_writel(2, &rtcreg->adr);
for (i = 2; i < 6; i++)
buf[i] = __raw_readl(&rtcreg->dat);
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
rtc_time_to_tm(sec, tm);
return rtc_valid_tm(tm);
buf[3] = sec >> 8;
buf[4] = sec >> 16;
buf[5] = sec >> 24;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
__raw_writel(0, &rtcreg->adr);
for (i = 0; i < 6; i++)
__raw_writel(buf[i], &rtcreg->dat);
ret = tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_SETALARM |
(alrm->enabled ? TX4939_RTCCTL_ALME : 0));
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
unsigned char buf[6];
u32 ctl;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
ret = tx4939_rtc_cmd(rtcreg,
TX4939_RTCCTL_COMMAND_GETALARM |
(__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
if (ret) {
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
__raw_writel(2, &rtcreg->adr);
ctl = __raw_readl(&rtcreg->ctl);
alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
rtc_time_to_tm(sec, &alrm->time);
return rtc_valid_tm(&alrm->time);
{
struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev);
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
tx4939_rtc_cmd(pdata->rtcreg,
TX4939_RTCCTL_COMMAND_NOP |
(enabled ? TX4939_RTCCTL_ALME : 0));
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return 0;
}
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
unsigned long events = RTC_IRQF;
- spin_lock(&pdata->rtc->irq_lock);
+ spin_lock(&pdata->lock);
if (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALMD) {
events |= RTC_AF;
tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_NOP);
}
- spin_unlock(&pdata->rtc->irq_lock);
- rtc_update_irq(pdata->rtc, 1, events);
+ spin_unlock(&pdata->lock);
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
return IRQ_HANDLED;
}
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
ssize_t count;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
count++, size--) {
__raw_writel(pos++, &rtcreg->adr);
*buf++ = __raw_readl(&rtcreg->dat);
}
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return count;
}
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
ssize_t count;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
count++, size--) {
__raw_writel(pos++, &rtcreg->adr);
__raw_writel(*buf++, &rtcreg->dat);
}
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return count;
}
if (!pdata->rtcreg)
return -EBUSY;
+ spin_lock_init(&pdata->lock);
tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt,
IRQF_DISABLED, pdev->name, &pdev->dev) < 0)
static int __exit tx4939_rtc_remove(struct platform_device *pdev)
{
struct tx4939rtc_plat_data *pdata = platform_get_drvdata(pdev);
- struct rtc_device *rtc = pdata->rtc;
- spin_lock_irq(&rtc->irq_lock);
- tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
- spin_unlock_irq(&rtc->irq_lock);
sysfs_remove_bin_file(&pdev->dev.kobj, &tx4939_rtc_nvram_attr);
- rtc_device_unregister(rtc);
- platform_set_drvdata(pdev, NULL);
+ rtc_device_unregister(pdata->rtc);
+ spin_lock_irq(&pdata->lock);
+ tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
+ spin_unlock_irq(&pdata->lock);
return 0;
}
{
struct v3020_platform_data *pdata = pdev->dev.platform_data;
struct v3020 *chip;
- struct rtc_device *rtc;
int retval = -EBUSY;
int i;
int temp;
platform_set_drvdata(pdev, chip);
- rtc = rtc_device_register("v3020",
+ chip->rtc = rtc_device_register("v3020",
&pdev->dev, &v3020_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- retval = PTR_ERR(rtc);
+ if (IS_ERR(chip->rtc)) {
+ retval = PTR_ERR(chip->rtc);
goto err_io;
}
- chip->rtc = rtc;
return 0;
if (!res)
return -EBUSY;
- rtc1_base = ioremap(res->start, res->end - res->start + 1);
+ rtc1_base = ioremap(res->start, resource_size(res));
if (!rtc1_base)
return -EBUSY;
goto err_rtc1_iounmap;
}
- rtc2_base = ioremap(res->start, res->end - res->start + 1);
+ rtc2_base = ioremap(res->start, resource_size(res));
if (!rtc2_base) {
retval = -EBUSY;
goto err_rtc1_iounmap;
};
#ifdef CONFIG_PM
-static int wm8350_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+static int wm8350_rtc_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
int ret = 0;
u16 reg;
return ret;
}
-static int wm8350_rtc_resume(struct platform_device *pdev)
+static int wm8350_rtc_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
int ret;
return 0;
}
+static struct dev_pm_ops wm8350_rtc_pm_ops = {
+ .suspend = wm8350_rtc_suspend,
+ .resume = wm8350_rtc_resume,
+};
+
static struct platform_driver wm8350_rtc_driver = {
.probe = wm8350_rtc_probe,
.remove = __devexit_p(wm8350_rtc_remove),
- .suspend = wm8350_rtc_suspend,
- .resume = wm8350_rtc_resume,
.driver = {
.name = "wm8350-rtc",
+ .pm = &wm8350_rtc_pm_ops,
},
};
}
static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
- int datetoo, u8 reg_base, unsigned char alm_enable)
+ u8 reg_base, unsigned char alm_enable)
{
- int i, xfer, nbytes;
- unsigned char buf[8];
+ int i, xfer;
unsigned char rdata[10] = { 0, reg_base };
+ unsigned char *buf = rdata + 2;
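+	/*
+	 * buf aliases the payload part of rdata: the eight CCR values
+	 * are assembled directly behind the 2-byte register address and
+	 * the whole 10-byte block is sent with one i2c_master_send()
+	 * below.
+	 */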
static const unsigned char wel[3] = { 0, X1205_REG_SR,
X1205_SR_WEL };
static const unsigned char diswe[3] = { 0, X1205_REG_SR, 0 };
dev_dbg(&client->dev,
- "%s: secs=%d, mins=%d, hours=%d\n",
- __func__,
- tm->tm_sec, tm->tm_min, tm->tm_hour);
+ "%s: sec=%d min=%d hour=%d mday=%d mon=%d year=%d wday=%d\n",
+ __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday,
+ tm->tm_mon, tm->tm_year, tm->tm_wday);
buf[CCR_SEC] = bin2bcd(tm->tm_sec);
buf[CCR_MIN] = bin2bcd(tm->tm_min);
/* set hour and 24hr bit */
buf[CCR_HOUR] = bin2bcd(tm->tm_hour) | X1205_HR_MIL;
- /* should we also set the date? */
- if (datetoo) {
- dev_dbg(&client->dev,
- "%s: mday=%d, mon=%d, year=%d, wday=%d\n",
- __func__,
- tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+ buf[CCR_MDAY] = bin2bcd(tm->tm_mday);
- buf[CCR_MDAY] = bin2bcd(tm->tm_mday);
+ /* month, 1 - 12 */
+ buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1);
- /* month, 1 - 12 */
- buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1);
-
- /* year, since the rtc epoch*/
- buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
- buf[CCR_WDAY] = tm->tm_wday & 0x07;
- buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
- }
+ /* year, since the rtc epoch*/
+ buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
+ buf[CCR_WDAY] = tm->tm_wday & 0x07;
+ buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
/* If writing alarm registers, set compare bits on registers 0-4 */
if (reg_base < X1205_CCR_BASE)
return -EIO;
}
-
- /* write register's data */
- if (datetoo)
- nbytes = 8;
- else
- nbytes = 3;
- for (i = 0; i < nbytes; i++)
- rdata[2+i] = buf[i];
-
- xfer = i2c_master_send(client, rdata, nbytes+2);
- if (xfer != nbytes+2) {
+ xfer = i2c_master_send(client, rdata, sizeof(rdata));
+ if (xfer != sizeof(rdata)) {
dev_err(&client->dev,
"%s: result=%d addr=%02x, data=%02x\n",
__func__,
memset(&tm, 0, sizeof(tm));
- err = x1205_set_datetime(client, &tm, 1, X1205_CCR_BASE, 0);
+ err = x1205_set_datetime(client, &tm, X1205_CCR_BASE, 0);
if (err < 0)
dev_err(&client->dev, "unable to restart the oscillator\n");
static int x1205_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
return x1205_set_datetime(to_i2c_client(dev),
- &alrm->time, 1, X1205_ALM0_BASE, alrm->enabled);
+ &alrm->time, X1205_ALM0_BASE, alrm->enabled);
}
static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int x1205_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
return x1205_set_datetime(to_i2c_client(dev),
- tm, 1, X1205_CCR_BASE, 0);
+ tm, X1205_CCR_BASE, 0);
}
static int x1205_rtc_proc(struct device *dev, struct seq_file *seq)
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/io.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <asm/irq.h>
#include <asm/system.h>
struct isp1362_ep *ep, u16 len)
{
int ptd_offset = -EINVAL;
- int index;
int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
- int found = -1;
- int last = -1;
+ int found;
BUG_ON(len > epq->buf_size);
epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
BUG_ON(ep->num_ptds != 0);
- for (index = 0; index <= epq->buf_count - num_ptds; index++) {
- if (test_bit(index, &epq->buf_map))
- continue;
- found = index;
- for (last = index + 1; last < index + num_ptds; last++) {
- if (test_bit(last, &epq->buf_map)) {
- found = -1;
- break;
- }
- }
- if (found >= 0)
- break;
- }
- if (found < 0)
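+	/*
+	 * bitmap_find_next_zero_area() looks for num_ptds consecutive
+	 * clear bits in buf_map starting from bit 0; it returns an
+	 * index >= epq->buf_count when no such run exists.
+	 */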
+ found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
+ num_ptds, 0);
+ if (found >= epq->buf_count)
return -EOVERFLOW;
DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
epq->buf_avail -= num_ptds;
BUG_ON(epq->buf_avail > epq->buf_count);
ep->ptd_index = found;
- for (index = found; index < last; index++)
- __set_bit(index, &epq->buf_map);
+ bitmap_set(&epq->buf_map, found, num_ptds);
DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
__func__, epq->name, ep->ptd_index, ep->ptd_offset,
epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
This display is a QVGA 320x240 24-bit RGB display interfaced by an 8-bit wide PPI.
It uses PPI[0..7], PPI_FS1, PPI_FS2 and PPI_CLK.
+config FB_BFIN_LQ035Q1
+ tristate "SHARP LQ035Q1DH02 TFT LCD"
+ depends on FB && BLACKFIN && SPI
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select BFIN_GPTIMERS
+ help
+ This is the framebuffer device driver for a SHARP LQ035Q1DH02 TFT display found on
+ the Blackfin Landscape LCD EZ-Extender Card.
+ This display is a QVGA 320x240 18-bit RGB display interfaced by a 16-bit wide PPI.
+ It uses PPI[0..15], PPI_FS1, PPI_FS2 and PPI_CLK.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin-lq035q1-fb.
config FB_STI
tristate "HP STI frame buffer device support"
obj-$(CONFIG_FB_VGA16) += vga16fb.o
obj-$(CONFIG_FB_OF) += offb.o
obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
+obj-$(CONFIG_FB_BFIN_LQ035Q1) += bfin-lq035q1-fb.o
obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
if (regno > 255)
return 1;
+ if (regno > 255)
+ return 1;
+
switch (external_card_type) {
case IS_VGA:
OUTB(0x3c8, regno);
--- /dev/null
+/*
+ * Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#define DRIVER_NAME "bfin-lq035q1"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/backlight.h>
+#include <linux/lcd.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#include <asm/blackfin.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+#include <asm/gptimers.h>
+
+#include <asm/bfin-lq035q1.h>
+
+#if defined(BF533_FAMILY) || defined(BF538_FAMILY)
+#define TIMER_HSYNC_id TIMER1_id
+#define TIMER_HSYNCbit TIMER1bit
+#define TIMER_HSYNC_STATUS_TRUN TIMER_STATUS_TRUN1
+#define TIMER_HSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL1
+#define TIMER_HSYNC_STATUS_TOVF TIMER_STATUS_TOVF1
+
+#define TIMER_VSYNC_id TIMER2_id
+#define TIMER_VSYNCbit TIMER2bit
+#define TIMER_VSYNC_STATUS_TRUN TIMER_STATUS_TRUN2
+#define TIMER_VSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL2
+#define TIMER_VSYNC_STATUS_TOVF TIMER_STATUS_TOVF2
+#else
+#define TIMER_HSYNC_id TIMER0_id
+#define TIMER_HSYNCbit TIMER0bit
+#define TIMER_HSYNC_STATUS_TRUN TIMER_STATUS_TRUN0
+#define TIMER_HSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL0
+#define TIMER_HSYNC_STATUS_TOVF TIMER_STATUS_TOVF0
+
+#define TIMER_VSYNC_id TIMER1_id
+#define TIMER_VSYNCbit TIMER1bit
+#define TIMER_VSYNC_STATUS_TRUN TIMER_STATUS_TRUN1
+#define TIMER_VSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL1
+#define TIMER_VSYNC_STATUS_TOVF TIMER_STATUS_TOVF1
+#endif
+
+#define LCD_X_RES 320 /* Horizontal Resolution */
+#define LCD_Y_RES 240 /* Vertical Resolution */
+#define DMA_BUS_SIZE 16
+
+#define USE_RGB565_16_BIT_PPI
+
+#ifdef USE_RGB565_16_BIT_PPI
+#define LCD_BPP 16 /* Bits Per Pixel */
+#define CLOCKS_PER_PIX 1
+#define CPLD_PIPELINE_DELAY_COR 0 /* no CPLD in the path */
+#endif
+
+/* Interface 16/18-bit TFT over an 8-bit wide PPI using a small Programmable Logic Device (CPLD)
+ * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
+ */
+
+#ifdef USE_RGB565_8_BIT_PPI
+#define LCD_BPP 16 /* Bits Per Pixel */
+#define CLOCKS_PER_PIX 2
+#define CPLD_PIPELINE_DELAY_COR 3 /* RGB565 */
+#endif
+
+#ifdef USE_RGB888_8_BIT_PPI
+#define LCD_BPP 24 /* Bits Per Pixel */
+#define CLOCKS_PER_PIX 3
+#define CPLD_PIPELINE_DELAY_COR 5 /* RGB888 */
+#endif
+
+ /*
+ * HS and VS timing parameters (all in number of PPI clk ticks)
+ */
+
+#define U_LINE 4 /* Blanking Lines */
+
+#define H_ACTPIX (LCD_X_RES * CLOCKS_PER_PIX) /* active horizontal pixel */
+#define H_PERIOD (336 * CLOCKS_PER_PIX) /* HS period */
+#define H_PULSE (2 * CLOCKS_PER_PIX) /* HS pulse width */
+#define H_START (7 * CLOCKS_PER_PIX + CPLD_PIPELINE_DELAY_COR) /* first valid pixel */
+
+#define V_LINES (LCD_Y_RES + U_LINE) /* total vertical lines */
+#define V_PULSE (2 * CLOCKS_PER_PIX) /* VS pulse width (1-5 H_PERIODs) */
+#define V_PERIOD (H_PERIOD * V_LINES) /* VS period */
+
+#define ACTIVE_VIDEO_MEM_OFFSET ((U_LINE / 2) * LCD_X_RES * (LCD_BPP / 8))
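+
+/*
+ * Worked example for the default USE_RGB565_16_BIT_PPI setup
+ * (CLOCKS_PER_PIX = 1): H_ACTPIX = 320, H_PERIOD = 336 and
+ * V_LINES = 240 + 4 = 244, so one frame takes V_PERIOD =
+ * 336 * 244 = 81984 PPI clock ticks, and ACTIVE_VIDEO_MEM_OFFSET =
+ * (4 / 2) * 320 * (16 / 8) = 1280 bytes of blanking in front of the
+ * visible frame.
+ */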
+
+#define BFIN_LCD_NBR_PALETTE_ENTRIES 256
+
+#define PPI_TX_MODE 0x2
+#define PPI_XFER_TYPE_11 0xC
+#define PPI_PORT_CFG_01 0x10
+#define PPI_POLS_1 0x8000
+
+#if (CLOCKS_PER_PIX > 1)
+#define PPI_PMODE (DLEN_8 | PACK_EN)
+#else
+#define PPI_PMODE (DLEN_16)
+#endif
+
+#define LQ035_INDEX 0x74
+#define LQ035_DATA 0x76
+
+#define LQ035_DRIVER_OUTPUT_CTL 0x1
+#define LQ035_SHUT_CTL 0x11
+
+#define LQ035_DRIVER_OUTPUT_MASK (LQ035_LR | LQ035_TB | LQ035_BGR | LQ035_REV)
+#define LQ035_DRIVER_OUTPUT_DEFAULT (0x2AEF & ~LQ035_DRIVER_OUTPUT_MASK)
+
+#define LQ035_SHUT (1 << 0) /* Shutdown */
+#define LQ035_ON (0 << 0) /* Normal operation */
+
+struct bfin_lq035q1fb_info {
+ struct fb_info *fb;
+ struct device *dev;
+ struct spi_driver spidrv;
+ struct bfin_lq035q1fb_disp_info *disp_info;
+ unsigned char *fb_buffer; /* RGB Buffer */
+ dma_addr_t dma_handle;
+ int lq035_open_cnt;
+ int irq;
+ spinlock_t lock; /* lock */
+ u32 pseudo_pal[16];
+};
+
+static int nocursor;
+module_param(nocursor, int, 0644);
+MODULE_PARM_DESC(nocursor, "cursor enable/disable");
+
+struct spi_control {
+ unsigned short mode;
+};
+
+static int lq035q1_control(struct spi_device *spi, unsigned char reg, unsigned short value)
+{
+ int ret;
+ u8 regs[3] = { LQ035_INDEX, 0, 0 };
+ u8 dat[3] = { LQ035_DATA, 0, 0 };
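+	/*
+	 * The panel is programmed with two 3-byte SPI transfers: an
+	 * index write selecting the register, then a data write
+	 * carrying the 16-bit value MSB first.
+	 */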
+
+ if (!spi)
+ return -ENODEV;
+
+ regs[2] = reg;
+ dat[1] = value >> 8;
+ dat[2] = value & 0xFF;
+
+ ret = spi_write(spi, regs, ARRAY_SIZE(regs));
+ ret |= spi_write(spi, dat, ARRAY_SIZE(dat));
+ return ret;
+}
+
+static int __devinit lq035q1_spidev_probe(struct spi_device *spi)
+{
+ int ret;
+ struct spi_control *ctl;
+ struct bfin_lq035q1fb_info *info = container_of(spi->dev.driver,
+ struct bfin_lq035q1fb_info,
+ spidrv.driver);
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+
+ if (!ctl)
+ return -ENOMEM;
+
+ ctl->mode = (info->disp_info->mode &
+ LQ035_DRIVER_OUTPUT_MASK) | LQ035_DRIVER_OUTPUT_DEFAULT;
+
+ ret = lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_ON);
+ ret |= lq035q1_control(spi, LQ035_DRIVER_OUTPUT_CTL, ctl->mode);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, ctl);
+
+ return 0;
+}
+
+static int lq035q1_spidev_remove(struct spi_device *spi)
+{
+ return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
+}
+
+#ifdef CONFIG_PM
+static int lq035q1_spidev_suspend(struct spi_device *spi, pm_message_t state)
+{
+ return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
+}
+
+static int lq035q1_spidev_resume(struct spi_device *spi)
+{
+ int ret;
+ struct spi_control *ctl = spi_get_drvdata(spi);
+
+ ret = lq035q1_control(spi, LQ035_DRIVER_OUTPUT_CTL, ctl->mode);
+ if (ret)
+ return ret;
+
+ return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_ON);
+}
+#else
+# define lq035q1_spidev_suspend NULL
+# define lq035q1_spidev_resume NULL
+#endif
+
+/* Power down all displays on reboot, poweroff or halt */
+static void lq035q1_spidev_shutdown(struct spi_device *spi)
+{
+ lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
+}
+
+static int lq035q1_backlight(struct bfin_lq035q1fb_info *info, unsigned arg)
+{
+ if (info->disp_info->use_bl)
+ gpio_set_value(info->disp_info->gpio_bl, arg);
+
+ return 0;
+}
+
+static void bfin_lq035q1_config_ppi(struct bfin_lq035q1fb_info *fbi)
+{
+ bfin_write_PPI_DELAY(H_START);
+ bfin_write_PPI_COUNT(H_ACTPIX - 1);
+ bfin_write_PPI_FRAME(V_LINES);
+
+ bfin_write_PPI_CONTROL(PPI_TX_MODE | /* output mode , PORT_DIR */
+ PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */
+ PPI_PORT_CFG_01 | /* two frame sync PORT_CFG */
+ PPI_PMODE | /* 8/16 bit data length / PACK_EN? */
+ PPI_POLS_1); /* falling edge syncs POLS */
+}
+
+static inline void bfin_lq035q1_disable_ppi(void)
+{
+ bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() & ~PORT_EN);
+}
+
+static inline void bfin_lq035q1_enable_ppi(void)
+{
+ bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);
+}
+
+static void bfin_lq035q1_start_timers(void)
+{
+ enable_gptimers(TIMER_VSYNCbit | TIMER_HSYNCbit);
+}
+
+static void bfin_lq035q1_stop_timers(void)
+{
+ disable_gptimers(TIMER_HSYNCbit | TIMER_VSYNCbit);
+
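+	/* The TIMER_STATUS bits are sticky (write-1-to-clear); writing
+	 * them back clears the latched run/interrupt/overflow flags so
+	 * the next start begins from a clean state. */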
+ set_gptimer_status(0, TIMER_HSYNC_STATUS_TRUN | TIMER_VSYNC_STATUS_TRUN |
+ TIMER_HSYNC_STATUS_TIMIL | TIMER_VSYNC_STATUS_TIMIL |
+ TIMER_HSYNC_STATUS_TOVF | TIMER_VSYNC_STATUS_TOVF);
+
+}
+
+static void bfin_lq035q1_init_timers(void)
+{
+
+ bfin_lq035q1_stop_timers();
+
+ set_gptimer_period(TIMER_HSYNC_id, H_PERIOD);
+ set_gptimer_pwidth(TIMER_HSYNC_id, H_PULSE);
+ set_gptimer_config(TIMER_HSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
+ TIMER_TIN_SEL | TIMER_CLK_SEL|
+ TIMER_EMU_RUN);
+
+ set_gptimer_period(TIMER_VSYNC_id, V_PERIOD);
+ set_gptimer_pwidth(TIMER_VSYNC_id, V_PULSE);
+ set_gptimer_config(TIMER_VSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
+ TIMER_TIN_SEL | TIMER_CLK_SEL |
+ TIMER_EMU_RUN);
+
+}
+
+static void bfin_lq035q1_config_dma(struct bfin_lq035q1fb_info *fbi)
+{
+
+ set_dma_config(CH_PPI,
+ set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO,
+ INTR_DISABLE, DIMENSION_2D,
+ DATA_SIZE_16,
+ DMA_NOSYNC_KEEP_DMA_BUF));
+ set_dma_x_count(CH_PPI, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE);
+ set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8);
+ set_dma_y_count(CH_PPI, V_LINES);
+
+ set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8);
+ set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer);
+
+}
+
+#if (CLOCKS_PER_PIX == 1)
+static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
+ P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
+ P_PPI0_D6, P_PPI0_D7, P_PPI0_D8,
+ P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
+ P_PPI0_D12, P_PPI0_D13, P_PPI0_D14,
+ P_PPI0_D15, 0};
+#else
+static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
+ P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
+ P_PPI0_D6, P_PPI0_D7, 0};
+#endif
+
+static inline void bfin_lq035q1_free_ports(void)
+{
+ peripheral_free_list(ppi0_req_16);
+ if (ANOMALY_05000400)
+ gpio_free(P_IDENT(P_PPI0_FS3));
+}
+
+static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev)
+{
+ /* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode:
+ * Drive PPI_FS3 Low
+ */
+ if (ANOMALY_05000400) {
+ int ret = gpio_request(P_IDENT(P_PPI0_FS3), "PPI_FS3");
+ if (ret)
+ return ret;
+ gpio_direction_output(P_IDENT(P_PPI0_FS3), 0);
+ }
+
+ if (peripheral_request_list(ppi0_req_16, DRIVER_NAME)) {
+ dev_err(&pdev->dev, "requesting peripherals failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int bfin_lq035q1_fb_open(struct fb_info *info, int user)
+{
+ struct bfin_lq035q1fb_info *fbi = info->par;
+
+ spin_lock(&fbi->lock);
+ fbi->lq035_open_cnt++;
+
+ if (fbi->lq035_open_cnt <= 1) {
+
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+
+ bfin_lq035q1_config_dma(fbi);
+ bfin_lq035q1_config_ppi(fbi);
+ bfin_lq035q1_init_timers();
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_lq035q1_enable_ppi();
+ bfin_lq035q1_start_timers();
+ lq035q1_backlight(fbi, 1);
+ }
+
+ spin_unlock(&fbi->lock);
+
+ return 0;
+}
+
+static int bfin_lq035q1_fb_release(struct fb_info *info, int user)
+{
+ struct bfin_lq035q1fb_info *fbi = info->par;
+
+ spin_lock(&fbi->lock);
+
+ fbi->lq035_open_cnt--;
+
+ if (fbi->lq035_open_cnt <= 0) {
+ lq035q1_backlight(fbi, 0);
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+ disable_dma(CH_PPI);
+ bfin_lq035q1_stop_timers();
+ }
+
+ spin_unlock(&fbi->lock);
+
+ return 0;
+}
+
+static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ switch (var->bits_per_pixel) {
+#if (LCD_BPP == 24)
+ case 24:/* TRUECOLOUR, 16m */
+#else
+ case 16:/* DIRECTCOLOUR, 64k */
+#endif
+ var->red.offset = info->var.red.offset;
+ var->green.offset = info->var.green.offset;
+ var->blue.offset = info->var.blue.offset;
+ var->red.length = info->var.red.length;
+ var->green.length = info->var.green.length;
+ var->blue.length = info->var.blue.length;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->transp.msb_right = 0;
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ break;
+ default:
+ pr_debug("%s: depth not supported: %u BPP\n", __func__,
+ var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ if (info->var.xres != var->xres || info->var.yres != var->yres ||
+ info->var.xres_virtual != var->xres_virtual ||
+ info->var.yres_virtual != var->yres_virtual) {
+ pr_debug("%s: Resolution not supported: X%u x Y%u \n",
+ __func__, var->xres, var->yres);
+ return -EINVAL;
+ }
+
+ /*
+ * Memory limit
+ */
+
+ if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
+ pr_debug("%s: Memory Limit requested yres_virtual = %u\n",
+ __func__, var->yres_virtual);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int bfin_lq035q1_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
+{
+ if (nocursor)
+ return 0;
+ else
+ return -EINVAL; /* just to force soft_cursor() call */
+}
+
+static int bfin_lq035q1_fb_setcolreg(u_int regno, u_int red, u_int green,
+ u_int blue, u_int transp,
+ struct fb_info *info)
+{
+ if (regno >= BFIN_LCD_NBR_PALETTE_ENTRIES)
+ return -EINVAL;
+
+ if (info->var.grayscale) {
+ /* grayscale = 0.30*R + 0.59*G + 0.11*B */
+ red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
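+		/* 77/256 ~ 0.30, 151/256 ~ 0.59, 28/256 ~ 0.11; the
+		 * weights sum to 256 so full-scale input stays full
+		 * scale after the shift. */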
+ }
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+
+ u32 value;
+ /* Place color in the pseudopalette */
+ if (regno >= 16) /* pseudo_pal has 16 entries */
+ return -EINVAL;
+
+ red >>= (16 - info->var.red.length);
+ green >>= (16 - info->var.green.length);
+ blue >>= (16 - info->var.blue.length);
+
+ value = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ value &= 0xFFFFFF;
+
+ ((u32 *) (info->pseudo_palette))[regno] = value;
+
+ }
+
+ return 0;
+}
+
+static struct fb_ops bfin_lq035q1_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = bfin_lq035q1_fb_open,
+ .fb_release = bfin_lq035q1_fb_release,
+ .fb_check_var = bfin_lq035q1_fb_check_var,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_cursor = bfin_lq035q1_fb_cursor,
+ .fb_setcolreg = bfin_lq035q1_fb_setcolreg,
+};
+
+static irqreturn_t bfin_lq035q1_irq_error(int irq, void *dev_id)
+{
+ /*struct bfin_lq035q1fb_info *info = (struct bfin_lq035q1fb_info *)dev_id;*/
+
+ u16 status = bfin_read_PPI_STATUS();
+ bfin_write_PPI_STATUS(-1);
+
+ if (status) {
+ bfin_lq035q1_disable_ppi();
+ disable_dma(CH_PPI);
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_lq035q1_enable_ppi();
+ bfin_write_PPI_STATUS(-1);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
+{
+ struct bfin_lq035q1fb_info *info;
+ struct fb_info *fbinfo;
+ int ret;
+
+ ret = request_dma(CH_PPI, DRIVER_NAME"_CH_PPI");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "PPI DMA unavailable\n");
+ goto out1;
+ }
+
+ fbinfo = framebuffer_alloc(sizeof(*info), &pdev->dev);
+ if (!fbinfo) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+
+ info = fbinfo->par;
+ info->fb = fbinfo;
+ info->dev = &pdev->dev;
+
+ info->disp_info = pdev->dev.platform_data;
+
+ platform_set_drvdata(pdev, fbinfo);
+
+ strcpy(fbinfo->fix.id, DRIVER_NAME);
+
+ fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
+ fbinfo->fix.type_aux = 0;
+ fbinfo->fix.xpanstep = 0;
+ fbinfo->fix.ypanstep = 0;
+ fbinfo->fix.ywrapstep = 0;
+ fbinfo->fix.accel = FB_ACCEL_NONE;
+ fbinfo->fix.visual = FB_VISUAL_TRUECOLOR;
+
+ fbinfo->var.nonstd = 0;
+ fbinfo->var.activate = FB_ACTIVATE_NOW;
+ fbinfo->var.height = -1;
+ fbinfo->var.width = -1;
+ fbinfo->var.accel_flags = 0;
+ fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
+
+ fbinfo->var.xres = LCD_X_RES;
+ fbinfo->var.xres_virtual = LCD_X_RES;
+ fbinfo->var.yres = LCD_Y_RES;
+ fbinfo->var.yres_virtual = LCD_Y_RES;
+ fbinfo->var.bits_per_pixel = LCD_BPP;
+
+ if (info->disp_info->mode & LQ035_BGR) {
+#if (LCD_BPP == 24)
+ fbinfo->var.red.offset = 0;
+ fbinfo->var.green.offset = 8;
+ fbinfo->var.blue.offset = 16;
+#else
+ fbinfo->var.red.offset = 0;
+ fbinfo->var.green.offset = 5;
+ fbinfo->var.blue.offset = 11;
+#endif
+ } else {
+#if (LCD_BPP == 24)
+ fbinfo->var.red.offset = 16;
+ fbinfo->var.green.offset = 8;
+ fbinfo->var.blue.offset = 0;
+#else
+ fbinfo->var.red.offset = 11;
+ fbinfo->var.green.offset = 5;
+ fbinfo->var.blue.offset = 0;
+#endif
+ }
+
+ fbinfo->var.transp.offset = 0;
+
+#if (LCD_BPP == 24)
+ fbinfo->var.red.length = 8;
+ fbinfo->var.green.length = 8;
+ fbinfo->var.blue.length = 8;
+#else
+ fbinfo->var.red.length = 5;
+ fbinfo->var.green.length = 6;
+ fbinfo->var.blue.length = 5;
+#endif
+
+ fbinfo->var.transp.length = 0;
+
+ fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * LCD_BPP / 8
+ + ACTIVE_VIDEO_MEM_OFFSET;
+
+ fbinfo->fix.line_length = fbinfo->var.xres_virtual *
+ fbinfo->var.bits_per_pixel / 8;
+
+ fbinfo->fbops = &bfin_lq035q1_fb_ops;
+ fbinfo->flags = FBINFO_FLAG_DEFAULT;
+
+ info->fb_buffer =
+ dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
+ GFP_KERNEL);
+
+ if (NULL == info->fb_buffer) {
+ dev_err(&pdev->dev, "couldn't allocate dma buffer\n");
+ ret = -ENOMEM;
+ goto out3;
+ }
+
+ fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
+ fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
+
+ fbinfo->pseudo_palette = &info->pseudo_pal;
+
+ ret = fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate colormap (%d entries)\n",
+ BFIN_LCD_NBR_PALETTE_ENTRIES);
+ goto out4;
+ }
+
+ ret = bfin_lq035q1_request_ports(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request gpio port\n");
+ goto out6;
+ }
+
+ info->irq = platform_get_irq(pdev, 0);
+ if (info->irq < 0) {
+ ret = -EINVAL;
+ goto out7;
+ }
+
+ ret = request_irq(info->irq, bfin_lq035q1_irq_error, IRQF_DISABLED,
+ DRIVER_NAME" PPI ERROR", info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to request PPI ERROR IRQ\n");
+ goto out7;
+ }
+
+ info->spidrv.driver.name = DRIVER_NAME"-spi";
+ info->spidrv.probe = lq035q1_spidev_probe;
+ info->spidrv.remove = __devexit_p(lq035q1_spidev_remove);
+ info->spidrv.shutdown = lq035q1_spidev_shutdown;
+ info->spidrv.suspend = lq035q1_spidev_suspend;
+ info->spidrv.resume = lq035q1_spidev_resume;
+
+ ret = spi_register_driver(&info->spidrv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't register SPI Interface\n");
+ goto out8;
+ }
+
+ if (info->disp_info->use_bl) {
+ ret = gpio_request(info->disp_info->gpio_bl, "LQ035 Backlight");
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request GPIO %d\n",
+ info->disp_info->gpio_bl);
+ goto out9;
+ }
+ gpio_direction_output(info->disp_info->gpio_bl, 0);
+ }
+
+ ret = register_framebuffer(fbinfo);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register framebuffer\n");
+ goto out10;
+ }
+
+ dev_info(&pdev->dev, "%dx%d %d-bit RGB FrameBuffer initialized\n",
+ LCD_X_RES, LCD_Y_RES, LCD_BPP);
+
+ return 0;
+
+ out10:
+ if (info->disp_info->use_bl)
+ gpio_free(info->disp_info->gpio_bl);
+ out9:
+ spi_unregister_driver(&info->spidrv);
+ out8:
+ free_irq(info->irq, info);
+ out7:
+ bfin_lq035q1_free_ports();
+ out6:
+ fb_dealloc_cmap(&fbinfo->cmap);
+ out4:
+ dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
+ info->dma_handle);
+ out3:
+ framebuffer_release(fbinfo);
+ out2:
+ free_dma(CH_PPI);
+ out1:
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int __devexit bfin_lq035q1_remove(struct platform_device *pdev)
+{
+ struct fb_info *fbinfo = platform_get_drvdata(pdev);
+ struct bfin_lq035q1fb_info *info = fbinfo->par;
+
+ if (info->disp_info->use_bl)
+ gpio_free(info->disp_info->gpio_bl);
+
+ spi_unregister_driver(&info->spidrv);
+
+ unregister_framebuffer(fbinfo);
+
+ free_dma(CH_PPI);
+ free_irq(info->irq, info);
+
+ if (info->fb_buffer != NULL)
+ dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
+ info->dma_handle);
+
+ fb_dealloc_cmap(&fbinfo->cmap);
+
+ bfin_lq035q1_free_ports();
+
+ platform_set_drvdata(pdev, NULL);
+ framebuffer_release(fbinfo);
+
+ dev_info(&pdev->dev, "unregistered LCD driver\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bfin_lq035q1_suspend(struct device *dev)
+{
+ struct fb_info *fbinfo = dev_get_drvdata(dev);
+ struct bfin_lq035q1fb_info *info = fbinfo->par;
+
+ if (info->lq035_open_cnt) {
+ lq035q1_backlight(info, 0);
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+ disable_dma(CH_PPI);
+ bfin_lq035q1_stop_timers();
+ bfin_write_PPI_STATUS(-1);
+ }
+
+ return 0;
+}
+
+static int bfin_lq035q1_resume(struct device *dev)
+{
+ struct fb_info *fbinfo = dev_get_drvdata(dev);
+ struct bfin_lq035q1fb_info *info = fbinfo->par;
+
+ if (info->lq035_open_cnt) {
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+
+ bfin_lq035q1_config_dma(info);
+ bfin_lq035q1_config_ppi(info);
+ bfin_lq035q1_init_timers();
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_lq035q1_enable_ppi();
+ bfin_lq035q1_start_timers();
+ lq035q1_backlight(info, 1);
+ }
+
+ return 0;
+}
+
+static struct dev_pm_ops bfin_lq035q1_dev_pm_ops = {
+ .suspend = bfin_lq035q1_suspend,
+ .resume = bfin_lq035q1_resume,
+};
+#endif
+
+static struct platform_driver bfin_lq035q1_driver = {
+ .probe = bfin_lq035q1_probe,
+ .remove = __devexit_p(bfin_lq035q1_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+#ifdef CONFIG_PM
+ .pm = &bfin_lq035q1_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init bfin_lq035q1_driver_init(void)
+{
+ return platform_driver_register(&bfin_lq035q1_driver);
+}
+module_init(bfin_lq035q1_driver_init);
+
+static void __exit bfin_lq035q1_driver_cleanup(void)
+{
+ platform_driver_unregister(&bfin_lq035q1_driver);
+}
+module_exit(bfin_lq035q1_driver_cleanup);
+
+MODULE_DESCRIPTION("Blackfin TFT LCD Driver");
+MODULE_LICENSE("GPL");
fbinfo->var.nonstd = 0;
fbinfo->var.activate = FB_ACTIVATE_NOW;
- fbinfo->var.height = -1;
- fbinfo->var.width = -1;
+ fbinfo->var.height = 53;
+ fbinfo->var.width = 70;
fbinfo->var.accel_flags = 0;
fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
#ifdef CONFIG_PM
static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state)
{
- bfin_t350mcqb_disable_ppi();
- disable_dma(CH_PPI);
- bfin_write_PPI_STATUS(0xFFFF);
+ struct fb_info *fbinfo = platform_get_drvdata(pdev);
+ struct bfin_t350mcqbfb_info *fbi = fbinfo->par;
+
+ if (fbi->lq043_open_cnt) {
+ bfin_t350mcqb_disable_ppi();
+ disable_dma(CH_PPI);
+ bfin_t350mcqb_stop_timers();
+ bfin_write_PPI_STATUS(-1);
+ }
+
return 0;
}
static int bfin_t350mcqb_resume(struct platform_device *pdev)
{
- enable_dma(CH_PPI);
- bfin_t350mcqb_enable_ppi();
+ struct fb_info *fbinfo = platform_get_drvdata(pdev);
+ struct bfin_t350mcqbfb_info *fbi = fbinfo->par;
+
+ if (fbi->lq043_open_cnt) {
+ bfin_t350mcqb_config_dma(fbi);
+ bfin_t350mcqb_config_ppi(fbi);
+ bfin_t350mcqb_init_timers();
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_t350mcqb_enable_ppi();
+ bfin_t350mcqb_start_timers();
+ }
return 0;
}
*
* Framebuffer driver for the CLPS7111 and EP7212 processors.
*/
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/init.h>
#define CMAP_MAX_SIZE 16
-/* The /proc entry for the backlight. */
-static struct proc_dir_entry *clps7111fb_backlight_proc_entry = NULL;
-
-static int clps7111fb_proc_backlight_read(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-static int clps7111fb_proc_backlight_write(struct file *file,
- const char *buffer, unsigned long count, void *data);
-
/*
* LCD AC Prescale. This comes from the LCD panel manufacturers specifications.
* This determines how many clocks + 1 of CL1 before the M signal toggles.
.fb_imageblit = cfb_imageblit,
};
-static int
-clps7111fb_proc_backlight_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int backlight_proc_show(struct seq_file *m, void *v)
{
- /* We need at least two characters, one for the digit, and one for
- * the terminating NULL. */
- if (count < 2)
- return -EINVAL;
-
if (machine_is_edb7211()) {
- return sprintf(page, "%d\n",
+ seq_printf(m, "%d\n",
(clps_readb(PDDR) & EDB_PD3_LCDBL) ? 1 : 0);
}
return 0;
}
-static int
-clps7111fb_proc_backlight_write(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int backlight_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, backlight_proc_show, NULL);
+}
+
+static ssize_t backlight_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
unsigned char char_value;
int value;
return count;
}
+static const struct file_operations backlight_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = backlight_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = backlight_proc_write,
+};
+
static void __init clps711x_guess_lcd_params(struct fb_info *info)
{
unsigned int lcdcon, syscon, size;
fb_alloc_cmap(&cfb->cmap, CMAP_MAX_SIZE, 0);
- /* Register the /proc entries. */
- clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444,
- NULL);
- if (clps7111fb_backlight_proc_entry == NULL) {
+ if (!proc_create("backlight", 0444, NULL, &backlight_proc_fops)) {
printk("Couldn't create the /proc entry for the backlight.\n");
return -EINVAL;
}
- clps7111fb_backlight_proc_entry->read_proc =
- &clps7111fb_proc_backlight_read;
- clps7111fb_backlight_proc_entry->write_proc =
- &clps7111fb_proc_backlight_write;
-
/*
* Power up the LCD
*/
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/console.h>
#include <video/da8xx-fb.h>
#define DRIVER_NAME "da8xx_lcdc"
unsigned short pseudo_palette[16];
unsigned int databuf_sz;
unsigned int palette_sz;
+ unsigned int pxl_clk;
+ int blank;
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
+ void (*panel_power_ctrl)(int);
};
/* Variable Screen Information */
int vfp; /* Vertical front porch */
int vbp; /* Vertical back porch */
int vsw; /* Vertical Sync Pulse Width */
- int pxl_clk; /* Pixel clock */
+ unsigned int pxl_clk; /* Pixel clock */
unsigned char invert_pxl_clk; /* Invert Pixel clock */
};
.vfp = 2,
.vbp = 2,
.vsw = 0,
- .pxl_clk = 0x10,
+ .pxl_clk = 4608000,
.invert_pxl_clk = 1,
},
/* Sharp LK043T1DG01 */
.vfp = 2,
.vbp = 2,
.vsw = 10,
- .pxl_clk = 0x12,
+ .pxl_clk = 7833600,
.invert_pxl_clk = 0,
},
};
+/* Enable the Raster Engine of the LCD Controller */
+static inline void lcd_enable_raster(void)
+{
+ u32 reg;
+
+ reg = lcdc_read(LCD_RASTER_CTRL_REG);
+ if (!(reg & LCD_RASTER_ENABLE))
+ lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+}
+
/* Disable the Raster Engine of the LCD Controller */
-static void lcd_disable_raster(struct da8xx_fb_par *par)
+static inline void lcd_disable_raster(void)
{
u32 reg;
static void lcd_reset(struct da8xx_fb_par *par)
{
/* Disable the Raster if previously Enabled */
- if (lcdc_read(LCD_RASTER_CTRL_REG) & LCD_RASTER_ENABLE)
- lcd_disable_raster(par);
+ lcd_disable_raster();
/* DMA has to be disabled */
lcdc_write(0, LCD_DMA_CTRL_REG);
lcdc_write(0, LCD_RASTER_CTRL_REG);
}
+static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
+{
+ unsigned int lcd_clk, div;
+
+ lcd_clk = clk_get_rate(par->lcdc_clk);
+ div = lcd_clk / par->pxl_clk;
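+	/*
+	 * The divider is rounded down, so the resulting pixel clock is
+	 * at or slightly above the requested rate; e.g. a hypothetical
+	 * 120 MHz LCDC clock with the 7833600 Hz panel clock above
+	 * gives div = 15, i.e. an 8 MHz pixel clock.
+	 */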
+
+ /* Configure the LCD clock divisor. */
+ lcdc_write(LCD_CLK_DIVISOR(div) |
+ (LCD_RASTER_MODE & 0x1), LCD_CTRL_REG);
+}
+
static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
struct da8xx_panel *panel)
{
lcd_reset(par);
- /* Configure the LCD clock divisor. */
- lcdc_write(LCD_CLK_DIVISOR(panel->pxl_clk) |
- (LCD_RASTER_MODE & 0x1), LCD_CTRL_REG);
+ /* Calculate the divider */
+ lcd_calc_clk_divider(par);
if (panel->invert_pxl_clk)
lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) |
static irqreturn_t lcdc_irq_handler(int irq, void *arg)
{
u32 stat = lcdc_read(LCD_STAT_REG);
- u32 reg;
if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
- reg = lcdc_read(LCD_RASTER_CTRL_REG);
- lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+ lcd_disable_raster();
lcdc_write(stat, LCD_STAT_REG);
- lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+ lcd_enable_raster();
} else
lcdc_write(stat, LCD_STAT_REG);
return err;
}
+#ifdef CONFIG_CPU_FREQ
+static int lcd_da8xx_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct da8xx_fb_par *par;
+
+ par = container_of(nb, struct da8xx_fb_par, freq_transition);
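+	/*
+	 * A CPU frequency change can also change the LCDC input clock,
+	 * which would leave the programmed pixel clock divider stale:
+	 * stop the raster across the transition and recompute the
+	 * divider for the new rate before re-enabling it.
+	 */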
+ if (val == CPUFREQ_PRECHANGE) {
+ lcd_disable_raster();
+ } else if (val == CPUFREQ_POSTCHANGE) {
+ lcd_calc_clk_divider(par);
+ lcd_enable_raster();
+ }
+
+ return 0;
+}
+
+static inline int lcd_da8xx_cpufreq_register(struct da8xx_fb_par *par)
+{
+ par->freq_transition.notifier_call = lcd_da8xx_cpufreq_transition;
+
+ return cpufreq_register_notifier(&par->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par)
+{
+ cpufreq_unregister_notifier(&par->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+#endif
+
static int __devexit fb_remove(struct platform_device *dev)
{
struct fb_info *info = dev_get_drvdata(&dev->dev);
if (info) {
struct da8xx_fb_par *par = info->par;
- if (lcdc_read(LCD_RASTER_CTRL_REG) & LCD_RASTER_ENABLE)
- lcd_disable_raster(par);
+#ifdef CONFIG_CPU_FREQ
+ lcd_da8xx_cpufreq_deregister(par);
+#endif
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(0);
+
+ lcd_disable_raster();
lcdc_write(0, LCD_RASTER_CTRL_REG);
/* disable DMA */
return 0;
}
+static int cfb_blank(int blank, struct fb_info *info)
+{
+ struct da8xx_fb_par *par = info->par;
+ int ret = 0;
+
+ if (par->blank == blank)
+ return 0;
+
+ par->blank = blank;
+ switch (blank) {
+ case FB_BLANK_UNBLANK:
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(1);
+
+ lcd_enable_raster();
+ break;
+ case FB_BLANK_POWERDOWN:
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(0);
+
+ lcd_disable_raster();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static struct fb_ops da8xx_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = fb_check_var,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
+ .fb_blank = cfb_blank,
};
static int __init fb_probe(struct platform_device *device)
}
par = da8xx_fb_info->par;
+ par->lcdc_clk = fb_clk;
+ par->pxl_clk = lcdc_info->pxl_clk;
+ if (fb_pdata->panel_power_ctrl) {
+ par->panel_power_ctrl = fb_pdata->panel_power_ctrl;
+ par->panel_power_ctrl(1);
+ }
if (lcd_init(par, lcd_cfg, lcdc_info) < 0) {
dev_err(&device->dev, "lcd_init failed\n");
da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz;
da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
- par->lcdc_clk = fb_clk;
-
par->irq = platform_get_irq(device, 0);
if (par->irq < 0) {
ret = -ENOENT;
goto err_dealloc_cmap;
}
+#ifdef CONFIG_CPU_FREQ
+ ret = lcd_da8xx_cpufreq_register(par);
+ if (ret) {
+ dev_err(&device->dev, "failed to register cpufreq\n");
+ goto err_cpu_freq;
+ }
+#endif
+
/* enable raster engine */
- lcdc_write(lcdc_read(LCD_RASTER_CTRL_REG) |
- LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+ lcd_enable_raster();
return 0;
+#ifdef CONFIG_CPU_FREQ
+err_cpu_freq:
+ unregister_framebuffer(da8xx_fb_info);
+#endif
+
err_dealloc_cmap:
fb_dealloc_cmap(&da8xx_fb_info->cmap);
#ifdef CONFIG_PM
static int fb_suspend(struct platform_device *dev, pm_message_t state)
{
- return -EBUSY;
+ struct fb_info *info = platform_get_drvdata(dev);
+ struct da8xx_fb_par *par = info->par;
+
+ acquire_console_sem();
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(0);
+
+ fb_set_suspend(info, 1);
+ lcd_disable_raster();
+ clk_disable(par->lcdc_clk);
+ release_console_sem();
+
+ return 0;
}
static int fb_resume(struct platform_device *dev)
{
- return -EBUSY;
+ struct fb_info *info = platform_get_drvdata(dev);
+ struct da8xx_fb_par *par = info->par;
+
+ acquire_console_sem();
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(1);
+
+ clk_enable(par->lcdc_clk);
+ lcd_enable_raster();
+ fb_set_suspend(info, 0);
+ release_console_sem();
+
+ return 0;
}
#else
#define fb_suspend NULL
switch (info->fix.visual) {
case FB_VISUAL_PSEUDOCOLOR:
+ if (regno > 255)
+ return 1;
rgb = ((red & 0xff00) << 8) | (green & 0xff00) |
((blue & 0xff00) >> 8);
+/* Geode LX framebuffer driver
+ *
+ * Copyright (C) 2006-2007, Advanced Micro Devices,Inc.
+ * Copyright (c) 2008 Andres Salomon <dilinger@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
#ifndef _LXFB_H_
#define _LXFB_H_
*yres = (*xres * 3) >> 2;
}
-void i810fb_encode_registers(const struct fb_var_screeninfo *var,
- struct i810fb_par *par, u32 xres, u32 yres)
+static int i810fb_find_best_mode(u32 xres, u32 yres, u32 pixclock)
{
u32 diff = 0, diff_best = 0xFFFFFFFF, i = 0, i_best = 0;
- u8 hfl;
+ u8 hfl = (u8) ((xres >> 3) - 1);
- hfl = (u8) ((xres >> 3) - 1);
for (i = 0; i < ARRAY_SIZE(std_modes); i++) {
if (std_modes[i].cr01 == hfl) {
- if (std_modes[i].pixclock <= par->regs.pixclock)
- diff = par->regs.pixclock -
- std_modes[i].pixclock;
+ if (std_modes[i].pixclock <= pixclock)
+ diff = pixclock - std_modes[i].pixclock;
if (diff < diff_best) {
i_best = i;
diff_best = diff;
}
}
}
+ return i_best;
+}
+
+void i810fb_encode_registers(const struct fb_var_screeninfo *var,
+ struct i810fb_par *par, u32 xres, u32 yres)
+{
+ u32 i_best = i810fb_find_best_mode(xres, yres, par->regs.pixclock);
+
par->regs = std_modes[i_best];
/* overlay */
void i810fb_fill_var_timings(struct fb_var_screeninfo *var)
{
- struct i810fb_par par;
u32 total, xres, yres;
+ u32 mode, pixclock;
xres = var->xres;
yres = var->yres;
- par.regs.pixclock = 1000000000/var->pixclock;
- i810fb_encode_registers(var, &par, xres, yres);
+ pixclock = 1000000000 / var->pixclock;
+ mode = i810fb_find_best_mode(xres, yres, pixclock);
- total = ((par.regs.cr00 | (par.regs.cr35 & 1) << 8) + 3) << 3;
+ total = (std_modes[mode].cr00 | (std_modes[mode].cr35 & 1) << 8) + 3;
+ total <<= 3;
- var->pixclock = 1000000000/par.regs.pixclock;
- var->right_margin = (par.regs.cr04 << 3) - xres;
- var->hsync_len = ((par.regs.cr05 & 0x1F) -
- (par.regs.cr04 & 0x1F)) << 3;
+ var->pixclock = 1000000000 / std_modes[mode].pixclock;
+ var->right_margin = (std_modes[mode].cr04 << 3) - xres;
+ var->hsync_len = ((std_modes[mode].cr05 & 0x1F) -
+ (std_modes[mode].cr04 & 0x1F)) << 3;
var->left_margin = (total - (xres + var->right_margin +
var->hsync_len));
var->sync = FB_SYNC_ON_GREEN;
- if (~(par.regs.msr & (1 << 6)))
+ if (~(std_modes[mode].msr & (1 << 6)))
var->sync |= FB_SYNC_HOR_HIGH_ACT;
- if (~(par.regs.msr & (1 << 7)))
+ if (~(std_modes[mode].msr & (1 << 7)))
var->sync |= FB_SYNC_VERT_HIGH_ACT;
-
- total = ((par.regs.cr06 | (par.regs.cr30 & 0x0F) << 8)) + 2;
- var->lower_margin = (par.regs.cr10 |
- (par.regs.cr32 & 0x0F) << 8) - yres;
- var->vsync_len = (par.regs.cr11 & 0x0F) - (var->lower_margin & 0x0F);
- var->upper_margin = total - (yres + var->lower_margin +
- var->vsync_len);
+ total = (std_modes[mode].cr06 | (std_modes[mode].cr30 & 0xF) << 8) + 2;
+ var->lower_margin = (std_modes[mode].cr10 |
+ (std_modes[mode].cr32 & 0x0F) << 8) - yres;
+ var->vsync_len = (std_modes[mode].cr11 & 0x0F) -
+ (var->lower_margin & 0x0F);
+ var->upper_margin = total - (yres + var->lower_margin + var->vsync_len);
}
u32 i810_get_watermark(struct fb_var_screeninfo *var,
if (bailearly == 18)
bailout(dinfo);
+ /* read active pipe */
+ dinfo->pipe = intelfbhw_active_pipe(&dinfo->save_state);
+
/* Cursor initialisation */
if (dinfo->hwcursor) {
intelfbhw_cursor_init(dinfo);
}
+/* Check which pipe is connected to an active display plane. */
+int intelfbhw_active_pipe(const struct intelfb_hwstate *hw)
+{
+ int pipe = -1;
+
+ /* keep old default behaviour - prefer PIPE_A */
+ if (hw->disp_b_ctrl & DISPPLANE_PLANE_ENABLE) {
+ pipe = (hw->disp_b_ctrl >> DISPPLANE_SEL_PIPE_SHIFT);
+ pipe &= PIPE_MASK;
+ if (unlikely(pipe == PIPE_A))
+ return PIPE_A;
+ }
+ if (hw->disp_a_ctrl & DISPPLANE_PLANE_ENABLE) {
+ pipe = (hw->disp_a_ctrl >> DISPPLANE_SEL_PIPE_SHIFT);
+ pipe &= PIPE_MASK;
+ if (likely(pipe == PIPE_A))
+ return PIPE_A;
+ }
+ /* Impossible that no pipe is selected - return PIPE_A */
+ WARN_ON(pipe == -1);
+ if (unlikely(pipe == -1))
+ pipe = PIPE_A;
+
+ return pipe;
+}
+
void intelfbhw_setcolreg(struct intelfb_info *dinfo, unsigned regno,
unsigned red, unsigned green, unsigned blue,
unsigned transp)
struct intelfb_hwstate *hw,
struct fb_var_screeninfo *var)
{
- int pipe = PIPE_A;
+ int pipe = intelfbhw_active_pipe(hw);
u32 *dpll, *fp0, *fp1;
u32 m1, m2, n, p1, p2, clock_target, clock;
u32 hsync_start, hsync_end, hblank_start, hblank_end, htotal, hactive;
/* Disable VGA */
hw->vgacntrl |= VGA_DISABLE;
- /* Check whether pipe A or pipe B is enabled. */
- if (hw->pipe_a_conf & PIPECONF_ENABLE)
- pipe = PIPE_A;
- else if (hw->pipe_b_conf & PIPECONF_ENABLE)
- pipe = PIPE_B;
-
/* Set which pipe's registers will be set. */
if (pipe == PIPE_B) {
dpll = &hw->dpll_b;
int intelfbhw_program_mode(struct intelfb_info *dinfo,
const struct intelfb_hwstate *hw, int blank)
{
- int pipe = PIPE_A;
u32 tmp;
const u32 *dpll, *fp0, *fp1, *pipe_conf;
const u32 *hs, *ht, *hb, *vs, *vt, *vb, *ss;
u32 src_size_reg;
u32 count, tmp_val[3];
- /* Assume single pipe, display plane A, analog CRT. */
+ /* Assume single pipe */
#if VERBOSE > 0
DBG_MSG("intelfbhw_program_mode\n");
tmp |= VGA_DISABLE;
OUTREG(VGACNTRL, tmp);
- /* Check whether pipe A or pipe B is enabled. */
- if (hw->pipe_a_conf & PIPECONF_ENABLE)
- pipe = PIPE_A;
- else if (hw->pipe_b_conf & PIPECONF_ENABLE)
- pipe = PIPE_B;
-
- dinfo->pipe = pipe;
+ dinfo->pipe = intelfbhw_active_pipe(hw);
- if (pipe == PIPE_B) {
+ if (dinfo->pipe == PIPE_B) {
dpll = &hw->dpll_b;
fp0 = &hw->fpb0;
fp1 = &hw->fpb1;
extern int intelfbhw_enable_irq(struct intelfb_info *dinfo);
extern void intelfbhw_disable_irq(struct intelfb_info *dinfo);
extern int intelfbhw_wait_for_vsync(struct intelfb_info *dinfo, u32 pipe);
+extern int intelfbhw_active_pipe(const struct intelfb_hwstate *hw);
#endif /* _INTELFBHW_H */
M1064_XDVICLKCTRL_C1DVICLKEN |
M1064_XDVICLKCTRL_DVILOOPCTL |
M1064_XDVICLKCTRL_P1LOOPBWDTCTL;
- matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp);
+ /* Setting this breaks PC systems so don't do it */
+ /* matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); */
matroxfb_DAC_out(minfo, M1064_XPWRCTRL,
xpwrctrl);
/* value to be written into the palette reg. */
unsigned long hw_colorvalue = 0;
+ if (regno > 255)
+ return 1;
+
red >>= 8; /* The cmap fields are 16 bits */
	green >>= 8;	/* wide, but the hardware colormap */
blue >>= 8; /* registers are only 8 bits wide */
# Makefile for the MB862xx framebuffer driver
#
-obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o
+obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o mb862xxfb_accel.o
unsigned long reg, sc;
dev_dbg(par->dev, "%s\n", __func__);
+ if (par->type == BT_CORALP)
+ mb862xxfb_init_accel(fbi, fbi->var.xres);
if (par->pre_init)
return 0;
ptr += sprintf(ptr, "%08x = %08x\n",
reg, inreg(disp, reg));
+ for (reg = 0x400; reg <= 0x410; reg += 4)
+ ptr += sprintf(ptr, "geo %08x = %08x\n",
+ reg, inreg(geo, reg));
+
+ for (reg = 0x400; reg <= 0x410; reg += 4)
+ ptr += sprintf(ptr, "draw %08x = %08x\n",
+ reg, inreg(draw, reg));
+
+ for (reg = 0x440; reg <= 0x450; reg += 4)
+ ptr += sprintf(ptr, "draw %08x = %08x\n",
+ reg, inreg(draw, reg));
+
return ptr - buf;
}
u32 pseudo_palette[16];
};
+extern void mb862xxfb_init_accel(struct fb_info *info, int xres);
+
#if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC)
#error "Select Lime GDC or CoralP/Carmine support, but not both together"
#endif
--- /dev/null
+/*
+ * drivers/mb862xx/mb862xxfb_accel.c
+ *
+ * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver acceleration support
+ *
+ * (C) 2007 Alexander Shishkin <virtuoso@slind.org>
+ * (C) 2009 Valentin Sitdikov <valentin.sitdikov@siemens.com>
+ * (C) 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#if defined(CONFIG_OF)
+#include <linux/of_platform.h>
+#endif
+#include "mb862xxfb.h"
+#include "mb862xx_reg.h"
+#include "mb862xxfb_accel.h"
+
+static void mb862xxfb_write_fifo(u32 count, u32 *data, struct fb_info *info)
+{
+ struct mb862xxfb_par *par = info->par;
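+	/* cached number of free FIFO slots, refreshed from
+	 * GDC_REG_FIFO_COUNT whenever it reaches zero */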
+ static u32 free;
+
+ u32 total = 0;
+ while (total < count) {
+ if (free) {
+ outreg(geo, GDC_GEO_REG_INPUT_FIFO, data[total]);
+ total++;
+ free--;
+ } else {
+ free = (u32) inreg(draw, GDC_REG_FIFO_COUNT);
+ }
+ }
+}
+
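All of the drawing entry points below build on the same command-word layout: a type code in bits 31..24, a sub-command or register selector in bits 23..16, and a count or register index in the low bits. A minimal standalone sketch of that packing (hypothetical helper, not part of the driver):

#include <stdio.h>
#include <stdint.h>

static uint32_t gdc_pack(uint32_t type, uint32_t cmd, uint32_t arg)
{
	return (type << 24) | (cmd << 16) | arg;
}

int main(void)
{
	/* first word of the copyarea sequence below:
	 * GDC_TYPE_SETREGISTER = 0xF1, GDC_REG_MODE_BITMAP = 0x430 */
	printf("0x%08x\n", gdc_pack(0xF1, 1, 0x430));	/* 0xf1010430 */
	return 0;
}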
+static void mb86290fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ __u32 cmd[6];
+
+ cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP;
+ /* Set raster operation */
+ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9);
+ cmd[2] = GDC_TYPE_BLTCOPYP << 24;
+
+ if (area->sx >= area->dx && area->sy >= area->dy)
+ cmd[2] |= GDC_CMD_BLTCOPY_TOP_LEFT << 16;
+ else if (area->sx >= area->dx && area->sy <= area->dy)
+ cmd[2] |= GDC_CMD_BLTCOPY_BOTTOM_LEFT << 16;
+ else if (area->sx <= area->dx && area->sy >= area->dy)
+ cmd[2] |= GDC_CMD_BLTCOPY_TOP_RIGHT << 16;
+ else
+ cmd[2] |= GDC_CMD_BLTCOPY_BOTTOM_RIGHT << 16;
+
+ cmd[3] = (area->sy << 16) | area->sx;
+ cmd[4] = (area->dy << 16) | area->dx;
+ cmd[5] = (area->height << 16) | area->width;
+ mb862xxfb_write_fifo(6, cmd, info);
+}
+
+/*
+ * Fill in the cmd array /GDC FIFO commands/ to draw a 1bit image.
+ * Make sure cmd has enough room!
+ */
+static void mb86290fb_imageblit1(u32 *cmd, u16 step, u16 dx, u16 dy,
+ u16 width, u16 height, u32 fgcolor,
+ u32 bgcolor, const struct fb_image *image,
+ struct fb_info *info)
+{
+ int i;
+ unsigned const char *line;
+ u16 bytes;
+
+ /* set colors and raster operation regs */
+ cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP;
+ /* Set raster operation */
+ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9);
+ cmd[2] =
+ (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_FORE_COLOR << 16);
+ cmd[3] = fgcolor;
+ cmd[4] =
+ (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_BACK_COLOR << 16);
+ cmd[5] = bgcolor;
+
+ i = 0;
+ line = image->data;
+ bytes = (image->width + 7) >> 3;
+
+ /* and the image */
+ cmd[6] = (GDC_TYPE_DRAWBITMAPP << 24) |
+ (GDC_CMD_BITMAP << 16) | (2 + (step * height));
+ cmd[7] = (dy << 16) | dx;
+ cmd[8] = (height << 16) | width;
+
+ while (i < height) {
+ memcpy(&cmd[9 + i * step], line, step << 2);
+#ifdef __LITTLE_ENDIAN
+ {
+ int k = 0;
+ for (k = 0; k < step; k++)
+ cmd[9 + i * step + k] =
+ cpu_to_be32(cmd[9 + i * step + k]);
+ }
+#endif
+ line += bytes;
+ i++;
+ }
+}
+
+/*
+ * Fill in the cmd array /GDC FIFO commands/ to draw a 8bit image.
+ * Make sure cmd has enough room!
+ */
+static void mb86290fb_imageblit8(u32 *cmd, u16 step, u16 dx, u16 dy,
+ u16 width, u16 height, u32 fgcolor,
+ u32 bgcolor, const struct fb_image *image,
+ struct fb_info *info)
+{
+ int i, j;
+ unsigned const char *line, *ptr;
+ u16 bytes;
+
+ cmd[0] = (GDC_TYPE_DRAWBITMAPP << 24) |
+ (GDC_CMD_BLT_DRAW << 16) | (2 + (height * step));
+ cmd[1] = (dy << 16) | dx;
+ cmd[2] = (height << 16) | width;
+
+ i = 0;
+ line = ptr = image->data;
+ bytes = image->width;
+
+ while (i < height) {
+ ptr = line;
+ for (j = 0; j < step; j++) {
+ cmd[3 + i * step + j] =
+ (((u32 *) (info->pseudo_palette))[*ptr]) & 0xffff;
+ ptr++;
+ cmd[3 + i * step + j] |=
+			    ((((u32 *) (info->pseudo_palette))[*ptr]) &
+			     0xffff) << 16;
+ ptr++;
+ }
+
+ line += bytes;
+ i++;
+ }
+}
+
+/*
+ * Fill in the cmd array /GDC FIFO commands/ to draw a 16bit image.
+ * Make sure cmd has enough room!
+ */
+static void mb86290fb_imageblit16(u32 *cmd, u16 step, u16 dx, u16 dy,
+ u16 width, u16 height, u32 fgcolor,
+ u32 bgcolor, const struct fb_image *image,
+ struct fb_info *info)
+{
+ int i;
+ unsigned const char *line;
+ u16 bytes;
+
+ i = 0;
+ line = image->data;
+ bytes = image->width << 1;
+
+ cmd[0] = (GDC_TYPE_DRAWBITMAPP << 24) |
+ (GDC_CMD_BLT_DRAW << 16) | (2 + step * height);
+ cmd[1] = (dy << 16) | dx;
+ cmd[2] = (height << 16) | width;
+
+ while (i < height) {
+ memcpy(&cmd[3 + i * step], line, step);
+ line += bytes;
+ i++;
+ }
+}
+
+static void mb86290fb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ int mdr;
+ u32 *cmd = NULL;
+ void (*cmdfn) (u32 *, u16, u16, u16, u16, u16, u32, u32,
+ const struct fb_image *, struct fb_info *) = NULL;
+ u32 cmdlen;
+ u32 fgcolor = 0, bgcolor = 0;
+ u16 step;
+
+ u16 width = image->width, height = image->height;
+ u16 dx = image->dx, dy = image->dy;
+ int x2, y2, vxres, vyres;
+
+ mdr = (GDC_ROP_COPY << 9);
+ x2 = image->dx + image->width;
+ y2 = image->dy + image->height;
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+ x2 = min(x2, vxres);
+ y2 = min(y2, vyres);
+ width = x2 - dx;
+ height = y2 - dy;
+
+ switch (image->depth) {
+ case 1:
+ step = (width + 31) >> 5;
+ cmdlen = 9 + height * step;
+ cmdfn = mb86290fb_imageblit1;
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ fgcolor =
+ ((u32 *) (info->pseudo_palette))[image->fg_color];
+ bgcolor =
+ ((u32 *) (info->pseudo_palette))[image->bg_color];
+ } else {
+ fgcolor = image->fg_color;
+ bgcolor = image->bg_color;
+ }
+
+ break;
+
+ case 8:
+ step = (width + 1) >> 1;
+ cmdlen = 3 + height * step;
+ cmdfn = mb86290fb_imageblit8;
+ break;
+
+ case 16:
+ step = (width + 1) >> 1;
+ cmdlen = 3 + height * step;
+ cmdfn = mb86290fb_imageblit16;
+ break;
+
+ default:
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ cmd = kmalloc(cmdlen * 4, GFP_DMA);
+ if (!cmd)
+ return cfb_imageblit(info, image);
+ cmdfn(cmd, step, dx, dy, width, height, fgcolor, bgcolor, image, info);
+ mb862xxfb_write_fifo(cmdlen, cmd, info);
+ kfree(cmd);
+}
+
+static void mb86290fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+
+ u32 x2, y2, vxres, vyres, height, width, fg;
+ u32 cmd[7];
+
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+
+ if (!rect->width || !rect->height || rect->dx > vxres
+ || rect->dy > vyres)
+ return;
+
+	/* We could use hardware clipping, but on many cards it can be
+	 * bypassed by writing to the framebuffer directly. */
+ x2 = rect->dx + rect->width;
+ y2 = rect->dy + rect->height;
+ x2 = min(x2, vxres);
+ y2 = min(y2, vyres);
+ width = x2 - rect->dx;
+ height = y2 - rect->dy;
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ fg = ((u32 *) (info->pseudo_palette))[rect->color];
+ else
+ fg = rect->color;
+
+ switch (rect->rop) {
+
+ case ROP_XOR:
+ /* Set raster operation */
+ cmd[1] = (2 << 7) | (GDC_ROP_XOR << 9);
+ break;
+
+ case ROP_COPY:
+ /* Set raster operation */
+ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9);
+ break;
+
+ }
+
+ cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP;
+ /* cmd[1] set earlier */
+ cmd[2] =
+ (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_FORE_COLOR << 16);
+ cmd[3] = fg;
+ cmd[4] = (GDC_TYPE_DRAWRECTP << 24) | (GDC_CMD_BLT_FILL << 16);
+ cmd[5] = (rect->dy << 16) | (rect->dx);
+ cmd[6] = (height << 16) | width;
+
+ mb862xxfb_write_fifo(7, cmd, info);
+}
+
+void mb862xxfb_init_accel(struct fb_info *info, int xres)
+{
+ struct mb862xxfb_par *par = info->par;
+
+ if (info->var.bits_per_pixel == 32) {
+ info->fbops->fb_fillrect = cfb_fillrect;
+ info->fbops->fb_copyarea = cfb_copyarea;
+ info->fbops->fb_imageblit = cfb_imageblit;
+ } else {
+ outreg(disp, GC_L0EM, 3);
+ info->fbops->fb_fillrect = mb86290fb_fillrect;
+ info->fbops->fb_copyarea = mb86290fb_copyarea;
+ info->fbops->fb_imageblit = mb86290fb_imageblit;
+ }
+ outreg(draw, GDC_REG_DRAW_BASE, 0);
+ outreg(draw, GDC_REG_MODE_MISC, 0x8000);
+ outreg(draw, GDC_REG_X_RESOLUTION, xres);
+
+ info->flags |=
+ FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_IMAGEBLIT;
+ info->fix.accel = 0xff; /*FIXME: add right define */
+}
+EXPORT_SYMBOL(mb862xxfb_init_accel);
--- /dev/null
+#ifndef __MB826XXFB_ACCEL_H__
+#define __MB826XXFB_ACCEL_H__
+
+/* registers */
+#define GDC_GEO_REG_INPUT_FIFO 0x00000400L
+
+/* Special Registers */
+#define GDC_REG_CTRL 0x00000400L
+#define GDC_REG_FIFO_STATUS 0x00000404L
+#define GDC_REG_FIFO_COUNT 0x00000408L
+#define GDC_REG_SETUP_STATUS 0x0000040CL
+#define GDC_REG_DDA_STATUS 0x00000410L
+#define GDC_REG_ENGINE_STATUS 0x00000414L
+#define GDC_REG_ERROR_STATUS 0x00000418L
+#define GDC_REG_MODE_MISC 0x00000420L /* MDR0 */
+#define GDC_REG_MODE_LINE 0x00000424L /* MDR1 */
+#define GDC_REG_MODE_POLYGON 0x00000428L /* MDR2 */
+#define GDC_REG_MODE_TEXTURE 0x0000042CL /* MDR3 */
+#define GDC_REG_MODE_BITMAP 0x00000430L /* MDR4 */
+#define GDC_REG_MODE_EXTENSION 0x0000043CL /* MDR7 */
+
+/* Configuration Registers */
+#define GDC_REG_DRAW_BASE 0x00000440L
+#define GDC_REG_X_RESOLUTION 0x00000444L
+#define GDC_REG_Z_BASE 0x00000448L
+#define GDC_REG_TEXTURE_BASE 0x0000044CL
+#define GDC_REG_POLYGON_FLAG_BASE 0x00000450L
+#define GDC_REG_CLIP_XMIN 0x00000454L
+#define GDC_REG_CLIP_XMAX 0x00000458L
+#define GDC_REG_CLIP_YMIN 0x0000045CL
+#define GDC_REG_CLIP_YMAX 0x00000460L
+#define GDC_REG_TEXURE_SIZE 0x00000464L
+#define GDC_REG_TILE_SIZE 0x00000468L
+#define GDC_REG_TEX_BUF_OFFSET 0x0000046CL
+
+/* for MB86293 or later */
+#define GDC_REG_ALPHA_MAP_BASE 0x00000474L /* ABR */
+
+/* Constant Registers */
+#define GDC_REG_FOREGROUND_COLOR 0x00000480L
+#define GDC_REG_BACKGROUND_COLOR 0x00000484L
+#define GDC_REG_ALPHA 0x00000488L
+#define GDC_REG_LINE_PATTERN 0x0000048CL
+#define GDC_REG_TEX_BORDER_COLOR 0x00000494L
+#define GDC_REG_LINE_PATTERN_OFFSET 0x000003E0L
+
+/* Command Code */
+#define GDC_CMD_PIXEL 0x00000000L
+#define GDC_CMD_PIXEL_Z 0x00000001L
+
+#define GDC_CMD_X_VECTOR 0x00000020L
+#define GDC_CMD_Y_VECTOR 0x00000021L
+#define GDC_CMD_X_VECTOR_NOEND 0x00000022L
+#define GDC_CMD_Y_VECTOR_NOEND 0x00000023L
+#define GDC_CMD_X_VECTOR_BLPO 0x00000024L
+#define GDC_CMD_Y_VECTOR_BLPO 0x00000025L
+#define GDC_CMD_X_VECTOR_NOEND_BLPO 0x00000026L
+#define GDC_CMD_Y_VECTOR_NOEND_BLPO 0x00000027L
+#define GDC_CMD_AA_X_VECTOR 0x00000028L
+#define GDC_CMD_AA_Y_VECTOR 0x00000029L
+#define GDC_CMD_AA_X_VECTOR_NOEND 0x0000002AL
+#define GDC_CMD_AA_Y_VECTOR_NOEND 0x0000002BL
+#define GDC_CMD_AA_X_VECTOR_BLPO 0x0000002CL
+#define GDC_CMD_AA_Y_VECTOR_BLPO 0x0000002DL
+#define GDC_CMD_AA_X_VECTOR_NOEND_BLPO 0x0000002EL
+#define GDC_CMD_AA_Y_VECTOR_NOEND_BLPO 0x0000002FL
+
+#define GDC_CMD_0_VECTOR 0x00000030L
+#define GDC_CMD_1_VECTOR 0x00000031L
+#define GDC_CMD_0_VECTOR_NOEND 0x00000032L
+#define GDC_CMD_1_VECTOR_NOEND 0x00000033L
+#define GDC_CMD_0_VECTOR_BLPO 0x00000034L
+#define GDC_CMD_1_VECTOR_BLPO 0x00000035L
+#define GDC_CMD_0_VECTOR_NOEND_BLPO 0x00000036L
+#define GDC_CMD_1_VECTOR_NOEND_BLPO 0x00000037L
+#define GDC_CMD_AA_0_VECTOR 0x00000038L
+#define GDC_CMD_AA_1_VECTOR 0x00000039L
+#define GDC_CMD_AA_0_VECTOR_NOEND 0x0000003AL
+#define GDC_CMD_AA_1_VECTOR_NOEND 0x0000003BL
+#define GDC_CMD_AA_0_VECTOR_BLPO 0x0000003CL
+#define GDC_CMD_AA_1_VECTOR_BLPO 0x0000003DL
+#define GDC_CMD_AA_0_VECTOR_NOEND_BLPO 0x0000003EL
+#define GDC_CMD_AA_1_VECTOR_NOEND_BLPO 0x0000003FL
+
+#define GDC_CMD_BLT_FILL 0x00000041L
+#define GDC_CMD_BLT_DRAW 0x00000042L
+#define GDC_CMD_BITMAP 0x00000043L
+#define GDC_CMD_BLTCOPY_TOP_LEFT 0x00000044L
+#define GDC_CMD_BLTCOPY_TOP_RIGHT 0x00000045L
+#define GDC_CMD_BLTCOPY_BOTTOM_LEFT 0x00000046L
+#define GDC_CMD_BLTCOPY_BOTTOM_RIGHT 0x00000047L
+#define GDC_CMD_LOAD_TEXTURE 0x00000048L
+#define GDC_CMD_LOAD_TILE 0x00000049L
+
+#define GDC_CMD_TRAP_RIGHT 0x00000060L
+#define GDC_CMD_TRAP_LEFT 0x00000061L
+#define GDC_CMD_TRIANGLE_FAN 0x00000062L
+#define GDC_CMD_FLAG_TRIANGLE_FAN 0x00000063L
+
+#define GDC_CMD_FLUSH_FB 0x000000C1L
+#define GDC_CMD_FLUSH_Z 0x000000C2L
+
+#define GDC_CMD_POLYGON_BEGIN 0x000000E0L
+#define GDC_CMD_POLYGON_END 0x000000E1L
+#define GDC_CMD_CLEAR_POLY_FLAG 0x000000E2L
+#define GDC_CMD_NORMAL 0x000000FFL
+
+#define GDC_CMD_VECTOR_BLPO_FLAG 0x00040000L
+#define GDC_CMD_FAST_VECTOR_BLPO_FLAG 0x00000004L
+
+/* for MB86293 or later */
+#define GDC_CMD_MDR1 0x00000000L
+#define GDC_CMD_MDR1S 0x00000002L
+#define GDC_CMD_MDR1B 0x00000004L
+#define GDC_CMD_MDR2 0x00000001L
+#define GDC_CMD_MDR2S 0x00000003L
+#define GDC_CMD_MDR2TL 0x00000007L
+#define GDC_CMD_GMDR1E 0x00000010L
+#define GDC_CMD_GMDR2E 0x00000020L
+#define GDC_CMD_OVERLAP_SHADOW_XY 0x00000000L
+#define GDC_CMD_OVERLAP_SHADOW_XY_COMPOSITION 0x00000001L
+#define GDC_CMD_OVERLAP_Z_PACKED_ONBS 0x00000007L
+#define GDC_CMD_OVERLAP_Z_ORIGIN 0x00000000L
+#define GDC_CMD_OVERLAP_Z_NON_TOPLEFT 0x00000001L
+#define GDC_CMD_OVERLAP_Z_BORDER 0x00000002L
+#define GDC_CMD_OVERLAP_Z_SHADOW 0x00000003L
+#define GDC_CMD_BLTCOPY_ALT_ALPHA	0x00000000L	/* Reserved */
+#define GDC_CMD_DC_LOGOUT	0x00000000L	/* Reserved */
+#define GDC_CMD_BODY_FORE_COLOR 0x00000000L
+#define GDC_CMD_BODY_BACK_COLOR 0x00000001L
+#define GDC_CMD_SHADOW_FORE_COLOR 0x00000002L
+#define GDC_CMD_SHADOW_BACK_COLOR 0x00000003L
+#define GDC_CMD_BORDER_FORE_COLOR 0x00000004L
+#define GDC_CMD_BORDER_BACK_COLOR 0x00000005L
+
+/* Type Code Table */
+#define GDC_TYPE_G_NOP 0x00000020L
+#define GDC_TYPE_G_BEGIN 0x00000021L
+#define GDC_TYPE_G_BEGINCONT 0x00000022L
+#define GDC_TYPE_G_END 0x00000023L
+#define GDC_TYPE_G_VERTEX 0x00000030L
+#define GDC_TYPE_G_VERTEXLOG 0x00000032L
+#define GDC_TYPE_G_VERTEXNOPLOG 0x00000033L
+#define GDC_TYPE_G_INIT 0x00000040L
+#define GDC_TYPE_G_VIEWPORT 0x00000041L
+#define GDC_TYPE_G_DEPTHRANGE 0x00000042L
+#define GDC_TYPE_G_LOADMATRIX 0x00000043L
+#define GDC_TYPE_G_VIEWVOLUMEXYCLIP 0x00000044L
+#define GDC_TYPE_G_VIEWVOLUMEZCLIP 0x00000045L
+#define GDC_TYPE_G_VIEWVOLUMEWCLIP 0x00000046L
+#define GDC_TYPE_SETLVERTEX2I 0x00000072L
+#define GDC_TYPE_SETLVERTEX2IP 0x00000073L
+#define GDC_TYPE_SETMODEREGISTER 0x000000C0L
+#define GDC_TYPE_SETGMODEREGISTER 0x000000C1L
+#define GDC_TYPE_OVERLAPXYOFFT 0x000000C8L
+#define GDC_TYPE_OVERLAPZOFFT 0x000000C9L
+#define GDC_TYPE_DC_LOGOUTADDR 0x000000CCL
+#define GDC_TYPE_SETCOLORREGISTER 0x000000CEL
+#define GDC_TYPE_G_BEGINE 0x000000E1L
+#define GDC_TYPE_G_BEGINCONTE 0x000000E2L
+#define GDC_TYPE_G_ENDE 0x000000E3L
+#define GDC_TYPE_DRAWPIXEL 0x00000000L
+#define GDC_TYPE_DRAWPIXELZ 0x00000001L
+#define GDC_TYPE_DRAWLINE 0x00000002L
+#define GDC_TYPE_DRAWLINE2I 0x00000003L
+#define GDC_TYPE_DRAWLINE2IP 0x00000004L
+#define GDC_TYPE_DRAWTRAP 0x00000005L
+#define GDC_TYPE_DRAWVERTEX2I 0x00000006L
+#define GDC_TYPE_DRAWVERTEX2IP 0x00000007L
+#define GDC_TYPE_DRAWRECTP 0x00000009L
+#define GDC_TYPE_DRAWBITMAPP 0x0000000BL
+#define GDC_TYPE_BLTCOPYP 0x0000000DL
+#define GDC_TYPE_BLTCOPYALTERNATEP 0x0000000FL
+#define GDC_TYPE_LOADTEXTUREP 0x00000011L
+#define GDC_TYPE_BLTTEXTUREP 0x00000013L
+#define GDC_TYPE_BLTCOPYALTALPHABLENDP 0x0000001FL
+#define GDC_TYPE_SETVERTEX2I 0x00000070L
+#define GDC_TYPE_SETVERTEX2IP 0x00000071L
+#define GDC_TYPE_DRAW 0x000000F0L
+#define GDC_TYPE_SETREGISTER 0x000000F1L
+#define GDC_TYPE_SYNC 0x000000FCL
+#define GDC_TYPE_INTERRUPT 0x000000FDL
+#define GDC_TYPE_NOP 0x0
+
+/* Raster operation */
+#define GDC_ROP_CLEAR 0x0000
+#define GDC_ROP_AND 0x0001
+#define GDC_ROP_AND_REVERSE 0x0002
+#define GDC_ROP_COPY 0x0003
+#define GDC_ROP_AND_INVERTED 0x0004
+#define GDC_ROP_NOP 0x0005
+#define GDC_ROP_XOR 0x0006
+#define GDC_ROP_OR 0x0007
+#define GDC_ROP_NOR 0x0008
+#define GDC_ROP_EQUIV 0x0009
+#define GDC_ROP_INVERT 0x000A
+#define GDC_ROP_OR_REVERSE 0x000B
+#define GDC_ROP_COPY_INVERTED 0x000C
+#define GDC_ROP_OR_INVERTED 0x000D
+#define GDC_ROP_NAND 0x000E
+#define GDC_ROP_SET 0x000F
+
+#endif
#include <linux/module.h>
#include <linux/fb.h>
+#include <linux/kernel.h>
#undef DEBUG
EXPORT_SYMBOL(vesa_modes);
#endif /* CONFIG_FB_MODE_HELPERS */
-static int my_atoi(const char *name)
-{
- int val = 0;
-
- for (;; name++) {
- switch (*name) {
- case '0' ... '9':
- val = 10*val+(*name-'0');
- break;
- default:
- return val;
- }
- }
-}
-
/**
* fb_try_mode - test a video mode
* @var: frame buffer user defined part of display
namelen = i;
if (!refresh_specified && !bpp_specified &&
!yres_specified) {
- refresh = my_atoi(&name[i+1]);
+ refresh = simple_strtol(&name[i+1], NULL, 10);
refresh_specified = 1;
if (cvt || rb)
cvt = 0;
case '-':
namelen = i;
if (!bpp_specified && !yres_specified) {
- bpp = my_atoi(&name[i+1]);
+ bpp = simple_strtol(&name[i+1], NULL, 10);
bpp_specified = 1;
if (cvt || rb)
cvt = 0;
break;
case 'x':
if (!yres_specified) {
- yres = my_atoi(&name[i+1]);
+ yres = simple_strtol(&name[i+1], NULL, 10);
yres_specified = 1;
} else
goto done;
}
}
if (i < 0 && yres_specified) {
- xres = my_atoi(name);
+ xres = simple_strtol(name, NULL, 10);
res_specified = 1;
}
done:
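The strings being parsed here follow the usual video= mode option format, <xres>x<yres>[-<bpp>][@<refresh>]. The kernel parser above scans from the end of the string; a front-to-back userspace approximation (illustrative only) shows the same field split:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *mode = "1024x768-16@60";
	char *p;
	long xres = strtol(mode, &p, 10);	/* p -> "x768-16@60" */
	long yres = strtol(p + 1, &p, 10);	/* p -> "-16@60" */
	long bpp  = strtol(p + 1, &p, 10);	/* p -> "@60" */
	long hz   = strtol(p + 1, NULL, 10);

	printf("%ldx%ld-%ld@%ld\n", xres, yres, bpp, hz);
	return 0;
}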
{
struct pmagbafb_par *par = info->par;
- BUG_ON(regno >= info->cmap.len);
+ if (regno >= info->cmap.len)
+ return 1;
red >>= 8; /* The cmap fields are 16 bits */
green >>= 8; /* wide, but the hardware colormap */
{
struct pmagbbfb_par *par = info->par;
- BUG_ON(regno >= info->cmap.len);
+ if (regno >= info->cmap.len)
+ return 1;
red >>= 8; /* The cmap fields are 16 bits */
green >>= 8; /* wide, but the hardware colormap */
static int pxafb_smart_thread(void *arg)
{
struct pxafb_info *fbi = arg;
- struct pxafb_mach_info *inf = fbi->dev->platform_data;
+ struct pxafb_mach_info *inf;
- if (!fbi || !inf->smart_update) {
+	inf = fbi ? fbi->dev->platform_data : NULL;
+	if (!inf || !inf->smart_update) {
pr_err("%s: not properly initialized, thread terminated\n",
__func__);
return -EINVAL;
}
pr_debug("%s(): task starting\n", __func__);
if( (!(ivideo->vbflags2 & VB2_SISBRIDGE)) &&
(!((ivideo->sisvga_engine == SIS_315_VGA) &&
(ivideo->vbflags2 & VB2_CHRONTEL))) ) {
- if(ivideo->sisfb_tvstd & (TV_PALN | TV_PALN | TV_NTSCJ)) {
+ if(ivideo->sisfb_tvstd & (TV_PALM | TV_PALN | TV_NTSCJ)) {
ivideo->sisfb_tvstd = -1;
printk(KERN_ERR "sisfb: PALM/PALN/NTSCJ not supported\n");
}
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/console.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
struct fb_info *fb[2]; /* fb info for both heads */
struct resource *fbmem_res; /* framebuffer resource */
struct resource *regs_res; /* registers resource */
+ struct resource *regs2d_res; /* 2d registers resource */
struct sm501_platdata_fb *pdata; /* our platform data */
unsigned long pm_crt_ctrl; /* pm: crt ctrl save */
int irq;
int swap_endian; /* set to swap rgb=>bgr */
void __iomem *regs; /* remapped registers */
+ void __iomem *regs2d; /* 2d remapped registers */
void __iomem *fbmem; /* remapped framebuffer */
size_t fbmem_len; /* length of remapped region */
};
* This is an attempt to lay out memory for the two framebuffers and
* everything else
*
- * |fbmem_res->start fbmem_res->end|
- * | |
- * |fb[0].fix.smem_start | |fb[1].fix.smem_start | 2K |
+ * |fbmem_res->start fbmem_res->end|
+ * | |
+ * |fb[0].fix.smem_start | |fb[1].fix.smem_start | 2K |
* |-> fb[0].fix.smem_len <-| spare |-> fb[1].fix.smem_len <-|-> cursors <-|
*
* The "spare" space is for the 2d engine data
static DEVICE_ATTR(fbregs_pnl, 0444, sm501fb_debug_show_pnl, NULL);
-/* framebuffer ops */
+/* acceleration operations */
+static int sm501fb_sync(struct fb_info *info)
+{
+ int count = 1000000;
+ struct sm501fb_par *par = info->par;
+ struct sm501fb_info *fbi = par->info;
+
+ /* wait for the 2d engine to be ready */
+ while ((count > 0) &&
+ (readl(fbi->regs + SM501_SYSTEM_CONTROL) &
+ SM501_SYSCTRL_2D_ENGINE_STATUS) != 0)
+ count--;
+
+ if (count <= 0) {
+ dev_err(info->dev, "Timeout waiting for 2d engine sync\n");
+ return 1;
+ }
+ return 0;
+}
+
+static void sm501fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct sm501fb_par *par = info->par;
+ struct sm501fb_info *fbi = par->info;
+ int width = area->width;
+ int height = area->height;
+ int sx = area->sx;
+ int sy = area->sy;
+ int dx = area->dx;
+ int dy = area->dy;
+ unsigned long rtl = 0;
+
+ /* source clip */
+ if ((sx >= info->var.xres_virtual) ||
+ (sy >= info->var.yres_virtual))
+		/* source area not within virtual screen, skipping */
+ return;
+ if ((sx + width) >= info->var.xres_virtual)
+ width = info->var.xres_virtual - sx - 1;
+ if ((sy + height) >= info->var.yres_virtual)
+ height = info->var.yres_virtual - sy - 1;
+
+ /* dest clip */
+ if ((dx >= info->var.xres_virtual) ||
+ (dy >= info->var.yres_virtual))
+		/* destination area not within virtual screen, skipping */
+ return;
+ if ((dx + width) >= info->var.xres_virtual)
+ width = info->var.xres_virtual - dx - 1;
+ if ((dy + height) >= info->var.yres_virtual)
+ height = info->var.yres_virtual - dy - 1;
+
+ if ((sx < dx) || (sy < dy)) {
+ rtl = 1 << 27;
+ sx += width - 1;
+ dx += width - 1;
+ sy += height - 1;
+ dy += height - 1;
+ }
+
+ if (sm501fb_sync(info))
+ return;
+
+ /* set the base addresses */
+ writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
+ writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_DESTINATION_BASE);
+
+ /* set the window width */
+ writel((info->var.xres << 16) | info->var.xres,
+ fbi->regs2d + SM501_2D_WINDOW_WIDTH);
+
+ /* set window stride */
+ writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
+ fbi->regs2d + SM501_2D_PITCH);
+
+ /* set data format */
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ writel(0, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ case 16:
+ writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ case 32:
+ writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ }
+
+ /* 2d compare mask */
+ writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
+
+ /* 2d mask */
+ writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
+
+ /* source and destination x y */
+ writel((sx << 16) | sy, fbi->regs2d + SM501_2D_SOURCE);
+ writel((dx << 16) | dy, fbi->regs2d + SM501_2D_DESTINATION);
+
+ /* w/h */
+ writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
+
+ /* do area move */
+ writel(0x800000cc | rtl, fbi->regs2d + SM501_2D_CONTROL);
+}
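The rtl bit (1 << 27) above makes the 2d engine copy from bottom-right to top-left whenever the destination overlaps the source at a higher address, the same rule memmove() applies in one dimension. Illustrative only:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8] = "abcdef";

	/* dst > src: must copy backwards or the source is clobbered */
	memmove(buf + 2, buf, 4);
	printf("%s\n", buf);		/* prints "ababcd" */
	return 0;
}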
+
+static void sm501fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct sm501fb_par *par = info->par;
+ struct sm501fb_info *fbi = par->info;
+ int width = rect->width, height = rect->height;
+
+ if ((rect->dx >= info->var.xres_virtual) ||
+ (rect->dy >= info->var.yres_virtual))
+ /* Rectangle not within virtual screen, skipping */
+ return;
+ if ((rect->dx + width) >= info->var.xres_virtual)
+ width = info->var.xres_virtual - rect->dx - 1;
+ if ((rect->dy + height) >= info->var.yres_virtual)
+ height = info->var.yres_virtual - rect->dy - 1;
+
+ if (sm501fb_sync(info))
+ return;
+
+ /* set the base addresses */
+ writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
+ writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_DESTINATION_BASE);
+
+ /* set the window width */
+ writel((info->var.xres << 16) | info->var.xres,
+ fbi->regs2d + SM501_2D_WINDOW_WIDTH);
+
+ /* set window stride */
+ writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
+ fbi->regs2d + SM501_2D_PITCH);
+
+ /* set data format */
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ writel(0, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ case 16:
+ writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ case 32:
+ writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ }
+
+ /* 2d compare mask */
+ writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
+
+ /* 2d mask */
+ writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
+
+ /* colour */
+ writel(rect->color, fbi->regs2d + SM501_2D_FOREGROUND);
+
+ /* x y */
+ writel((rect->dx << 16) | rect->dy, fbi->regs2d + SM501_2D_DESTINATION);
+
+ /* w/h */
+ writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
+
+ /* do rectangle fill */
+ writel(0x800100cc, fbi->regs2d + SM501_2D_CONTROL);
+}
+
static struct fb_ops sm501fb_ops_crt = {
.owner = THIS_MODULE,
.fb_setcolreg = sm501fb_setcolreg,
.fb_pan_display = sm501fb_pan_crt,
.fb_cursor = sm501fb_cursor,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
+ .fb_fillrect = sm501fb_fillrect,
+ .fb_copyarea = sm501fb_copyarea,
.fb_imageblit = cfb_imageblit,
+ .fb_sync = sm501fb_sync,
};
static struct fb_ops sm501fb_ops_pnl = {
.fb_blank = sm501fb_blank_pnl,
.fb_setcolreg = sm501fb_setcolreg,
.fb_cursor = sm501fb_cursor,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
+ .fb_fillrect = sm501fb_fillrect,
+ .fb_copyarea = sm501fb_copyarea,
.fb_imageblit = cfb_imageblit,
+ .fb_sync = sm501fb_sync,
};
/* sm501_init_cursor
dev_warn(dev, "no irq for device\n");
}
- /* allocate, reserve and remap resources for registers */
+ /* allocate, reserve and remap resources for display
+ * controller registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "no resource definition for registers\n");
}
info->regs_res = request_mem_region(res->start,
- res->end - res->start,
+ resource_size(res),
pdev->name);
if (info->regs_res == NULL) {
goto err_release;
}
- info->regs = ioremap(res->start, (res->end - res->start)+1);
+ info->regs = ioremap(res->start, resource_size(res));
if (info->regs == NULL) {
dev_err(dev, "cannot remap registers\n");
ret = -ENXIO;
goto err_regs_res;
}
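The resource_size() conversions in this hunk are more than cosmetic: the old request_mem_region() call passed res->end - res->start, one byte short of the region, while the ioremap() beside it added the +1. resource_size() (defined in include/linux/ioport.h) makes both consistent:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}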
+ /* allocate, reserve and remap resources for 2d
+ * controller registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res == NULL) {
+ dev_err(dev, "no resource definition for 2d registers\n");
+ ret = -ENOENT;
+ goto err_regs_map;
+ }
+
+ info->regs2d_res = request_mem_region(res->start,
+ resource_size(res),
+ pdev->name);
+
+ if (info->regs2d_res == NULL) {
+		dev_err(dev, "cannot claim 2d registers\n");
+ ret = -ENXIO;
+ goto err_regs_map;
+ }
+
+ info->regs2d = ioremap(res->start, resource_size(res));
+ if (info->regs2d == NULL) {
+		dev_err(dev, "cannot remap 2d registers\n");
+ ret = -ENXIO;
+ goto err_regs2d_res;
+ }
+
/* allocate, reserve resources for framebuffer */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (res == NULL) {
dev_err(dev, "no memory resource defined\n");
ret = -ENXIO;
- goto err_regs_map;
+ goto err_regs2d_map;
}
info->fbmem_res = request_mem_region(res->start,
- (res->end - res->start)+1,
+ resource_size(res),
pdev->name);
if (info->fbmem_res == NULL) {
dev_err(dev, "cannot claim framebuffer\n");
ret = -ENXIO;
- goto err_regs_map;
+ goto err_regs2d_map;
}
- info->fbmem = ioremap(res->start, (res->end - res->start)+1);
+ info->fbmem = ioremap(res->start, resource_size(res));
if (info->fbmem == NULL) {
dev_err(dev, "cannot remap framebuffer\n");
goto err_mem_res;
}
- info->fbmem_len = (res->end - res->start)+1;
+ info->fbmem_len = resource_size(res);
/* clear framebuffer memory - avoids garbage data on unused fb */
memset(info->fbmem, 0, info->fbmem_len);
/* enable display controller */
sm501_unit_power(dev->parent, SM501_GATE_DISPLAY, 1);
- /* setup cursors */
+ /* enable 2d controller */
+ sm501_unit_power(dev->parent, SM501_GATE_2D_ENGINE, 1);
+ /* setup cursors */
sm501_init_cursor(info->fb[HEAD_CRT], SM501_DC_CRT_HWC_ADDR);
sm501_init_cursor(info->fb[HEAD_PANEL], SM501_DC_PANEL_HWC_ADDR);
release_resource(info->fbmem_res);
kfree(info->fbmem_res);
+ err_regs2d_map:
+ iounmap(info->regs2d);
+
+ err_regs2d_res:
+ release_resource(info->regs2d_res);
+ kfree(info->regs2d_res);
+
err_regs_map:
iounmap(info->regs);
release_resource(info->fbmem_res);
kfree(info->fbmem_res);
+ iounmap(info->regs2d);
+ release_resource(info->regs2d_res);
+ kfree(info->regs2d_res);
+
iounmap(info->regs);
release_resource(info->regs_res);
kfree(info->regs_res);
par->ops.fb_cursor = NULL;
fb->fbops = &par->ops;
- fb->flags = FBINFO_FLAG_DEFAULT |
+ fb->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST |
+ FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
/* fixed data */
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
- bool turn_on_first_powersequence = false;
- bool turn_on_second_powersequence = false;
-
DEBUG_MSG(KERN_INFO "integrated_lvds_enable, out_interface:%d\n",
plvds_chip_info->output_interface);
if (plvds_setting_info->lcd_mode == LCD_SPWG)
viafb_write_reg_mask(CRD2, VIACR, 0x00, BIT0 + BIT1);
- else
+ else
viafb_write_reg_mask(CRD2, VIACR, 0x03, BIT0 + BIT1);
- if (INTERFACE_LVDS0LVDS1 == plvds_chip_info->output_interface)
- turn_on_first_powersequence = true;
- if (INTERFACE_LVDS0 == plvds_chip_info->output_interface)
- turn_on_first_powersequence = true;
- if (INTERFACE_LVDS1 == plvds_chip_info->output_interface)
- turn_on_second_powersequence = true;
-
- if (turn_on_second_powersequence) {
- /* Use second power sequence control: */
-
- /* Use hardware control power sequence. */
- viafb_write_reg_mask(CRD3, VIACR, 0, BIT0);
-
- /* Turn on back light. */
- viafb_write_reg_mask(CRD3, VIACR, 0, BIT6 + BIT7);
- /* Turn on hardware power sequence. */
- viafb_write_reg_mask(CRD4, VIACR, 0x02, BIT1);
- }
- if (turn_on_first_powersequence) {
+ switch (plvds_chip_info->output_interface) {
+ case INTERFACE_LVDS0LVDS1:
+ case INTERFACE_LVDS0:
/* Use first power sequence control: */
-
/* Use hardware control power sequence. */
viafb_write_reg_mask(CR91, VIACR, 0, BIT0);
-
/* Turn on back light. */
viafb_write_reg_mask(CR91, VIACR, 0, BIT6 + BIT7);
-
/* Turn on hardware power sequence. */
viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3);
+ break;
+ case INTERFACE_LVDS1:
+ /* Use second power sequence control: */
+ /* Use hardware control power sequence. */
+ viafb_write_reg_mask(CRD3, VIACR, 0, BIT0);
+ /* Turn on back light. */
+ viafb_write_reg_mask(CRD3, VIACR, 0, BIT6 + BIT7);
+ /* Turn on hardware power sequence. */
+ viafb_write_reg_mask(CRD4, VIACR, 0x02, BIT1);
+ break;
}
/* Turn DFP High/Low pad on. */
static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
{
*viafb_entry = proc_mkdir("viafb", NULL);
- if (viafb_entry) {
+ if (*viafb_entry) {
proc_create("dvp0", 0, *viafb_entry, &viafb_dvp0_proc_fops);
proc_create("dvp1", 0, *viafb_entry, &viafb_dvp1_proc_fops);
proc_create("dfph", 0, *viafb_entry, &viafb_dfph_proc_fops);
*/
ret = retry(iocb);
- if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
- BUG_ON(!list_empty(&iocb->ki_wait.task_list));
+ if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
aio_complete(iocb, ret, 0);
- }
out:
spin_lock_irq(&ctx->ctx_lock);
unsigned long flags;
int run = 0;
- /* We're supposed to be the only path putting the iocb back on the run
- * list. If we find that the iocb is *back* on a wait queue already
- * than retry has happened before we could queue the iocb. This also
- * means that the retry could have completed and freed our iocb, no
- * good. */
- BUG_ON((!list_empty(&iocb->ki_wait.task_list)));
-
spin_lock_irqsave(&ctx->ctx_lock, flags);
/* set this inside the lock so that we can't race with aio_run_iocb()
* testing it and putting the iocb on the run list under the lock */
/*
* kick_iocb:
* Called typically from a wait queue callback context
- * (aio_wake_function) to trigger a retry of the iocb.
+ * to trigger a retry of the iocb.
* The retry is usually executed by aio workqueue
* threads (See aio_kick_handler).
*/
return 0;
}
-/*
- * aio_wake_function:
- * wait queue callback function for aio notification,
- * Simply triggers a retry of the operation via kick_iocb.
- *
- * This callback is specified in the wait queue entry in
- * a kiocb.
- *
- * Note:
- * This routine is executed with the wait queue lock held.
- * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests
- * the ioctx lock inside the wait queue lock. This is safe
- * because this callback isn't used for wait queues which
- * are nested inside ioctx lock (i.e. ctx->wait)
- */
-static int aio_wake_function(wait_queue_t *wait, unsigned mode,
- int sync, void *key)
-{
- struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);
-
- list_del_init(&wait->task_list);
- kick_iocb(iocb);
- return 1;
-}
-
static void aio_batch_add(struct address_space *mapping,
struct hlist_head *batch_hash)
{
req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
req->ki_opcode = iocb->aio_lio_opcode;
- init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
- INIT_LIST_HEAD(&req->ki_wait.task_list);
ret = aio_setup_iocb(req);
current->pid, __func__, ##args); \
} while (0)
+struct rehash_entry {
+ struct task_struct *task;
+ struct list_head list;
+};
+
/* Unified info structure. This is pointed to by both the dentry and
inode structures. Each file in the filesystem has an instance of this
structure. It holds a reference to the dentry, so dentries are never
struct completion expire_complete;
struct list_head active;
+ int active_count;
+ struct list_head rehash_list;
+
struct list_head expiring;
struct autofs_sb_info *sbi;
#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */
+#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
+#define AUTOFS_INF_REHASH (1<<3) /* dentry in transit to ->lookup() */
struct autofs_wait_queue {
wait_queue_head_t queue;
{
struct autofs_info *inf = autofs4_dentry_ino(dentry);
- if (dentry->d_flags & DCACHE_AUTOFS_PENDING)
+ if (inf->flags & AUTOFS_INF_PENDING)
return 1;
if (inf->flags & AUTOFS_INF_EXPIRING)
return ret;
}
+static inline void autofs4_add_expiring(struct dentry *dentry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ if (ino) {
+ spin_lock(&sbi->lookup_lock);
+ if (list_empty(&ino->expiring))
+ list_add(&ino->expiring, &sbi->expiring_list);
+ spin_unlock(&sbi->lookup_lock);
+ }
+ return;
+}
+
+static inline void autofs4_del_expiring(struct dentry *dentry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ if (ino) {
+ spin_lock(&sbi->lookup_lock);
+ if (!list_empty(&ino->expiring))
+ list_del_init(&ino->expiring);
+ spin_unlock(&sbi->lookup_lock);
+ }
+ return;
+}
+
void autofs4_dentry_release(struct dentry *);
extern void autofs4_kill_sb(struct super_block *);
return 0;
/* No point expiring a pending mount */
- if (dentry->d_flags & DCACHE_AUTOFS_PENDING)
+ if (ino->flags & AUTOFS_INF_PENDING)
return 0;
if (!do_now) {
root->d_mounted--;
}
ino->flags |= AUTOFS_INF_EXPIRING;
+ autofs4_add_expiring(root);
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
return root;
expired, (int)expired->d_name.len, expired->d_name.name);
ino = autofs4_dentry_ino(expired);
ino->flags |= AUTOFS_INF_EXPIRING;
+ autofs4_add_expiring(expired);
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
spin_lock(&dcache_lock);
DPRINTK("expire done status=%d", status);
- if (d_unhashed(dentry))
+ if (d_unhashed(dentry) && IS_DEADDIR(dentry->d_inode))
return -EAGAIN;
return status;
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
ino->flags &= ~AUTOFS_INF_EXPIRING;
+ autofs4_del_expiring(dentry);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
}
ino->flags &= ~AUTOFS_INF_EXPIRING;
+ autofs4_del_expiring(dentry);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
dput(dentry);
ino->dentry = NULL;
ino->size = 0;
INIT_LIST_HEAD(&ino->active);
+ INIT_LIST_HEAD(&ino->rehash_list);
+ ino->active_count = 0;
INIT_LIST_HEAD(&ino->expiring);
atomic_set(&ino->count, 0);
}
.rmdir = autofs4_dir_rmdir,
};
+static void autofs4_add_active(struct dentry *dentry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ if (ino) {
+ spin_lock(&sbi->lookup_lock);
+ if (!ino->active_count) {
+ if (list_empty(&ino->active))
+ list_add(&ino->active, &sbi->active_list);
+ }
+ ino->active_count++;
+ spin_unlock(&sbi->lookup_lock);
+ }
+ return;
+}
+
+static void autofs4_del_active(struct dentry *dentry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ if (ino) {
+ spin_lock(&sbi->lookup_lock);
+ ino->active_count--;
+ if (!ino->active_count) {
+ if (!list_empty(&ino->active))
+ list_del_init(&ino->active);
+ }
+ spin_unlock(&sbi->lookup_lock);
+ }
+ return;
+}
+
+static void autofs4_add_rehash_entry(struct autofs_info *ino,
+ struct rehash_entry *entry)
+{
+ entry->task = current;
+ INIT_LIST_HEAD(&entry->list);
+ list_add(&entry->list, &ino->rehash_list);
+ return;
+}
+
+static void autofs4_remove_rehash_entry(struct autofs_info *ino)
+{
+ struct list_head *head = &ino->rehash_list;
+ struct rehash_entry *entry;
+ list_for_each_entry(entry, head, list) {
+ if (entry->task == current) {
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ return;
+}
+
+static void autofs4_remove_rehash_entrys(struct autofs_info *ino)
+{
+ struct autofs_sb_info *sbi = ino->sbi;
+ struct rehash_entry *entry, *next;
+ struct list_head *head;
+
+ spin_lock(&sbi->fs_lock);
+ spin_lock(&sbi->lookup_lock);
+ if (!(ino->flags & AUTOFS_INF_REHASH)) {
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&sbi->fs_lock);
+ return;
+ }
+ ino->flags &= ~AUTOFS_INF_REHASH;
+ head = &ino->rehash_list;
+ list_for_each_entry_safe(entry, next, head, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&sbi->fs_lock);
+ dput(ino->dentry);
+
+ return;
+}
+
+static void autofs4_revalidate_drop(struct dentry *dentry,
+ struct rehash_entry *entry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ /*
+ * Add to the active list so we can pick this up in
+ * ->lookup(). Also add an entry to a rehash list so
+	 * we know when there are no dentrys in flight and it
+	 * is safe to rehash the dentry.
+ */
+ spin_lock(&sbi->lookup_lock);
+ if (list_empty(&ino->active))
+ list_add(&ino->active, &sbi->active_list);
+ autofs4_add_rehash_entry(ino, entry);
+ spin_unlock(&sbi->lookup_lock);
+ if (!(ino->flags & AUTOFS_INF_REHASH)) {
+ ino->flags |= AUTOFS_INF_REHASH;
+ dget(dentry);
+ spin_lock(&dentry->d_lock);
+ __d_drop(dentry);
+ spin_unlock(&dentry->d_lock);
+ }
+ return;
+}
+
+static void autofs4_revalidate_rehash(struct dentry *dentry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ if (ino->flags & AUTOFS_INF_REHASH) {
+ spin_lock(&sbi->lookup_lock);
+ autofs4_remove_rehash_entry(ino);
+ if (list_empty(&ino->rehash_list)) {
+ spin_unlock(&sbi->lookup_lock);
+ ino->flags &= ~AUTOFS_INF_REHASH;
+ d_rehash(dentry);
+ dput(ino->dentry);
+ } else
+ spin_unlock(&sbi->lookup_lock);
+ }
+ return;
+}
+
+static unsigned int autofs4_need_mount(unsigned int flags)
+{
+ unsigned int res = 0;
+ if (flags & (TRIGGER_FLAGS | TRIGGER_INTENTS))
+ res = 1;
+ return res;
+}
+
static int autofs4_dir_open(struct inode *inode, struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
* it.
*/
spin_lock(&dcache_lock);
- if (!d_mountpoint(dentry) && __simple_empty(dentry)) {
+ if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
spin_unlock(&dcache_lock);
return -ENOENT;
}
return dcache_dir_open(inode, file);
}
-static int try_to_fill_dentry(struct dentry *dentry, int flags)
+static int try_to_fill_dentry(struct dentry *dentry)
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct autofs_info *ino = autofs4_dentry_ino(dentry);
* Wait for a pending mount, triggering one if there
* isn't one already
*/
- if (dentry->d_inode == NULL) {
- DPRINTK("waiting for mount name=%.*s",
- dentry->d_name.len, dentry->d_name.name);
-
- status = autofs4_wait(sbi, dentry, NFY_MOUNT);
-
- DPRINTK("mount done status=%d", status);
-
- /* Turn this into a real negative dentry? */
- if (status == -ENOENT) {
- spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
- return status;
- } else if (status) {
- /* Return a negative dentry, but leave it "pending" */
- return status;
- }
- /* Trigger mount for path component or follow link */
- } else if (dentry->d_flags & DCACHE_AUTOFS_PENDING ||
- flags & (TRIGGER_FLAGS | TRIGGER_INTENTS) ||
- current->link_count) {
- DPRINTK("waiting for mount name=%.*s",
- dentry->d_name.len, dentry->d_name.name);
-
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
- status = autofs4_wait(sbi, dentry, NFY_MOUNT);
+ DPRINTK("waiting for mount name=%.*s",
+ dentry->d_name.len, dentry->d_name.name);
- DPRINTK("mount done status=%d", status);
+ status = autofs4_wait(sbi, dentry, NFY_MOUNT);
- if (status) {
- spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
- return status;
- }
- }
-
- /* Initialize expiry counter after successful mount */
- if (ino)
- ino->last_used = jiffies;
+ DPRINTK("mount done status=%d", status);
- spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
+ /* Update expiry counter */
+ ino->last_used = jiffies;
- return 0;
+ return status;
}
/* For autofs direct mounts the follow link triggers the mount */
autofs4_expire_wait(dentry);
/* We trigger a mount for almost all flags */
- lookup_type = nd->flags & (TRIGGER_FLAGS | TRIGGER_INTENTS);
- if (!(lookup_type || dentry->d_flags & DCACHE_AUTOFS_PENDING))
+ lookup_type = autofs4_need_mount(nd->flags);
+ spin_lock(&sbi->fs_lock);
+ spin_lock(&dcache_lock);
+ if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) {
+ spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
goto follow;
+ }
/*
* If the dentry contains directories then it is an autofs
* multi-mount with no root mount offset. So don't try to
* mount it again.
*/
- spin_lock(&dcache_lock);
- if (dentry->d_flags & DCACHE_AUTOFS_PENDING ||
- (!d_mountpoint(dentry) && __simple_empty(dentry))) {
+ if (ino->flags & AUTOFS_INF_PENDING ||
+ (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
+ ino->flags |= AUTOFS_INF_PENDING;
spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+
+ status = try_to_fill_dentry(dentry);
+
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
- status = try_to_fill_dentry(dentry, 0);
if (status)
goto out_error;
goto follow;
}
spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
follow:
/*
* If there is no root mount it must be an autofs
{
struct inode *dir = dentry->d_parent->d_inode;
struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
- int oz_mode = autofs4_oz_mode(sbi);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct rehash_entry *entry;
int flags = nd ? nd->flags : 0;
- int status = 1;
+ unsigned int mutex_aquired;
+
+ DPRINTK("name = %.*s oz_mode = %d",
+		dentry->d_name.len, dentry->d_name.name, autofs4_oz_mode(sbi));
+
+ /* Daemon never causes a mount to trigger */
+ if (autofs4_oz_mode(sbi))
+ return 1;
+
+ entry = kmalloc(sizeof(struct rehash_entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ mutex_aquired = mutex_trylock(&dir->i_mutex);
- /* Pending dentry */
spin_lock(&sbi->fs_lock);
+ spin_lock(&dcache_lock);
+ /* Pending dentry */
if (autofs4_ispending(dentry)) {
- /* The daemon never causes a mount to trigger */
- spin_unlock(&sbi->fs_lock);
+ int status;
- if (oz_mode)
- return 1;
+ /*
+ * We can only unhash and send this to ->lookup() if
+ * the directory mutex is held over d_revalidate() and
+ * ->lookup(). This prevents the VFS from incorrectly
+ * seeing the dentry as non-existent.
+ */
+ ino->flags |= AUTOFS_INF_PENDING;
+ if (!mutex_aquired) {
+ autofs4_revalidate_drop(dentry, entry);
+ spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+ return 0;
+ }
+ spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+ mutex_unlock(&dir->i_mutex);
+ kfree(entry);
/*
* If the directory has gone away due to an expire
* A zero status is success otherwise we have a
* negative error code.
*/
- status = try_to_fill_dentry(dentry, flags);
+ status = try_to_fill_dentry(dentry);
+
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+
if (status == 0)
return 1;
return status;
}
- spin_unlock(&sbi->fs_lock);
-
- /* Negative dentry.. invalidate if "old" */
- if (dentry->d_inode == NULL)
- return 0;
/* Check for a non-mountpoint directory with no contents */
- spin_lock(&dcache_lock);
if (S_ISDIR(dentry->d_inode->i_mode) &&
- !d_mountpoint(dentry) &&
- __simple_empty(dentry)) {
+ !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
DPRINTK("dentry=%p %.*s, emptydir",
dentry, dentry->d_name.len, dentry->d_name.name);
- spin_unlock(&dcache_lock);
- /* The daemon never causes a mount to trigger */
- if (oz_mode)
- return 1;
+ if (autofs4_need_mount(flags) || current->link_count) {
+ int status;
- /*
- * A zero status is success otherwise we have a
- * negative error code.
- */
- status = try_to_fill_dentry(dentry, flags);
- if (status == 0)
- return 1;
+ /*
+ * We can only unhash and send this to ->lookup() if
+ * the directory mutex is held over d_revalidate() and
+ * ->lookup(). This prevents the VFS from incorrectly
+ * seeing the dentry as non-existent.
+ */
+ ino->flags |= AUTOFS_INF_PENDING;
+ if (!mutex_aquired) {
+ autofs4_revalidate_drop(dentry, entry);
+ spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+ return 0;
+ }
+ spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+ mutex_unlock(&dir->i_mutex);
+ kfree(entry);
- return status;
+ /*
+ * A zero status is success otherwise we have a
+ * negative error code.
+ */
+ status = try_to_fill_dentry(dentry);
+
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+
+ if (status == 0)
+ return 1;
+
+ return status;
+ }
}
spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+
+ if (mutex_aquired)
+ mutex_unlock(&dir->i_mutex);
+
+ kfree(entry);
return 1;
}
+static void autofs4_free_rehash_entrys(struct autofs_info *inf)
+{
+ struct list_head *head = &inf->rehash_list;
+ struct rehash_entry *entry, *next;
+ list_for_each_entry_safe(entry, next, head, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
void autofs4_dentry_release(struct dentry *de)
{
struct autofs_info *inf;
list_del(&inf->active);
if (!list_empty(&inf->expiring))
list_del(&inf->expiring);
+ if (!list_empty(&inf->rehash_list))
+ autofs4_free_rehash_entrys(inf);
spin_unlock(&sbi->lookup_lock);
}
.d_release = autofs4_dentry_release,
};
-static struct dentry *autofs4_lookup_active(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name)
+static struct dentry *autofs4_lookup_active(struct dentry *dentry)
{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct dentry *parent = dentry->d_parent;
+ struct qstr *name = &dentry->d_name;
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
struct list_head *p, *head;
+restart:
spin_lock(&dcache_lock);
spin_lock(&sbi->lookup_lock);
head = &sbi->active_list;
list_for_each(p, head) {
struct autofs_info *ino;
- struct dentry *dentry;
+ struct dentry *active;
struct qstr *qstr;
ino = list_entry(p, struct autofs_info, active);
- dentry = ino->dentry;
+ active = ino->dentry;
- spin_lock(&dentry->d_lock);
+ spin_lock(&active->d_lock);
/* Already gone? */
- if (atomic_read(&dentry->d_count) == 0)
+ if (atomic_read(&active->d_count) == 0)
goto next;
- qstr = &dentry->d_name;
+ if (active->d_inode && IS_DEADDIR(active->d_inode)) {
+ if (!list_empty(&ino->rehash_list)) {
+ dget(active);
+ spin_unlock(&active->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&dcache_lock);
+ autofs4_remove_rehash_entrys(ino);
+ dput(active);
+ goto restart;
+ }
+ goto next;
+ }
+
+ qstr = &active->d_name;
- if (dentry->d_name.hash != hash)
+ if (active->d_name.hash != hash)
goto next;
- if (dentry->d_parent != parent)
+ if (active->d_parent != parent)
goto next;
if (qstr->len != len)
if (memcmp(qstr->name, str, len))
goto next;
- if (d_unhashed(dentry)) {
- dget(dentry);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
- return dentry;
- }
+ dget(active);
+ spin_unlock(&active->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&dcache_lock);
+ return active;
next:
- spin_unlock(&dentry->d_lock);
+ spin_unlock(&active->d_lock);
}
spin_unlock(&sbi->lookup_lock);
spin_unlock(&dcache_lock);
return NULL;
}
-static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name)
+static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct dentry *parent = dentry->d_parent;
+ struct qstr *name = &dentry->d_name;
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
head = &sbi->expiring_list;
list_for_each(p, head) {
struct autofs_info *ino;
- struct dentry *dentry;
+ struct dentry *expiring;
struct qstr *qstr;
ino = list_entry(p, struct autofs_info, expiring);
- dentry = ino->dentry;
+ expiring = ino->dentry;
- spin_lock(&dentry->d_lock);
+ spin_lock(&expiring->d_lock);
/* Bad luck, we've already been dentry_iput */
- if (!dentry->d_inode)
+ if (!expiring->d_inode)
goto next;
- qstr = &dentry->d_name;
+ qstr = &expiring->d_name;
- if (dentry->d_name.hash != hash)
+ if (expiring->d_name.hash != hash)
goto next;
- if (dentry->d_parent != parent)
+ if (expiring->d_parent != parent)
goto next;
if (qstr->len != len)
if (memcmp(qstr->name, str, len))
goto next;
- if (d_unhashed(dentry)) {
- dget(dentry);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
- return dentry;
- }
+ dget(expiring);
+ spin_unlock(&expiring->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&dcache_lock);
+ return expiring;
next:
- spin_unlock(&dentry->d_lock);
+ spin_unlock(&expiring->d_lock);
}
spin_unlock(&sbi->lookup_lock);
spin_unlock(&dcache_lock);
return NULL;
}
+static struct autofs_info *init_new_dentry(struct autofs_sb_info *sbi,
+ struct dentry *dentry, int oz_mode)
+{
+ struct autofs_info *ino;
+
+ /*
+ * Mark the dentry incomplete but don't hash it. We do this
+ * to serialize our inode creation operations (symlink and
+ * mkdir) which prevents deadlock during the callback to
+ * the daemon. Subsequent user space lookups for the same
+ * dentry are placed on the wait queue while the daemon
+	 * itself is allowed passage unrestricted so the create
+ * operation itself can then hash the dentry. Finally,
+ * we check for the hashed dentry and return the newly
+ * hashed dentry.
+ */
+ dentry->d_op = &autofs4_root_dentry_operations;
+
+ /*
+ * And we need to ensure that the same dentry is used for
+ * all following lookup calls until it is hashed so that
+ * the dentry flags are persistent throughout the request.
+ */
+ ino = autofs4_init_ino(NULL, sbi, 0555);
+ if (!ino)
+ return ERR_PTR(-ENOMEM);
+
+ dentry->d_fsdata = ino;
+ ino->dentry = dentry;
+
+ /*
+ * Only set the mount pending flag for new dentrys not created
+ * by the daemon.
+ */
+ if (!oz_mode)
+ ino->flags |= AUTOFS_INF_PENDING;
+
+ d_instantiate(dentry, NULL);
+
+ return ino;
+}
+
/* Lookups in the root directory */
static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
struct autofs_sb_info *sbi;
struct autofs_info *ino;
- struct dentry *expiring, *unhashed;
+ struct dentry *expiring, *active;
int oz_mode;
+ int status = 0;
DPRINTK("name = %.*s",
dentry->d_name.len, dentry->d_name.name);
DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
- unhashed = autofs4_lookup_active(sbi, dentry->d_parent, &dentry->d_name);
- if (unhashed)
- dentry = unhashed;
- else {
- /*
- * Mark the dentry incomplete but don't hash it. We do this
- * to serialize our inode creation operations (symlink and
- * mkdir) which prevents deadlock during the callback to
- * the daemon. Subsequent user space lookups for the same
- * dentry are placed on the wait queue while the daemon
- * itself is allowed passage unresticted so the create
- * operation itself can then hash the dentry. Finally,
- * we check for the hashed dentry and return the newly
- * hashed dentry.
- */
- dentry->d_op = &autofs4_root_dentry_operations;
-
- /*
- * And we need to ensure that the same dentry is used for
- * all following lookup calls until it is hashed so that
- * the dentry flags are persistent throughout the request.
- */
- ino = autofs4_init_ino(NULL, sbi, 0555);
- if (!ino)
- return ERR_PTR(-ENOMEM);
-
- dentry->d_fsdata = ino;
- ino->dentry = dentry;
-
- spin_lock(&sbi->lookup_lock);
- list_add(&ino->active, &sbi->active_list);
- spin_unlock(&sbi->lookup_lock);
-
- d_instantiate(dentry, NULL);
+ spin_lock(&sbi->fs_lock);
+ active = autofs4_lookup_active(dentry);
+ if (active) {
+ dentry = active;
+ ino = autofs4_dentry_ino(dentry);
+ /* If this came from revalidate, rehash it */
+ autofs4_revalidate_rehash(dentry);
+ spin_unlock(&sbi->fs_lock);
+ } else {
+ spin_unlock(&sbi->fs_lock);
+ ino = init_new_dentry(sbi, dentry, oz_mode);
+ if (IS_ERR(ino))
+ return (struct dentry *) ino;
}
+ autofs4_add_active(dentry);
+
if (!oz_mode) {
+ expiring = autofs4_lookup_expiring(dentry);
mutex_unlock(&dir->i_mutex);
- expiring = autofs4_lookup_expiring(sbi,
- dentry->d_parent,
- &dentry->d_name);
if (expiring) {
/*
* If we are racing with expire the request might not
* be quite complete but the directory has been removed
* so it must have been successful, so just wait for it.
*/
- ino = autofs4_dentry_ino(expiring);
autofs4_expire_wait(expiring);
- spin_lock(&sbi->lookup_lock);
- if (!list_empty(&ino->expiring))
- list_del_init(&ino->expiring);
- spin_unlock(&sbi->lookup_lock);
dput(expiring);
}
-
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
- if (dentry->d_op && dentry->d_op->d_revalidate)
- (dentry->d_op->d_revalidate)(dentry, nd);
+ status = try_to_fill_dentry(dentry);
mutex_lock(&dir->i_mutex);
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
}
+ autofs4_del_active(dentry);
+
/*
- * If we are still pending, check if we had to handle
+ * If we had a mount fail, check if we had to handle
* a signal. If so we can force a restart..
*/
- if (dentry->d_flags & DCACHE_AUTOFS_PENDING) {
+ if (status) {
/* See if we were interrupted */
if (signal_pending(current)) {
			sigset_t *sigset = &current->pending.signal;
if (sigismember (sigset, SIGKILL) ||
sigismember (sigset, SIGQUIT) ||
sigismember (sigset, SIGINT)) {
- if (unhashed)
- dput(unhashed);
+ if (active)
+ dput(active);
return ERR_PTR(-ERESTARTNOINTR);
}
}
- if (!oz_mode) {
- spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
+ }
+
+ /*
+ * User space can (and has done in the past) remove and re-create
+ * this directory during the callback. This can leave us with an
+ * unhashed dentry, but a successful mount! So we need to
+ * perform another cached lookup in case the dentry now exists.
+ */
+ if (!oz_mode && !have_submounts(dentry)) {
+ struct dentry *new;
+ new = d_lookup(dentry->d_parent, &dentry->d_name);
+ if (new) {
+ if (active)
+ dput(active);
+ return new;
+ } else {
+ if (!status)
+ status = -ENOENT;
}
}
/*
- * If this dentry is unhashed, then we shouldn't honour this
- * lookup. Returning ENOENT here doesn't do the right thing
- * for all system calls, but it should be OK for the operations
- * we permit from an autofs.
+ * If we had a mount failure, return status to user space.
+ * If the mount succeeded and we used a dentry from the active queue
+ * return it.
*/
- if (!oz_mode && d_unhashed(dentry)) {
+ if (status) {
+ dentry = ERR_PTR(status);
+ if (active)
+ dput(active);
+ return dentry;
+ } else {
/*
- * A user space application can (and has done in the past)
- * remove and re-create this directory during the callback.
- * This can leave us with an unhashed dentry, but a
- * successful mount! So we need to perform another
- * cached lookup in case the dentry now exists.
+ * Valid successful mount, return active dentry or NULL
+ * for a new dentry.
*/
- struct dentry *parent = dentry->d_parent;
- struct dentry *new = d_lookup(parent, &dentry->d_name);
- if (new != NULL)
- dentry = new;
- else
- dentry = ERR_PTR(-ENOENT);
-
- if (unhashed)
- dput(unhashed);
-
- return dentry;
+ if (active)
+ return active;
}
- if (unhashed)
- return unhashed;
-
return NULL;
}
if (!ino)
return -ENOMEM;
- spin_lock(&sbi->lookup_lock);
- if (!list_empty(&ino->active))
- list_del_init(&ino->active);
- spin_unlock(&sbi->lookup_lock);
-
ino->size = strlen(symname);
cp = kmalloc(ino->size + 1, GFP_KERNEL);
if (!cp) {
dir->i_mtime = CURRENT_TIME;
spin_lock(&dcache_lock);
- spin_lock(&sbi->lookup_lock);
- if (list_empty(&ino->expiring))
- list_add(&ino->expiring, &sbi->expiring_list);
- spin_unlock(&sbi->lookup_lock);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
return -ENOTEMPTY;
}
- spin_lock(&sbi->lookup_lock);
- if (list_empty(&ino->expiring))
- list_add(&ino->expiring, &sbi->expiring_list);
- spin_unlock(&sbi->lookup_lock);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
if (!ino)
return -ENOMEM;
- spin_lock(&sbi->lookup_lock);
- if (!list_empty(&ino->active))
- list_del_init(&ino->active);
- spin_unlock(&sbi->lookup_lock);
-
inode = autofs4_get_inode(dir->i_sb, ino);
if (!inode) {
if (!dentry->d_fsdata)
* If we don't support core dumping, then supply a NULL so we
* don't even try.
*/
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+#ifdef CONFIG_ELF_CORE
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
#else
#define elf_core_dump NULL
return error;
}
-/*
- * Note that some platforms still use traditional core dumps and not
- * the ELF core dump. Each platform can select it as appropriate.
- */
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-
+#ifdef CONFIG_ELF_CORE
/*
* ELF core dumper
*
return has_dumped;
}
-#endif /* USE_ELF_CORE_DUMP */
+#endif /* CONFIG_ELF_CORE */
static int __init init_elf_binfmt(void)
{
static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *,
struct file *, struct mm_struct *);
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+#ifdef CONFIG_ELF_CORE
static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *, unsigned long limit);
#endif
static struct linux_binfmt elf_fdpic_format = {
.module = THIS_MODULE,
.load_binary = load_elf_fdpic_binary,
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+#ifdef CONFIG_ELF_CORE
.core_dump = elf_fdpic_core_dump,
#endif
.min_coredump = ELF_EXEC_PAGESIZE,
*
* Modelled on fs/binfmt_elf.c core dumper
*/
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+#ifdef CONFIG_ELF_CORE
/*
* These are the only things you should do on a core-file: use only these
#undef NUM_NOTES
}
-#endif /* USE_ELF_CORE_DUMP */
+#endif /* CONFIG_ELF_CORE */
*
* If blkfactor is zero then the user's request was aligned to the filesystem's
* blocksize.
- *
- * lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
- * This determines whether we need to do the fancy locking which prevents
- * direct-IO from being able to read uninitialised disk blocks. If its zero
- * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_mutex is
- * not held for the entire direct write (taken briefly, initially, during a
- * direct read though, but its never held for the duration of a direct-IO).
*/
struct dio {
struct inode *inode;
int rw;
loff_t i_size; /* i_size when submitted */
- int lock_type; /* doesn't change */
+ int flags; /* doesn't change */
unsigned blkbits; /* doesn't change */
unsigned blkfactor; /* When we're using an alignment which
is finer than the filesystem's soft
unsigned cur_page_len; /* Nr of bytes at cur_page_offset */
sector_t cur_page_block; /* Where it starts */
+ /* BIO completion state */
+ spinlock_t bio_lock; /* protects BIO fields below */
+ unsigned long refcount; /* direct_io_worker() and bios */
+ struct bio *bio_list; /* singly linked via bi_private */
+ struct task_struct *waiter; /* waiting task (NULL if none) */
+
+ /* AIO related stuff */
+ struct kiocb *iocb; /* kiocb */
+ int is_async; /* is IO async ? */
+ int io_error; /* IO error in completion path */
+ ssize_t result; /* IO result */
+
/*
* Page fetching state. These variables belong to dio_refill_pages().
*/
* Page queue. These variables belong to dio_refill_pages() and
* dio_get_page().
*/
- struct page *pages[DIO_PAGES]; /* page buffer */
unsigned head; /* next page to process */
unsigned tail; /* last valid page + 1 */
int page_errors; /* errno from get_user_pages() */
- /* BIO completion state */
- spinlock_t bio_lock; /* protects BIO fields below */
- unsigned long refcount; /* direct_io_worker() and bios */
- struct bio *bio_list; /* singly linked via bi_private */
- struct task_struct *waiter; /* waiting task (NULL if none) */
-
- /* AIO related stuff */
- struct kiocb *iocb; /* kiocb */
- int is_async; /* is IO async ? */
- int io_error; /* IO error in completion path */
- ssize_t result; /* IO result */
+ /*
+ * pages[] (and any fields placed after it) are not zeroed out at
+ * allocation time. Don't add new fields after pages[] unless you
+ * wish that they not be zeroed.
+ */
+ struct page *pages[DIO_PAGES]; /* page buffer */
};
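The comment above documents a deliberate layout choice: everything before pages[] is cleared with one bounded memset() instead of kzalloc()ing the whole structure (see the kmalloc()/memset() change further down in this series). A minimal userspace sketch of the same partial-zeroing pattern, using hypothetical names (my_ctx, NBUF), not code from the patch:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define NBUF 64

struct my_ctx {
	int flags;		/* must start zeroed */
	void *cookie;		/* must start zeroed */
	char buf[NBUF];		/* filled before use; zeroing is wasted work */
};

static struct my_ctx *my_ctx_alloc(void)
{
	struct my_ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return NULL;
	/* zero everything up to, but not including, buf[] */
	memset(ctx, 0, offsetof(struct my_ctx, buf));
	return ctx;
}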
/*
if (dio->end_io && dio->result)
dio->end_io(dio->iocb, offset, transferred,
dio->map_bh.b_private);
- if (dio->lock_type == DIO_LOCKING)
+
+ if (dio->flags & DIO_LOCKING)
/* lockdep: non-owner release */
up_read_non_owner(&dio->inode->i_alloc_sem);
map_bh->b_state = 0;
map_bh->b_size = fs_count << dio->inode->i_blkbits;
+ /*
+ * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
+ * forbid block creations: only overwrites are permitted.
+ * We will return early to the caller once we see an
+ * unmapped buffer head returned, and the caller will fall
+ * back to buffered I/O.
+ *
+ * Otherwise the decision is left to the get_blocks method,
+ * which may decide to handle it or also return an unmapped
+ * buffer head.
+ */
create = dio->rw & WRITE;
- if (dio->lock_type == DIO_LOCKING) {
+ if (dio->flags & DIO_SKIP_HOLES) {
if (dio->block_in_file < (i_size_read(dio->inode) >>
dio->blkbits))
create = 0;
- } else if (dio->lock_type == DIO_NO_LOCKING) {
- create = 0;
}
- /*
- * For writes inside i_size we forbid block creations: only
- * overwrites are permitted. We fall back to buffered writes
- * at a higher level for inside-i_size block-instantiating
- * writes.
- */
ret = (*dio->get_block)(dio->inode, fs_startblk,
map_bh, create);
}
* we can let i_mutex go now that its achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
- if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
+ if (rw == READ && (dio->flags & DIO_LOCKING))
mutex_unlock(&dio->inode->i_mutex);
/*
/*
* This is a library function for use by filesystem drivers.
- * The locking rules are governed by the dio_lock_type parameter.
*
- * DIO_NO_LOCKING (no locking, for raw block device access)
- * For writes, i_mutex is not held on entry; it is never taken.
+ * The locking rules are governed by the flags parameter:
+ * - if the flags value contains DIO_LOCKING we use a fancy locking
+ * scheme for dumb filesystems.
+ * For writes this function is called under i_mutex and returns with
+ * i_mutex held, for reads, i_mutex is not held on entry, but it is
+ * taken and dropped again before returning.
+ * For reads and writes i_alloc_sem is taken in shared mode and released
+ * on I/O completion (which may happen asynchronously after returning to
+ * the caller).
*
- * DIO_LOCKING (simple locking for regular files)
- * For writes we are called under i_mutex and return with i_mutex held, even
- * though it is internally dropped.
- * For reads, i_mutex is not held on entry, but it is taken and dropped before
- * returning.
- *
- * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
- * uninitialised data, allowing parallel direct readers and writers)
- * For writes we are called without i_mutex, return without it, never touch it.
- * For reads we are called under i_mutex and return with i_mutex held, even
- * though it may be internally dropped.
- *
- * Additional i_alloc_sem locking requirements described inline below.
+ * - if the flags value does NOT contain DIO_LOCKING we don't use any
+ * internal locking but rather rely on the filesystem to synchronize
+ * direct I/O reads/writes versus each other and truncate.
+ * For reads and writes both i_mutex and i_alloc_sem are not held on
+ * entry and are never taken.
*/
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
- int dio_lock_type)
+ int flags)
{
int seg;
size_t size;
ssize_t retval = -EINVAL;
loff_t end = offset;
struct dio *dio;
- int release_i_mutex = 0;
- int acquire_i_mutex = 0;
if (rw & WRITE)
rw = WRITE_ODIRECT_PLUG;
}
}
- dio = kzalloc(sizeof(*dio), GFP_KERNEL);
+ dio = kmalloc(sizeof(*dio), GFP_KERNEL);
retval = -ENOMEM;
if (!dio)
goto out;
-
/*
- * For block device access DIO_NO_LOCKING is used,
- * neither readers nor writers do any locking at all
- * For regular files using DIO_LOCKING,
- * readers need to grab i_mutex and i_alloc_sem
- * writers need to grab i_alloc_sem only (i_mutex is already held)
- * For regular files using DIO_OWN_LOCKING,
- * neither readers nor writers take any locks here
+ * Believe it or not, zeroing out the page array caused a .5%
+ * performance regression in a database benchmark. So, we take
+ * care to only zero out what's needed.
*/
- dio->lock_type = dio_lock_type;
- if (dio_lock_type != DIO_NO_LOCKING) {
+ memset(dio, 0, offsetof(struct dio, pages));
+
+ dio->flags = flags;
+ if (dio->flags & DIO_LOCKING) {
/* watch out for a 0 len io from a tricksy fs */
if (rw == READ && end > offset) {
- struct address_space *mapping;
+ struct address_space *mapping =
+ iocb->ki_filp->f_mapping;
- mapping = iocb->ki_filp->f_mapping;
- if (dio_lock_type != DIO_OWN_LOCKING) {
- mutex_lock(&inode->i_mutex);
- release_i_mutex = 1;
- }
+ /* will be released by direct_io_worker */
+ mutex_lock(&inode->i_mutex);
retval = filemap_write_and_wait_range(mapping, offset,
end - 1);
if (retval) {
+ mutex_unlock(&inode->i_mutex);
kfree(dio);
goto out;
}
-
- if (dio_lock_type == DIO_OWN_LOCKING) {
- mutex_unlock(&inode->i_mutex);
- acquire_i_mutex = 1;
- }
}
- if (dio_lock_type == DIO_LOCKING)
- /* lockdep: not the owner will release it */
- down_read_non_owner(&inode->i_alloc_sem);
+ /*
+ * Will be released at I/O completion, possibly in a
+ * different thread.
+ */
+ down_read_non_owner(&inode->i_alloc_sem);
}
/*
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again for DIO_LOCKING.
- * NOTE: DIO_NO_LOCK/DIO_OWN_LOCK callers have to handle this by
- * it's own meaner.
+ *
+ * NOTE: filesystems with their own locking have to handle this
+ * on their own.
*/
- if (unlikely(retval < 0 && (rw & WRITE))) {
- loff_t isize = i_size_read(inode);
-
- if (end > isize && dio_lock_type == DIO_LOCKING)
- vmtruncate(inode, isize);
+ if (dio->flags & DIO_LOCKING) {
+ if (unlikely((rw & WRITE) && retval < 0)) {
+ loff_t isize = i_size_read(inode);
+ if (end > isize)
+ vmtruncate(inode, isize);
+ }
}
- if (rw == READ && dio_lock_type == DIO_LOCKING)
- release_i_mutex = 0;
-
out:
- if (release_i_mutex)
- mutex_unlock(&inode->i_mutex);
- else if (acquire_i_mutex)
- mutex_lock(&inode->i_mutex);
return retval;
}
EXPORT_SYMBOL(__blockdev_direct_IO);
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
#endif
- .fsync = simple_fsync,
+ .fsync = ext2_fsync,
};
extern const struct file_operations ext2_dir_operations;
/* file.c */
+extern int ext2_fsync(struct file *file, struct dentry *dentry, int datasync);
extern const struct inode_operations ext2_file_inode_operations;
extern const struct file_operations ext2_file_operations;
extern const struct file_operations ext2_xip_file_operations;
*/
#include <linux/time.h>
+#include <linux/pagemap.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
return 0;
}
+int ext2_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+ int ret;
+ struct super_block *sb = dentry->d_inode->i_sb;
+ struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+
+ ret = simple_fsync(file, dentry, datasync);
+ if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) {
+ /* We don't really know where the IO error happened... */
+ ext2_error(sb, __func__,
+ "detected IO error when writing metadata buffers");
+ ret = -EIO;
+ }
+ return ret;
+}
+
/*
* We have mostly NULL's here: the current defaults are ok for
* the ext2 filesystem.
.mmap = generic_file_mmap,
.open = generic_file_open,
.release = ext2_release_file,
- .fsync = simple_fsync,
+ .fsync = ext2_fsync,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
};
.mmap = xip_file_mmap,
.open = generic_file_open,
.release = ext2_release_file,
- .fsync = simple_fsync,
+ .fsync = ext2_fsync,
};
#endif
return ret;
}
+static void ext2_clear_super_error(struct super_block *sb)
+{
+ struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
+
+ if (buffer_write_io_error(sbh)) {
+ /*
+ * Oh, dear. A previous attempt to write the
+ * superblock failed. This could happen because the
+ * USB device was yanked out. Or it could happen to
+ * be a transient write error and maybe the block will
+ * be remapped. Nothing we can do but to retry the
+ * write and hope for the best.
+ */
+ printk(KERN_ERR "EXT2-fs: %s previous I/O error to "
+ "superblock detected", sb->s_id);
+ clear_buffer_write_io_error(sbh);
+ set_buffer_uptodate(sbh);
+ }
+}
+
static void ext2_commit_super (struct super_block * sb,
struct ext2_super_block * es)
{
+ ext2_clear_super_error(sb);
es->s_wtime = cpu_to_le32(get_seconds());
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
sb->s_dirt = 0;
static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
{
+ ext2_clear_super_error(sb);
es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
es->s_wtime = cpu_to_le32(get_seconds());
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
+#include <linux/time.h>
#include "fat.h"
/*
#define SECS_PER_MIN 60
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
-#define UNIX_SECS_1980 315532800L
-#if BITS_PER_LONG == 64
-#define UNIX_SECS_2108 4354819200L
-#endif
/* days between 1.1.70 and 1.1.80 (2 leap days) */
#define DAYS_DELTA (365 * 10 + 2)
/* 120 (2100 - 1980) isn't leap year */
void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts,
__le16 *time, __le16 *date, u8 *time_cs)
{
- time_t second = ts->tv_sec;
- time_t day, leap_day, month, year;
+ struct tm tm;
+ time_to_tm(ts->tv_sec, sbi->options.tz_utc ? 0 :
+ -sys_tz.tz_minuteswest * 60, &tm);
- if (!sbi->options.tz_utc)
- second -= sys_tz.tz_minuteswest * SECS_PER_MIN;
-
- /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */
- if (second < UNIX_SECS_1980) {
+ /* FAT can only support year between 1980 to 2107 */
+	/* FAT can only support years between 1980 and 2107 */
*time = 0;
*date = cpu_to_le16((0 << 9) | (1 << 5) | 1);
if (time_cs)
*time_cs = 0;
return;
}
-#if BITS_PER_LONG == 64
- if (second >= UNIX_SECS_2108) {
+ if (tm.tm_year > 2107 - 1900) {
*time = cpu_to_le16((23 << 11) | (59 << 5) | 29);
*date = cpu_to_le16((127 << 9) | (12 << 5) | 31);
if (time_cs)
*time_cs = 199;
return;
}
-#endif
- day = second / SECS_PER_DAY - DAYS_DELTA;
- year = day / 365;
- leap_day = (year + 3) / 4;
- if (year > YEAR_2100) /* 2100 isn't leap year */
- leap_day--;
- if (year * 365 + leap_day > day)
- year--;
- leap_day = (year + 3) / 4;
- if (year > YEAR_2100) /* 2100 isn't leap year */
- leap_day--;
- day -= year * 365 + leap_day;
-
- if (IS_LEAP_YEAR(year) && day == days_in_year[3]) {
- month = 2;
- } else {
- if (IS_LEAP_YEAR(year) && day > days_in_year[3])
- day--;
- for (month = 1; month < 12; month++) {
- if (days_in_year[month + 1] > day)
- break;
- }
- }
- day -= days_in_year[month];
+ /* from 1900 -> from 1980 */
+ tm.tm_year -= 80;
+ /* 0~11 -> 1~12 */
+ tm.tm_mon++;
+ /* 0~59 -> 0~29(2sec counts) */
+ tm.tm_sec >>= 1;
- *time = cpu_to_le16(((second / SECS_PER_HOUR) % 24) << 11
- | ((second / SECS_PER_MIN) % 60) << 5
- | (second % SECS_PER_MIN) >> 1);
- *date = cpu_to_le16((year << 9) | (month << 5) | (day + 1));
+ *time = cpu_to_le16(tm.tm_hour << 11 | tm.tm_min << 5 | tm.tm_sec);
+ *date = cpu_to_le16(tm.tm_year << 9 | tm.tm_mon << 5 | tm.tm_mday);
if (time_cs)
*time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000;
}
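The packing in the two cpu_to_le16() calls above follows the on-disk FAT layout: the date word holds years-since-1980 in bits 15..9, month in bits 8..5 and day in bits 4..0, while the time word holds hours in bits 15..11, minutes in bits 10..5 and 2-second units in bits 4..0. A standalone illustration (not part of the patch) for 2009-12-31 23:59:58:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned year = 2009 - 1980;	/* 29, years since 1980 */
	unsigned mon = 12, day = 31;
	unsigned hour = 23, min = 59, sec2 = 58 >> 1;	/* 2-second units */

	uint16_t date = year << 9 | mon << 5 | day;	/* 0x3B9F */
	uint16_t time = hour << 11 | min << 5 | sec2;	/* 0xBF7D */

	printf("date=%#x time=%#x\n", date, time);
	return 0;
}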
}
return err;
}
-
*/
static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
{
- struct fscache_object *pobj, *obj, *minobj = NULL;
+ struct fscache_object *pobj, *obj = NULL, *minobj = NULL;
struct rb_node *p;
unsigned long pos;
#include <linux/magic.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
+#include <linux/bitmap.h>
/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
{
struct quad_buffer_head qbh;
- unsigned *bits;
- unsigned i, count;
- if (!(bits = hpfs_map_4sectors(s, secno, &qbh, 4))) return 0;
- count = 0;
- for (i = 0; i < 2048 / sizeof(unsigned); i++) {
- unsigned b;
- if (!bits[i]) continue;
- for (b = bits[i]; b; b>>=1) count += b & 1;
- }
+ unsigned long *bits;
+ unsigned count;
+
+ bits = hpfs_map_4sectors(s, secno, &qbh, 4);
+ if (!bits)
+ return 0;
+ count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
hpfs_brelse4(&qbh);
return count;
}
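bitmap_weight() replaces the open-coded shift-and-add loop with the kernel's optimized population count over an array of unsigned longs. A hedged sketch of what it computes, written here with the GCC builtin rather than the kernel's hweight helpers:

/* Not the kernel implementation; the caller must supply enough words. */
static unsigned long my_bitmap_weight(const unsigned long *map, unsigned bits)
{
	unsigned long w = 0;
	unsigned i, rem = bits % (8 * sizeof(long));

	for (i = 0; i < bits / (8 * sizeof(long)); i++)
		w += __builtin_popcountl(map[i]);	/* whole words */
	if (rem)					/* trailing partial word */
		w += __builtin_popcountl(map[i] & ((1UL << rem) - 1));
	return w;
}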
*
* called like this: dio->get_blocks(dio->inode, fs_startblk,
* fs_count, map_bh, dio->rw == WRITE);
+ *
+ * Note that we never bother to allocate blocks here, and thus ignore the
+ * create argument.
*/
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
- /*
- * Any write past EOF is not allowed because we'd be extending.
- */
- if (create && (iblock + max_blocks) > inode_blocks) {
- ret = -EIO;
- goto bail;
- }
-
/* This figures out the size of the next contiguous block, and
* our logical offset */
ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
goto bail;
}
- if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)) && !p_blkno && create) {
- ocfs2_error(inode->i_sb,
- "Inode %llu has a hole at block %llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (unsigned long long)iblock);
- ret = -EROFS;
- goto bail;
- }
-
/* We should already CoW the refcounted extent. */
BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
/*
*/
if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
map_bh(bh_result, inode->i_sb, p_blkno);
- else {
- /*
- * ocfs2_prepare_inode_for_write() should have caught
- * the case where we'd be filling a hole and triggered
- * a buffered write instead.
- */
- if (create) {
- ret = -EIO;
- mlog_errno(ret);
- goto bail;
- }
-
+ else
clear_buffer_mapped(bh_result);
- }
/* make sure we don't map more than max_blocks blocks here as
that's all the kernel will handle at this point. */
#endif
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+#ifdef CONFIG_ELF_CORE
static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
#ifdef CONFIG_FAULT_INJECTION
REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
#endif
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+#ifdef CONFIG_ELF_CORE
REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
unsigned int ino;
ino = de->low_ino;
- de_get(de);
+ pde_get(de);
spin_unlock(&proc_subdir_lock);
error = -EINVAL;
inode = proc_get_inode(dir->i_sb, ino, de);
return NULL;
}
if (de)
- de_put(de);
+ pde_put(de);
return ERR_PTR(error);
}
struct proc_dir_entry *next;
/* filldir passes info to user space */
- de_get(de);
+ pde_get(de);
spin_unlock(&proc_subdir_lock);
if (filldir(dirent, de->name, de->namelen, filp->f_pos,
de->low_ino, de->mode >> 12) < 0) {
- de_put(de);
+ pde_put(de);
goto out;
}
spin_lock(&proc_subdir_lock);
filp->f_pos++;
next = de->next;
- de_put(de);
+ pde_put(de);
de = next;
} while (de);
spin_unlock(&proc_subdir_lock);
return NULL;
}
-void free_proc_entry(struct proc_dir_entry *de)
+static void free_proc_entry(struct proc_dir_entry *de)
{
unsigned int ino = de->low_ino;
kfree(de);
}
+void pde_put(struct proc_dir_entry *pde)
+{
+ if (atomic_dec_and_test(&pde->count))
+ free_proc_entry(pde);
+}
+
/*
* Remove a /proc entry and free it if it's not currently in use.
*/
WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
"'%s/%s', leaking at least '%s'\n", __func__,
de->parent->name, de->name, de->subdir->name);
- if (atomic_dec_and_test(&de->count))
- free_proc_entry(de);
+ pde_put(de);
}
#include "internal.h"
-struct proc_dir_entry *de_get(struct proc_dir_entry *de)
-{
- atomic_inc(&de->count);
- return de;
-}
-
-/*
- * Decrements the use count and checks for deferred deletion.
- */
-void de_put(struct proc_dir_entry *de)
-{
- if (!atomic_read(&de->count)) {
- printk("de_put: entry %s already free!\n", de->name);
- return;
- }
-
- if (atomic_dec_and_test(&de->count))
- free_proc_entry(de);
-}
-
-/*
- * Decrement the use count of the proc_dir_entry.
- */
static void proc_delete_inode(struct inode *inode)
{
struct proc_dir_entry *de;
/* Let go of any associated proc directory entry */
de = PROC_I(inode)->pde;
if (de)
- de_put(de);
+ pde_put(de);
if (PROC_I(inode)->sysctl)
sysctl_head_put(PROC_I(inode)->sysctl);
clear_inode(inode);
}
unlock_new_inode(inode);
} else
- de_put(de);
+ pde_put(de);
return inode;
}
s->s_op = &proc_sops;
s->s_time_gran = 1;
- de_get(&proc_root);
+ pde_get(&proc_root);
root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
if (!root_inode)
goto out_no_root;
out_no_root:
printk("proc_read_super: get root inode failed\n");
iput(root_inode);
- de_put(&proc_root);
+ pde_put(&proc_root);
return -ENOMEM;
}
extern const struct file_operations proc_net_operations;
extern const struct inode_operations proc_net_inode_operations;
-void free_proc_entry(struct proc_dir_entry *de);
-
void proc_init_inodecache(void);
static inline struct pid *proc_pid(struct inode *inode)
int task_statm(struct mm_struct *, int *, int *, int *, int *);
void task_mem(struct seq_file *, struct mm_struct *);
-struct proc_dir_entry *de_get(struct proc_dir_entry *de);
-void de_put(struct proc_dir_entry *de);
+static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
+{
+ atomic_inc(&pde->count);
+ return pde;
+}
+void pde_put(struct proc_dir_entry *pde);
extern struct vfsmount *proc_mnt;
int proc_fill_super(struct super_block *);
#include <linux/bitops.h>
#include "qnx4.h"
-#if 0
-int qnx4_new_block(struct super_block *sb)
-{
- return 0;
-}
-#endif /* 0 */
-
static void count_bits(register const char *bmPart, register int size,
int *const tf)
{
}
do {
b = *bmPart++;
- if ((b & 1) == 0)
- tot++;
- if ((b & 2) == 0)
- tot++;
- if ((b & 4) == 0)
- tot++;
- if ((b & 8) == 0)
- tot++;
- if ((b & 16) == 0)
- tot++;
- if ((b & 32) == 0)
- tot++;
- if ((b & 64) == 0)
- tot++;
- if ((b & 128) == 0)
- tot++;
+ tot += 8 - hweight8(b);
size--;
} while (size != 0);
*tf = tot;
result = sb_getblk(inode->i_sb, nr);
return result;
}
- if (!create) {
- return NULL;
- }
-#if 0
- tmp = qnx4_new_block(inode->i_sb);
- if (!tmp) {
- return NULL;
- }
- result = sb_getblk(inode->i_sb, tmp);
- if (tst) {
- qnx4_free_block(inode->i_sb, tmp);
- brelse(result);
- goto repeat;
- }
- tst = tmp;
-#endif
- inode->i_ctime = CURRENT_TIME_SEC;
- mark_inode_dirty(inode);
- return result;
+ return NULL;
}
struct buffer_head *qnx4_bread(struct inode *inode, int block, int create)
if ( phys ) {
// logical block is before EOF
map_bh(bh, inode->i_sb, phys);
- } else if ( create ) {
- // to be done.
}
return 0;
}
reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \
super.o prints.o objectid.o lbalance.o ibalance.o stree.o \
hashes.o tail_conversion.o journal.o resize.o \
- item_ops.o ioctl.o procfs.o xattr.o lock.o
+ item_ops.o ioctl.o xattr.o lock.o
+
+ifeq ($(CONFIG_REISERFS_PROC_INFO),y)
+reiserfs-objs += procfs.o
+endif
ifeq ($(CONFIG_REISERFS_FS_XATTR),y)
reiserfs-objs += xattr_user.o xattr_trusted.o
#include <linux/init.h>
#include <linux/proc_fs.h>
-#ifdef CONFIG_REISERFS_PROC_INFO
-
/*
* LOCKING:
*
return 0;
}
-int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
- int count, int *eof, void *data)
-{
- *start = buffer;
- *eof = 1;
- return 0;
-}
-
#define SF( x ) ( r -> x )
#define SFP( x ) SF( s_proc_info_data.x )
#define SFPL( x ) SFP( x[ level ] )
return 0;
}
-struct proc_dir_entry *reiserfs_proc_register_global(char *name,
- read_proc_t * func)
-{
- return (proc_info_root) ? create_proc_read_entry(name, 0,
- proc_info_root,
- func, NULL) : NULL;
-}
-
-void reiserfs_proc_unregister_global(const char *name)
-{
- remove_proc_entry(name, proc_info_root);
-}
-
int reiserfs_proc_info_global_init(void)
{
if (proc_info_root == NULL) {
}
return 0;
}
-
-/* REISERFS_PROC_INFO */
-#else
-
-int reiserfs_proc_info_init(struct super_block *sb)
-{
- return 0;
-}
-int reiserfs_proc_info_done(struct super_block *sb)
-{
- return 0;
-}
-
-struct proc_dir_entry *reiserfs_proc_register_global(char *name,
- read_proc_t * func)
-{
- return NULL;
-}
-
-void reiserfs_proc_unregister_global(const char *name)
-{;
-}
-
-int reiserfs_proc_info_global_init(void)
-{
- return 0;
-}
-int reiserfs_proc_info_global_done(void)
-{
- return 0;
-}
-
-int reiserfs_global_version_in_proc(char *buffer, char **start,
- off_t offset,
- int count, int *eof, void *data)
-{
- return 0;
-}
-
-/* REISERFS_PROC_INFO */
-#endif
-
/*
* Revision 1.1.8.2 2001/07/15 17:08:42 god
* . use get_super() in procfs.c
}
reiserfs_proc_info_global_init();
- reiserfs_proc_register_global("version",
- reiserfs_global_version_in_proc);
ret = register_filesystem(&reiserfs_fs_type);
return 0;
}
- reiserfs_proc_unregister_global("version");
reiserfs_proc_info_global_done();
destroy_inodecache();
static void __exit exit_reiserfs_fs(void)
{
- reiserfs_proc_unregister_global("version");
reiserfs_proc_info_global_done();
unregister_filesystem(&reiserfs_fs_type);
destroy_inodecache();
return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}
-ino_t ufs_inode_by_name(struct inode *dir, struct dentry *dentry)
+ino_t ufs_inode_by_name(struct inode *dir, struct qstr *qstr)
{
ino_t res = 0;
struct ufs_dir_entry *de;
struct page *page;
- de = ufs_find_entry(dir, dentry, &page);
+ de = ufs_find_entry(dir, qstr, &page);
if (de) {
res = fs32_to_cpu(dir->i_sb, de->d_ino);
ufs_put_page(page);
* (as a parameter - res_dir). Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
-struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry,
+struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct qstr *qstr,
struct page **res_page)
{
struct super_block *sb = dir->i_sb;
- const char *name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
+ const char *name = qstr->name;
+ int namelen = qstr->len;
unsigned reclen = UFS_DIR_REC_LEN(namelen);
unsigned long start, n;
unsigned long npages = ufs_dir_pages(dir);
return ERR_PTR(-ENAMETOOLONG);
lock_kernel();
- ino = ufs_inode_by_name(dir, dentry);
+ ino = ufs_inode_by_name(dir, &dentry->d_name);
if (ino) {
inode = ufs_iget(dir->i_sb, ino);
if (IS_ERR(inode)) {
struct page *page;
int err = -ENOENT;
- de = ufs_find_entry(dir, dentry, &page);
+ de = ufs_find_entry(dir, &dentry->d_name, &page);
if (!de)
goto out;
struct ufs_dir_entry *old_de;
int err = -ENOENT;
- old_de = ufs_find_entry(old_dir, old_dentry, &old_page);
+ old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_de)
goto out;
goto out_dir;
err = -ENOENT;
- new_de = ufs_find_entry(new_dir, new_dentry, &new_page);
+ new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
if (!new_de)
goto out_dir;
inode_inc_link_count(old_inode);
*/
+#include <linux/exportfs.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include "swab.h"
#include "util.h"
+static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation)
+{
+ struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+ struct inode *inode;
+
+ if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg)
+ return ERR_PTR(-ESTALE);
+
+ inode = ufs_iget(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (generation && inode->i_generation != generation) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
+ return inode;
+}
+
+static struct dentry *ufs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ufs_nfs_get_inode);
+}
+
+static struct dentry *ufs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type, ufs_nfs_get_inode);
+}
+
+static struct dentry *ufs_get_parent(struct dentry *child)
+{
+ struct qstr dot_dot = {
+ .name = "..",
+ .len = 2,
+ };
+ ino_t ino;
+
+ ino = ufs_inode_by_name(child->d_inode, &dot_dot);
+ if (!ino)
+ return ERR_PTR(-ENOENT);
+ return d_obtain_alias(ufs_iget(child->d_inode->i_sb, ino));
+}
+
+static const struct export_operations ufs_export_ops = {
+ .fh_to_dentry = ufs_fh_to_dentry,
+ .fh_to_parent = ufs_fh_to_parent,
+ .get_parent = ufs_get_parent,
+};
+
#ifdef CONFIG_UFS_DEBUG
/*
* Print contents of ufs_super_block, useful for debugging
* Read ufs_super_block into internal data structures
*/
sb->s_op = &ufs_super_ops;
+ sb->s_export_op = &ufs_export_ops;
sb->dq_op = NULL; /***/
sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic);
/* dir.c */
extern const struct inode_operations ufs_dir_inode_operations;
extern int ufs_add_link (struct dentry *, struct inode *);
-extern ino_t ufs_inode_by_name(struct inode *, struct dentry *);
+extern ino_t ufs_inode_by_name(struct inode *, struct qstr *);
extern int ufs_make_empty(struct inode *, struct inode *);
-extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct dentry *, struct page **);
+extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct qstr *, struct page **);
extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
extern int ufs_empty_dir (struct inode *);
extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
bdev = xfs_find_bdev_for_inode(XFS_I(inode));
- if (rw == WRITE) {
- iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
- ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
- bdev, iov, offset, nr_segs,
- xfs_get_blocks_direct,
- xfs_end_io_direct);
- } else {
- iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
- ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
- bdev, iov, offset, nr_segs,
- xfs_get_blocks_direct,
- xfs_end_io_direct);
- }
+ iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
+ IOMAP_UNWRITTEN : IOMAP_READ);
+
+ ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
+ offset, nr_segs,
+ xfs_get_blocks_direct,
+ xfs_end_io_direct);
if (unlikely(ret != -EIOCBQUEUED && iocb->private))
xfs_destroy_ioend(iocb->private);
extern int gpio_export(unsigned gpio, bool direction_may_change);
extern int gpio_export_link(struct device *dev, const char *name,
unsigned gpio);
+extern int gpio_sysfs_set_active_low(unsigned gpio, int value);
extern void gpio_unexport(unsigned gpio);
#endif /* CONFIG_GPIO_SYSFS */
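gpio_sysfs_set_active_low() lets a driver flip the polarity reported through the sysfs value file of an exported pin. A hedged usage sketch, not from the patch; GPIO 42 is an arbitrary example:

static int example_export_active_low(void)
{
	int err;

	err = gpio_request(42, "example");
	if (err)
		return err;
	err = gpio_direction_input(42);
	if (err)
		goto out;
	err = gpio_export(42, false);	/* no direction changes via sysfs */
	if (err)
		goto out;
	/* report the line inverted in /sys/class/gpio/gpio42/active_low */
	err = gpio_sysfs_set_active_low(42, 1);
out:
	if (err)
		gpio_free(42);
	return err;
}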
return -ENOSYS;
}
+static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ return -ENOSYS;
+}
+
static inline void gpio_unexport(unsigned gpio)
{
}
} ki_obj;
__u64 ki_user_data; /* user's data for completion */
- wait_queue_t ki_wait;
loff_t ki_pos;
void *private;
(x)->ki_dtor = NULL; \
(x)->ki_obj.tsk = tsk; \
(x)->ki_user_data = 0; \
- init_wait((&(x)->ki_wait)); \
} while (0)
#define AIO_RING_MAGIC 0xa10a10a1
static inline void exit_aio(struct mm_struct *mm) { }
#endif /* CONFIG_AIO */
-#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
-
static inline struct kiocb *list_kiocb(struct list_head *h)
{
return list_entry(h, struct kiocb, ki_list);
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
+extern void bitmap_set(unsigned long *map, int i, int len);
+extern void bitmap_clear(unsigned long *map, int start, int nr);
+extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask);
+
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
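The new area helpers combine naturally: bitmap_find_next_zero_area() locates a free run and bitmap_set() claims it. A hedged sketch (not from the patch) that claims four bits aligned to a multiple of four, using the signatures declared above:

DECLARE_BITMAP(map, 128);

static int claim_four(void)
{
	unsigned long pos;

	/* align_mask = alignment - 1, as in the iommu allocator */
	pos = bitmap_find_next_zero_area(map, 128, 0, 4, 3);
	if (pos >= 128)
		return -ENOSPC;
	bitmap_set(map, pos, 4);
	return pos;
}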
int lock_type);
enum {
- DIO_LOCKING = 1, /* need locking between buffered and direct access */
- DIO_NO_LOCKING, /* bdev; no locking at all between buffered/direct */
- DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */
+ /* need locking between buffered and direct access */
+ DIO_LOCKING = 0x01,
+
+ /* filesystem does not support filling holes */
+ DIO_SKIP_HOLES = 0x02,
};
static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_LOCKING);
+ nr_segs, get_block, end_io,
+ DIO_LOCKING | DIO_SKIP_HOLES);
}
static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_NO_LOCKING);
-}
-
-static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
- struct inode *inode, struct block_device *bdev, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs, get_block_t get_block,
- dio_iodone_t end_io)
-{
- return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_OWN_LOCKING);
+ nr_segs, get_block, end_io, 0);
}
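With the lock types replaced by ORable flags, a filesystem that needs something other than the two helpers above can pass its own combination to __blockdev_direct_IO() directly. A hedged sketch of a hypothetical ->direct_IO that wants the DIO_LOCKING scheme but can fill holes itself; myfs_get_block is an assumed helper, not part of the patch:

static int myfs_get_block(struct inode *, sector_t,
			  struct buffer_head *, int);

static ssize_t myfs_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* DIO_LOCKING without DIO_SKIP_HOLES: holes are handled in
	 * myfs_get_block rather than by falling back to buffered I/O */
	return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				    iov, offset, nr_segs,
				    myfs_get_block, NULL, DIO_LOCKING);
}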
#endif
return -EINVAL;
}
+static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -EINVAL;
+}
static inline void gpio_unexport(unsigned gpio)
{
extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
unsigned long shift,
unsigned long boundary_size);
-extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len);
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
unsigned long shift,
unsigned long boundary_size,
unsigned long align_mask);
-extern void iommu_area_free(unsigned long *map, unsigned long start,
- unsigned int nr);
extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
unsigned long io_page_size);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
-static inline resource_size_t resource_size(struct resource *res)
+static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
}
-static inline unsigned long resource_type(struct resource *res)
+static inline unsigned long resource_type(const struct resource *res)
{
return res->flags & IORESOURCE_TYPE_BITS;
}
/* default values */
#define DFLT_QUEUESMAX 256 /* max number of message queues */
#define DFLT_MSGMAX 10 /* max number of messages in each queue */
-#define HARD_MSGMAX (131072/sizeof(void *))
+#define HARD_MSGMAX (32768*sizeof(void *)/4)
#define DFLT_MSGSIZEMAX 8192 /* max message size */
#else
static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base);
+int crash_shrink_memory(unsigned long new_size);
+size_t crash_get_memory_size(void);
#else /* !CONFIG_KEXEC */
struct pt_regs;
#else /* !CONFIG_KSM */
-static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags)
-{
- return 0;
-}
-
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
return 0;
return 0;
}
+#ifdef CONFIG_MMU
+static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, int advice, unsigned long *vm_flags)
+{
+ return 0;
+}
+
static inline struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
+#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
#endif /* __LINUX_KSM_H */
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
enum lru_list from, enum lru_list to);
+
+/* For coalescing uncharges to reduce memcg overhead */
+extern void mem_cgroup_uncharge_start(void);
+extern void mem_cgroup_uncharge_end(void);
+
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
}
extern bool mem_cgroup_oom_called(struct task_struct *task);
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
+void mem_cgroup_update_file_mapped(struct page *page, int val);
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask, int nid,
int zid);
{
}
+static inline void mem_cgroup_uncharge_start(void)
+{
+}
+
+static inline void mem_cgroup_uncharge_end(void)
+{
+}
+
static inline void mem_cgroup_uncharge_page(struct page *page)
{
}
{
}
-static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
+static inline void mem_cgroup_update_file_mapped(struct page *page,
int val)
{
}
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/nodemask.h>
struct zonelist;
struct notifier_block;
extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
+extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+ int order, nodemask_t *mask);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
+TESTPCGFLAG(Locked, LOCK)
+
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
bit_spin_lock(PCG_LOCK, &pc->flags);
}
-static inline int trylock_page_cgroup(struct page_cgroup *pc)
-{
- return bit_spin_trylock(PCG_LOCK, &pc->flags);
-}
-
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
bit_spin_unlock(PCG_LOCK, &pc->flags);
{
return child->real_parent != child->parent;
}
-static inline void ptrace_link(struct task_struct *child,
- struct task_struct *new_parent)
-{
- if (unlikely(child->ptrace))
- __ptrace_link(child, new_parent);
-}
+
static inline void ptrace_unlink(struct task_struct *child)
{
if (unlikely(child->ptrace))
INIT_LIST_HEAD(&child->ptraced);
child->parent = child->real_parent;
child->ptrace = 0;
- if (unlikely(ptrace)) {
+ if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
child->ptrace = current->ptrace;
- ptrace_link(child, current->parent);
+ __ptrace_link(child, current->parent);
}
}
}
#endif /* arch_has_block_step */
+#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
+extern void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info);
+#else
+static inline void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+}
+#endif
+
#ifndef arch_ptrace_stop_needed
/**
* arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
struct treepath *path, struct reiserfs_dir_entry *de);
struct dentry *reiserfs_get_parent(struct dentry *);
-/* procfs.c */
-
-#if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO )
-#define REISERFS_PROC_INFO
-#else
-#undef REISERFS_PROC_INFO
-#endif
+#ifdef CONFIG_REISERFS_PROC_INFO
int reiserfs_proc_info_init(struct super_block *sb);
int reiserfs_proc_info_done(struct super_block *sb);
-struct proc_dir_entry *reiserfs_proc_register_global(char *name,
- read_proc_t * func);
-void reiserfs_proc_unregister_global(const char *name);
int reiserfs_proc_info_global_init(void);
int reiserfs_proc_info_global_done(void);
-int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
- int count, int *eof, void *data);
-
-#if defined( REISERFS_PROC_INFO )
#define PROC_EXP( e ) e
PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \
PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) )
#else
+static inline int reiserfs_proc_info_init(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_done(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_global_init(void)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_global_done(void)
+{
+ return 0;
+}
+
#define PROC_EXP( e )
#define VOID_V ( ( void ) 0 )
#define PROC_INFO_MAX( sb, field, value ) VOID_V
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
unsigned long stack_start;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+ struct memcg_batch_info {
+ int do_batch; /* incremented when batch uncharge started */
+ struct mem_cgroup *memcg; /* target memcg of uncharge */
+ unsigned long bytes; /* uncharged usage */
+ unsigned long memsw_bytes; /* uncharged mem+swap usage */
+ } memcg_batch;
+#endif
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
extern int do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
-extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
#define SEND_SIG_PRIV ((struct siginfo *) 1)
#define SEND_SIG_FORCED ((struct siginfo *) 2)
-static inline int is_si_special(const struct siginfo *info)
-{
- return info <= SEND_SIG_FORCED;
-}
-
/*
* True if we are on the alternate signal stack.
*/
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ struct list_head sem_pending; /* pending single-sop operations */
};
/* One sem_array data structure for each set of semaphores in the system. */
struct sem *sem_base; /* ptr to first semaphore in array */
struct list_head sem_pending; /* pending operations to be processed */
struct list_head list_id; /* undo requests on this array */
- unsigned long sem_nsems; /* no. of semaphores in array */
+ int sem_nsems; /* no. of semaphores in array */
+ int complex_count; /* pending complex operations */
};
/* One queue for each sleeping process in the system. */
struct sem_queue {
+ struct list_head simple_list; /* queue of pending operations */
struct list_head list; /* queue of pending operations */
struct task_struct *sleeper; /* this process */
struct sem_undo *undo; /* undo structure */
#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11)
#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15)
+#define SM501_SYSCTRL_2D_ENGINE_STATUS (1<<19)
+
/* miscellaneous control */
#define SM501_MISC_CONTROL (0x000004)
--- /dev/null
+/*
+ * timb_gpio.h timberdale FPGA GPIO driver, platform data definition
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_TIMB_GPIO_H
+#define _LINUX_TIMB_GPIO_H
+
+/**
+ * struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver
+ * @gpio_base The number of the first GPIO pin, set to -1 for
+ * dynamic number allocation.
+ * @nr_pins	Number of pins supported by the hardware (1-32)
+ * @irq_base	If IRQs are supported by the hardware, this is the
+ *		base number of the IRQs. One IRQ per pin will be used.
+ *		Set to -1 if IRQs are not supported.
+ */
+struct timbgpio_platform_data {
+ int gpio_base;
+ int nr_pins;
+ int irq_base;
+};
+
+#endif
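A hedged sketch (not part of the header) of how a board file might fill this in for 32 pins with dynamic GPIO numbering and no IRQ support:

static struct timbgpio_platform_data example_timbgpio_pdata = {
	.gpio_base = -1,	/* let gpiolib pick the base */
	.nr_pins   = 32,
	.irq_base  = -1,	/* IRQs not wired up */
};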
*/
static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
+ if (step) {
+ siginfo_t info;
+ user_single_step_siginfo(current, regs, &info);
+ force_sig_info(SIGTRAP, &info, current);
+ return;
+ }
+
ptrace_report_syscall(regs);
}
const char manu_name[10];
void *controller_data;
const char type[25];
+ void (*panel_power_ctrl)(int);
};
struct lcd_ctrl_config {
void msg_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &msg_ids(ns), freeque);
+ idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif
void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
+ idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif
key_t key = params->key;
int nsems = params->u.nsems;
int semflg = params->flg;
+ int i;
if (!nsems)
return -EINVAL;
ns->used_sems += nsems;
sma->sem_base = (struct sem *) &sma[1];
+
+ for (i = 0; i < nsems; i++)
+ INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+
+ sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
return result;
}
-/* Go through the pending queue for the indicated semaphore
- * looking for tasks that can be completed.
+/*
+ * Wake up a process waiting on the sem queue with a given error.
+ * The queue is invalid (may not be accessed) after the function returns.
*/
-static void update_queue (struct sem_array * sma)
+static void wake_up_sem_queue(struct sem_queue *q, int error)
{
- int error;
- struct sem_queue * q;
+ /*
+ * Hold preempt off so that we don't get preempted and have the
+ * wakee busy-wait until we're scheduled back on. We're holding
+	 * locks here so it may not strictly be needed; however, if the
+	 * locks become preemptible, this prevents such a problem.
+ */
+ preempt_disable();
+ q->status = IN_WAKEUP;
+ wake_up_process(q->sleeper);
+ /* hands-off: q can disappear immediately after writing q->status. */
+ smp_wmb();
+ q->status = error;
+ preempt_enable();
+}
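The IN_WAKEUP handshake only works because the sleeper spins until the waker publishes the final status; the queue entry typically lives on the sleeper's stack, so the waker must not touch it after that store. A hedged sketch of the consumer side (the real wait loop lives in semtimedop()):

static int my_wait_result(struct sem_queue *q)
{
	int error = q->status;

	/* busy-wait for the waker to replace IN_WAKEUP with the result */
	while (error == IN_WAKEUP) {
		cpu_relax();
		error = q->status;
	}
	return error;
}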
+
+static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+{
+ list_del(&q->list);
+ if (q->nsops == 1)
+ list_del(&q->simple_list);
+ else
+ sma->complex_count--;
+}
+
+
+/**
+ * update_queue(sma, semnum): Look for tasks that can be completed.
+ * @sma: semaphore array.
+ * @semnum: semaphore that was modified.
+ *
+ * update_queue must be called after a semaphore in a semaphore array
+ * was modified. If multiple semaphores were modified, then @semnum
+ * must be set to -1.
+ */
+static void update_queue(struct sem_array *sma, int semnum)
+{
+ struct sem_queue *q;
+ struct list_head *walk;
+ struct list_head *pending_list;
+ int offset;
+
+ /* if there are complex operations around, then knowing the semaphore
+ * that was modified doesn't help us. Assume that multiple semaphores
+ * were modified.
+ */
+ if (sma->complex_count)
+ semnum = -1;
+
+ if (semnum == -1) {
+ pending_list = &sma->sem_pending;
+ offset = offsetof(struct sem_queue, list);
+ } else {
+ pending_list = &sma->sem_base[semnum].sem_pending;
+ offset = offsetof(struct sem_queue, simple_list);
+ }
+
+again:
+ walk = pending_list->next;
+ while (walk != pending_list) {
+ int error, alter;
+
+ q = (struct sem_queue *)((char *)walk - offset);
+ walk = walk->next;
+
+ /* If we are scanning the single sop, per-semaphore list of
+ * one semaphore and that semaphore is 0, then it is not
+ * necessary to scan the "alter" entries: simple increments
+ * that affect only one entry succeed immediately and cannot
+ * be in the per semaphore pending queue, and decrements
+ * cannot be successful if the value is already 0.
+ */
+ if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
+ q->alter)
+ break;
- q = list_entry(sma->sem_pending.next, struct sem_queue, list);
- while (&q->list != &sma->sem_pending) {
error = try_atomic_semop(sma, q->sops, q->nsops,
q->undo, q->pid);
/* Does q->sleeper still need to sleep? */
- if (error <= 0) {
- struct sem_queue *n;
-
- /*
- * Continue scanning. The next operation
- * that must be checked depends on the type of the
- * completed operation:
- * - if the operation modified the array, then
- * restart from the head of the queue and
- * check for threads that might be waiting
- * for semaphore values to become 0.
- * - if the operation didn't modify the array,
- * then just continue.
- * The order of list_del() and reading ->next
- * is crucial: In the former case, the list_del()
- * must be done first [because we might be the
- * first entry in ->sem_pending], in the latter
- * case the list_del() must be done last
- * [because the list is invalid after the list_del()]
- */
- if (q->alter) {
- list_del(&q->list);
- n = list_entry(sma->sem_pending.next,
- struct sem_queue, list);
- } else {
- n = list_entry(q->list.next, struct sem_queue,
- list);
- list_del(&q->list);
- }
-
- /* wake up the waiting thread */
- q->status = IN_WAKEUP;
+ if (error > 0)
+ continue;
- wake_up_process(q->sleeper);
- /* hands-off: q will disappear immediately after
- * writing q->status.
- */
- smp_wmb();
- q->status = error;
- q = n;
- } else {
- q = list_entry(q->list.next, struct sem_queue, list);
- }
+ unlink_queue(sma, q);
+
+ /*
+ * The next operation that must be checked depends on the type
+ * of the completed operation:
+ * - if the operation modified the array, then restart from the
+ * head of the queue and check for threads that might be
+ * waiting for the new semaphore values.
+ * - if the operation didn't modify the array, then just
+ * continue.
+ */
+ alter = q->alter;
+ wake_up_sem_queue(q, error);
+ if (alter && !error)
+ goto again;
}
}
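The walker above recovers the sem_queue from whichever list_head it is embedded in by subtracting a run-time offsetof(); this is container_of() generalized to a member chosen at run time. A hedged one-function sketch of the trick:

/* Equivalent to container_of(walk, struct sem_queue, <member>) with the
 * member (list or simple_list) selected at run time via its offset. */
static struct sem_queue *queue_from(struct list_head *walk, size_t off)
{
	return (struct sem_queue *)((char *)walk - off);
}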
/* Wake up all pending processes and let them fail with EIDRM. */
list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
- list_del(&q->list);
-
- q->status = IN_WAKEUP;
- wake_up_process(q->sleeper); /* doesn't sleep */
- smp_wmb();
- q->status = -EIDRM; /* hands-off q */
+ unlink_queue(sma, q);
+ wake_up_sem_queue(q, -EIDRM);
}
/* Remove the semaphore set from the IDR */
static int semctl_nolock(struct ipc_namespace *ns, int semid,
int cmd, int version, union semun arg)
{
- int err = -EINVAL;
+ int err;
struct sem_array *sma;
switch(cmd) {
default:
return -EINVAL;
}
- return err;
out_unlock:
sem_unlock(sma);
return err;
}
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
- update_queue(sma);
+ update_queue(sma, -1);
err = 0;
goto out_unlock;
}
curr->sempid = task_tgid_vnr(current);
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
- update_queue(sma);
+ update_queue(sma, semnum);
err = 0;
goto out_unlock;
}
return 0;
}
-static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
- struct sem_undo *walk;
+ struct sem_undo *un;
- list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
- if (walk->semid == semid)
- return walk;
+ list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+ if (un->semid == semid)
+ return un;
}
return NULL;
}
+static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+{
+ struct sem_undo *un;
+
+ assert_spin_locked(&ulp->lock);
+
+ un = __lookup_undo(ulp, semid);
+ if (un) {
+ list_del_rcu(&un->list_proc);
+ list_add_rcu(&un->list_proc, &ulp->list_proc);
+ }
+ return un;
+}
+
/**
* find_alloc_undo - Lookup (and if not present create) undo array
* @ns: namespace
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
if (alter && error == 0)
- update_queue (sma);
+ update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1);
+
goto out_unlock_free;
}
else
list_add(&queue.list, &sma->sem_pending);
+ if (nsops == 1) {
+ struct sem *curr;
+ curr = &sma->sem_base[sops->sem_num];
+
+ if (alter)
+ list_add_tail(&queue.simple_list, &curr->sem_pending);
+ else
+ list_add(&queue.simple_list, &curr->sem_pending);
+ } else {
+ INIT_LIST_HEAD(&queue.simple_list);
+ sma->complex_count++;
+ }
+
queue.status = -EINTR;
queue.sleeper = current;
current->state = TASK_INTERRUPTIBLE;
*/
if (timeout && jiffies_left == 0)
error = -EAGAIN;
- list_del(&queue.list);
+ unlink_queue(sma, &queue);
out_unlock_free:
sem_unlock(sma);
if (IS_ERR(sma))
continue;
- un = lookup_undo(ulp, semid);
+ un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
}
sma->sem_otime = get_seconds();
/* maybe some queued-up processes were waiting for this */
- update_queue(sma);
+ update_queue(sma, -1);
sem_unlock(sma);
call_rcu(&un->rcu, free_un);
struct sem_array *sma = it;
return seq_printf(s,
- "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
+ "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
sma->sem_perm.key,
sma->sem_perm.id,
sma->sem_perm.mode,
void shm_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
+ idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+ p->memcg_batch.do_batch = 0;
+ p->memcg_batch.memcg = NULL;
+#endif
p->bts = NULL;
p->sas_ss_sp = p->sas_ss_size = 0;
/*
- * Syscall tracing should be turned off in the child regardless
- * of CLONE_PTRACE.
+ * Syscall tracing and stepping should be turned off in the
+ * child regardless of CLONE_PTRACE.
*/
+ user_disable_single_step(p);
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
+#include <linux/swap.h>
#include <asm/page.h>
#include <asm/uaccess.h>
}
}
+size_t crash_get_memory_size(void)
+{
+ size_t size;
+ mutex_lock(&kexec_mutex);
+ size = crashk_res.end - crashk_res.start + 1;
+ mutex_unlock(&kexec_mutex);
+ return size;
+}
+
+static void free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+ init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+ free_page((unsigned long)__va(addr));
+ totalram_pages++;
+ }
+}
+
+int crash_shrink_memory(unsigned long new_size)
+{
+ int ret = 0;
+ unsigned long start, end;
+
+ mutex_lock(&kexec_mutex);
+
+ if (kexec_crash_image) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ start = crashk_res.start;
+ end = crashk_res.end;
+
+ if (new_size >= end - start + 1) {
+ ret = -EINVAL;
+ if (new_size == end - start + 1)
+ ret = 0;
+ goto unlock;
+ }
+
+ start = roundup(start, PAGE_SIZE);
+ end = roundup(start + new_size, PAGE_SIZE);
+
+ free_reserved_phys_range(end, crashk_res.end);
+
+ if (start == end) {
+ crashk_res.end = end;
+ release_resource(&crashk_res);
+ } else
+ crashk_res.end = end - 1;
+
+unlock:
+ mutex_unlock(&kexec_mutex);
+ return ret;
+}
+
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
size_t data_len)
{
}
KERNEL_ATTR_RO(kexec_crash_loaded);
+static ssize_t kexec_crash_size_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%zu\n", crash_get_memory_size());
+}
+static ssize_t kexec_crash_size_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long cnt;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &cnt))
+ return -EINVAL;
+
+ ret = crash_shrink_memory(cnt);
+ return ret < 0 ? ret : count;
+}
+KERNEL_ATTR_RW(kexec_crash_size);
+
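Note: the new attribute appears as /sys/kernel/kexec_crash_size. A minimal userspace sketch of using it (assumes no crash kernel is currently loaded, since crash_shrink_memory() then returns -ENOENT):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/kernel/kexec_crash_size", "r+");
        unsigned long size;

        if (!f) {
            perror("kexec_crash_size");
            return 1;
        }
        if (fscanf(f, "%lu", &size) == 1)
            printf("crashkernel reservation: %lu bytes\n", size);
        rewind(f);
        fprintf(f, "0\n");  /* shrink to nothing: freed pages return to the buddy allocator */
        fclose(f);
        return 0;
    }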
static ssize_t vmcoreinfo_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
#ifdef CONFIG_KEXEC
&kexec_loaded_attr.attr,
&kexec_crash_loaded_attr.attr,
+ &kexec_crash_size_attr.attr,
&vmcoreinfo_attr.attr,
#endif
NULL
* installing it:
*/
spin_lock_irq(&pidmap_lock);
- if (map->page)
- kfree(page);
- else
+ if (!map->page) {
map->page = page;
+ page = NULL;
+ }
spin_unlock_irq(&pidmap_lock);
+ kfree(page);
if (unlikely(!map->page))
break;
}
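Note: the rewrite above shortens the pidmap_lock critical section; the old code called kfree() while still holding the lock with interrupts disabled. The general idiom, as a hedged sketch (hypothetical 'shared' pointer and 'lock'): allocate optimistically outside the lock, publish under it, and free the loser's copy after unlocking, relying on kfree(NULL) being a no-op.

    new = kzalloc(sizeof(*new), GFP_KERNEL);
    spin_lock(&lock);
    if (!shared) {          /* we won the race: publish our allocation */
        shared = new;
        new = NULL;
    }
    spin_unlock(&lock);
    kfree(new);             /* NULL-safe: frees only the losing copy */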
for (type = 0; type < PIDTYPE_MAX; ++type)
INIT_HLIST_HEAD(&pid->tasks[type]);
+ upid = pid->numbers + ns->level;
spin_lock_irq(&pidmap_lock);
- for (i = ns->level; i >= 0; i--) {
- upid = &pid->numbers[i];
+ for ( ; upid >= pid->numbers; --upid)
hlist_add_head_rcu(&upid->pid_chain,
&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
- }
spin_unlock_irq(&pidmap_lock);
out:
relay_consume_bytes(rbuf, buf->private);
}
-static struct pipe_buf_operations relay_pipe_buf_ops = {
+static const struct pipe_buf_operations relay_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
*/
info->si_signo = sig;
info->si_errno = 0;
- info->si_code = 0;
+ info->si_code = SI_USER;
info->si_pid = 0;
info->si_uid = 0;
}
return 1;
}
+static inline int is_si_special(const struct siginfo *info)
+{
+ return info <= SEND_SIG_FORCED;
+}
+
+static inline bool si_fromuser(const struct siginfo *info)
+{
+ return info == SEND_SIG_NOINFO ||
+ (!is_si_special(info) && SI_FROMUSER(info));
+}
+
/*
* Bad permissions for sending the signal
* - the caller must hold at least the RCU read lock
if (!valid_signal(sig))
return -EINVAL;
- if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+ if (!si_fromuser(info))
return 0;
error = audit_signal_info(sig, t); /* Let audit system see the signal */
int from_ancestor_ns = 0;
#ifdef CONFIG_PID_NS
- if (!is_si_special(info) && SI_FROMUSER(info) &&
- task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
- from_ancestor_ns = 1;
+ from_ancestor_ns = si_fromuser(info) &&
+ !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif
return __send_signal(sig, info, t, group, from_ancestor_ns);
return ret;
}
-void
-force_sig_specific(int sig, struct task_struct *t)
-{
- force_sig_info(sig, SEND_SIG_FORCED, t);
-}
-
/*
* Nuke all other threads in the group.
*/
goto out_unlock;
}
pcred = __task_cred(p);
- if ((info == SEND_SIG_NOINFO ||
- (!is_si_special(info) && SI_FROMUSER(info))) &&
+ if (si_fromuser(info) &&
euid != pcred->suid && euid != pcred->uid &&
uid != pcred->suid && uid != pcred->uid) {
ret = -EPERM;
for (;;) {
struct k_sigaction *ka;
-
- if (unlikely(signal->group_stop_count > 0) &&
- do_signal_stop(0))
- goto relock;
-
/*
* Tracing can induce an artificial signal and choose sigaction.
* The return value in @signr determines the default action,
if (unlikely(signr != 0))
ka = return_ka;
else {
+ if (unlikely(signal->group_stop_count > 0) &&
+ do_signal_stop(0))
+ goto relock;
+
signr = dequeue_signal(current, ¤t->blocked,
info);
* source time
*/
sample.offset =
- ktime_to_ns(ktime_add(end, start)) / 2 -
+ (ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
ts;
/* simple insertion sort based on duration */
__free_page(spd->pages[idx]);
}
-static struct pipe_buf_operations tracing_pipe_buf_ops = {
+static const struct pipe_buf_operations tracing_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
}
/* Pipe buffer operations for a buffer. */
-static struct pipe_buf_operations buffer_pipe_buf_ops = {
+static const struct pipe_buf_operations buffer_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
}
EXPORT_SYMBOL(__bitmap_weight);
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
+
+void bitmap_set(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ nr -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+EXPORT_SYMBOL(bitmap_set);
+
+void bitmap_clear(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+EXPORT_SYMBOL(bitmap_clear);
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offsets of all zero areas this function finds are multiples of that
+ * power of 2. An @align_mask of 0 means no alignment is required.
+ */
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
+{
+ unsigned long index, end, i;
+again:
+ index = find_next_zero_bit(map, size, start);
+
+ /* Align allocation */
+ index = __ALIGN_MASK(index, align_mask);
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
+EXPORT_SYMBOL(bitmap_find_next_zero_area);
+
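Note: together, the three helpers above form a small allocate/free protocol over a bitmap, which the genalloc and iommu-helper hunks below switch to. A hedged kernel-context sketch (hypothetical pool, error paths trimmed): find an aligned run of zero bits, claim it with bitmap_set(), release it with bitmap_clear().

    #include <linux/bitmap.h>
    #include <linux/errno.h>

    #define POOL_BITS 1024
    static DECLARE_BITMAP(pool, POOL_BITS);

    static int pool_alloc_run(void)
    {
        unsigned long idx;

        /* 8 zero bits, 16-bit aligned (align_mask is one less than a power of 2) */
        idx = bitmap_find_next_zero_area(pool, POOL_BITS, 0, 8, 15);
        if (idx >= POOL_BITS)
            return -ENOMEM;        /* failure: the returned end is past the bitmap */
        bitmap_set(pool, idx, 8);  /* claim the run */
        return idx;
    }

    static void pool_free_run(int idx)
    {
        bitmap_clear(pool, idx, 8);
    }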
/*
* Bitmap printing & parsing functions: first version by Bill Irwin,
* second version by Paul Jackson, third by Joe Korty.
*/
#include <linux/module.h>
+#include <linux/bitmap.h>
#include <linux/genalloc.h>
struct gen_pool_chunk *chunk;
unsigned long addr, flags;
int order = pool->min_alloc_order;
- int nbits, bit, start_bit, end_bit;
+ int nbits, start_bit, end_bit;
if (size == 0)
return 0;
end_bit -= nbits + 1;
spin_lock_irqsave(&chunk->lock, flags);
- bit = -1;
- while (bit + 1 < end_bit) {
- bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
- if (bit >= end_bit)
- break;
-
- start_bit = bit;
- if (nbits > 1) {
- bit = find_next_bit(chunk->bits, bit + nbits,
- bit + 1);
- if (bit - start_bit < nbits)
- continue;
- }
-
- addr = chunk->start_addr +
- ((unsigned long)start_bit << order);
- while (nbits--)
- __set_bit(start_bit++, chunk->bits);
+ start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
+ nbits, 0);
+ if (start_bit >= end_bit) {
spin_unlock_irqrestore(&chunk->lock, flags);
- read_unlock(&pool->lock);
- return addr;
+ continue;
}
+
+ addr = chunk->start_addr + ((unsigned long)start_bit << order);
+
+ bitmap_set(chunk->bits, start_bit, nbits);
spin_unlock_irqrestore(&chunk->lock, flags);
+ read_unlock(&pool->lock);
+ return addr;
}
read_unlock(&pool->lock);
return 0;
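Note: with the hunk above, gen_pool_alloc() delegates run-finding to bitmap_find_next_zero_area() instead of the open-coded scan. For context, a hedged usage sketch of the genalloc API being touched (kernel context assumed, error handling trimmed): a pool over a caller-provided region with 256-byte granularity.

    #include <linux/genalloc.h>
    #include <linux/errno.h>

    static struct gen_pool *pool;

    static int pool_setup(unsigned long region, size_t len)
    {
        pool = gen_pool_create(8, -1);     /* min_alloc_order 8: 256-byte chunks */
        if (!pool)
            return -ENOMEM;
        return gen_pool_add(pool, region, len, -1);
    }

    static void pool_demo(void)
    {
        unsigned long addr = gen_pool_alloc(pool, 1024);  /* four chunks */

        if (addr)
            gen_pool_free(pool, addr, 1024);
    }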
*/
#include <linux/module.h>
-#include <linux/bitops.h>
-
-static unsigned long find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
-{
- unsigned long index, end, i;
-again:
- index = find_next_zero_bit(map, size, start);
-
- /* Align allocation */
- index = (index + align_mask) & ~align_mask;
-
- end = index + nr;
- if (end >= size)
- return -1;
- for (i = index; i < end; i++) {
- if (test_bit(i, map)) {
- start = i+1;
- goto again;
- }
- }
- return index;
-}
-
-void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __set_bit(i, map);
- i++;
- }
-}
+#include <linux/bitmap.h>
int iommu_is_span_boundary(unsigned int index, unsigned int nr,
unsigned long shift,
unsigned long align_mask)
{
unsigned long index;
+
+ /* We don't want the last of the limit */
+ size -= 1;
again:
- index = find_next_zero_area(map, size, start, nr, align_mask);
- if (index != -1) {
+ index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+ if (index < size) {
if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
/* we could do more effectively */
start = index + 1;
goto again;
}
- iommu_area_reserve(map, index, nr);
+ bitmap_set(map, index, nr);
+ return index;
}
- return index;
+ return -1;
}
EXPORT_SYMBOL(iommu_area_alloc);
-void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
-{
- unsigned long end = start + nr;
-
- while (start < end) {
- __clear_bit(start, map);
- start++;
- }
-}
-EXPORT_SYMBOL(iommu_area_free);
-
unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
unsigned long io_page_size)
{
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
- if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
+ if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
/*
* The allocated memory isn't reachable by the device.
*/
dev_addr = swiotlb_virt_to_bus(hwdev, ret);
/* Confirm address can be DMA'd by device */
- if (dev_addr + size > dma_mask) {
+ if (dev_addr + size - 1 > dma_mask) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)dma_mask,
(unsigned long long)dev_addr);
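Note: the two "- 1" hunks above fix an off-by-one at the top of the DMA window. A worked instance: with dma_mask = 0xffffffff and a 0x1000-byte buffer at dev_addr = 0xfffff000, the last byte sits at dev_addr + size - 1 = 0xffffffff, which the device can address, yet the old test (dev_addr + size > dma_mask, i.e. 0x100000000 > 0xffffffff) rejected it.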
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
+#include <linux/cpu.h>
#include "internal.h"
#include <asm/uaccess.h>
#define do_swap_account (0)
#endif
-static DEFINE_MUTEX(memcg_tasklist); /* can be hold under cgroup_mutex */
#define SOFTLIMIT_EVENTS_THRESH (1000)
/*
*/
MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
- MEM_CGROUP_STAT_MAPPED_FILE, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */
static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
+static void drain_all_stock_async(void);
static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
task_unlock(task);
if (!curr)
return 0;
- if (curr->use_hierarchy)
+ /*
+ * We should check use_hierarchy of "mem", not "curr": checking "curr"
+ * here would make this function return true whenever hierarchy is
+ * enabled in "curr" and "curr" is a child of "mem" in the *cgroup*
+ * hierarchy (even if use_hierarchy is disabled in "mem").
+ */
+ if (mem->use_hierarchy)
ret = css_is_ancestor(&curr->css, &mem->css);
else
ret = (curr == mem);
static char memcg_name[PATH_MAX];
int ret;
- if (!memcg)
+ if (!memcg || !p)
return;
victim = mem_cgroup_select_victim(root_mem);
if (victim == root_mem) {
loop++;
+ if (loop >= 1)
+ drain_all_stock_async();
if (loop >= 2) {
/*
* If we have not been able to reclaim
* Currently used to update mapped file statistics, but the routine can be
* generalized to update other statistics as well.
*/
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+void mem_cgroup_update_file_mapped(struct page *page, int val)
{
struct mem_cgroup *mem;
struct mem_cgroup_stat *stat;
int cpu;
struct page_cgroup *pc;
- if (!page_is_file_cache(page))
- return;
-
pc = lookup_page_cgroup(page);
if (unlikely(!pc))
return;
stat = &mem->stat;
cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val);
done:
unlock_page_cgroup(pc);
}
/*
+ * size of first charge trial. "32" comes from vmscan.c's magic value.
+ * TODO: it may be necessary to use bigger numbers on big irons.
+ */
+#define CHARGE_SIZE (32 * PAGE_SIZE)
+struct memcg_stock_pcp {
+ struct mem_cgroup *cached; /* this is never the root cgroup */
+ int charge;
+ struct work_struct work;
+};
+static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
+static atomic_t memcg_drain_count;
+
+/*
+ * Try to consume stocked charge on this cpu. On success, PAGE_SIZE is consumed
+ * from the local stock and true is returned. If the stock is empty or holds
+ * charges from a cgroup other than the current target, false is returned and
+ * the stock will be refilled.
+ */
+static bool consume_stock(struct mem_cgroup *mem)
+{
+ struct memcg_stock_pcp *stock;
+ bool ret = true;
+
+ stock = &get_cpu_var(memcg_stock);
+ if (mem == stock->cached && stock->charge)
+ stock->charge -= PAGE_SIZE;
+ else /* need to call res_counter_charge */
+ ret = false;
+ put_cpu_var(memcg_stock);
+ return ret;
+}
+
+/*
+ * Return the charges cached in this percpu stock to the res_counter and
+ * reset the cached information.
+ */
+static void drain_stock(struct memcg_stock_pcp *stock)
+{
+ struct mem_cgroup *old = stock->cached;
+
+ if (stock->charge) {
+ res_counter_uncharge(&old->res, stock->charge);
+ if (do_swap_account)
+ res_counter_uncharge(&old->memsw, stock->charge);
+ }
+ stock->cached = NULL;
+ stock->charge = 0;
+}
+
+/*
+ * This must be called with preemption disabled, or by a thread pinned
+ * to the local cpu.
+ */
+static void drain_local_stock(struct work_struct *dummy)
+{
+ struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+ drain_stock(stock);
+}
+
+/*
+ * Cache charges (val), taken from the res_counter, in the local per-cpu
+ * area. They will be consumed later by consume_stock().
+ */
+static void refill_stock(struct mem_cgroup *mem, int val)
+{
+ struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+
+ if (stock->cached != mem) { /* reset if necessary */
+ drain_stock(stock);
+ stock->cached = mem;
+ }
+ stock->charge += val;
+ put_cpu_var(memcg_stock);
+}
+
+/*
+ * Try to drain the charges stocked on other cpus. This function is
+ * asynchronous and just queues one work item per cpu to drain locally on
+ * that cpu. The caller can expect some charges to come back to the
+ * res_counter later, but cannot wait for that.
+ */
+static void drain_all_stock_async(void)
+{
+ int cpu;
+ /* This function schedules "drain" asynchronously; its result is not
+ * handled directly by callers, so if someone is already draining we
+ * don't have to schedule another drain. The WORK_STRUCT_PENDING check
+ * in queue_work_on() will catch any race; a loose check here is enough.
+ */
+ if (atomic_read(&memcg_drain_count))
+ return;
+ /* Notify other cpus that system-wide "drain" is running */
+ atomic_inc(&memcg_drain_count);
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+ schedule_work_on(cpu, &stock->work);
+ }
+ put_online_cpus();
+ atomic_dec(&memcg_drain_count);
+ /* We don't wait for flush_work */
+}
+
+/* This is a synchronous drain interface. */
+static void drain_all_stock_sync(void)
+{
+ /* called when force_empty is called */
+ atomic_inc(&memcg_drain_count);
+ schedule_on_each_cpu(drain_local_stock);
+ atomic_dec(&memcg_drain_count);
+}
+
+static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
+ unsigned long action,
+ void *hcpu)
+{
+ int cpu = (unsigned long)hcpu;
+ struct memcg_stock_pcp *stock;
+
+ if (action != CPU_DEAD)
+ return NOTIFY_OK;
+ stock = &per_cpu(memcg_stock, cpu);
+ drain_stock(stock);
+ return NOTIFY_OK;
+}
+
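Note: a hedged sketch of the fast path the per-cpu stock enables in the charge loop below (names as in the code above):

    if (consume_stock(mem)) {
        /* per-cpu hit: neither the res_counter nor any shared lock is touched */
    } else {
        /* one bulk charge of CHARGE_SIZE against the res_counter ... */
        res_counter_charge(&mem->res, CHARGE_SIZE, &fail_res);
        /* ... and the surplus is cached locally for later single-page hits */
        refill_stock(mem, CHARGE_SIZE - PAGE_SIZE);
    }

With CHARGE_SIZE = 32 * PAGE_SIZE, the shared counter is touched roughly once per 32 single-page charges on each cpu in the common case.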
+/*
* Unlike the exported interface, an "oom" parameter is added. If oom==true,
* the oom-killer can be invoked.
*/
struct mem_cgroup *mem, *mem_over_limit;
int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct res_counter *fail_res;
+ int csize = CHARGE_SIZE;
if (unlikely(test_thread_flag(TIF_MEMDIE))) {
/* Don't account this! */
return 0;
VM_BUG_ON(css_is_removed(&mem->css));
+ if (mem_cgroup_is_root(mem))
+ goto done;
while (1) {
int ret = 0;
unsigned long flags = 0;
- if (mem_cgroup_is_root(mem))
- goto done;
- ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
+ if (consume_stock(mem))
+ goto charged;
+
+ ret = res_counter_charge(&mem->res, csize, &fail_res);
if (likely(!ret)) {
if (!do_swap_account)
break;
- ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
- &fail_res);
+ ret = res_counter_charge(&mem->memsw, csize, &fail_res);
if (likely(!ret))
break;
/* mem+swap counter fails */
- res_counter_uncharge(&mem->res, PAGE_SIZE);
+ res_counter_uncharge(&mem->res, csize);
flags |= MEM_CGROUP_RECLAIM_NOSWAP;
mem_over_limit = mem_cgroup_from_res_counter(fail_res,
memsw);
mem_over_limit = mem_cgroup_from_res_counter(fail_res,
res);
+ /* reduce request size and retry */
+ if (csize > PAGE_SIZE) {
+ csize = PAGE_SIZE;
+ continue;
+ }
if (!(gfp_mask & __GFP_WAIT))
goto nomem;
if (!nr_retries--) {
if (oom) {
- mutex_lock(&memcg_tasklist);
mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
- mutex_unlock(&memcg_tasklist);
record_last_oom(mem_over_limit);
}
goto nomem;
}
}
+ if (csize > PAGE_SIZE)
+ refill_stock(mem, csize - PAGE_SIZE);
+charged:
/*
* Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
* if they exceeds softlimit.
}
/*
+ * Sometimes we have to undo a charge we got by try_charge().
+ * This function does the uncharge and puts the css refcount
+ * taken by try_charge().
+ */
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+{
+ if (!mem_cgroup_is_root(mem)) {
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ if (do_swap_account)
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ }
+ css_put(&mem->css);
+}
+
+/*
* A helper function to get a mem_cgroup from an ID. Must be called under
* rcu_read_lock(). The caller must check css_is_removed() or similar if
* that is a concern. (dropping a refcnt from swap can be called against a removed
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
- if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- }
- css_put(&mem->css);
+ mem_cgroup_cancel_charge(mem);
return;
}
}
/**
- * mem_cgroup_move_account - move account of the page
+ * __mem_cgroup_move_account - move account of the page
* @pc: page_cgroup of the page.
* @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to.
*
* The caller must confirm following.
* - page is not on LRU (isolate_page() is useful.)
- *
- * returns 0 at success,
- * returns -EBUSY when lock is busy or "pc" is unstable.
+ * - the pc is locked, used, and ->mem_cgroup points to @from.
*
* This function does "uncharge" from old cgroup but doesn't do "charge" to
* new cgroup. It should be done by a caller.
*/
-static int mem_cgroup_move_account(struct page_cgroup *pc,
+static void __mem_cgroup_move_account(struct page_cgroup *pc,
struct mem_cgroup *from, struct mem_cgroup *to)
{
- struct mem_cgroup_per_zone *from_mz, *to_mz;
- int nid, zid;
- int ret = -EBUSY;
struct page *page;
int cpu;
struct mem_cgroup_stat *stat;
VM_BUG_ON(from == to);
VM_BUG_ON(PageLRU(pc->page));
-
- nid = page_cgroup_nid(pc);
- zid = page_cgroup_zid(pc);
- from_mz = mem_cgroup_zoneinfo(from, nid, zid);
- to_mz = mem_cgroup_zoneinfo(to, nid, zid);
-
- if (!trylock_page_cgroup(pc))
- return ret;
-
- if (!PageCgroupUsed(pc))
- goto out;
-
- if (pc->mem_cgroup != from)
- goto out;
+ VM_BUG_ON(!PageCgroupLocked(pc));
+ VM_BUG_ON(!PageCgroupUsed(pc));
+ VM_BUG_ON(pc->mem_cgroup != from);
if (!mem_cgroup_is_root(from))
res_counter_uncharge(&from->res, PAGE_SIZE);
mem_cgroup_charge_statistics(from, pc, false);
page = pc->page;
- if (page_is_file_cache(page) && page_mapped(page)) {
+ if (page_mapped(page) && !PageAnon(page)) {
cpu = smp_processor_id();
/* Update mapped_file data for mem_cgroup "from" */
stat = &from->stat;
cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
-1);
/* Update mapped_file data for mem_cgroup "to" */
stat = &to->stat;
cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
1);
}
css_get(&to->css);
pc->mem_cgroup = to;
mem_cgroup_charge_statistics(to, pc, true);
- ret = 0;
-out:
- unlock_page_cgroup(pc);
/*
* We charge against "to", which may not have any tasks; "to" can then
* be under rmdir(). But in the current implementation the only caller
* of this function is force_empty(), and it is guaranteed that
* "to" is never removed. So, we don't check rmdir status here.
*/
+}
+
+/*
+ * Check whether @pc is valid for moving the account, and if so call
+ * __mem_cgroup_move_account().
+ */
+static int mem_cgroup_move_account(struct page_cgroup *pc,
+ struct mem_cgroup *from, struct mem_cgroup *to)
+{
+ int ret = -EINVAL;
+ lock_page_cgroup(pc);
+ if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
+ __mem_cgroup_move_account(pc, from, to);
+ ret = 0;
+ }
+ unlock_page_cgroup(pc);
return ret;
}
if (!pcg)
return -EINVAL;
+ ret = -EBUSY;
+ if (!get_page_unless_zero(page))
+ goto out;
+ if (isolate_lru_page(page))
+ goto put;
parent = mem_cgroup_from_cont(pcg);
-
-
ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
if (ret || !parent)
- return ret;
-
- if (!get_page_unless_zero(page)) {
- ret = -EBUSY;
- goto uncharge;
- }
-
- ret = isolate_lru_page(page);
-
- if (ret)
- goto cancel;
+ goto put_back;
ret = mem_cgroup_move_account(pc, child, parent);
-
+ if (!ret)
+ css_put(&parent->css); /* drop extra refcnt by try_charge() */
+ else
+ mem_cgroup_cancel_charge(parent); /* does css_put */
+put_back:
putback_lru_page(page);
- if (!ret) {
- put_page(page);
- /* drop extra refcnt by try_charge() */
- css_put(&parent->css);
- return 0;
- }
-
-cancel:
+put:
put_page(page);
-uncharge:
- /* drop extra refcnt by try_charge() */
- css_put(&parent->css);
- /* uncharge if move fails */
- if (!mem_cgroup_is_root(parent)) {
- res_counter_uncharge(&parent->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&parent->memsw, PAGE_SIZE);
- }
+out:
return ret;
}
return;
if (!mem)
return;
- if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- }
- css_put(&mem->css);
+ mem_cgroup_cancel_charge(mem);
}
+static void
+__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
+{
+ struct memcg_batch_info *batch = NULL;
+ bool uncharge_memsw = true;
+ /* If swapout, usage of swap doesn't decrease */
+ if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+ uncharge_memsw = false;
+ /*
+ * do_batch > 0 when unmapping pages or inode invalidate/truncate.
+ * In those cases, all pages freed continuously can be expected to be in
+ * the same cgroup, and we have a chance to coalesce uncharges.
+ * But we uncharge one by one if the task is killed by OOM (TIF_MEMDIE),
+ * because we want to uncharge as soon as possible.
+ */
+ if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
+ goto direct_uncharge;
+
+ batch = ¤t->memcg_batch;
+ /*
+ * Usually we do css_get() when we remember a memcg pointer.
+ * But in this case, we keep res->usage until the end of a series of
+ * uncharges, so it's ok to ignore the memcg's refcnt.
+ */
+ if (!batch->memcg)
+ batch->memcg = mem;
+ /*
+ * In the typical case, batch->memcg == mem. This means we can
+ * merge a series of uncharges into one uncharge of the res_counter.
+ * If not, we uncharge the res_counter one by one.
+ */
+ if (batch->memcg != mem)
+ goto direct_uncharge;
+ /* remember freed charge and uncharge it later */
+ batch->bytes += PAGE_SIZE;
+ if (uncharge_memsw)
+ batch->memsw_bytes += PAGE_SIZE;
+ return;
+direct_uncharge:
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ if (uncharge_memsw)
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ return;
+}
/*
* uncharge if !page_mapped(page)
break;
}
- if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account &&
- (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- }
+ if (!mem_cgroup_is_root(mem))
+ __do_uncharge(mem, ctype);
if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
mem_cgroup_swap_statistics(mem, true);
mem_cgroup_charge_statistics(mem, pc, false);
__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}
+/*
+ * uncharge_start/end is called from unmap_page_range and inode
+ * invalidate/truncate. In those cases, pages are freed continuously and
+ * can be expected to be in the same memcg. Each of those callers itself
+ * limits the number of pages freed at once, so uncharge_start/end()
+ * pairs up properly. The pair may be entered more than once (nested)
+ * in a context.
+ */
+
+void mem_cgroup_uncharge_start(void)
+{
+ current->memcg_batch.do_batch++;
+ /* We can do nest. */
+ if (current->memcg_batch.do_batch == 1) {
+ current->memcg_batch.memcg = NULL;
+ current->memcg_batch.bytes = 0;
+ current->memcg_batch.memsw_bytes = 0;
+ }
+}
+
+void mem_cgroup_uncharge_end(void)
+{
+ struct memcg_batch_info *batch = ¤t->memcg_batch;
+
+ if (!batch->do_batch)
+ return;
+
+ batch->do_batch--;
+ if (batch->do_batch) /* If stacked, do nothing. */
+ return;
+
+ if (!batch->memcg)
+ return;
+ /*
+ * This "batch->memcg" is valid without any css_get/put etc...
+ * bacause we hide charges behind us.
+ */
+ if (batch->bytes)
+ res_counter_uncharge(&batch->memcg->res, batch->bytes);
+ if (batch->memsw_bytes)
+ res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
+ /* forget this pointer (for sanity check) */
+ batch->memcg = NULL;
+}
+
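Note: the expected calling pattern, as the zap/truncate hunks below adopt it (hedged sketch):

    mem_cgroup_uncharge_start();
    /* free a run of pages: their uncharges accumulate in current->memcg_batch */
    mem_cgroup_uncharge_end();   /* one res_counter_uncharge() covers the batch */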
#ifdef CONFIG_SWAP
/*
* called after __delete_from_swap_cache() and drop "page" account.
unsigned long long val)
{
int retry_count;
- int progress;
u64 memswlimit;
int ret = 0;
int children = mem_cgroup_count_children(memcg);
if (!ret)
break;
- progress = mem_cgroup_hierarchical_reclaim(memcg, NULL,
- GFP_KERNEL,
+ mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
/* Usage is reduced ? */
goto out;
/* This is for making all *used* pages to be on LRU. */
lru_add_drain_all();
+ drain_all_stock_sync();
ret = 0;
for_each_node_state(node, N_HIGH_MEMORY) {
for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
val += idx_val;
mem_cgroup_get_recursive_idx_stat(mem,
MEM_CGROUP_STAT_SWAPOUT, &idx_val);
+ val += idx_val;
val <<= PAGE_SHIFT;
} else
val = res_counter_read_u64(&mem->memsw, name);
enum {
MCS_CACHE,
MCS_RSS,
- MCS_MAPPED_FILE,
+ MCS_FILE_MAPPED,
MCS_PGPGIN,
MCS_PGPGOUT,
MCS_SWAP,
s->stat[MCS_CACHE] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
s->stat[MCS_RSS] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
- s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED);
+ s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
s->stat[MCS_PGPGIN] += val;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
/* root ? */
if (cont->parent == NULL) {
+ int cpu;
enable_swap_cgroup();
parent = NULL;
root_mem_cgroup = mem;
if (mem_cgroup_soft_limit_tree_init())
goto free_out;
+ for_each_possible_cpu(cpu) {
+ struct memcg_stock_pcp *stock =
+ &per_cpu(memcg_stock, cpu);
+ INIT_WORK(&stock->work, drain_local_stock);
+ }
+ hotcpu_notifier(memcg_stock_cpu_callback, 0);
} else {
parent = mem_cgroup_from_cont(cont->parent);
struct task_struct *p,
bool threadgroup)
{
- mutex_lock(&memcg_tasklist);
/*
* FIXME: It's better to move charges of this process from old
* memcg to new memcg. But it's just on TODO-List now.
*/
- mutex_unlock(&memcg_tasklist);
}
struct cgroup_subsys mem_cgroup_subsys = {
details = NULL;
BUG_ON(addr >= end);
+ mem_cgroup_uncharge_start();
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
zap_work, details);
} while (pgd++, addr = next, (addr != end && *zap_work > 0));
tlb_end_vma(tlb, vma);
+ mem_cgroup_uncharge_end();
return addr;
}
/*
* Determine the type of allocation constraint.
*/
-static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
- gfp_t gfp_mask)
-{
#ifdef CONFIG_NUMA
+static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
+ gfp_t gfp_mask, nodemask_t *nodemask)
+{
struct zone *zone;
struct zoneref *z;
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
- nodemask_t nodes = node_states[N_HIGH_MEMORY];
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
- if (cpuset_zone_allowed_softwall(zone, gfp_mask))
- node_clear(zone_to_nid(zone), nodes);
- else
- return CONSTRAINT_CPUSET;
+ /*
+ * We only reach here when __GFP_NOFAIL is used, so we should avoid
+ * killing current. We have to fall back to a random task kill in this
+ * case. CONSTRAINT_THISNODE would be the right answer, but there is
+ * no way to handle it yet.
+ */
+ if (gfp_mask & __GFP_THISNODE)
+ return CONSTRAINT_NONE;
- if (!nodes_empty(nodes))
+ /*
+ * The nodemask here is a nodemask passed to alloc_pages(). Now,
+ * cpuset doesn't use this nodemask for its hardwall/softwall/hierarchy
+ * feature. mempolicy is the only user of the nodemask here.
+ * Check whether mempolicy's nodemask contains all N_HIGH_MEMORY nodes.
+ */
+ if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
return CONSTRAINT_MEMORY_POLICY;
-#endif
+
+ /* Check whether this allocation failure is caused by cpuset's wall function */
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ high_zoneidx, nodemask)
+ if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
+ return CONSTRAINT_CPUSET;
return CONSTRAINT_NONE;
}
+#else
+static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
+ gfp_t gfp_mask, nodemask_t *nodemask)
+{
+ return CONSTRAINT_NONE;
+}
+#endif
/*
* Simple selection loop. We chose the process with the highest
} while_each_thread(g, p);
}
-static void dump_header(gfp_t gfp_mask, int order, struct mem_cgroup *mem)
+static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
+ struct mem_cgroup *mem)
{
pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
"oom_adj=%d\n",
cpuset_print_task_mems_allowed(current);
task_unlock(current);
dump_stack();
- mem_cgroup_print_oom_info(mem, current);
+ mem_cgroup_print_oom_info(mem, p);
show_mem();
if (sysctl_oom_dump_tasks)
dump_tasks(mem);
}
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
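Note: K(x) converts a page count to kilobytes; with 4 KB pages (PAGE_SHIFT = 12) it is x << 2, so e.g. K(25) = 100 kB in the vsz/rss line printed below.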
/*
* Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO
* flag though it's unlikely that we select a process with CAP_SYS_RAW_IO
return;
}
+ task_lock(p);
if (!p->mm) {
WARN_ON(1);
- printk(KERN_WARNING "tried to kill an mm-less task!\n");
+ printk(KERN_WARNING "tried to kill an mm-less task %d (%s)!\n",
+ task_pid_nr(p), p->comm);
+ task_unlock(p);
return;
}
if (verbose)
- printk(KERN_ERR "Killed process %d (%s)\n",
- task_pid_nr(p), p->comm);
+ printk(KERN_ERR "Killed process %d (%s) "
+ "vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
+ task_pid_nr(p), p->comm,
+ K(p->mm->total_vm),
+ K(get_mm_counter(p->mm, anon_rss)),
+ K(get_mm_counter(p->mm, file_rss)));
+ task_unlock(p);
/*
* We give our sacrificial lamb high priority and access to
struct task_struct *c;
if (printk_ratelimit())
- dump_header(gfp_mask, order, mem);
+ dump_header(p, gfp_mask, order, mem);
/*
* If the task is already exiting, don't alarm the sysadmin or kill
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!p) {
read_unlock(&tasklist_lock);
- dump_header(gfp_mask, order, NULL);
+ dump_header(NULL, gfp_mask, order, NULL);
panic("Out of memory and no killable processes...\n");
}
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
-void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
+void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+ int order, nodemask_t *nodemask)
{
unsigned long freed = 0;
enum oom_constraint constraint;
return;
if (sysctl_panic_on_oom == 2) {
- dump_header(gfp_mask, order, NULL);
+ dump_header(NULL, gfp_mask, order, NULL);
panic("out of memory. Compulsory panic_on_oom is selected.\n");
}
* Check if there were limitations on the allocation (only relevant for
* NUMA) that may require different handling.
*/
- constraint = constrained_alloc(zonelist, gfp_mask);
+ constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
read_lock(&tasklist_lock);
switch (constraint) {
case CONSTRAINT_NONE:
if (sysctl_panic_on_oom) {
- dump_header(gfp_mask, order, NULL);
+ dump_header(NULL, gfp_mask, order, NULL);
panic("out of memory. panic_on_oom is selected\n");
}
/* Fall-through */
if (page)
goto out;
- /* The OOM killer will not help higher order allocs */
- if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
- goto out;
-
+ if (!(gfp_mask & __GFP_NOFAIL)) {
+ /* The OOM killer will not help higher order allocs */
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
+ goto out;
+ /*
+ * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
+ * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
+ * The caller should handle page allocation failure by itself if
+ * it specifies __GFP_THISNODE.
+ * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
+ */
+ if (gfp_mask & __GFP_THISNODE)
+ goto out;
+ }
/* Exhausted what can be done so it's blamo time */
- out_of_memory(zonelist, gfp_mask, order);
+ out_of_memory(zonelist, gfp_mask, order, nodemask);
out:
clear_zonelist_oom(zonelist, gfp_mask);
if (percpu_pagelist_fraction)
setup_pagelist_highmark(zone_pcp(zone, cpu),
- (zone->present_pages / percpu_pagelist_fraction));
+ (zone->present_pages / percpu_pagelist_fraction));
}
return 0;
{
if (atomic_inc_and_test(&page->_mapcount)) {
__inc_zone_page_state(page, NR_FILE_MAPPED);
- mem_cgroup_update_mapped_file_stat(page, 1);
+ mem_cgroup_update_file_mapped(page, 1);
}
}
__dec_zone_page_state(page, NR_ANON_PAGES);
} else {
__dec_zone_page_state(page, NR_FILE_MAPPED);
+ mem_cgroup_update_file_mapped(page, -1);
}
- mem_cgroup_update_mapped_file_stat(page, -1);
/*
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
pagevec_release(&pvec);
break;
}
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
unlock_page(page);
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
pagevec_init(&pvec, 0);
while (next <= end &&
pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t index;
break;
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
cond_resched();
}
return ret;
while (next <= end && !wrapped &&
pagevec_lookup(&pvec, mapping, next,
min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index;
unlock_page(page);
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
cond_resched();
}
return ret;
/* Pipe buffer operations for a socket. */
-static struct pipe_buf_operations sock_pipe_buf_ops = {
+static const struct pipe_buf_operations sock_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,