repo_name (stringlengths 4-116) | path (stringlengths 3-942) | size (stringlengths 1-7) | content (stringlengths 3-1.05M) | license (stringclasses 15 values)
---|---|---|---|---|
tstuckey/WirelessCautionLights | comm_api/commapi_linux/examples/ParallelBlackBox/BufferSize.java | 5316 | /*
* @(#)BufferSize.java 1.5 00/05/04 SMI
*
* Author: Tom Corson
*
* Copyright 2003 Sun Microsystems, Inc. All rights reserved.
* SUN PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*
* Sun grants you ("Licensee") a non-exclusive, royalty free, license
* to use, modify and redistribute this software in source and binary
* code form, provided that i) this copyright notice and license appear
* on all copies of the software; and ii) Licensee does not utilize the
* software in a manner which is disparaging to Sun.
*
* This software is provided "AS IS," without a warranty of any kind.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN AND
* ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY
* LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE
* SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS
* BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT,
* INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES,
* HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING
* OUT OF THE USE OF OR INABILITY TO USE SOFTWARE, EVEN IF SUN HAS BEEN
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
*
* This software is not designed or intended for use in on-line control
* of aircraft, air traffic, aircraft navigation or aircraft
* communications; or in the design, construction, operation or
* maintenance of any nuclear facility. Licensee represents and
* warrants that it will not use or redistribute the Software for such
* purposes.
*/
import java.awt.Panel;
import java.awt.Label;
import java.awt.TextField;
import java.awt.BorderLayout;
import java.awt.event.MouseListener;
import java.awt.event.MouseEvent;
import java.awt.event.ActionListener;
import java.awt.event.ActionEvent;
import javax.comm.ParallelPort;
/**
* An AWT panel that displays and edits the buffer size of a parallel
* port. A new value is committed by pressing Enter in the text field;
* clicking the label restores the default value.
*
* @author Tom Corson
* @version 1.5, 05/04/00
*/
public class BufferSize extends Panel implements MouseListener,
ActionListener {
private int value, defaultValue;
private Label label;
private TextField data;
private ParallelPort port = null;
private boolean inputBuffer;
/**
* Creates the panel with a "Buffer Size" label and a text field.
*
* @param size width of the text field, in columns
* @param port parallel port whose buffer size is shown
* @param inputBuffer true to track the input buffer, false for the output buffer
*/
public BufferSize(int size, ParallelPort port, boolean inputBuffer) {
super();
this.setPort(port);
this.inputBuffer = inputBuffer;
this.setLayout(new BorderLayout());
this.label = new Label("Buffer Size");
this.label.addMouseListener(this);
this.add("West", this.label);
this.data = new TextField(new Integer(defaultValue).toString(), size);
this.data.addActionListener(this);
this.add("East", this.data);
this.showValue();
this.defaultValue = this.value;
}
/**
* Sets the parallel port this panel operates on.
*
* @param port the parallel port, or null if none is open
*/
public void setPort(ParallelPort port) {
this.port = port;
}
/**
* Reads the current buffer size from the port.
*
* @return the input or output buffer size in bytes, or 0 if no port is set
*/
public int getValue() {
if (this.port != null) {
/*
* Get the buffer size.
*/
if (inputBuffer) {
this.value = port.getInputBufferSize();
} else {
this.value = port.getOutputBufferSize();
}
return this.value;
} else {
return (0);
}
}
/**
* Refreshes the text field with the current buffer size.
*/
public void showValue() {
this.data.setText(new Integer(this.getValue()).toString());
}
/**
* Applies a new buffer size to the port and refreshes the display.
*
* @param val the new buffer size in bytes
*/
public void setValue(int val) {
if (this.port != null) {
/*
* Set the new buffer size.
*/
if (inputBuffer) {
port.setInputBufferSize(val);
} else {
port.setOutputBufferSize(val);
}
}
this.showValue();
}
/**
* Sets the value restored when the label is clicked.
*
* @param val the new default buffer size
*/
public void setDefaultValue(int val) {
this.defaultValue = val;
}
/**
* Parses the text field contents and applies the new buffer size;
* restores the displayed value if the input is not a valid integer.
*
* @param e the action event from the text field
*/
public void actionPerformed(ActionEvent e) {
String s = e.getActionCommand();
try {
Integer newValue = new Integer(s);
this.setValue(newValue.intValue());
} catch (NumberFormatException ex) {
System.out.println("Bad value = " + e.getActionCommand());
this.showValue();
}
}
/**
* Unused MouseListener callback.
*
* @param e the mouse event
*/
public void mouseClicked(MouseEvent e) {}
/**
* Unused MouseListener callback.
*
* @param e the mouse event
*/
public void mouseEntered(MouseEvent e) {}
/**
* Unused MouseListener callback.
*
* @param e the mouse event
*/
public void mouseExited(MouseEvent e) {}
/**
* Restores the default buffer size when the label is pressed.
*
* @param e the mouse event
*/
public void mousePressed(MouseEvent e) {
this.setValue(this.defaultValue);
}
/**
* Unused MouseListener callback.
*
* @param e the mouse event
*/
public void mouseReleased(MouseEvent e) {}
}
| gpl-2.0 |
Nitrate/Nitrate | src/tcms/profiles/models.py | 2379 | # -*- coding: utf-8 -*-
from django.db import models
class Profiles(models.Model):
userid = models.AutoField(primary_key=True)
login_name = models.CharField(max_length=255, unique=True)
cryptpassword = models.CharField(max_length=128, blank=True)
realname = models.CharField(max_length=255)
disabledtext = models.TextField()
disable_mail = models.IntegerField(default=0)
mybugslink = models.IntegerField()
extern_id = models.IntegerField(blank=True)
class Meta:
db_table = "profiles"
def get_groups(self):
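"""Return the Groups this profile is a member of, resolved through UserGroupMap."""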
q = UserGroupMap.objects.filter(user__userid=self.userid)
q = q.select_related()
groups = [assoc.group for assoc in q.all()]
return groups
class Groups(models.Model):
name = models.CharField(unique=True, max_length=255)
description = models.TextField()
isbuggroup = models.IntegerField()
userregexp = models.TextField()
isactive = models.IntegerField()
class Meta:
db_table = "groups"
class UserGroupMap(models.Model):
user = models.ForeignKey(Profiles, on_delete=models.CASCADE) # user_id
# (actually has two primary keys)
group = models.ForeignKey(Groups, on_delete=models.CASCADE) # group_id
isbless = models.IntegerField(default=0)
grant_type = models.IntegerField(default=0)
class Meta:
db_table = "user_group_map"
unique_together = ("user", "group")
#
# Extra information for users
#
class UserProfile(models.Model):
user = models.OneToOneField(
"auth.User", unique=True, related_name="profile", on_delete=models.CASCADE
)
phone_number = models.CharField(blank=True, default="", max_length=128)
url = models.URLField(blank=True, default="")
im = models.CharField(blank=True, default="", max_length=128)
im_type_id = models.IntegerField(blank=True, default=1, null=True)
address = models.TextField(blank=True, default="")
notes = models.TextField(blank=True, default="")
class Meta:
db_table = "tcms_user_profiles"
def get_im(self):
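"""Return the IM contact as "[type] handle", or None if unset or the type id is unknown."""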
from .forms import IM_CHOICES
if not self.im:
return None
for c in IM_CHOICES:
if self.im_type_id == c[0]:
return "[{}] {}".format(c[1], self.im)
@classmethod
def get_user_profile(cls, user):
return cls.objects.get(user=user)
| gpl-2.0 |
brianmwaters/medusa | doc/medusa-rdp.html | 2270 | <HTML>
<TITLE>Foofus Networking Services - Medusa::RDP</TITLE>
<BODY BGCOLOR="#999999">
<H1>Medusa Parallel Network Login Auditor :: RDP</H1>
<I>JoMo-Kun / jmk "AT" foofus "DOT" net</I><BR>
<HR>
<P>
The RDP module tests accounts against the Microsoft Remote Desktop Protocol
(RDP) / Terminal Services. Note that it will only work against
target systems running Windows Server 2008 and later. Tests against Windows
XP/2003/etc. will always report that the logon was successful.
<P>
This module requires <A HREF="http://www.freerdp.com/">FreeRDP</A>. The
pass-the-hash option is also only available if FreeRDP version 1.2 or
greater is installed.
<P>
Pre-built binaries of the FreeRDP master branch are available at <A HREF="https://ci.freerdp.com">https://ci.freerdp.com</A>.
Builds are available for Ubuntu, Debian, Fedora, and OpenSUSE. The nightly
builds are installed into /opt/freerdp-nightly and can be installed in parallel
with the distribution's regular freerdp package. If Medusa detects this version
during its build process, it builds against it in preference to any other
installed version of FreeRDP.
<P>
For example, the following worked for Kali 2.0 on 2015/11/04:<BR>
- Visit: <A HREF="https://ci.freerdp.com/job/freerdp-nightly-binaries/architecture=amd64,distribution=jessie,label=pkg-deb/">https://ci.freerdp.com/job/freerdp-nightly-binaries/architecture=amd64,distribution=jessie,label=pkg-deb/</A><BR>
- Download: freerdp-nightly_1.2.1+0~20151104024829.185~1.gbpb83356_amd64.deb<BR>
- Download: freerdp-nightly-dev_1.2.1+0~20151104024829.185~1.gbpb83356_amd64.deb<BR>
- Install: dpkg -i freerdp-nightly*<BR>
- Update run time path: echo /opt/freerdp-nightly/lib/ >> /etc/ld.so.conf; ldconfig<BR>
- Build Medusa: ./configure; make
<P>
The following examples demonstrate several uses of the RDP module:
<UL>
<LI>Logon attempt using pass-the-hash (NTLM):
<PRE><CODE>
medusa -M rdp -m PASS:HASH -h 10.10.10.10 -u Administrator -p 31D78236327B9619B14ED8EC9AB454C1
</CODE></PRE>
<LI>Logon attempt supplying domain name (default behavior is a local logon):
<PRE><CODE>
medusa -M rdp -m DOMAIN:CORPNAME -h 10.10.10.10 -u Administrator -p Password1
</CODE></PRE>
</UL>
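<P>
A further, hedged example (the wordlist file names below are placeholders;
-U and -P are Medusa's standard options for username and password files):
<PRE><CODE>
medusa -M rdp -h 10.10.10.10 -U users.txt -P passwords.txt
</CODE></PRE>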
<BR><BR>
<A HREF="medusa.html">Medusa Documentation</A><BR>
</BODY>
</HTML>
| gpl-2.0 |
TrevorLowing/PyGames | data/html/rules/chinesediscipline.html | 1241 | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2//EN">
<html>
<head>
<title>PySol - Rules for Chinese Discipline</title>
<meta name="license" content="Distributed under the terms of the GNU General Public License">
<meta http-equiv="content-type" content="text/html; charset=utf-8">
</head>
<body text="#000000" bgcolor="#F7F3FF" link="#0000FF" vlink="#660099" alink="#FF0000">
<img src="../images/pysollogo03.gif" alt="">
<br>
<h1>Chinese Discipline</h1>
<p>
Yukon type. 1 deck. No redeal.
<h3>Object</h3>
<p>
Move all cards to the Foundations.
<h3>Quick Description</h3>
<p>
Like <a href="yukon.html">Yukon</a>,
but don't deal all cards at game start.
<h3>Rules</h3>
<p>
Cards in Tableau are built down by alternate color.
Groups of cards can be moved regardless of sequence.
An empty pile in the Tableau can be filled with a King or a group
of cards with a King on the bottom.
<p>
Foundations are built up in suit from Ace to King.
Cards in Foundations are no longer in play.
<p>
When no more moves are possible, click on the Talon.
Three more cards will be dealt.
<p>
<br>
<a href="../glossary.html">Glossary</a>
<br>
<a href="../general_rules.html">General rules</a>
<p>
<a href="../index.html">Back to the index</a>
</body>
</html>
| gpl-2.0 |
mekwars-legends/mekwars-upstream | src/server/campaign/commands/RemoveVoteCommand.java | 4319 | /*
* MekWars - Copyright (C) 2004
*
* Derived from MegaMekNET (http://www.sourceforge.net/projects/megameknet)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
package server.campaign.commands;
import java.util.Enumeration;
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import java.util.Vector;
import server.campaign.CampaignMain;
import server.campaign.SPlayer;
import server.campaign.votes.Vote;
public class RemoveVoteCommand implements Command {
int accessLevel = 0;
String syntax = "";
public int getExecutionLevel(){return accessLevel;}
public void setExecutionLevel(int i) {accessLevel = i;}
public String getSyntax() { return syntax;}
public void process(StringTokenizer command,String Username) {
if (accessLevel != 0) {
int userLevel = CampaignMain.cm.getServer().getUserLevel(Username);
if(userLevel < getExecutionLevel()) {
CampaignMain.cm.toUser("AM:Insufficient access level for command. Level: " + userLevel + ". Required: " + accessLevel + ".",Username,true);
return;
}
}
SPlayer castingPlayer = CampaignMain.cm.getPlayer(Username);
String recipientName = "";//blank string
try {
recipientName = command.nextToken();
}//end try
catch (NoSuchElementException ex) {
CampaignMain.cm.toUser("AM:RemoveVote command failed. Check your input. It should be something like this: /c removevote#name",Username,true);
return;
}//end catch
//break out if a player is trying to vote for himself
if (Username.equals(recipientName)) {
CampaignMain.cm.toUser("AM:You may not vote for youself.",Username,true);
return;
}
//break out if voting isn't enabled on the server
boolean canVote = Boolean.parseBoolean(CampaignMain.cm.getConfig("VotingEnabled"));
if (!canVote) {
CampaignMain.cm.toUser("AM:Voting is disabled on this server.",Username,true);
return;
}
//get all votes cast by the player issuing the command
Vector<Vote> castersVotes = CampaignMain.cm.getVoteManager().getAllVotesBy(castingPlayer);
//break out if the player has no outstanding votes to remove
if (castersVotes.isEmpty()) {
CampaignMain.cm.toUser("AM:You have not cast any votes. Removal is impossible.",Username,true);
return;
}
//get the SPlayer who is receiving the vote, for the next couple of checks
SPlayer recipientPlayer = CampaignMain.cm.getPlayer(recipientName);
//break out if the receiving player isn't known
if (recipientPlayer == null) {
CampaignMain.cm.toUser("AM:You can't remove a vote for a player who doesn't exist.",Username,true);
return;
}
//break out if player has no votes cast for recipient
Enumeration<Vote> e = castersVotes.elements();
boolean hasVoteForRecipient = false;
Vote v = null;
while (e.hasMoreElements() && !hasVoteForRecipient) {
v = e.nextElement();
if (v.getRecipient().equals(recipientName)) {
hasVoteForRecipient = true;
}
}//end while(more elements)
/*
* The last vote drawn from the enumeration has the proper recipient, if
* hasVoteForRecipient is true, because the loop ends before a replacement
* element is drawn. If true, attempt to remove the vote. If false, break the
* bad news to the player.
*/
if (!hasVoteForRecipient) {
CampaignMain.cm.toUser("AM:You have not voted for this player.",Username,true);
return;
}
//else if
boolean voteRemoved = CampaignMain.cm.getVoteManager().removeVote(v);
if (!voteRemoved) {
CampaignMain.cm.toUser("AM:There was an error removing the vote. Please contact your " + "server admin or file a bug report.", Username, true);
return;
}
//else
CampaignMain.cm.toUser("AM:Your vote for " + recipientName + " has been removed.",Username,true);
}
} | gpl-2.0 |
AsgerPetersen/QGIS | src/providers/spatialite/qgsspatialitesourceselect.cpp | 19027 | /***************************************************************************
qgsspatialitesourceselect.cpp
Dialog to select SpatiaLite layer(s) and add it to the map canvas
-------------------
begin : Dec 2008
copyright : (C) 2008 by Sandro Furieri
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "qgsspatialitesourceselect.h"
#include "qgsspatialiteconnection.h"
#include "qgslogger.h"
#include "qgsapplication.h"
#include "qgscontexthelp.h"
#include "qgsquerybuilder.h"
#include "qgsdatasourceuri.h"
#include "qgsvectorlayer.h"
#include <QInputDialog>
#include <QMessageBox>
#include <QSettings>
#include <QTextStream>
#include <QTableWidgetItem>
#include <QHeaderView>
#include <QStringList>
#include <QPushButton>
#ifdef _MSC_VER
#define strcasecmp(a,b) stricmp(a,b)
#endif
QgsSpatiaLiteSourceSelect::QgsSpatiaLiteSourceSelect( QWidget * parent, Qt::WindowFlags fl, bool embedded ):
QDialog( parent, fl )
{
setupUi( this );
QSettings settings;
restoreGeometry( settings.value( "/Windows/SpatiaLiteSourceSelect/geometry" ).toByteArray() );
mHoldDialogOpen->setChecked( settings.value( "/Windows/SpatiaLiteSourceSelect/HoldDialogOpen", false ).toBool() );
setWindowTitle( tr( "Add SpatiaLite Table(s)" ) );
connectionsGroupBox->setTitle( tr( "Databases" ) );
btnEdit->hide(); // hide the edit button
btnSave->hide();
btnLoad->hide();
mStatsButton = new QPushButton( tr( "&Update Statistics" ) );
connect( mStatsButton, SIGNAL( clicked() ), this, SLOT( updateStatistics() ) );
mStatsButton->setEnabled( false );
mAddButton = new QPushButton( tr( "&Add" ) );
connect( mAddButton, SIGNAL( clicked() ), this, SLOT( addClicked() ) );
mAddButton->setEnabled( false );
mBuildQueryButton = new QPushButton( tr( "&Set Filter" ) );
connect( mBuildQueryButton, SIGNAL( clicked() ), this, SLOT( buildQuery() ) );
mBuildQueryButton->setEnabled( false );
if ( embedded )
{
buttonBox->button( QDialogButtonBox::Close )->hide();
}
else
{
buttonBox->addButton( mAddButton, QDialogButtonBox::ActionRole );
buttonBox->addButton( mBuildQueryButton, QDialogButtonBox::ActionRole );
buttonBox->addButton( mStatsButton, QDialogButtonBox::ActionRole );
}
populateConnectionList();
mSearchModeComboBox->addItem( tr( "Wildcard" ) );
mSearchModeComboBox->addItem( tr( "RegExp" ) );
mSearchColumnComboBox->addItem( tr( "All" ) );
mSearchColumnComboBox->addItem( tr( "Table" ) );
mSearchColumnComboBox->addItem( tr( "Type" ) );
mSearchColumnComboBox->addItem( tr( "Geometry column" ) );
mSearchColumnComboBox->addItem( tr( "Sql" ) );
mProxyModel.setParent( this );
mProxyModel.setFilterKeyColumn( -1 );
mProxyModel.setFilterCaseSensitivity( Qt::CaseInsensitive );
mProxyModel.setDynamicSortFilter( true );
mProxyModel.setSourceModel( &mTableModel );
mTablesTreeView->setModel( &mProxyModel );
mTablesTreeView->setSortingEnabled( true );
connect( mTablesTreeView->selectionModel(), SIGNAL( selectionChanged( const QItemSelection&, const QItemSelection& ) ), this, SLOT( treeWidgetSelectionChanged( const QItemSelection&, const QItemSelection& ) ) );
//for Qt < 4.3.2, passing -1 to include all model columns
//in search does not seem to work
mSearchColumnComboBox->setCurrentIndex( 1 );
//hide the search options by default
//they will be shown when the user ticks
//the search options group box
mSearchLabel->setVisible( false );
mSearchColumnComboBox->setVisible( false );
mSearchColumnsLabel->setVisible( false );
mSearchModeComboBox->setVisible( false );
mSearchModeLabel->setVisible( false );
mSearchTableEdit->setVisible( false );
cbxAllowGeometrylessTables->setDisabled( true );
}
QgsSpatiaLiteSourceSelect::~QgsSpatiaLiteSourceSelect()
{
QSettings settings;
settings.setValue( "/Windows/SpatiaLiteSourceSelect/geometry", saveGeometry() );
settings.setValue( "/Windows/SpatiaLiteSourceSelect/HoldDialogOpen", mHoldDialogOpen->isChecked() );
}
// Slot for performing action when the Add button is clicked
void QgsSpatiaLiteSourceSelect::addClicked()
{
addTables();
}
/** End Autoconnected SLOTS **/
// Remember which database is selected
void QgsSpatiaLiteSourceSelect::on_cmbConnections_activated( int )
{
dbChanged();
}
void QgsSpatiaLiteSourceSelect::buildQuery()
{
setSql( mTablesTreeView->currentIndex() );
}
void QgsSpatiaLiteSourceSelect::updateStatistics()
{
QString subKey = cmbConnections->currentText();
int idx = subKey.indexOf( '@' );
if ( idx > 0 )
subKey.truncate( idx );
QString msg = tr( "Are you sure you want to update the internal statistics for DB: %1?\n\n"
"This could take a long time (depending on the DB size),\n"
"but implies better performance thereafter." ).arg( subKey );
QMessageBox::StandardButton result =
QMessageBox::information( this, tr( "Confirm Update Statistics" ), msg, QMessageBox::Ok | QMessageBox::Cancel );
if ( result != QMessageBox::Ok )
return;
// trying to connect to SpatiaLite DB
QgsSpatiaLiteConnection conn( subKey );
if ( conn.updateStatistics() )
{
QMessageBox::information( this, tr( "Update Statistics" ),
tr( "Internal statistics successfully updated for: %1" ).arg( subKey ) );
}
else
{
QMessageBox::critical( this, tr( "Update Statistics" ),
tr( "Error while updating internal statistics for: %1" ).arg( subKey ) );
}
}
void QgsSpatiaLiteSourceSelect::on_cbxAllowGeometrylessTables_stateChanged( int )
{
on_btnConnect_clicked();
}
void QgsSpatiaLiteSourceSelect::on_mTablesTreeView_clicked( const QModelIndex &index )
{
mBuildQueryButton->setEnabled( index.parent().isValid() );
}
void QgsSpatiaLiteSourceSelect::on_mTablesTreeView_doubleClicked( const QModelIndex &index )
{
setSql( index );
}
void QgsSpatiaLiteSourceSelect::on_mSearchGroupBox_toggled( bool checked )
{
if ( mSearchTableEdit->text().isEmpty() )
return;
on_mSearchTableEdit_textChanged( checked ? mSearchTableEdit->text() : "" );
}
void QgsSpatiaLiteSourceSelect::on_mSearchTableEdit_textChanged( const QString & text )
{
if ( mSearchModeComboBox->currentText() == tr( "Wildcard" ) )
{
mProxyModel._setFilterWildcard( text );
}
else if ( mSearchModeComboBox->currentText() == tr( "RegExp" ) )
{
mProxyModel._setFilterRegExp( text );
}
}
void QgsSpatiaLiteSourceSelect::on_mSearchColumnComboBox_currentIndexChanged( const QString & text )
{
if ( text == tr( "All" ) )
{
mProxyModel.setFilterKeyColumn( -1 );
}
else if ( text == tr( "Table" ) )
{
mProxyModel.setFilterKeyColumn( 0 );
}
else if ( text == tr( "Type" ) )
{
mProxyModel.setFilterKeyColumn( 1 );
}
else if ( text == tr( "Geometry column" ) )
{
mProxyModel.setFilterKeyColumn( 2 );
}
else if ( text == tr( "Sql" ) )
{
mProxyModel.setFilterKeyColumn( 3 );
}
}
void QgsSpatiaLiteSourceSelect::on_mSearchModeComboBox_currentIndexChanged( const QString & text )
{
Q_UNUSED( text );
on_mSearchTableEdit_textChanged( mSearchTableEdit->text() );
}
void QgsSpatiaLiteSourceSelect::setLayerType( const QString& table, const QString& column, const QString& type )
{
mTableModel.setGeometryTypesForTable( table, column, type );
mTablesTreeView->sortByColumn( 0, Qt::AscendingOrder );
}
void QgsSpatiaLiteSourceSelect::populateConnectionList()
{
cmbConnections->clear();
Q_FOREACH ( const QString& name, QgsSpatiaLiteConnection::connectionList() )
{
// retrieving the SQLite DB name and full path
QString text = name + tr( "@" ) + QgsSpatiaLiteConnection::connectionPath( name );
cmbConnections->addItem( text );
}
setConnectionListPosition();
btnConnect->setDisabled( cmbConnections->count() == 0 );
btnDelete->setDisabled( cmbConnections->count() == 0 );
cmbConnections->setDisabled( cmbConnections->count() == 0 );
}
void QgsSpatiaLiteSourceSelect::on_btnNew_clicked()
{
if ( ! newConnection( this ) )
return;
populateConnectionList();
emit connectionsChanged();
}
bool QgsSpatiaLiteSourceSelect::newConnection( QWidget* parent )
{
// Retrieve last used project dir from persistent settings
QSettings settings;
QString lastUsedDir = settings.value( "/UI/lastSpatiaLiteDir", QDir::homePath() ).toString();
QString myFile = QFileDialog::getOpenFileName( parent,
tr( "Choose a SpatiaLite/SQLite DB to open" ),
lastUsedDir, tr( "SpatiaLite DB" ) + " (*.sqlite *.db *.sqlite3 *.db3 *.s3db);;" + tr( "All files" ) + " (*)" );
if ( myFile.isEmpty() )
return false;
QFileInfo myFI( myFile );
QString myPath = myFI.path();
QString myName = myFI.fileName();
QString savedName = myFI.fileName();
QString baseKey = "/SpatiaLite/connections/";
// TODO: keep the test
//handle = openSpatiaLiteDb( myFI.canonicalFilePath() );
//if ( !handle )
// return false;
// OK, this one is a valid SpatiaLite DB
//closeSpatiaLiteDb( handle );
// if there is already a connection with this name, ask for a new name
while ( ! settings.value( baseKey + savedName + "/sqlitepath", "" ).toString().isEmpty() )
{
bool ok;
savedName = QInputDialog::getText( nullptr , tr( "Cannot add connection '%1'" ).arg( myName ) ,
tr( "A connection with the same name already exists,\nplease provide a new name:" ), QLineEdit::Normal,
"", &ok );
if ( !ok || savedName.isEmpty() )
{
return false;
}
}
// Persist last used SpatiaLite dir
settings.setValue( "/UI/lastSpatiaLiteDir", myPath );
// inserting this SQLite DB path
settings.setValue( baseKey + "selected", savedName );
settings.setValue( baseKey + savedName + "/sqlitepath", myFI.canonicalFilePath() );
return true;
}
QString QgsSpatiaLiteSourceSelect::layerURI( const QModelIndex &index )
{
QString tableName = mTableModel.itemFromIndex( index.sibling( index.row(), 0 ) )->text();
QString geomColumnName = mTableModel.itemFromIndex( index.sibling( index.row(), 2 ) )->text();
QString sql = mTableModel.itemFromIndex( index.sibling( index.row(), 3 ) )->text();
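// a geometry column recorded as "name AS TYPE" denotes a type-restricted view
// of a mixed-type column: split it and append a matching geometrytype() filter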
if ( geomColumnName.contains( " AS " ) )
{
int a = geomColumnName.indexOf( " AS " );
QString typeName = geomColumnName.mid( a + 4 ); //only the type name
geomColumnName = geomColumnName.left( a ); //only the geom column name
QString geomFilter;
if ( typeName == "POINT" )
{
geomFilter = QString( "geometrytype(\"%1\") IN ('POINT','MULTIPOINT')" ).arg( geomColumnName );
}
else if ( typeName == "LINESTRING" )
{
geomFilter = QString( "geometrytype(\"%1\") IN ('LINESTRING','MULTILINESTRING')" ).arg( geomColumnName );
}
else if ( typeName == "POLYGON" )
{
geomFilter = QString( "geometrytype(\"%1\") IN ('POLYGON','MULTIPOLYGON')" ).arg( geomColumnName );
}
if ( !geomFilter.isEmpty() && !sql.contains( geomFilter ) )
{
if ( !sql.isEmpty() )
{
sql += " AND ";
}
sql += geomFilter;
}
}
QgsDataSourceURI uri( connectionInfo() );
uri.setDataSource( "", tableName, geomColumnName, sql, "" );
return uri.uri();
}
// Slot for deleting an existing connection
void QgsSpatiaLiteSourceSelect::on_btnDelete_clicked()
{
QString subKey = cmbConnections->currentText();
int idx = subKey.indexOf( '@' );
if ( idx > 0 )
subKey.truncate( idx );
QString msg = tr( "Are you sure you want to remove the %1 connection and all associated settings?" ).arg( subKey );
QMessageBox::StandardButton result =
QMessageBox::information( this, tr( "Confirm Delete" ), msg, QMessageBox::Ok | QMessageBox::Cancel );
if ( result != QMessageBox::Ok )
return;
QgsSpatiaLiteConnection::deleteConnection( subKey );
populateConnectionList();
emit connectionsChanged();
}
void QgsSpatiaLiteSourceSelect::addTables()
{
m_selectedTables.clear();
typedef QMap < int, bool >schemaInfo;
QMap < QString, schemaInfo > dbInfo;
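// track which (schema, row) pairs were already added, so that selecting
// several cells of the same table row yields a single layer URI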
QItemSelection selection = mTablesTreeView->selectionModel()->selection();
QModelIndexList selectedIndices = selection.indexes();
QStandardItem *currentItem = nullptr;
QModelIndexList::const_iterator selected_it = selectedIndices.constBegin();
for ( ; selected_it != selectedIndices.constEnd(); ++selected_it )
{
if ( !selected_it->parent().isValid() )
{
//top level items only contain the schema names
continue;
}
currentItem = mTableModel.itemFromIndex( mProxyModel.mapToSource( *selected_it ) );
if ( !currentItem )
{
continue;
}
QString currentSchemaName = currentItem->parent()->text();
int currentRow = currentItem->row();
if ( !dbInfo[currentSchemaName].contains( currentRow ) )
{
dbInfo[currentSchemaName][currentRow] = true;
m_selectedTables << layerURI( mProxyModel.mapToSource( *selected_it ) );
}
}
if ( m_selectedTables.empty() )
{
QMessageBox::information( this, tr( "Select Table" ), tr( "You must select a table in order to add a Layer." ) );
}
else
{
emit addDatabaseLayers( m_selectedTables, "spatialite" );
if ( !mHoldDialogOpen->isChecked() )
{
accept();
}
}
}
void QgsSpatiaLiteSourceSelect::on_btnConnect_clicked()
{
cbxAllowGeometrylessTables->setEnabled( false );
QString subKey = cmbConnections->currentText();
int idx = subKey.indexOf( '@' );
if ( idx > 0 )
subKey.truncate( idx );
// trying to connect to SpatiaLite DB
QgsSpatiaLiteConnection conn( subKey );
mSqlitePath = conn.path();
QApplication::setOverrideCursor( Qt::WaitCursor );
QgsSpatiaLiteConnection::Error err;
err = conn.fetchTables( cbxAllowGeometrylessTables->isChecked() );
QApplication::restoreOverrideCursor();
if ( err != QgsSpatiaLiteConnection::NoError )
{
QString errCause = conn.errorMessage();
switch ( err )
{
case QgsSpatiaLiteConnection::NotExists:
QMessageBox::critical( this, tr( "SpatiaLite DB Open Error" ),
tr( "Database does not exist: %1" ).arg( mSqlitePath ) );
break;
case QgsSpatiaLiteConnection::FailedToOpen:
QMessageBox::critical( this, tr( "SpatiaLite DB Open Error" ),
tr( "Failure while connecting to: %1\n\n%2" ).arg( mSqlitePath, errCause ) );
break;
case QgsSpatiaLiteConnection::FailedToGetTables:
QMessageBox::critical( this, tr( "SpatiaLite getTableInfo Error" ),
tr( "Failure exploring tables from: %1\n\n%2" ).arg( mSqlitePath, errCause ) );
break;
default:
QMessageBox::critical( this, tr( "SpatiaLite Error" ),
tr( "Unexpected error when working with: %1\n\n%2" ).arg( mSqlitePath, errCause ) );
}
mSqlitePath = QString();
return;
}
QModelIndex rootItemIndex = mTableModel.indexFromItem( mTableModel.invisibleRootItem() );
mTableModel.removeRows( 0, mTableModel.rowCount( rootItemIndex ), rootItemIndex );
// populate the table list
// get the list of suitable tables and columns and populate the UI
mTableModel.setSqliteDb( subKey );
QList<QgsSpatiaLiteConnection::TableEntry> tables = conn.tables();
Q_FOREACH ( const QgsSpatiaLiteConnection::TableEntry& table, tables )
{
mTableModel.addTableEntry( table.type, table.tableName, table.column, "" );
}
if ( cmbConnections->count() > 0 )
{
mStatsButton->setEnabled( true );
}
mTablesTreeView->sortByColumn( 0, Qt::AscendingOrder );
//expand all the toplevel items
int numTopLevelItems = mTableModel.invisibleRootItem()->rowCount();
for ( int i = 0; i < numTopLevelItems; ++i )
{
mTablesTreeView->expand( mProxyModel.mapFromSource( mTableModel.indexFromItem( mTableModel.invisibleRootItem()->child( i ) ) ) );
}
mTablesTreeView->resizeColumnToContents( 0 );
mTablesTreeView->resizeColumnToContents( 1 );
cbxAllowGeometrylessTables->setEnabled( true );
}
QStringList QgsSpatiaLiteSourceSelect::selectedTables()
{
return m_selectedTables;
}
QString QgsSpatiaLiteSourceSelect::connectionInfo()
{
return QString( "dbname='%1'" ).arg( QString( mSqlitePath ).replace( '\'', "\\'" ) );
}
void QgsSpatiaLiteSourceSelect::setSql( const QModelIndex &index )
{
QModelIndex idx = mProxyModel.mapToSource( index );
QString tableName = mTableModel.itemFromIndex( idx.sibling( idx.row(), 0 ) )->text();
QgsVectorLayer *vlayer = new QgsVectorLayer( layerURI( idx ), tableName, "spatialite" );
if ( !vlayer->isValid() )
{
delete vlayer;
return;
}
// create a query builder object
QgsQueryBuilder *gb = new QgsQueryBuilder( vlayer, this );
if ( gb->exec() )
{
mTableModel.setSql( mProxyModel.mapToSource( index ), gb->sql() );
}
delete gb;
delete vlayer;
}
QString QgsSpatiaLiteSourceSelect::fullDescription( const QString& table, const QString& column, const QString& type )
{
QString full_desc = "";
full_desc += table + "\" (" + column + ") " + type;
return full_desc;
}
void QgsSpatiaLiteSourceSelect::dbChanged()
{
// Remember which database was selected.
QSettings settings;
settings.setValue( "/SpatiaLite/connections/selected", cmbConnections->currentText() );
}
void QgsSpatiaLiteSourceSelect::setConnectionListPosition()
{
QSettings settings;
// If possible, set the item currently displayed database
QString toSelect = settings.value( "/SpatiaLite/connections/selected" ).toString();
toSelect += '@' + settings.value( "/SpatiaLite/connections/" + toSelect + "/sqlitepath" ).toString();
cmbConnections->setCurrentIndex( cmbConnections->findText( toSelect ) );
if ( cmbConnections->currentIndex() < 0 )
{
if ( toSelect.isNull() )
cmbConnections->setCurrentIndex( 0 );
else
cmbConnections->setCurrentIndex( cmbConnections->count() - 1 );
}
}
void QgsSpatiaLiteSourceSelect::setSearchExpression( const QString & regexp )
{
Q_UNUSED( regexp );
}
void QgsSpatiaLiteSourceSelect::treeWidgetSelectionChanged( const QItemSelection &selected, const QItemSelection &deselected )
{
Q_UNUSED( deselected )
mAddButton->setEnabled( !selected.isEmpty() );
}
| gpl-2.0 |
airputih/kip | app/views/emails/request.blade.php | 475 | @extends('emails.layout')
@section('header')
{{HTML::linkRoute('admin-index', 'Admin Dashboard')}}
@stop
@section('title')
New Request
@stop
@section('description')
Hello {{{Config::get('setting.site_name')}}} admin, <br>
A new information request was submitted by {{{$name}}}:<br>
<br>
To see the request details, please click the following link:<br>
<h3>{{HTML::link($link, 'Request details')}}</h3>
<br><br>
Sent by<br>
The {{{Config::get('setting.site_name')}}} system
@stop | gpl-2.0 |
hansiglaser/chll | examples/wsn-soc/apps/max6682/sim-yosys/sim.sh | 200 | #!/bin/bash
#
# Start the simulation
#
# Be sure to create a Makefile first and compile all Verilog and VHDL files.
#
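# A hedged sketch of that prerequisite step for ModelSim/Questa (the source
# paths below are placeholders; use the paths from this project's Makefile):
#
#   vlib work          # create the working library
#   vcom ../src/*.vhd  # compile the VHDL sources
#   vlog ../src/*.v    # compile the Verilog sources
#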
vsim -t ps -voptargs=+acc MAX6682_tb_verilog_cfg -do "do wave-app.do ; run -all"
| gpl-2.0 |
JetBrains/jdk8u_jdk | test/com/sun/crypto/provider/Cipher/RSA/TestOAEPParameterSpec.java | 5439 | /*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 4923484 8146293
* @summary test ASN.1 encoding generation/parsing for the OAEPParameters
* implementation in SunJCE provider.
* @author Valerie Peng
*/
import java.math.BigInteger;
import java.util.*;
import java.security.*;
import java.security.spec.MGF1ParameterSpec;
import javax.crypto.*;
import javax.crypto.spec.OAEPParameterSpec;
import javax.crypto.spec.PSource;
public class TestOAEPParameterSpec {
private static Provider cp;
private static boolean runTest(String mdName, MGF1ParameterSpec mgfSpec,
byte[] p) throws Exception {
OAEPParameterSpec spec = new OAEPParameterSpec(mdName, "MGF1",
mgfSpec, new PSource.PSpecified(p));
cp = Security.getProvider("SunJCE");
System.out.println("Testing provider " + cp.getName() + "...");
AlgorithmParameters ap = AlgorithmParameters.getInstance("OAEP", cp);
ap.init(spec);
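// Round-trip: DER-encode the parameters, parse the encoding back into a
// fresh AlgorithmParameters instance, and compare the recovered spec
// against the original.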
byte[] encoding = ap.getEncoded();
AlgorithmParameters ap2 = AlgorithmParameters.getInstance("OAEP", cp);
ap2.init(encoding);
OAEPParameterSpec spec2 = (OAEPParameterSpec) ap2.getParameterSpec
(OAEPParameterSpec.class);
return compareSpec(spec, spec2);
}
private static boolean compareMD(OAEPParameterSpec s1,
OAEPParameterSpec s2) {
boolean result = false;
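// normalize the digest names: strip dashes and case so that
// "SHA", "SHA1" and "SHA-1" compare as equal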
String alg1 = s1.getDigestAlgorithm().toUpperCase().trim();
String alg2 = s2.getDigestAlgorithm().toUpperCase().trim();
alg1 = alg1.replaceAll("\\-", "");
alg2 = alg2.replaceAll("\\-", "");
if (alg1.equals("SHA") || alg1.equals("SHA1")) {
result = (alg2.equals("SHA") || alg2.equals("SHA1"));
} else {
result = (alg1.equals(alg2));
}
return result;
}
private static boolean compareMGF(OAEPParameterSpec s1,
OAEPParameterSpec s2) {
String alg1 = s1.getMGFAlgorithm();
String alg2 = s2.getMGFAlgorithm();
if (alg1.equals(alg2)) {
MGF1ParameterSpec mp1 = (MGF1ParameterSpec)s1.getMGFParameters();
MGF1ParameterSpec mp2 = (MGF1ParameterSpec)s2.getMGFParameters();
alg1 = mp1.getDigestAlgorithm();
alg2 = mp2.getDigestAlgorithm();
if (alg1.equals(alg2)) {
return true;
} else {
System.out.println("MGF's MD algos: " + alg1 + " vs " + alg2);
return false;
}
} else {
System.out.println("MGF algos: " + alg1 + " vs " + alg2);
return false;
}
}
private static boolean comparePSource(OAEPParameterSpec s1,
OAEPParameterSpec s2) {
PSource src1 = s1.getPSource();
PSource src2 = s2.getPSource();
String alg1 = src1.getAlgorithm();
String alg2 = src2.getAlgorithm();
if (alg1.equals(alg2)) {
// assumes they are PSource.PSpecified
return Arrays.equals(((PSource.PSpecified) src1).getValue(),
((PSource.PSpecified) src2).getValue());
} else {
System.out.println("PSource algos: " + alg1 + " vs " + alg2);
return false;
}
}
private static boolean compareSpec(OAEPParameterSpec s1,
OAEPParameterSpec s2) {
return (compareMD(s1, s2) && compareMGF(s1, s2) &&
comparePSource(s1, s2));
}
public static void main(String[] argv) throws Exception {
boolean status = true;
byte[] p = { (byte) 0x01, (byte) 0x02, (byte) 0x03, (byte) 0x04 };
status &= runTest("SHA-224", MGF1ParameterSpec.SHA224, p);
status &= runTest("SHA-256", MGF1ParameterSpec.SHA256, p);
status &= runTest("SHA-384", MGF1ParameterSpec.SHA384, p);
status &= runTest("SHA-512", MGF1ParameterSpec.SHA512, p);
status &= runTest("SHA-512/224", MGF1ParameterSpec.SHA512_224, p);
status &= runTest("SHA-512/256", MGF1ParameterSpec.SHA512_256, p);
status &= runTest("SHA", MGF1ParameterSpec.SHA1, new byte[0]);
status &= runTest("SHA-1", MGF1ParameterSpec.SHA1, new byte[0]);
status &= runTest("SHA1", MGF1ParameterSpec.SHA1, new byte[0]);
if (status) {
System.out.println("Test Passed");
} else {
throw new Exception("One or More Test Failed");
}
}
}
| gpl-2.0 |
samuli/NDL-VuFind2 | module/Finna/src/Finna/AjaxHandler/BrowseActionTrait.php | 1874 | <?php
/**
* Browse action support trait
*
* PHP version 7
*
* Copyright (C) The National Library of Finland 2018.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package AJAX
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development Wiki
*/
namespace Finna\AjaxHandler;
/**
* Browse action support trait
*
* @category VuFind
* @package AJAX
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development Wiki
*/
trait BrowseActionTrait
{
/**
* Return browse action from the request.
*
* @param \Zend\Http\Request $request Request
*
* @return null|string Browse action or null if request is not a browse action
*/
protected function getBrowseAction($request)
{
$referer = $request->getServer()->get('HTTP_REFERER');
$match = null;
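// match referers whose path contains /Browse/Database or /Browse/Journal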
$regex = '/^http[s]?:.*\/Browse\/(Database|Journal)[\/.*]?/';
if (preg_match($regex, $referer, $match)) {
return $match[1];
}
return null;
}
}
| gpl-2.0 |
alvarotarela/php_users | Users/application/views/layouts/onix-business-portfolio-template/12-skins-v1/onix-html-color-fblue/blog.html | 34293 | <!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>ONIX Responsive Business & Portfolio Template</title>
<!-- Mobile Specific Metas -->
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="">
<meta name="author" content="">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<!-- CSS files begin-->
<link href='http://fonts.googleapis.com/css?family=Oswald:400,300,700|Open+Sans+Condensed:700,300,300italic|Open+Sans:400,300italic,400italic,600,600italic,700,700italic,800,800italic|PT+Sans:400,400italic,700,700italic' rel='stylesheet' type='text/css'>
<link href="assets/css/bootstrap.css" rel="stylesheet">
<link href="assets/css/bootstrap-responsive.css" rel="stylesheet">
<link href="assets/css/docs.css" rel="stylesheet">
<link href="assets/js/google-code-prettify/prettify.css" rel="stylesheet">
<link href="assets/css/responsiveslides.css" rel="stylesheet">
<link rel="stylesheet" href="assets/css/prettyPhoto.css" type='text/css'>
<link rel="stylesheet" href="assets/build/mediaelementplayer.min.css" />
<link rel="stylesheet" type="text/css" media="screen" href="assets/css/slide-in.css" />
<!--[if lt IE 9]><link rel="stylesheet" type="text/css" media="screen" href="assets/css/slide-in.ie.css" /><![endif]-->
<link href="assets/css/style.css" rel="stylesheet">
<!-- Color Style Setting CSS file-->
<link href="assets/css/color-theme/color-fblue.css" rel="stylesheet">
<!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
<!--[if lt IE 9]>
<script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->
<!-- fav and touch icons -->
<link rel="shortcut icon" href="assets/ico/favicon.ico">
<link rel="apple-touch-icon-precomposed" sizes="144x144" href="assets/ico/apple-touch-icon-144-precomposed.png">
<link rel="apple-touch-icon-precomposed" sizes="114x114" href="assets/ico/apple-touch-icon-114-precomposed.png">
<link rel="apple-touch-icon-precomposed" sizes="72x72" href="assets/ico/apple-touch-icon-72-precomposed.png">
<link rel="apple-touch-icon-precomposed" href="assets/ico/apple-touch-icon-57-precomposed.png">
</head>
<body>
<!-- Head
================================================== -->
<div class="top-bar">
<div class="container">
<div class="row">
<div class="span6">
<!-- text widget begin here -->
<ul class="info-text pull-left">
<li>
<a href="#">Lorem ipsum dolor sit amet, consectetuer adipiscing elit Aenean commodo ligula eget.</a>
</li>
<li>
<a href="#">
This informational text widget - Buy this theme now !
</a>
</li>
<li>
<a href="#">
Lorem ipsum dolor sit
</a>
amet, consectetuer adipiscing elit. Aenean commodo ligula eget.
</li>
</ul>
</div>
<div class="span6">
<!-- social begin here -->
<ul class="socicon right top-w">
<li>
<a href="#" class="share-icon">
</a>
</li>
<li>
<a href="#" class="google">
</a>
</li>
<li>
<a href="#" class="facebook">
</a>
</li>
<li>
<a href="#" class="twitter">
</a>
</li>
<li>
<a href="#" class="flickr">
</a>
</li>
<li>
<a href="#" class="dribbble">
</a>
</li>
<li>
<a href="#" class="linkedin">
</a>
</li>
<li class="last">
<a href="#" class="vimeo">
</a>
</li>
</ul>
<form class="navbar-search pull-right" action="">
<input class="search-query" type="text">
</form>
</div>
</div>
</div>
</div>
<!-- Logo / Menu
================================================== -->
<header class="header">
<div class="container">
<div class="row">
<div class="span4">
<a href="index.html" class="logo">
<img src="assets/img/logo.png" alt="">
</a>
</div>
<div class="span8">
<nav>
<ul class="right">
<li>
<a href="index.html">
home
</a>
<ul>
<li>
<a href="index_2.html">
home two
</a>
</li>
</ul>
</li>
<li>
<a href="about.html">
about
</a>
</li>
<li>
<a href="features.html">
features
</a>
</li>
<li>
<a href="portfolio.html">
portfolio
</a>
<ul>
<li>
<a href="portfolio-two.html">
portfolio two
</a>
</li>
<li>
<a href="portfolio-three.html">
portfolio three
</a>
</li>
<li>
<a href="portfolio-single.html">
single portfolio
</a>
</li>
<li>
<a href="portfolio-single-2.html">
single portfolio slider
</a>
</li>
<li>
<a href="portfolio-single-3.html">
single portfolio video
</a>
</li>
</ul>
</li>
<li class="current">
<a href="blog.html">
blog
</a>
<ul>
<li>
<a href="blog_rs.html">
blog right sidebar
</a>
</li>
<li>
<a href="single-post.html">
single post
</a>
<ul>
<li>
<a href="#">
example level
</a>
</li>
<li>
<a href="#">
example level
</a>
</li>
</ul>
</li>
</ul>
</li>
<li>
<a href="contact.html">
contact
</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
</header>
<div class="slider-cont">
<div class="container">
<header id="pagehead">
<h1>Blog<small> ⁄ Lorem ipsum dolor sit amet, consectetuer adipiscing elit. </small></h1>
</header>
</div>
</div>
<div class="container">
<!-- Blog post
================================================== -->
<section>
<div class="row">
<div class="span8">
<div class="row">
<div class="span8">
<div class="row">
<div class="span1">
<ul class="blog-meta meta pull-left"><li class="data"><h3>21 may</h3><p>2012</p></li><li class="post-format default"><span></span></li>
<li><a href="#">John Smith</a></li><hr>
<li><a href="#">Design</a>, <a href="#">Wordpress</a>,<a href="#"> Website</a></li><hr>
<li><a href="#">3 Comments</a></li><hr>
<li><!-- AddThis Button BEGIN -->
<div class="addthis_toolbox addthis_default_style center">
<a class="addthis_counter"></a>
</div>
<script type="text/javascript">var addthis_config = {"data_track_addressbar":true};</script>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#pubid=ra-4fbf81fd14a77faa"></script>
<!-- AddThis Button END --></li>
</ul>
</div>
<div class="span7">
<h2 class="post-title">Lorem ipsum dolor sit amet, consectetuer adipiscing elit.</h2>
<p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. </p>
<blockquote><p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu.</p>
<small>Someone famous</small></blockquote>
<p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu.</p>
<a class="btn">read more</a>
</div>
</div>
<div class="divider-post"></div>
</div>
<div class="span8">
<div class="row">
<div class="span1">
<ul class="blog-meta meta pull-left"><li class="data"><h3>21 may</h3><p>2012</p></li><li class="post-format audio"><span></span></li>
<li><a href="#">John Smith</a></li><hr>
<li><a href="#">Design</a>, <a href="#">Wordpress</a>,<a href="#"> Website</a></li><hr>
<li><a href="#">3 Comments</a></li><hr>
<li><!-- AddThis Button BEGIN -->
<div class="addthis_toolbox addthis_default_style center">
<a class="addthis_counter"></a>
</div>
<script type="text/javascript">var addthis_config = {"data_track_addressbar":true};</script>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#pubid=ra-4fbf81fd14a77faa"></script>
<!-- AddThis Button END --></li>
</ul>
</div>
<div class="span7">
<h2 class="post-title">Lorem ipsum dolor sit amet, consectetuer adipiscing elit.</h2>
<p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. </p>
<blockquote><p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu.</p>
<small>Someone famous</small></blockquote>
<audio src="assets/media/adg3com_electrofreak.mp3" type="audio/mp3" controls="controls"></audio>
</div>
</div>
<div class="divider-post"></div>
</div>
<div class="span8">
<div class="row">
<div class="span1">
<ul class="blog-meta meta pull-left"><li class="data"><h3>21 may</h3><p>2012</p></li><li class="post-format video"><span></span></li>
<li><a href="#">John Smith</a></li><hr>
<li><a href="#">Design</a>, <a href="#">Wordpress</a>,<a href="#"> Website</a></li><hr>
<li><a href="#">3 Comments</a></li><hr>
<li><!-- AddThis Button BEGIN -->
<div class="addthis_toolbox addthis_default_style center">
<a class="addthis_counter"></a>
</div>
<script type="text/javascript">var addthis_config = {"data_track_addressbar":true};</script>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#pubid=ra-4fbf81fd14a77faa"></script>
<!-- AddThis Button END --></li>
</ul>
</div>
<div class="span7">
<video src="assets/media/VH_videoAsset.flv" type="video/flv" controls="controls"></video>
<h2 class="post-title">Lorem ipsum dolor sit amet, consectetuer adipiscing elit.</h2>
<p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. </p>
<a class="btn">read more</a>
</div>
</div>
<div class="divider-post"></div>
</div>
<div class="span8">
<div class="row">
<div class="span1">
<ul class="blog-meta meta pull-left"><li class="data"><h3>21 may</h3><p>2012</p></li><li class="post-format quote"><span></span></li>
<li><a href="#">John Smith</a></li><hr>
<li><a href="#">Design</a>, <a href="#">Wordpress</a>,<a href="#"> Website</a></li><hr>
<li><a href="#">3 Comments</a></li><hr>
<li><!-- AddThis Button BEGIN -->
<div class="addthis_toolbox addthis_default_style center">
<a class="addthis_counter"></a>
</div>
<script type="text/javascript">var addthis_config = {"data_track_addressbar":true};</script>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#pubid=ra-4fbf81fd14a77faa"></script>
<!-- AddThis Button END --></li>
</ul>
</div>
<div class="span7">
<h2 class="post-title">Lorem ipsum dolor sit amet, consectetuer adipiscing elit.</h2>
<blockquote><p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. </p><p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu.</p>
<small>Someone famous</small></blockquote>
<a class="btn">read more</a>
</div>
</div>
<div class="divider-post"></div>
</div>
<div class="span8">
<div class="row">
<div class="span1">
<ul class="blog-meta meta pull-left"><li class="data"><h3>21 may</h3><p>2012</p></li><li class="post-format image"><span></span></li>
<li><a href="#">John Smith</a></li><hr>
<li><a href="#">Design</a>, <a href="#">Wordpress</a>,<a href="#"> Website</a></li><hr>
<li><a href="#">3 Comments</a></li><hr>
<li><!-- AddThis Button BEGIN -->
<div class="addthis_toolbox addthis_default_style center">
<a class="addthis_counter"></a>
</div>
<script type="text/javascript">var addthis_config = {"data_track_addressbar":true};</script>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#pubid=ra-4fbf81fd14a77faa"></script>
<!-- AddThis Button END --></li>
</ul>
</div>
<div class="span7">
<div class="row">
<div class="span7">
<img src="assets/img/slider-img03.jpg" alt="">
</div>
</div>
<h2 class="post-title">Lorem ipsum dolor sit amet, consectetuer adipiscing elit.</h2>
<p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. </p>
<a class="btn">read more</a>
</div>
</div>
<div class="divider-post"></div>
</div>
<div class="span8">
<div class="row">
<div class="span1">
<ul class="blog-meta meta pull-left"><li class="data"><h3>21 may</h3><p>2012</p></li><li class="post-format gallery"><span></span></li>
<li><a href="#">John Smith</a></li><hr>
<li><a href="#">Design</a>, <a href="#">Wordpress</a>,<a href="#"> Website</a></li><hr>
<li><a href="#">3 Comments</a></li><hr>
<li><!-- AddThis Button BEGIN -->
<div class="addthis_toolbox addthis_default_style center">
<a class="addthis_counter"></a>
</div>
<script type="text/javascript">var addthis_config = {"data_track_addressbar":true};</script>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#pubid=ra-4fbf81fd14a77faa"></script>
<!-- AddThis Button END --></li>
</ul>
</div>
<div class="span7">
<div class="row">
<div class="span7">
<div class="carousel" id="Carousel1">
<div class="carousel-inner">
<div class="active item"><img src="assets/img/slider-img01.jpg" alt="" /></div>
<div class="item"><img src="assets/img/slider-img02.jpg" alt="" /></div>
<div class="item"><img src="assets/img/slider-img03.jpg" alt="" /></div>
</div>
<!-- Carousel nav -->
<a class="carousel-control left" href="#Carousel1" data-slide="prev">‹</a>
<a class="carousel-control right" href="#Carousel1" data-slide="next">›</a>
</div>
</div>
</div>
<h2 class="post-title">Lorem ipsum dolor sit amet, consectetuer adipiscing elit.</h2>
<p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. </p>
<a class="btn">read more</a>
</div>
</div>
<div class="divider-post"></div>
</div>
<div class="span8">
<ul class="pagination">
<li><a href="#">Prev</a></li>
<li class="current">
<a href="#">1</a>
</li>
<li><a href="#">2</a></li>
<li><a href="#">3</a></li>
<li><a href="#">4</a></li>
<li><a href="#">Next</a></li>
</ul>
</div>
</div>
</div>
<!-- Sidebar
================================================== -->
<div class="span4">
<div class="accordion" id="accordion2">
<div class="accordion-group">
<div class="accordion-heading">
<a class="accordion-toggle" data-toggle="collapse" data-parent="#accordion2" href="#collapseOne">
Popular post
</a>
</div>
<div id="collapseOne" class="accordion-body collapse in">
<div class="accordion-inner">
<ul style="margin-left:0;">
<li><img src="assets/img/blog-examples/blog-home-c2.jpg" alt="" class="imageLeft" width="70" height="50"><p><a href="#">Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid ...</a></p></li>
<li><img src="assets/img/blog-examples/blog-home-c3.jpg" alt="" class="imageLeft" width="70" height="50"><p><a href="#">Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid ...</a></p></li>
<li><img src="assets/img/blog-examples/blog-home-c2.jpg" alt="" class="imageLeft" width="70" height="50"><p><a href="#">Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid ...</a></p></li>
</ul>
</div>
</div>
</div>
<div class="accordion-group">
<div class="accordion-heading">
<a class="accordion-toggle" data-toggle="collapse" data-parent="#accordion2" href="#collapseTwo">
Recent post
</a>
</div>
<div id="collapseTwo" class="accordion-body collapse">
<div class="accordion-inner">
<ul style="margin-left:0;">
<li><img src="assets/img/blog-examples/blog-home-c2.jpg" alt="" class="imageLeft" width="70" height="50"><p><a href="#">Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid ...</a></p></li>
<li><img src="assets/img/blog-examples/blog-home-c3.jpg" alt="" class="imageLeft" width="70" height="50"><p><a href="#">Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid ...</a></p></li>
<li><img src="assets/img/blog-examples/blog-home-c2.jpg" alt="" class="imageLeft" width="70" height="50"><p><a href="#">Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid ...</a></p></li>
</ul>
</div>
</div>
</div>
<div class="accordion-group">
<div class="accordion-heading">
<a class="accordion-toggle" data-toggle="collapse" data-parent="#accordion2" href="#collapseThree">
Tags
</a>
</div>
<div id="collapseThree" class="accordion-body collapse">
<div class="accordion-inner">
Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid. 3 wolf moon officia aute, non cupidatat skateboard dolor brunch. Food truck quinoa nesciunt laborum eiusmod. Brunch 3 wolf moon tempor, sunt aliqua put a bird on it squid single-origin coffee nulla assumenda shoreditch et. Nihil anim keffiyeh helvetica, craft beer labore wes anderson cred nesciunt sapiente ea proident. Ad vegan excepteur butcher vice lomo. Leggings occaecat craft beer farm-to-table, raw denim aesthetic synth nesciunt you probably haven't heard of them accusamus labore sustainable VHS.
</div>
</div>
</div>
</div>
<div class="divider-strip block-title"><h4>Example tabs</h4><span class="strip-block"></span></div>
<p><em>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. </em></p>
<ul id="myTab" class="nav nav-tabs">
<li class="active"><a href="#home" data-toggle="tab">Home</a></li>
<li><a href="#profile" data-toggle="tab">Profile</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Dropdown <b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="#dropdown1" data-toggle="tab">@fat</a></li>
<li><a href="#dropdown2" data-toggle="tab">@mdo</a></li>
</ul>
</li>
</ul>
<div id="myTabContent" class="tab-content">
<div class="tab-pane fade in active" id="home">
<p>Raw denim you probably haven't heard of them jean shorts Austin. Nesciunt tofu stumptown aliqua, retro synth master cleanse. Mustache cliche tempor, williamsburg carles vegan helvetica. Reprehenderit butcher retro keffiyeh dreamcatcher synth. Cosby sweater eu banh mi, qui irure terry richardson ex squid. Aliquip placeat salvia cillum iphone. Seitan aliquip quis cardigan american apparel, butcher voluptate nisi qui.</p>
</div>
<div class="tab-pane fade" id="profile">
<p>Food truck fixie locavore, accusamus mcsweeney's marfa nulla single-origin coffee squid. Exercitation +1 labore velit, blog sartorial PBR leggings next level wes anderson artisan four loko farm-to-table craft beer twee. Qui photo booth letterpress, commodo enim craft beer mlkshk aliquip jean shorts ullamco ad vinyl cillum PBR. Homo nostrud organic, assumenda labore aesthetic magna delectus mollit. Keytar helvetica VHS salvia yr, vero magna velit sapiente labore stumptown.</p>
</div>
<div class="tab-pane fade" id="dropdown1">
<p>Etsy mixtape wayfarers, ethical wes anderson tofu before they sold out mcsweeney's organic lomo retro fanny pack lo-fi farm-to-table readymade. Messenger bag gentrify pitchfork tattooed craft beer, iphone skateboard locavore carles etsy salvia banksy hoodie helvetica. DIY synth PBR banksy irony. Leggings gentrify squid 8-bit cred pitchfork. Williamsburg banh mi whatever gluten-free, carles pitchfork biodiesel fixie etsy retro mlkshk vice blog. Scenester cred you probably haven't heard of them, vinyl craft beer blog stumptown. Pitchfork sustainable tofu synth chambray yr.</p>
</div>
<div class="tab-pane fade" id="dropdown2">
<p>Trust fund seitan letterpress, keytar raw denim keffiyeh etsy art party before they sold out master cleanse gluten-free squid scenester freegan cosby sweater. Fanny pack portland seitan DIY, art party locavore wolf cliche high life echo park Austin. Cred vinyl keffiyeh DIY salvia PBR, banh mi before they sold out farm-to-table VHS viral locavore cosby sweater. Lomo wolf viral, mustache readymade thundercats keffiyeh craft beer marfa ethical. Wolf salvia freegan, sartorial keffiyeh echo park vegan.</p>
</div>
</div>
<div class="divider-strip block-title"><h4>Our Skills</h4><span class="strip-block"></span></div>
<p><em>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. </em></p>
<h6>Web design</h6>
<div class="progress progress-warning progress-striped active" style="margin-bottom: 9px;">
<div class="bar" style="width: 50%"></div>
</div>
<h6>Web design</h6>
<div class="progress progress-success progress-striped active" style="margin-bottom: 9px;">
<div class="bar" style="width: 70%"></div>
</div>
<h6>Web design</h6>
<div class="progress progress-striped active" style="margin-bottom: 9px;">
<div class="bar" style="width: 80%"></div>
</div>
<h6>Web design</h6>
<div class="progress progress-danger progress-striped active" style="margin-bottom: 9px;">
<div class="bar" style="width: 90%"></div>
</div>
<h6>Web design</h6>
<div class="progress progress-info progress-striped active" style="margin-bottom: 9px;">
<div class="bar" style="width: 100%"></div>
</div>
<div class="divider-strip block-title"><h4>Flickr Widget</h4><span class="strip-block"></span></div>
<script type="text/javascript" src="http://www.flickr.com/badge_code_v2.gne?count=8&source=user&user=52617155@N08&layout=x&display=random&size=s"></script>
<div class="divider-strip block-title"><h4>Gallery Widget</h4><span class="strip-block"></span></div>
<p><em>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. </em></p>
<div class="carousel" id="Carousel">
<div class="carousel-inner">
<div class="active item"><img src="assets/img/blog-examples/blog-home-c.jpg" alt="" width="370"></div>
<div class="item"><img src="assets/img/blog-examples/blog-home-c2.jpg" alt="" width="370"></div>
<div class="item"><img src="assets/img/blog-examples/blog-home-c3.jpg" alt="" width="370"></div>
</div>
<!-- Carousel nav -->
<a class="carousel-control left" href="#Carousel" data-slide="prev">‹</a>
<a class="carousel-control right" href="#Carousel" data-slide="next">›</a>
</div>
</div>
</div><!--/row-->
</section>
<div class="divider"></div>
</div><!-- /container -->
<!-- Footer
================================================== -->
<footer>
<div class="container">
<div class="row">
<div class="span4">
<h3>About</h3>
<p>Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem.</p>
<h3>Connect With Us</h3>
<!-- social begin here -->
<ul class="socicon left">
<li>
<a href="#" class="share-icon">
</a>
</li>
<li>
<a href="#" class="google">
</a>
</li>
<li>
<a href="#" class="facebook">
</a>
</li>
<li>
<a href="#" class="twitter">
</a>
</li>
<li>
<a href="#" class="flickr">
</a>
</li>
<li>
<a href="#" class="dribbble">
</a>
</li>
<li>
<a href="#" class="linkedin">
</a>
</li>
<li class="last">
<a href="#" class="vimeo">
</a>
</li>
</ul>
</div>
<!-- tweets begin here -->
<div class="span4">
<h3>Latest Tweets</h3>
<div class="tweets">
<p>
Loading Tweets...
</p>
<ul id="tweet-list">
</ul>
</div>
</div>
<div class="span4">
<!-- flickr begin here -->
<h3>From Flickr</h3>
<script type="text/javascript" src="http://www.flickr.com/badge_code_v2.gne?count=8&source=user&user=52617155@N08&layout=x&display=random&size=s"></script>
</div>
<div class="span12 copy">
© 2012 NLINE. All Rights Reserved.
</div>
</div>
</div>
</footer>
<!-- JavaScript files begin-->
<!-- Placed at the end of the document so the pages load faster -->
<script src="assets/js/jquery.js"></script>
<script src="http://maps.google.com/maps/api/js?sensor=false"></script>
<script src="assets/js/jquery.form.js"></script>
<script src="assets/js/jquery.ufvalidator-1.0.5.js"></script>
<script src="assets/js/jquery.easing.1.3.js"></script>
<script src="assets/js/jquery.cycle.all.js"></script>
<script src="assets/js/jquery.prettyPhoto.js"></script>
<script src="assets/js/google-code-prettify/prettify.js"></script>
<script src="assets/js/bootstrap.min.js"></script>
<script src="assets/js/application.js"></script>
<script src="assets/js/responsiveslides.min.js"></script>
<script src="assets/build/mediaelement-and-player.min.js"></script>
<script src="assets/js/gmap3.min.js"></script>
<script src="assets/js/custom.js"></script>
<script src="assets/js/jquery.ui.totop.js" type="text/javascript"></script>
<script type="text/javascript">
$(document).ready(function() {
$().UItoTop({ easingType: 'easeOutQuart' });
});
</script>
</body>
</html>
| gpl-2.0 |
ValuingElectronicMusic/network-analysis | remove_waves.py | 2612 | # If you start collecting a wave and then regret it, you can use this
# to roll back the data collection. I would recommend duplicating the database
# first and letting this program loose on a copy, as you won't be able to
# get back any of the data you don't explicitly tell it to keep.
import sqlite3
import itertools
import add_data as ad
def rollback(db_path, waves_to_keep=[], waves_to_lose=[]):
    '''waves_to_keep and waves_to_lose should be lists of names of wave
    tables in the database currently being cleaned'''
    conn = sqlite3.connect(db_path)
    curs = conn.cursor()
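    # NOTE: the triple-quoted block below looks like first-pass cleanup code
    # that has been disabled after running once: it drops the waves_to_lose
    # tables, renames users/x_follows_y to old_users/old_x_follows_y, and
    # rebuilds x_follows_y from the waves_to_keep users. The active code
    # further down assumes that pass has already happened, since it reads
    # from old_x_follows_y and old_users.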
    '''
    for wave in waves_to_lose:
        curs.execute('DROP TABLE {}'.format(wave))
    users_to_keep = []
    for wave in waves_to_keep:
        curs.execute('SELECT id FROM {}'.format(wave))
        users_to_keep.extend(curs.fetchall())
    curs.execute('ALTER TABLE users RENAME TO old_users')
    ad.create_table(curs, 'users')
    curs.execute('ALTER TABLE x_follows_y RENAME TO old_x_follows_y')
    ad.create_table(curs, 'x_follows_y')
    follow_data = set([])
    for n, user in enumerate(users_to_keep):
        curs.execute('SELECT follower,followed FROM old_x_follows_y '
                     'WHERE follower=?', user)
        follow_data.update(curs.fetchall())
        curs.execute('SELECT follower,followed FROM old_x_follows_y '
                     'WHERE followed=?', user)
        follow_data.update(curs.fetchall())
        if n % 250 == 0: print "{} users' follow data read.".format(n)
    curs.executemany('INSERT INTO x_follows_y VALUES (?,?)',
                     follow_data)
    conn.commit()
    print 'Cleaned x_follows_y table filled.'
    '''
    curs.execute('SELECT follower,followed FROM old_x_follows_y')
    follow_data = curs.fetchall()
    print 'Got follow data: {} follows'.format(len(follow_data))
    users_to_keep = set(itertools.chain.from_iterable(follow_data))
    print 'Got users from follow data: {} of them'.format(len(users_to_keep))
    print list(users_to_keep)[:10]
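    # Copy into the rebuilt users table only the rows whose ids still
    # appear somewhere in the follow data.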
    n = 0
    curs.execute('SELECT * FROM old_users')
    for i, user_data in enumerate(curs.fetchall()):
        if user_data[0] in users_to_keep:
            curs.execute('INSERT INTO users VALUES ('
                         '?,?,?,?,?,?,?,?,?,?,'
                         '?,?,?,?,?,?,?,?,?,?)', user_data)
            n += 1
        if i % 1000 == 0:
            print '{}th user details checked.'.format(i)
        if n % 1000 == 0:
            print '{}th user\'s details copied.'.format(n)
    print 'Gone through them all now'
    conn.commit()
    print 'Cleaned users table filled.'
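
# Example usage (hypothetical database path and wave-table names); run it
# against a duplicate of the database, as recommended above:
#
#     rollback('copy_of_network.db',
#              waves_to_keep=['wave_one', 'wave_two'],
#              waves_to_lose=['wave_three'])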
| gpl-2.0 |
AndresFelipe27/ProyectoIngenieriaSoftware3 | SW3/app/cache/prod/twig/6c/ac/ef96adfcc32d4e0dd4163ee51b1694ed3e96e083117399aab131e4fd4a6e.php | 4652 | <?php
/* BufeteAplicacionBundle:Documento:index.html.twig */
class __TwigTemplate_6cacef96adfcc32d4e0dd4163ee51b1694ed3e96e083117399aab131e4fd4a6e extends Twig_Template
{
public function __construct(Twig_Environment $env)
{
parent::__construct($env);
// line 1
try {
$this->parent = $this->env->loadTemplate("BufeteAplicacionBundle::Default/admin.html.twig");
} catch (Twig_Error_Loader $e) {
$e->setTemplateFile($this->getTemplateName());
$e->setTemplateLine(1);
throw $e;
}
$this->blocks = array(
'page' => array($this, 'block_page'),
);
}
protected function doGetParent(array $context)
{
return "BufeteAplicacionBundle::Default/admin.html.twig";
}
protected function doDisplay(array $context, array $blocks = array())
{
$this->parent->display($context, array_merge($this->blocks, $blocks));
}
// line 2
public function block_page($context, array $blocks = array())
{
// line 3
echo "
<div id=\"micaso\" class=\"fullsize\" style=\" background-color: #F0F0D8;height: 540px;\"> <h1>Documento list</h1>
<table class=\"records_list\">
<thead>
<tr>
<th>Id</th>
<th>Nombre</th>
<th>Descripcion</th>
<th>Documento</th>
<th>Actions</th>
</tr>
</thead>
<tbody>
";
// line 17
$context['_parent'] = (array) $context;
$context['_seq'] = twig_ensure_traversable((isset($context["entities"]) ? $context["entities"] : null));
foreach ($context['_seq'] as $context["_key"] => $context["entity"]) {
// line 18
echo " <tr>
<td><a href=\"";
// line 19
echo twig_escape_filter($this->env, $this->env->getExtension('routing')->getPath("documento_show", array("id" => $this->getAttribute($context["entity"], "id", array()))), "html", null, true);
echo "\">";
echo twig_escape_filter($this->env, $this->getAttribute($context["entity"], "id", array()), "html", null, true);
echo "</a></td>
<td>";
// line 20
echo twig_escape_filter($this->env, $this->getAttribute($context["entity"], "nombre", array()), "html", null, true);
echo "</td>
<td>";
// line 21
echo twig_escape_filter($this->env, $this->getAttribute($context["entity"], "descripcion", array()), "html", null, true);
echo "</td>
<td>";
// line 22
echo twig_escape_filter($this->env, $this->getAttribute($context["entity"], "documento", array()), "html", null, true);
echo "</td>
<td>
<ul>
<li>
<a href=\"";
// line 26
echo twig_escape_filter($this->env, $this->env->getExtension('routing')->getPath("documento_show", array("id" => $this->getAttribute($context["entity"], "id", array()))), "html", null, true);
echo "\">show</a>
</li>
<li>
<a href=\"";
// line 29
echo twig_escape_filter($this->env, $this->env->getExtension('routing')->getPath("documento_edit", array("id" => $this->getAttribute($context["entity"], "id", array()))), "html", null, true);
echo "\">edit</a>
</li>
</ul>
</td>
</tr>
";
}
$_parent = $context['_parent'];
unset($context['_seq'], $context['_iterated'], $context['_key'], $context['entity'], $context['_parent'], $context['loop']);
$context = array_intersect_key($context, $_parent) + $_parent;
// line 35
echo " </tbody>
</table>
<ul>
<li>
<a href=\"";
// line 40
echo $this->env->getExtension('routing')->getPath("documento_new");
echo "\">
Create a new entry
</a>
</li>
</ul>
</div>
";
}
public function getTemplateName()
{
return "BufeteAplicacionBundle:Documento:index.html.twig";
}
public function isTraitable()
{
return false;
}
public function getDebugInfo()
{
return array ( 108 => 40, 101 => 35, 89 => 29, 83 => 26, 76 => 22, 72 => 21, 68 => 20, 62 => 19, 59 => 18, 55 => 17, 39 => 3, 36 => 2, 11 => 1,);
}
}
| gpl-2.0 |
MattDevo/coreboot | src/mainboard/asus/p8h61-m_pro/gpio.c | 5265 | /*
* This file is part of the coreboot project.
*
* Copyright (C) 2018 Angel Pons <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <southbridge/intel/common/gpio.h>
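
/*
 * GPIO configuration tables for the PCH's three GPIO register sets:
 * set 1 covers GPIO0-31, set 2 GPIO32-63 and set 3 GPIO64-75. Each pin
 * is either left on its native function or used as a GPIO with the
 * direction, level and invert settings given below.
 */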
static const struct pch_gpio_set1 pch_gpio_set1_mode = {
.gpio0 = GPIO_MODE_GPIO,
.gpio1 = GPIO_MODE_GPIO,
.gpio2 = GPIO_MODE_NATIVE,
.gpio3 = GPIO_MODE_NATIVE,
.gpio4 = GPIO_MODE_NATIVE,
.gpio5 = GPIO_MODE_NATIVE,
.gpio6 = GPIO_MODE_GPIO,
.gpio7 = GPIO_MODE_GPIO,
.gpio8 = GPIO_MODE_GPIO,
.gpio9 = GPIO_MODE_NATIVE,
.gpio10 = GPIO_MODE_GPIO,
.gpio11 = GPIO_MODE_NATIVE,
.gpio12 = GPIO_MODE_GPIO,
.gpio13 = GPIO_MODE_GPIO,
.gpio14 = GPIO_MODE_GPIO,
.gpio15 = GPIO_MODE_GPIO,
.gpio16 = GPIO_MODE_GPIO,
.gpio17 = GPIO_MODE_GPIO,
.gpio18 = GPIO_MODE_NATIVE,
.gpio19 = GPIO_MODE_NATIVE,
.gpio20 = GPIO_MODE_NATIVE,
.gpio21 = GPIO_MODE_NATIVE,
.gpio22 = GPIO_MODE_NATIVE,
.gpio23 = GPIO_MODE_NATIVE,
.gpio24 = GPIO_MODE_GPIO,
.gpio25 = GPIO_MODE_NATIVE,
.gpio26 = GPIO_MODE_NATIVE,
.gpio27 = GPIO_MODE_GPIO,
.gpio28 = GPIO_MODE_GPIO,
.gpio29 = GPIO_MODE_GPIO,
.gpio30 = GPIO_MODE_NATIVE,
.gpio31 = GPIO_MODE_GPIO,
};
static const struct pch_gpio_set1 pch_gpio_set1_direction = {
.gpio0 = GPIO_DIR_OUTPUT,
.gpio1 = GPIO_DIR_INPUT,
.gpio6 = GPIO_DIR_INPUT,
.gpio7 = GPIO_DIR_INPUT,
.gpio8 = GPIO_DIR_INPUT,
.gpio10 = GPIO_DIR_INPUT,
.gpio12 = GPIO_DIR_INPUT,
.gpio13 = GPIO_DIR_INPUT,
.gpio14 = GPIO_DIR_INPUT,
.gpio15 = GPIO_DIR_OUTPUT,
.gpio16 = GPIO_DIR_INPUT,
.gpio17 = GPIO_DIR_INPUT,
.gpio24 = GPIO_DIR_OUTPUT,
.gpio27 = GPIO_DIR_INPUT,
.gpio28 = GPIO_DIR_OUTPUT,
.gpio29 = GPIO_DIR_INPUT,
.gpio31 = GPIO_DIR_OUTPUT,
};
static const struct pch_gpio_set1 pch_gpio_set1_level = {
.gpio0 = GPIO_LEVEL_LOW,
.gpio15 = GPIO_LEVEL_LOW,
.gpio24 = GPIO_LEVEL_LOW,
.gpio28 = GPIO_LEVEL_LOW,
.gpio31 = GPIO_LEVEL_HIGH,
};
static const struct pch_gpio_set1 pch_gpio_set1_reset = {
};
static const struct pch_gpio_set1 pch_gpio_set1_invert = {
.gpio1 = GPIO_INVERT,
.gpio6 = GPIO_INVERT,
.gpio13 = GPIO_INVERT,
};
static const struct pch_gpio_set1 pch_gpio_set1_blink = {
};
static const struct pch_gpio_set2 pch_gpio_set2_mode = {
.gpio32 = GPIO_MODE_GPIO,
.gpio33 = GPIO_MODE_GPIO,
.gpio34 = GPIO_MODE_GPIO,
.gpio35 = GPIO_MODE_NATIVE,
.gpio36 = GPIO_MODE_NATIVE,
.gpio37 = GPIO_MODE_NATIVE,
.gpio38 = GPIO_MODE_NATIVE,
.gpio39 = GPIO_MODE_NATIVE,
.gpio40 = GPIO_MODE_NATIVE,
.gpio41 = GPIO_MODE_NATIVE,
.gpio42 = GPIO_MODE_GPIO,
.gpio43 = GPIO_MODE_NATIVE,
.gpio44 = GPIO_MODE_NATIVE,
.gpio45 = GPIO_MODE_NATIVE,
.gpio46 = GPIO_MODE_GPIO,
.gpio47 = GPIO_MODE_NATIVE,
.gpio48 = GPIO_MODE_NATIVE,
.gpio49 = GPIO_MODE_GPIO,
.gpio50 = GPIO_MODE_NATIVE,
.gpio51 = GPIO_MODE_NATIVE,
.gpio52 = GPIO_MODE_NATIVE,
.gpio53 = GPIO_MODE_NATIVE,
.gpio54 = GPIO_MODE_NATIVE,
.gpio55 = GPIO_MODE_NATIVE,
.gpio56 = GPIO_MODE_NATIVE,
.gpio57 = GPIO_MODE_GPIO,
.gpio58 = GPIO_MODE_NATIVE,
.gpio59 = GPIO_MODE_NATIVE,
.gpio60 = GPIO_MODE_NATIVE,
.gpio61 = GPIO_MODE_GPIO,
.gpio62 = GPIO_MODE_NATIVE,
.gpio63 = GPIO_MODE_NATIVE,
};
static const struct pch_gpio_set2 pch_gpio_set2_direction = {
.gpio32 = GPIO_DIR_OUTPUT,
.gpio33 = GPIO_DIR_OUTPUT,
.gpio34 = GPIO_DIR_INPUT,
.gpio42 = GPIO_DIR_INPUT,
.gpio46 = GPIO_DIR_INPUT,
.gpio49 = GPIO_DIR_INPUT,
.gpio57 = GPIO_DIR_INPUT,
.gpio61 = GPIO_DIR_OUTPUT,
};
static const struct pch_gpio_set2 pch_gpio_set2_level = {
.gpio32 = GPIO_LEVEL_HIGH,
.gpio33 = GPIO_LEVEL_HIGH,
.gpio61 = GPIO_LEVEL_HIGH,
};
static const struct pch_gpio_set2 pch_gpio_set2_reset = {
};
static const struct pch_gpio_set3 pch_gpio_set3_mode = {
.gpio64 = GPIO_MODE_NATIVE,
.gpio65 = GPIO_MODE_NATIVE,
.gpio66 = GPIO_MODE_NATIVE,
.gpio67 = GPIO_MODE_NATIVE,
.gpio68 = GPIO_MODE_GPIO,
.gpio69 = GPIO_MODE_GPIO,
.gpio70 = GPIO_MODE_NATIVE,
.gpio71 = GPIO_MODE_NATIVE,
.gpio72 = GPIO_MODE_GPIO,
.gpio73 = GPIO_MODE_NATIVE,
.gpio74 = GPIO_MODE_NATIVE,
.gpio75 = GPIO_MODE_NATIVE,
};
static const struct pch_gpio_set3 pch_gpio_set3_direction = {
.gpio68 = GPIO_DIR_INPUT,
.gpio69 = GPIO_DIR_INPUT,
.gpio72 = GPIO_DIR_INPUT,
};
static const struct pch_gpio_set3 pch_gpio_set3_level = {
};
static const struct pch_gpio_set3 pch_gpio_set3_reset = {
};
const struct pch_gpio_map mainboard_gpio_map = {
.set1 = {
.mode = &pch_gpio_set1_mode,
.direction = &pch_gpio_set1_direction,
.level = &pch_gpio_set1_level,
.blink = &pch_gpio_set1_blink,
.invert = &pch_gpio_set1_invert,
.reset = &pch_gpio_set1_reset,
},
.set2 = {
.mode = &pch_gpio_set2_mode,
.direction = &pch_gpio_set2_direction,
.level = &pch_gpio_set2_level,
.reset = &pch_gpio_set2_reset,
},
.set3 = {
.mode = &pch_gpio_set3_mode,
.direction = &pch_gpio_set3_direction,
.level = &pch_gpio_set3_level,
.reset = &pch_gpio_set3_reset,
},
};
| gpl-2.0 |
asuradaimao/dolphin | Source/Core/DolphinQt2/GameList/GameListProxyModel.cpp | 2678 | // Copyright 2015 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include "DolphinQt2/Resources.h"
#include "DolphinQt2/GameList/GameListModel.h"
#include "DolphinQt2/GameList/GameListProxyModel.h"
static constexpr QSize NORMAL_BANNER_SIZE(96, 32);
static constexpr QSize LARGE_BANNER_SIZE(144, 48);
// Convert an integer size to a friendly string representation.
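// e.g. FormatSize(1536) -> "1.5 KB", FormatSize(512) -> "512.0 B".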
static QString FormatSize(qint64 size)
{
QStringList units{
QStringLiteral("KB"),
QStringLiteral("MB"),
QStringLiteral("GB"),
QStringLiteral("TB")
};
QStringListIterator i(units);
QString unit = QStringLiteral("B");
double num = (double) size;
while (num > 1024.0 && i.hasNext())
{
unit = i.next();
num /= 1024.0;
}
return QStringLiteral("%1 %2").arg(QString::number(num, 'f', 1)).arg(unit);
}
GameListProxyModel::GameListProxyModel(QObject* parent)
: QSortFilterProxyModel(parent)
{
}
QVariant GameListProxyModel::data(const QModelIndex& i, int role) const
{
QModelIndex source_index = mapToSource(i);
QVariant source_data = sourceModel()->data(source_index, Qt::DisplayRole);
if (role == Qt::DisplayRole)
{
switch (i.column())
{
// Sort by the integer but display the formatted string.
case GameListModel::COL_SIZE:
return FormatSize(source_data.toULongLong());
// These fall through to the underlying model.
case GameListModel::COL_ID:
case GameListModel::COL_TITLE:
case GameListModel::COL_DESCRIPTION:
case GameListModel::COL_MAKER:
return source_data;
// Show the title in the display role of the icon view.
case GameListModel::COL_LARGE_ICON:
return data(index(i.row(), GameListModel::COL_TITLE), Qt::DisplayRole);
}
}
else if (role == Qt::DecorationRole)
{
switch (i.column())
{
// Show icons in the decoration roles. This lets us sort by the
// underlying ints, but display just the icons without doing any
// fixed-width hacks.
case GameListModel::COL_PLATFORM:
return Resources::GetPlatform(source_data.toInt());
case GameListModel::COL_BANNER:
return source_data.value<QPixmap>().scaled(
NORMAL_BANNER_SIZE,
Qt::KeepAspectRatio,
Qt::SmoothTransformation);
case GameListModel::COL_COUNTRY:
return Resources::GetCountry(source_data.toInt());
case GameListModel::COL_RATING:
return Resources::GetRating(source_data.toInt());
// Show a scaled icon in the decoration role of the icon view.
case GameListModel::COL_LARGE_ICON:
return data(index(i.row(), GameListModel::COL_BANNER), Qt::DecorationRole)
.value<QPixmap>().scaled(
LARGE_BANNER_SIZE,
Qt::KeepAspectRatio,
Qt::SmoothTransformation);
}
}
return QVariant();
}
| gpl-2.0 |
varunarora/OpenCurriculum | sites/all/themes/opencurriculum/templates/page.tpl.php | 6764 | <?php
/**
* @file
* Bartik's theme implementation to display a single Drupal page.
*
* The doctype, html, head and body tags are not in this template. Instead they
* can be found in the html.tpl.php template normally located in the
* modules/system folder.
*
* Available variables:
*
* General utility variables:
* - $base_path: The base URL path of the Drupal installation. At the very
* least, this will always default to /.
* - $directory: The directory the template is located in, e.g. modules/system
* or themes/bartik.
* - $is_front: TRUE if the current page is the front page.
* - $logged_in: TRUE if the user is registered and signed in.
* - $is_admin: TRUE if the user has permission to access administration pages.
*
* Site identity:
* - $front_page: The URL of the front page. Use this instead of $base_path,
* when linking to the front page. This includes the language domain or
* prefix.
* - $logo: The path to the logo image, as defined in theme configuration.
* - $site_name: The name of the site, empty when display has been disabled
* in theme settings.
* - $site_slogan: The slogan of the site, empty when display has been disabled
* in theme settings.
* - $hide_site_name: TRUE if the site name has been toggled off on the theme
* settings page. If hidden, the "element-invisible" class is added to make
* the site name visually hidden, but still accessible.
* - $hide_site_slogan: TRUE if the site slogan has been toggled off on the
* theme settings page. If hidden, the "element-invisible" class is added to
* make the site slogan visually hidden, but still accessible.
*
* Navigation:
* - $main_menu (array): An array containing the Main menu links for the
* site, if they have been configured.
* - $secondary_menu (array): An array containing the Secondary menu links for
* the site, if they have been configured.
* - $breadcrumb: The breadcrumb trail for the current page.
*
* Page content (in order of occurrence in the default page.tpl.php):
* - $title_prefix (array): An array containing additional output populated by
* modules, intended to be displayed in front of the main title tag that
* appears in the template.
* - $title: The page title, for use in the actual HTML content.
* - $title_suffix (array): An array containing additional output populated by
* modules, intended to be displayed after the main title tag that appears in
* the template.
* - $messages: HTML for status and error messages. Should be displayed
* prominently.
* - $tabs (array): Tabs linking to any sub-pages beneath the current page
* (e.g., the view and edit tabs when displaying a node).
* - $action_links (array): Actions local to the page, such as 'Add menu' on the
* menu administration interface.
* - $feed_icons: A string of all feed icons for the current page.
* - $node: The node object, if there is an automatically-loaded node
* associated with the page, and the node ID is the second argument
* in the page's path (e.g. node/12345 and node/12345/revisions, but not
* comment/reply/12345).
*
* Regions:
 * - $page['search']: Items for the search region in the header.
 * - $page['help']: Dynamic help text, mostly for admin pages.
 * - $page['content']: The main content of the current page.
 * - $page['highlighted']: Items for the highlighted content region.
 * - $page['footer']: Items for the footer region.
*
* @see template_preprocess()
* @see template_preprocess_page()
* @see template_process()
* @see bartik_process_page()
*/
?>
<script type="text/javascript">
var uvOptions = {};
(function() {
var uv = document.createElement('script'); uv.type = 'text/javascript'; uv.async = true;
uv.src = ('https:' == document.location.protocol ? 'https://' : 'http://') + 'widget.uservoice.com/aKrd0ayIyn6BsSmA8yegCQ.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(uv, s);
})();
</script>
<div id="header">
<div id="header-content">
<div id="header-left">
<a href="<?php print $front_page; ?>"><img src="<?php print $logo; ?>" alt="The Open Curriculum Project" /></a><br/>
<span id="one-liner">
<?php if ($site_slogan): ?>
<?php print $site_slogan; ?>
<?php endif; ?>
</span>
</div>
<div id="header-right">
<?php /*if ($secondary_menu): ?>
<?php print theme('links__system_secondary_menu', array(
'links' => $secondary_menu,
'attributes' => array(
'id' => 'secondary-menu-links',
'class' => array('links', 'inline', 'clearfix'),
),
'heading' => array(
'text' => t('Secondary menu'),
'level' => 'h2',
'class' => array('element-invisible'),
),
)); ?>
<!-- /#secondary-menu -->
<?php endif;*/ ?>
<?php include('login.inc') ?>
<?php print render($page['search']); ?>
<?php include('gradeblock.inc') ?>
</div>
</div>
</div>
<div id="main">
<?php if ($main_menu): ?>
<div id="main-menu" class="navigation">
<?php print theme('links__system_main_menu', array(
'links' => $main_menu,
'attributes' => array(
'id' => 'main-menu-links',
'class' => array('links', 'clearfix'),
),
'heading' => array(
'text' => t('Main menu'),
'level' => 'h2',
'class' => array('element-invisible'),
),
)); ?>
</div> <!-- /#main-menu -->
<?php endif; ?>
<div id="left-panel">
<?php include('left_menu.inc'); ?>
</div>
<div id="right-panel">
<?php print render($title_prefix); ?>
<?php if ($title): ?>
<h1 class="title" id="page-title">
<?php print $title; ?>
</h1>
<?php endif; ?>
<?php print render($title_suffix); ?>
<?php if ($breadcrumb): ?>
<div id="breadcrumb"><?php print $breadcrumb; ?></div>
<?php endif; ?>
<?php if ($tabs): ?><p/>
<div class="tabs">
<?php print render($tabs); ?>
</div>
<?php endif; ?>
<?php print render($page['help']); ?>
<?php if ($action_links): ?>
<ul class="action-links">
<?php print render($action_links); ?>
</ul>
<?php endif; ?><p/>
<?php if ($messages): ?>
<div id="messages"><div class="section clearfix">
<?php print $messages; ?>
</div></div> <!-- /.section, /#messages -->
<?php endif; ?>
<?php print render($page['content']); ?>
</div>
</div>
<?php include('footer.inc') ?>
<?php if ($page['highlighted']): ?><div id="highlighted"><?php print render($page['highlighted']); ?></div><?php endif; ?>
<!-- Content was HERE-->
| gpl-2.0 |
QEF/q-e_schrodinger | PP/src/d_matrix_so.f90 | 9353 | !
! Copyright (C) 2001 PWSCF group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
!
!---------------------------------------------------------------
SUBROUTINE d_matrix_so (dyj12, dyj32, dyj52, dyj72)
!---------------------------------------------------------------
!
! Provides symmetry operations in the j=1/2, j=3/2, j=5/2 and j=7/2
! subspaces
!
USE kinds, ONLY: DP
USE symm_base, ONLY: nsym, sr
USE upf_spinorb, ONLY : maxl => lmaxx, rot_ylm
USE random_numbers, ONLY : randy
USE matrix_inversion
!
IMPLICIT NONE
!
! first the input/output variables
!
COMPLEX(DP) :: dyj12 (2, 2, 48), dyj32 (4, 4, 48), dyj52 (6, 6, 48), &
dyj72 (8, 8, 48)
!
! output: symmetry matrices in the j=1/2, j=3/2, j=5/2 and j=7/2 subspace
! respectively
!
! here the local parameters
!
INTEGER, PARAMETER :: maxm = 2*maxl+1, &
maxlm = (maxl+1)*(maxl+1)
INTEGER, PARAMETER :: maxmj = 2*maxl+2
!
! maxl = max value of l allowed
! maxm = number of m components for l=maxl
! maxlm= number of l,m spherical harmonics for l <= maxl
! maxmj = number of mj components
!
REAL(DP), PARAMETER :: eps = 1.0d-8
!
! and the local variables
!
INTEGER :: m, n, m1, n1, ipol, isym
INTEGER :: l, n2, ind, ind1, ind2
REAL(DP) :: j, ylm(maxm, maxlm), ylms(maxm, maxlm), &
rl(3,maxm), rrl (maxm), srl(3,maxm), capel
REAL(DP) :: Ulall(maxl,maxmj+(maxmj-1),maxmj+(maxmj-1)), &
Ul1(6,6), Ul1_inv(6,6), Ul3(14,14), Ul3_inv(14,14)
COMPLEX(DP) :: dy1 (3, 3, 48), dy2 (5, 5, 48), &
dy3 (7, 7, 48), dy112 (6, 6, 48), &
dy212 (10, 10, 48), dy312 (14, 14, 48), &
d12_con(2,2), d32_con(4,4), d52_con(6,6), d72_con(8,8), &
s_spin(2,2), delta(8,8)
COMPLEX(DP) :: ylm_compl(maxm, maxlm), ylms_compl(maxm, maxlm), &
yl1 (3, 3), yl2(5, 5), yl3(7,7), &
yl1_inv (3, 3), yl2_inv(5, 5), yl3_inv(7, 7), &
Ul1C(6,6), Ul1C_inv(6,6), Ul3C(14,14), Ul3C_inv(14,14)
REAL(DP), EXTERNAL :: spinor
COMPLEX(DP), EXTERNAL :: ZDOTU
!
! Transformation matrices from the | l m s s_z > basis to the
! | j mj l s > basis in the l-subspace
!
Ulall (:,:,:) = 0.d0
DO l = 1, 3
j = dble(l) - 0.5d0
DO m1= 1, 2*l
m= m1 - l
Ulall (l,m1,2*(m1-1)+1) = spinor (l,j,m,1)
Ulall (l,m1,2*(m1-1)+4) = spinor (l,j,m,2)
ENDDO
j = dble(l) + 0.5d0
DO m1= 1, 2*l + 2
m = m1 - l - 2
IF (m1 == 1) THEN
Ulall (l,m1+2*l,2*(m1-1)+2) = spinor (l,j,m,2)
ELSEIF (m1==2*l+2) THEN
Ulall (l,m1+2*l,2*(m1-1)-1) = spinor (l,j,m,1)
ELSE
Ulall (l,m1+2*l,2*(m1-1)-1) = spinor (l,j,m,1)
Ulall (l,m1+2*l,2*(m1-1)+2) = spinor (l,j,m,2)
ENDIF
ENDDO
ENDDO
!
! invert Ulall for l = 1 and l = 3 blocks
! (we don't need to invert l = 2 block, see below)
!
! l = 1 block
!
DO m = 1, 6
DO n = 1, 6
Ul1 (m, n) = Ulall (1, m, n)
ENDDO
ENDDO
CALL invmat (6, Ul1, Ul1_inv)
Ul1C (:,:) = cmplx(Ul1 (:,:), 0.d0,kind=DP)
Ul1C_inv (:,:) = cmplx(Ul1_inv (:,:), 0.d0,kind=DP)
!
! l = 3 block
!
DO m = 1, 14
DO n = 1, 14
Ul3 (m, n) = Ulall (3, m, n)
ENDDO
ENDDO
CALL invmat (14, Ul3, Ul3_inv)
Ul3C (:,:) = cmplx(Ul3 (:,:), 0.d0,kind=DP)
Ul3C_inv (:,:) = cmplx(Ul3_inv (:,:), 0.d0,kind=DP)
!
! randomly distributed points on a sphere
!
DO m = 1, maxm
rl (1, m) = randy () - 0.5d0
rl (2, m) = randy () - 0.5d0
rl (3, m) = randy () - 0.5d0
rrl (m) = rl (1,m)**2 + rl (2,m)**2 + rl (3,m)**2
ENDDO
CALL ylmr2 ( maxlm, 2*maxl+1, rl, rrl, ylm )
!
! calculate complex spherical harmonics
!
ylm_compl = (0.d0,0.d0)
DO l = 1, maxl
DO m = -l, l
ind = maxl + 1 + m
ind1 = l**2 + l + 1 + m
DO n2 = 1, 2*l+1
ind2 = l**2 + n2
IF (abs(rot_ylm(ind,n2))>1.d-8) &
ylm_compl(:,ind1) = ylm_compl(:,ind1) + rot_ylm(ind,n2)*ylm(:,ind2)
ENDDO
ENDDO
ENDDO
!
! invert Yl for each block of definite l (note the transpose operation)
!
! l = 1 block
!
DO m = 1, 3
DO n = 1, 3
yl1 (m, n) = ylm_compl (n, 1+m)
ENDDO
ENDDO
CALL invmat (3, yl1, yl1_inv)
!
! l = 2 block
!
DO m = 1, 5
DO n = 1, 5
yl2 (m, n) = ylm_compl (n, 4+m)
ENDDO
ENDDO
CALL invmat (5, yl2, yl2_inv)
!
! l = 3 block
!
DO m = 1, 7
DO n = 1, 7
yl3 (m, n) = ylm_compl (n, 9+m)
ENDDO
ENDDO
CALL invmat (7, yl3, yl3_inv)
!
! now for each symmetry operation of the point-group ...
!
DO isym = 1, nsym
!
! s_spin = rotation matrix for spin components (complex conjugate)
!
CALL find_u(sr(1,1,isym), s_spin)
s_spin(:,:) = CONJG(s_spin(:,:))
!
! srl(:,m) = rotated rl(:,m) vectors
!
srl = matmul (sr(:,:,isym), rl)
!
CALL ylmr2 ( maxlm, maxm, srl, rrl, ylms )
!
! find D_S = Yl_S * Yl_inv (again, beware the transpose)
! and the rotation matrices for the | j mj l s > basis
!
ylms_compl = (0.d0,0.d0)
DO l = 1, maxl
DO m = -l, l
ind = maxl + 1 + m
ind1 = l**2 + l + 1 + m
DO n2 = 1, 2*l+1
ind2 = l**2 + n2
IF (abs(rot_ylm(ind,n2))>1.d-8) &
ylms_compl (:,ind1) = ylms_compl (:,ind1) &
+ rot_ylm (ind,n2) * ylms (:,ind2)
ENDDO
ENDDO
ENDDO
!
! l = 1 block => j=1/2 and j=3/2
!
DO m = 1, 3
DO n = 1, 3
yl1 (m, n) = ylms_compl (n, 1+m)
ENDDO
ENDDO
dy1 (:, :, isym) = matmul (yl1 (:,:), yl1_inv (:,:))
DO m = 1, 3
DO n = 1, 3
DO m1 = 1, 2
DO n1 = 1, 2
dy112 (2*(m-1)+m1, 2*(n-1)+n1, isym) = dy1 (m, n, isym) &
* s_spin (m1, n1)
ENDDO
ENDDO
ENDDO
ENDDO
dy112 (:, :, isym) = matmul (dy112 (:,:,isym), Ul1C_inv (:,:))
dy112 (:, :, isym) = matmul (Ul1C (:,:), dy112 (:,:,isym))
DO m = 1, 2
DO n = 1, 2
dyj12 (m, n, isym) = conjg(dy112 (n, m, isym))
ENDDO
ENDDO
DO m = 1, 4
DO n = 1, 4
dyj32 (m ,n, isym) = conjg(dy112 (2+n, 2+m, isym))
ENDDO
ENDDO
!
! l = 3 block => j=5/2 and j=7/2
!
DO m = 1, 7
DO n = 1, 7
yl3 (m, n) = ylms_compl (n, 9+m)
ENDDO
ENDDO
dy3 (:, :, isym) = matmul (yl3 (:,:), yl3_inv (:,:))
DO m = 1, 7
DO n = 1, 7
DO m1 = 1, 2
DO n1 = 1, 2
dy312 (2*(m-1)+m1, 2*(n-1)+n1, isym) = dy3 (m, n, isym) &
* s_spin (m1, n1)
ENDDO
ENDDO
ENDDO
ENDDO
dy312 (:, :, isym) = matmul (dy312 (:,:,isym), Ul3C_inv (:,:))
dy312 (:, :, isym) = matmul (Ul3C (:,:), dy312 (:,:,isym))
DO m = 1, 6
DO n = 1, 6
dyj52 (m, n, isym) = conjg(dy312 (n, m, isym))
ENDDO
ENDDO
DO m = 1, 8
DO n = 1, 8
dyj72 (m ,n, isym) = conjg(dy312 (6+n, 6+m, isym))
ENDDO
ENDDO
!
ENDDO
!
! check that D_S matrices are unitary as they should
!
delta (:,:) = (0.d0,0.d0)
DO m= 1, 8
delta(m,m) = (1.d0,0.d0)
ENDDO
DO isym =1,nsym
!
! j = 1/2 block
!
capel = 0.d0
d12_con(:,:) = conjg(dyj12 (:,:,isym))
DO m = 1, 2
DO n = 1, 2
capel = capel + &
abs( ZDOTU(2, d12_con(1,m), 1, dyj12(1,n,isym), 1) - delta(m,n) )**2
ENDDO
ENDDO
IF (capel>eps) CALL errore ('d_matrix_so', &
'D_S (j=1/2) for this symmetry operation is not unitary',isym)
!
! j = 3/2 block
!
capel = 0.d0
d32_con(:,:) = conjg(dyj32 (:,:,isym))
DO m = 1, 4
DO n = 1, 4
capel = capel + &
abs( ZDOTU(4, d32_con(1,m), 1, dyj32(1,n,isym), 1) - delta(m,n) )**2
ENDDO
ENDDO
IF (capel>eps) CALL errore ('d_matrix_so', &
'D_S (j=3/2) for this symmetry operation is not unitary',isym)
!
! j = 5/2 block
!
capel = 0.d0
d52_con(:,:) = conjg(dyj52 (:,:,isym))
DO m = 1, 6
DO n = 1, 6
capel = capel + &
abs( ZDOTU(6, d52_con(1,m), 1, dyj52(1,n,isym), 1) - delta(m,n) )**2
ENDDO
ENDDO
IF (capel>eps) CALL errore ('d_matrix_so', &
'D_S (j=5/2) for this symmetry operation is not unitary',isym)
!
! j = 7/2 block
!
capel = 0.d0
d72_con(:,:) = conjg(dyj72 (:,:,isym))
DO m = 1, 8
DO n = 1, 8
capel = capel + &
abs( ZDOTU(8, d72_con(1,m), 1, dyj72(1,n,isym), 1) - delta(m,n) )**2
ENDDO
ENDDO
IF (capel>eps) CALL errore ('d_matrix_so', &
'D_S (j=7/2) for this symmetry operation is not unitary',isym)
!
ENDDO
!
RETURN
!
END SUBROUTINE d_matrix_so
| gpl-2.0 |
napcode/ardour | libs/pbd/pbd.cc | 2849 | /*
* Copyright (C) 2013-2015 Tim Mayberry <[email protected]>
* Copyright (C) 2014-2016 Paul Davis <[email protected]>
* Copyright (C) 2014-2019 Robin Gareus <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <iostream>
#include <cstdlib>
#include <string>
#ifdef PLATFORM_WINDOWS
#include <fcntl.h>
#endif
#include <giomm.h>
#include <glibmm/thread.h>
#include "pbd/pbd.h"
#include "pbd/debug.h"
#include "pbd/error.h"
#include "pbd/id.h"
#include "pbd/enumwriter.h"
#include "pbd/fpu.h"
#include "pbd/xml++.h"
#ifdef PLATFORM_WINDOWS
#include <winsock2.h>
#include "pbd/windows_timer_utils.h"
#include "pbd/windows_mmcss.h"
#endif
#include "pbd/i18n.h"
extern void setup_libpbd_enums ();
namespace {
static bool libpbd_initialized = false;
#ifdef PLATFORM_WINDOWS
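
/* When the PBD_TEST_TIMERS environment variable is set, check whether the
 * Windows QPC timer source is usable and log the result.
 */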
static
void
test_timers_from_env ()
{
bool set;
std::string options;
options = Glib::getenv ("PBD_TEST_TIMERS", set);
if (set) {
if (!PBD::QPC::check_timer_valid ()) {
PBD::error << X_("Windows QPC Timer source not usable") << endmsg;
} else {
PBD::info << X_("Windows QPC Timer source usable") << endmsg;
}
}
}
#endif
} // namespace
bool
PBD::init ()
{
if (libpbd_initialized) {
return true;
}
#ifdef PLATFORM_WINDOWS
// Essential!! Make sure that any files used by Ardour
// will be created or opened in BINARY mode!
_fmode = O_BINARY;
WSADATA wsaData;
/* Initialize windows socket DLL for PBD::CrossThreadChannel
*/
if (WSAStartup(MAKEWORD(1,1),&wsaData) != 0) {
error << X_("Windows socket initialization failed with error: ") << WSAGetLastError() << endmsg;
return false;
}
QPC::initialize();
test_timers_from_env ();
if (!PBD::MMCSS::initialize()) {
PBD::info << X_("Unable to initialize MMCSS") << endmsg;
} else {
PBD::info << X_("MMCSS Initialized") << endmsg;
}
#endif
if (!Glib::thread_supported()) {
Glib::thread_init();
}
Gio::init ();
PBD::ID::init ();
setup_libpbd_enums ();
libpbd_initialized = true;
return true;
}
void
PBD::cleanup ()
{
#ifdef PLATFORM_WINDOWS
PBD::MMCSS::deinitialize ();
WSACleanup();
#endif
EnumWriter::destroy ();
FPU::destroy ();
}
| gpl-2.0 |
tobiasjakobi/mpv | stream/dvbin.h | 5066 | /* Imported from the dvbstream project
*
* Modified for use with MPlayer, for details see the changelog at
* http://svn.mplayerhq.hu/mplayer/trunk/
* $Id$
*/
#ifndef MPLAYER_DVBIN_H
#define MPLAYER_DVBIN_H
#include "config.h"
#include "stream.h"
#if !HAVE_GPL
#error GPL only
#endif
#define SLOF (11700 * 1000UL)
#define LOF1 (9750 * 1000UL)
#define LOF2 (10600 * 1000UL)
#include <inttypes.h>
#include <linux/dvb/dmx.h>
#include <linux/dvb/frontend.h>
#include <linux/dvb/video.h>
#include <linux/dvb/audio.h>
#include <linux/dvb/version.h>
#undef DVB_ATSC
#if defined(DVB_API_VERSION_MINOR)
/* kernel headers >=2.6.28 have version 5.
*
* Version 5 is also called S2API, it adds support for tuning to S2 channels
* and is extensible for future delivery systems. Old API is deprecated.
* StreamID-implementation only supported since API >=5.2.
* At least DTV_ENUM_DELSYS requires 5.5.
*/
#if (DVB_API_VERSION == 5 && DVB_API_VERSION_MINOR >= 5)
#define DVB_USE_S2API 1
// This had a different name until API 5.8.
#ifndef DTV_STREAM_ID
#define DTV_STREAM_ID DTV_ISDBS_TS_ID
#endif
#endif
// This is only defined, for convenience, since API 5.8.
#ifndef NO_STREAM_ID_FILTER
#define NO_STREAM_ID_FILTER (~0U)
#endif
#if (DVB_API_VERSION == 3 && DVB_API_VERSION_MINOR >= 1) || DVB_API_VERSION == 5
#define DVB_ATSC 1
#endif
#endif
#define DVB_CHANNEL_LOWER -1
#define DVB_CHANNEL_HIGHER 1
#ifndef DMX_FILTER_SIZE
#define DMX_FILTER_SIZE 32
#endif
typedef struct {
char *name;
unsigned int freq, srate, diseqc;
char pol;
unsigned int tpid, dpid1, dpid2, progid, ca, pids[DMX_FILTER_SIZE], pids_cnt;
bool is_dvb_x2;
unsigned int delsys;
unsigned int stream_id;
unsigned int service_id;
fe_spectral_inversion_t inv;
fe_modulation_t mod;
fe_transmit_mode_t trans;
fe_bandwidth_t bw;
fe_guard_interval_t gi;
fe_code_rate_t cr, cr_lp;
fe_hierarchy_t hier;
} dvb_channel_t;
typedef struct {
unsigned int NUM_CHANNELS;
unsigned int current;
dvb_channel_t *channels;
} dvb_channels_list_t;
typedef struct {
int devno;
unsigned int delsys_mask;
dvb_channels_list_t *list;
} dvb_adapter_config_t;
typedef struct {
unsigned int adapters_count;
dvb_adapter_config_t *adapters;
unsigned int cur_adapter;
int fe_fd;
int dvr_fd;
int demux_fd[3], demux_fds[DMX_FILTER_SIZE], demux_fds_cnt;
int is_on;
int retry;
int timeout;
unsigned int last_freq;
bool switching_channel;
bool stream_used;
} dvb_state_t;
typedef struct {
struct mp_log *log;
dvb_state_t *state;
char *cfg_prog;
int cfg_devno;
int cfg_timeout;
char *cfg_file;
int cfg_full_transponder;
} dvb_priv_t;
/* Keep in sync with enum fe_delivery_system. */
#ifndef DVB_USE_S2API
# define SYS_DVBC_ANNEX_A 1
# define SYS_DVBC_ANNEX_B 1
# define SYS_DVBT 3
# define SYS_DVBS 5
# define SYS_DVBS2 6
# define SYS_ATSC 11
# define SYS_DVBT2 16
# define SYS_DVBC_ANNEX_C 18
#endif
#define SYS_DVB__COUNT__ (SYS_DVBC_ANNEX_C + 1)
#define DELSYS_BIT(__bit) (((unsigned int)1) << (__bit))
#define DELSYS_SET(__mask, __bit) \
(__mask) |= DELSYS_BIT((__bit))
#define DELSYS_IS_SET(__mask, __bit) \
(0 != ((__mask) & DELSYS_BIT((__bit))))
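
/* e.g. DELSYS_IS_SET(mask, SYS_DVBT2) is non-zero when the DVB-T2 bit has
 * been set in "mask" (typically via DELSYS_SET(mask, SYS_DVBT2)). */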
#ifdef DVB_ATSC
#define DELSYS_SUPP_MASK \
( \
DELSYS_BIT(SYS_DVBC_ANNEX_A) | \
DELSYS_BIT(SYS_DVBT) | \
DELSYS_BIT(SYS_DVBS) | \
DELSYS_BIT(SYS_DVBS2) | \
DELSYS_BIT(SYS_ATSC) | \
DELSYS_BIT(SYS_DVBC_ANNEX_B) | \
DELSYS_BIT(SYS_DVBT2) | \
DELSYS_BIT(SYS_DVBC_ANNEX_C) \
)
#else
#define DELSYS_SUPP_MASK \
( \
DELSYS_BIT(SYS_DVBC_ANNEX_A) | \
DELSYS_BIT(SYS_DVBT) | \
DELSYS_BIT(SYS_DVBS) | \
DELSYS_BIT(SYS_DVBS2) | \
DELSYS_BIT(SYS_DVBT2) | \
DELSYS_BIT(SYS_DVBC_ANNEX_C) \
)
#endif
int dvb_step_channel(stream_t *, int);
int dvb_set_channel(stream_t *, unsigned int, unsigned int);
dvb_state_t *dvb_get_state(stream_t *);
void dvb_free_state(dvb_state_t *);
#endif /* MPLAYER_DVBIN_H */
| gpl-2.0 |
david-visteon/linux-3.0.101 | drivers/usb/gadget/f_phonet.c | 15588 | /*
* f_phonet.c -- USB CDC Phonet function
*
* Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
*
* Author: Rémi Denis-Courmont
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_phonet.h>
#include <linux/if_arp.h>
#include <linux/usb/ch9.h>
#include <linux/usb/cdc.h>
#include <linux/usb/composite.h>
#include "u_phonet.h"
#define PN_MEDIA_USB 0x1B
#define MAXPACKET 512
#if (PAGE_SIZE % MAXPACKET)
#error MAXPACKET must divide PAGE_SIZE!
#endif
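
/*
 * RX buffers are whole pages; requiring PAGE_SIZE to be a multiple of
 * MAXPACKET means a page always holds an integral number of max-size
 * USB packets, so a short packet (req->actual < req->length) reliably
 * marks the end of a transfer in pn_rx_complete().
 */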
/*-------------------------------------------------------------------------*/
struct phonet_port {
struct f_phonet *usb;
spinlock_t lock;
};
struct f_phonet {
struct usb_function function;
struct {
struct sk_buff *skb;
spinlock_t lock;
} rx;
struct net_device *dev;
struct usb_ep *in_ep, *out_ep;
struct usb_request *in_req;
struct usb_request *out_reqv[0];
};
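
/* number of USB requests kept queued on the OUT (rx) endpoint */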
static int phonet_rxq_size = 17;
static inline struct f_phonet *func_to_pn(struct usb_function *f)
{
return container_of(f, struct f_phonet, function);
}
/*-------------------------------------------------------------------------*/
#define USB_CDC_SUBCLASS_PHONET 0xfe
#define USB_CDC_PHONET_TYPE 0xab
static struct usb_interface_descriptor
pn_control_intf_desc = {
.bLength = sizeof pn_control_intf_desc,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC, */
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_PHONET,
};
static const struct usb_cdc_header_desc
pn_header_desc = {
.bLength = sizeof pn_header_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_HEADER_TYPE,
.bcdCDC = cpu_to_le16(0x0110),
};
static const struct usb_cdc_header_desc
pn_phonet_desc = {
.bLength = sizeof pn_phonet_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_PHONET_TYPE,
.bcdCDC = cpu_to_le16(0x1505), /* ??? */
};
static struct usb_cdc_union_desc
pn_union_desc = {
.bLength = sizeof pn_union_desc,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_UNION_TYPE,
/* .bMasterInterface0 = DYNAMIC, */
/* .bSlaveInterface0 = DYNAMIC, */
};
static struct usb_interface_descriptor
pn_data_nop_intf_desc = {
.bLength = sizeof pn_data_nop_intf_desc,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC, */
.bAlternateSetting = 0,
.bNumEndpoints = 0,
.bInterfaceClass = USB_CLASS_CDC_DATA,
};
static struct usb_interface_descriptor
pn_data_intf_desc = {
.bLength = sizeof pn_data_intf_desc,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC, */
.bAlternateSetting = 1,
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_CDC_DATA,
};
static struct usb_endpoint_descriptor
pn_fs_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor
pn_hs_sink_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(MAXPACKET),
};
static struct usb_endpoint_descriptor
pn_fs_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor
pn_hs_source_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_descriptor_header *fs_pn_function[] = {
(struct usb_descriptor_header *) &pn_control_intf_desc,
(struct usb_descriptor_header *) &pn_header_desc,
(struct usb_descriptor_header *) &pn_phonet_desc,
(struct usb_descriptor_header *) &pn_union_desc,
(struct usb_descriptor_header *) &pn_data_nop_intf_desc,
(struct usb_descriptor_header *) &pn_data_intf_desc,
(struct usb_descriptor_header *) &pn_fs_sink_desc,
(struct usb_descriptor_header *) &pn_fs_source_desc,
NULL,
};
static struct usb_descriptor_header *hs_pn_function[] = {
(struct usb_descriptor_header *) &pn_control_intf_desc,
(struct usb_descriptor_header *) &pn_header_desc,
(struct usb_descriptor_header *) &pn_phonet_desc,
(struct usb_descriptor_header *) &pn_union_desc,
(struct usb_descriptor_header *) &pn_data_nop_intf_desc,
(struct usb_descriptor_header *) &pn_data_intf_desc,
(struct usb_descriptor_header *) &pn_hs_sink_desc,
(struct usb_descriptor_header *) &pn_hs_source_desc,
NULL,
};
/*-------------------------------------------------------------------------*/
static int pn_net_open(struct net_device *dev)
{
netif_wake_queue(dev);
return 0;
}
static int pn_net_close(struct net_device *dev)
{
netif_stop_queue(dev);
return 0;
}
static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_phonet *fp = ep->driver_data;
struct net_device *dev = fp->dev;
struct sk_buff *skb = req->context;
switch (req->status) {
case 0:
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
break;
case -ESHUTDOWN: /* disconnected */
case -ECONNRESET: /* disabled */
dev->stats.tx_aborted_errors++;
default:
dev->stats.tx_errors++;
}
dev_kfree_skb_any(skb);
netif_wake_queue(dev);
}
static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct phonet_port *port = netdev_priv(dev);
struct f_phonet *fp;
struct usb_request *req;
unsigned long flags;
if (skb->protocol != htons(ETH_P_PHONET))
goto out;
spin_lock_irqsave(&port->lock, flags);
fp = port->usb;
if (unlikely(!fp)) /* race with carrier loss */
goto out_unlock;
req = fp->in_req;
req->buf = skb->data;
req->length = skb->len;
req->complete = pn_tx_complete;
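	/* ask for a trailing zero-length packet when skb->len is an exact
	 * multiple of the endpoint's maxpacket size, so the host can tell
	 * where the transfer ends */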
req->zero = 1;
req->context = skb;
if (unlikely(usb_ep_queue(fp->in_ep, req, GFP_ATOMIC)))
goto out_unlock;
netif_stop_queue(dev);
skb = NULL;
out_unlock:
spin_unlock_irqrestore(&port->lock, flags);
out:
if (unlikely(skb)) {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
}
return NETDEV_TX_OK;
}
static int pn_net_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static const struct net_device_ops pn_netdev_ops = {
.ndo_open = pn_net_open,
.ndo_stop = pn_net_close,
.ndo_start_xmit = pn_net_xmit,
.ndo_change_mtu = pn_net_mtu,
};
static void pn_net_setup(struct net_device *dev)
{
dev->features = 0;
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = PHONET_DEV_MTU;
dev->hard_header_len = 1;
dev->dev_addr[0] = PN_MEDIA_USB;
dev->addr_len = 1;
dev->tx_queue_len = 1;
dev->netdev_ops = &pn_netdev_ops;
dev->destructor = free_netdev;
dev->header_ops = &phonet_header_ops;
}
/*-------------------------------------------------------------------------*/
/*
* Queue buffer for data from the host
*/
static int
pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
{
struct net_device *dev = fp->dev;
struct page *page;
int err;
page = __netdev_alloc_page(dev, gfp_flags);
if (!page)
return -ENOMEM;
req->buf = page_address(page);
req->length = PAGE_SIZE;
req->context = page;
err = usb_ep_queue(fp->out_ep, req, gfp_flags);
if (unlikely(err))
netdev_free_page(dev, page);
return err;
}
static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_phonet *fp = ep->driver_data;
struct net_device *dev = fp->dev;
struct page *page = req->context;
struct sk_buff *skb;
unsigned long flags;
int status = req->status;
switch (status) {
case 0:
spin_lock_irqsave(&fp->rx.lock, flags);
skb = fp->rx.skb;
if (!skb)
skb = fp->rx.skb = netdev_alloc_skb(dev, 12);
if (req->actual < req->length) /* Last fragment */
fp->rx.skb = NULL;
spin_unlock_irqrestore(&fp->rx.lock, flags);
if (unlikely(!skb))
break;
if (skb->len == 0) { /* First fragment */
skb->protocol = htons(ETH_P_PHONET);
skb_reset_mac_header(skb);
/* Can't use pskb_pull() on page in IRQ */
memcpy(skb_put(skb, 1), page_address(page), 1);
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
skb->len == 0, req->actual);
page = NULL;
if (req->actual < req->length) { /* Last fragment */
skb->dev = dev;
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
netif_rx(skb);
}
break;
/* Do not resubmit in these cases: */
case -ESHUTDOWN: /* disconnect */
case -ECONNABORTED: /* hw reset */
case -ECONNRESET: /* dequeued (unlink or netif down) */
req = NULL;
break;
/* Do resubmit in these cases: */
case -EOVERFLOW: /* request buffer overflow */
dev->stats.rx_over_errors++;
default:
dev->stats.rx_errors++;
break;
}
if (page)
netdev_free_page(dev, page);
if (req)
pn_rx_submit(fp, req, GFP_ATOMIC);
}
/*-------------------------------------------------------------------------*/
static void __pn_reset(struct usb_function *f)
{
struct f_phonet *fp = func_to_pn(f);
struct net_device *dev = fp->dev;
struct phonet_port *port = netdev_priv(dev);
netif_carrier_off(dev);
port->usb = NULL;
usb_ep_disable(fp->out_ep);
usb_ep_disable(fp->in_ep);
if (fp->rx.skb) {
dev_kfree_skb_irq(fp->rx.skb);
fp->rx.skb = NULL;
}
}
static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_phonet *fp = func_to_pn(f);
struct usb_gadget *gadget = fp->function.config->cdev->gadget;
if (intf == pn_control_intf_desc.bInterfaceNumber)
/* control interface, no altsetting */
return (alt > 0) ? -EINVAL : 0;
if (intf == pn_data_intf_desc.bInterfaceNumber) {
struct net_device *dev = fp->dev;
struct phonet_port *port = netdev_priv(dev);
/* data intf (0: inactive, 1: active) */
if (alt > 1)
return -EINVAL;
spin_lock(&port->lock);
__pn_reset(f);
if (alt == 1) {
struct usb_endpoint_descriptor *out, *in;
int i;
out = ep_choose(gadget,
&pn_hs_sink_desc,
&pn_fs_sink_desc);
in = ep_choose(gadget,
&pn_hs_source_desc,
&pn_fs_source_desc);
usb_ep_enable(fp->out_ep, out);
usb_ep_enable(fp->in_ep, in);
port->usb = fp;
fp->out_ep->driver_data = fp;
fp->in_ep->driver_data = fp;
netif_carrier_on(dev);
for (i = 0; i < phonet_rxq_size; i++)
pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
}
spin_unlock(&port->lock);
return 0;
}
return -EINVAL;
}
static int pn_get_alt(struct usb_function *f, unsigned intf)
{
struct f_phonet *fp = func_to_pn(f);
if (intf == pn_control_intf_desc.bInterfaceNumber)
return 0;
if (intf == pn_data_intf_desc.bInterfaceNumber) {
struct phonet_port *port = netdev_priv(fp->dev);
u8 alt;
spin_lock(&port->lock);
alt = port->usb != NULL;
spin_unlock(&port->lock);
return alt;
}
return -EINVAL;
}
static void pn_disconnect(struct usb_function *f)
{
struct f_phonet *fp = func_to_pn(f);
struct phonet_port *port = netdev_priv(fp->dev);
unsigned long flags;
/* remain disabled until set_alt */
spin_lock_irqsave(&port->lock, flags);
__pn_reset(f);
spin_unlock_irqrestore(&port->lock, flags);
}
/*-------------------------------------------------------------------------*/
static __init
int pn_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct usb_gadget *gadget = cdev->gadget;
struct f_phonet *fp = func_to_pn(f);
struct usb_ep *ep;
int status, i;
/* Reserve interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
goto err;
pn_control_intf_desc.bInterfaceNumber = status;
pn_union_desc.bMasterInterface0 = status;
status = usb_interface_id(c, f);
if (status < 0)
goto err;
pn_data_nop_intf_desc.bInterfaceNumber = status;
pn_data_intf_desc.bInterfaceNumber = status;
pn_union_desc.bSlaveInterface0 = status;
/* Reserve endpoints */
status = -ENODEV;
ep = usb_ep_autoconfig(gadget, &pn_fs_sink_desc);
if (!ep)
goto err;
fp->out_ep = ep;
ep->driver_data = fp; /* Claim */
ep = usb_ep_autoconfig(gadget, &pn_fs_source_desc);
if (!ep)
goto err;
fp->in_ep = ep;
ep->driver_data = fp; /* Claim */
pn_hs_sink_desc.bEndpointAddress =
pn_fs_sink_desc.bEndpointAddress;
pn_hs_source_desc.bEndpointAddress =
pn_fs_source_desc.bEndpointAddress;
/* Do not try to bind Phonet twice... */
fp->function.descriptors = fs_pn_function;
fp->function.hs_descriptors = hs_pn_function;
/* Incoming USB requests */
status = -ENOMEM;
for (i = 0; i < phonet_rxq_size; i++) {
struct usb_request *req;
req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL);
if (!req)
goto err_req;
req->complete = pn_rx_complete;
fp->out_reqv[i] = req;
}
/* Outgoing USB requests */
fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL);
if (!fp->in_req)
goto err_req;
INFO(cdev, "USB CDC Phonet function\n");
INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name,
fp->out_ep->name, fp->in_ep->name);
return 0;
err_req:
for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++)
usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
err:
if (fp->out_ep)
fp->out_ep->driver_data = NULL;
if (fp->in_ep)
fp->in_ep->driver_data = NULL;
ERROR(cdev, "USB CDC Phonet: cannot autoconfigure\n");
return status;
}
static void
pn_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_phonet *fp = func_to_pn(f);
int i;
/* We are already disconnected */
if (fp->in_req)
usb_ep_free_request(fp->in_ep, fp->in_req);
for (i = 0; i < phonet_rxq_size; i++)
if (fp->out_reqv[i])
usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
kfree(fp);
}
/*-------------------------------------------------------------------------*/
static struct net_device *dev;
int __init phonet_bind_config(struct usb_configuration *c)
{
struct f_phonet *fp;
int err, size;
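	/* One allocation holds the function state plus the trailing out_reqv[] array. */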
size = sizeof(*fp) + (phonet_rxq_size * sizeof(struct usb_request *));
fp = kzalloc(size, GFP_KERNEL);
if (!fp)
return -ENOMEM;
fp->dev = dev;
fp->function.name = "phonet";
fp->function.bind = pn_bind;
fp->function.unbind = pn_unbind;
fp->function.set_alt = pn_set_alt;
fp->function.get_alt = pn_get_alt;
fp->function.disable = pn_disconnect;
spin_lock_init(&fp->rx.lock);
err = usb_add_function(c, &fp->function);
if (err)
kfree(fp);
return err;
}
int __init gphonet_setup(struct usb_gadget *gadget)
{
struct phonet_port *port;
int err;
/* Create net device */
BUG_ON(dev);
dev = alloc_netdev(sizeof(*port), "upnlink%d", pn_net_setup);
if (!dev)
return -ENOMEM;
port = netdev_priv(dev);
spin_lock_init(&port->lock);
netif_carrier_off(dev);
SET_NETDEV_DEV(dev, &gadget->dev);
err = register_netdev(dev);
if (err)
free_netdev(dev);
return err;
}
void gphonet_cleanup(void)
{
unregister_netdev(dev);
}
| gpl-2.0 |
fir3element/downgrade1.1 | src/npc.h | 6549 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2015 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef FS_NPC_H_B090D0CB549D4435AFA03647195D156F
#define FS_NPC_H_B090D0CB549D4435AFA03647195D156F
#include "creature.h"
#include "luascript.h"
#include <set>
class Npc;
class Player;
class Npcs
{
public:
static void reload();
};
class NpcScriptInterface final : public LuaScriptInterface
{
public:
NpcScriptInterface();
bool loadNpcLib(const std::string& file);
protected:
void registerFunctions();
static int32_t luaActionSay(lua_State* L);
static int32_t luaActionMove(lua_State* L);
static int32_t luaActionMoveTo(lua_State* L);
static int32_t luaActionTurn(lua_State* L);
static int32_t luaActionFollow(lua_State* L);
static int32_t luaSelfGetPos(lua_State* L);
static int32_t luagetDistanceTo(lua_State* L);
static int32_t luaSetNpcFocus(lua_State* L);
static int32_t luaGetNpcCid(lua_State* L);
static int32_t luaGetNpcPos(lua_State* L);
static int32_t luaGetNpcName(lua_State* L);
static int32_t luaGetNpcParameter(lua_State* L);
static int32_t luaOpenShopWindow(lua_State* L);
static int32_t luaCloseShopWindow(lua_State* L);
static int32_t luaDoSellItem(lua_State* L);
// metatable
static int32_t luaNpcGetParameter(lua_State* L);
static int32_t luaNpcSetFocus(lua_State* L);
static int32_t luaNpcOpenShopWindow(lua_State* L);
static int32_t luaNpcCloseShopWindow(lua_State* L);
private:
bool initState() final;
bool closeState() final;
bool m_libLoaded;
};
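// Forwards creature and player events to the NPC's Lua script callbacks.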
class NpcEventsHandler
{
public:
NpcEventsHandler(const std::string& file, Npc* npc);
void onCreatureAppear(Creature* creature);
void onCreatureDisappear(Creature* creature);
void onCreatureMove(Creature* creature, const Position& oldPos, const Position& newPos);
	void onCreatureSay(Creature* creature, SpeakClasses type, const std::string& text);
void onPlayerTrade(Player* player, int32_t callback, uint16_t itemId, uint8_t count, uint8_t amount, bool ignore = false, bool inBackpacks = false);
void onPlayerCloseChannel(Player* player);
void onPlayerEndTrade(Player* player);
void onThink();
bool isLoaded() const;
protected:
Npc* m_npc;
NpcScriptInterface* m_scriptInterface;
int32_t m_onCreatureAppear;
int32_t m_onCreatureDisappear;
int32_t m_onCreatureMove;
int32_t m_onCreatureSay;
int32_t m_onPlayerCloseChannel;
int32_t m_onPlayerEndTrade;
int32_t m_onThink;
bool m_loaded;
};
class Npc final : public Creature
{
public:
~Npc();
// non-copyable
Npc(const Npc&) = delete;
Npc& operator=(const Npc&) = delete;
Npc* getNpc() final {
return this;
}
const Npc* getNpc() const final {
return this;
}
bool isPushable() const final {
return walkTicks > 0;
}
void setID() final {
if (id == 0) {
id = npcAutoID++;
}
}
void removeList() final;
void addList() final;
static Npc* createNpc(const std::string& name);
bool canSee(const Position& pos) const final;
bool load();
void reload();
const std::string& getName() const final {
return name;
}
const std::string& getNameDescription() const final {
return name;
}
CreatureType_t getType() const final {
return CREATURETYPE_NPC;
}
void doSay(const std::string& text);
void doSayToPlayer(Player* player, const std::string& text);
void doMove(Direction dir);
void doTurn(Direction dir);
void doMoveTo(const Position& pos);
int32_t getMasterRadius() const {
return masterRadius;
}
const Position& getMasterPos() const {
return masterPos;
}
void setMasterPos(const Position& pos, int32_t radius = 1) {
masterPos = pos;
if (masterRadius == -1) {
masterRadius = radius;
}
}
void onPlayerCloseChannel(Player* player);
void onPlayerTrade(Player* player, int32_t callback, uint16_t itemId, uint8_t count,
uint8_t amount, bool ignore = false, bool inBackpacks = false);
void onPlayerEndTrade(Player* player, int32_t buyCallback, int32_t sellCallback);
void turnToCreature(Creature* creature);
void setCreatureFocus(Creature* creature);
NpcScriptInterface* getScriptInterface();
static uint32_t npcAutoID;
protected:
Npc(const std::string& _name);
void onCreatureAppear(Creature* creature, bool isLogin) final;
void onCreatureDisappear(Creature* creature, uint32_t stackpos, bool isLogout) final;
void onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos,
const Tile* oldTile, const Position& oldPos, bool teleport) final;
void onCreatureSay(Creature* creature, SpeakClasses type, const std::string& text) final;
void onThink(uint32_t interval) final;
std::string getDescription(int32_t lookDistance) const final;
bool isImmune(CombatType_t) const final {
return !attackable;
}
bool isImmune(ConditionType_t) const final {
return !attackable;
}
bool isAttackable() const final {
return attackable;
}
bool getNextStep(Direction& dir, uint32_t& flags) final;
bool canWalkTo(const Position& fromPos, Direction dir) const;
bool getRandomStep(Direction& dir) const;
void reset();
bool loadFromXml(const std::string& name);
void addShopPlayer(Player* player);
void removeShopPlayer(Player* player);
void closeAllShopWindows();
std::map<std::string, std::string> m_parameters;
std::set<Player*> shopPlayerSet;
std::string name;
std::string m_filename;
NpcEventsHandler* m_npcEventHandler;
Position masterPos;
uint32_t walkTicks;
int32_t focusCreature;
int32_t masterRadius;
bool floorChange;
bool attackable;
bool ignoreHeight;
bool loaded;
static NpcScriptInterface* m_scriptInterface;
friend class Npcs;
friend class NpcScriptInterface;
};
#endif
| gpl-2.0 |
grev-uv/hpg-methyl | lib/c/src/bioformats/ped/ped_reader.h | 558 | #ifndef PED_RAGEL_H
#define PED_RAGEL_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <commons/log.h>
#include <containers/list.h>
#include <cprops/linked_list.h>
#include "ped_error.h"
#include "ped_file_structure.h"
#include "ped_file.h"
#include "ped_read.h"
#include "ped_batch.h"
enum PED_Field { FAMILY_ID, INDIVIDUAL_ID, FATHER_ID, MOTHER_ID, SEX, PHENOTYPE, OTHER };
int ped_ragel_read(list_t *batches_list, size_t batch_size, ped_file_t *file);
#ifdef __cplusplus
}
#endif
#endif
| gpl-2.0 |
kdave/kernel-source | scripts/python/tests/test_header.py | 20987 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import os.path
import unittest
from io import StringIO
from suse_git import header
class TestHeaderChecker(unittest.TestCase):
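    """Exercise suse_git.header.Checker against valid and malformed patch headers."""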
def test_empty(self):
        with self.assertRaises(header.HeaderException) as cm:
            self.header = header.Checker("")
        e = cm.exception
        self.assertEqual(4, e.errors(header.MissingTagError))
        self.assertTrue(e.tag_is_missing('patch-mainline'))
        self.assertTrue(e.tag_is_missing('from'))
        self.assertTrue(e.tag_is_missing('subject'))
        self.assertTrue(e.tag_is_missing('references'))
        self.assertEqual(4, e.errors())
def test_subject_dupe(self):
text = """
From: [email protected]
Subject: some patch
Subject: some patch
Patch-mainline: v4.2-rc2
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.DuplicateTagError))
self.assertEqual(1, e.errors())
def test_patch_mainline_dupe(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Patch-mainline: v4.2-rc2
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.DuplicateTagError))
self.assertEqual(1, e.errors())
def test_patch_mainline_empty(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline:
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.EmptyTagError))
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('patch-mainline'))
self.assertEqual(2, e.errors())
def test_patch_mainline_version_no_ack_or_sob(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
References: bsc#12345
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
"""
        with self.assertRaises(header.HeaderException) as cm:
            self.header = header.Checker(text)
        e = cm.exception
        self.assertEqual(1, e.errors(header.MissingTagError))
        self.assertTrue(e.tag_is_missing('acked-by'))
        self.assertTrue(e.tag_is_missing('signed-off-by'))
        self.assertEqual(1, e.errors())
def test_patch_mainline_version_correct_multi_ack(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_multi_ack_ext_last(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_mixed_ack_sob(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Signed-off-by: [email protected]
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_ack(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_from(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_review(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Reviewed-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_sob(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Signed-off-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_multi_sob(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Signed-off-by: [email protected]
Signed-off-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_version_correct_multi_sob_ext_last(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Signed-off-by: [email protected]
Signed-off-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_na(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: n/a
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_submitted_correct_ml(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Submitted, 19 July 2015 - linux-btrfs
References: bsc#12345
Acked-by: [email protected]
"""
        self.header = header.Checker(text)
def test_patch_mainline_submitted_correct_url(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Submitted, https://lkml.org/archive/link-to-post
References: bsc#12345
Acked-by: [email protected]
"""
        self.header = header.Checker(text)
def test_patch_mainline_submitted_no_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Submitted
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
    def test_patch_mainline_submitted_detail_git_repo(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Submitted, https://lkml.org/archive/link-to-post
Git-repo: git://host/valid/path/to/repo
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.ExcludedTagError))
self.assertEqual(1, e.errors())
# Required/Excluded conflict between Patch-mainline (Submitted)
# and Git-commit
def test_patch_mainline_submitted_detail_git_commit(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Submitted, https://lkml.org/archive/link-to-post
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertEqual(1, e.errors(header.ExcludedTagError))
self.assertEqual(2, e.errors())
def test_patch_mainline_never_no_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Never
References: bsc#12345
Acked-by: [email protected]
"""
        with self.assertRaises(header.HeaderException) as cm:
            self.header = header.Checker(text)
        e = cm.exception
        self.assertEqual(1, e.errors(header.FormatError))
        self.assertEqual(1, e.errors())
def test_patch_mainline_yes_with_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Yes, v4.1-rc1
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_yes_no_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Yes
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_not_yet_no_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Not yet
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_never_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Never, SLES-specific feature
References: FATE#123456
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_no_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: No, handled differently upstream
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_patch_mainline_not_yet_detail(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Not yet, rare reason
References: bsc#12345
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_git_commit_standalone(self):
text = """
From: [email protected]
Subject: some patch
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
        with self.assertRaises(header.HeaderException) as cm:
            self.header = header.Checker(text)
        e = cm.exception
        # Both policy and Git-commit require Patch-mainline
        self.assertEqual(2, e.errors(header.MissingTagError))
        self.assertTrue(e.tag_is_missing('patch-mainline'))
        self.assertEqual(2, e.errors())
def test_patch_mainline_queued_correct(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Queued
Git-repo: git://path/to/git/repo
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_mainline_queued_standalone(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Queued
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(2, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('git-commit'))
self.assertTrue(e.tag_is_missing('git-repo'))
self.assertEqual(2, e.errors())
def test_patch_mainline_queued_with_git_repo(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Queued
Git-repo: git://path/to/git/repo
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
# Required by both Patch-mainline (Queued) and
# Git-repo
self.assertEqual(2, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('git-commit'))
self.assertEqual(2, e.errors())
def test_patch_mainline_queued_with_git_commit(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: Queued
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('git-repo'))
self.assertEqual(1, e.errors())
def test_patch_mainline_invalid(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: n/a
References: bsc#12345
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.FormatError))
self.assertEqual(1, e.errors())
def test_diff_like_description(self):
text = """
From: [email protected]
Subject: blablah
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
This is a thing. I ran across it:
*** Testing resulted in failure
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_diff_like_description2(self):
text = """
From: [email protected]
Subject: blablah
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
This is a thing. I ran across it:
--- Testing resulted in failure
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_references_empty(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References:
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.EmptyTagError))
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('references'))
self.assertEqual(2, e.errors())
def test_patch_references_missing(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('references'))
self.assertEqual(1, e.errors())
def test_patch_references_multi(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
References: bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_references_multi2(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345 bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text)
def test_patch_references_multi3(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345, bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text)
    def test_patch_references_multi4(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345, bsc#12354
References: fix for blahblah
Acked-by: [email protected]
"""
self.header = header.Checker(text)
@unittest.skip("Enable this check when we want to require a real "
"References tag")
def test_patch_references_only_freeform(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: fix for blahblah
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text)
e = cm.exception
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('references'))
self.assertEqual(1, e.errors())
def test_patch_references_empty_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References:
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text, True)
e = cm.exception
self.assertEqual(1, e.errors(header.EmptyTagError))
self.assertEqual(1, e.errors())
def test_patch_references_missing_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
Acked-by: [email protected]
"""
self.header = header.Checker(text, True)
def test_patch_references_multi_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345
References: bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text, True)
def test_patch_references_multi2_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345 bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text, True)
def test_patch_references_multi3_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345, bsc#12354
Acked-by: [email protected]
"""
self.header = header.Checker(text, True)
    def test_patch_references_multi4_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: bsc#12345, bsc#12354
References: fix for blahblah
Acked-by: [email protected]
"""
self.header = header.Checker(text, True)
@unittest.skip("Enable this check when we want to require a real "
"References tag")
def test_patch_references_only_freeform_update(self):
text = """
From: [email protected]
Subject: some patch
Patch-mainline: v4.2-rc1
Git-commit: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
References: fix for blahblah
Acked-by: [email protected]
"""
with self.assertRaises(header.HeaderException) as cm:
self.header = header.Checker(text, True)
e = cm.exception
self.assertEqual(1, e.errors(header.MissingTagError))
self.assertTrue(e.tag_is_missing('references'))
self.assertEqual(1, e.errors())
def test_no_patch_mainline_for_kabi(self):
text = """
From: [email protected]
Subject: some patch
References: FATE#123456
Acked-by: [email protected]
"""
self.header = header.Checker(text, False, "patches.kabi/FATE123456_fix_kabi.patch")
| gpl-2.0 |
ballock/kernel-xenial-bbr | fs/namei.c | 117645 | /*
* linux/fs/namei.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* Some corrections by tytso.
*/
/* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname
* lookup logic.
*/
/* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/fsnotify.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/device_cgroup.h>
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
#include <linux/hash.h>
#include <linux/init_task.h>
#include <asm/uaccess.h>
#include "internal.h"
#include "mount.h"
/* [Feb-1997 T. Schoebel-Theuer]
* Fundamental changes in the pathname lookup mechanisms (namei)
* were necessary because of omirr. The reason is that omirr needs
* to know the _real_ pathname, not the user-supplied one, in case
* of symlinks (and also when transname replacements occur).
*
* The new code replaces the old recursive symlink resolution with
* an iterative one (in case of non-nested symlink chains). It does
* this with calls to <fs>_follow_link().
* As a side effect, dir_namei(), _namei() and follow_link() are now
* replaced with a single function lookup_dentry() that can handle all
* the special cases of the former code.
*
* With the new dcache, the pathname is stored at each inode, at least as
* long as the refcount of the inode is positive. As a side effect, the
* size of the dcache depends on the inode cache and thus is dynamic.
*
* [29-Apr-1998 C. Scott Ananian] Updated above description of symlink
* resolution to correspond with current state of the code.
*
* Note that the symlink resolution is not *completely* iterative.
* There is still a significant amount of tail- and mid- recursion in
* the algorithm. Also, note that <fs>_readlink() is not used in
* lookup_dentry(): lookup_dentry() on the result of <fs>_readlink()
* may return different results than <fs>_follow_link(). Many virtual
* filesystems (including /proc) exhibit this behavior.
*/
/* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation:
* New symlink semantics: when open() is called with flags O_CREAT | O_EXCL
* and the name already exists in form of a symlink, try to create the new
* name indicated by the symlink. The old code always complained that the
* name already exists, due to not following the symlink even if its target
* is nonexistent. The new semantics affects also mknod() and link() when
* the name is a symlink pointing to a non-existent name.
*
* I don't know which semantics is the right one, since I have no access
* to standards. But I found by trial that HP-UX 9.0 has the full "new"
* semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the
* "old" one. Personally, I think the new semantics is much more logical.
* Note that "ln old new" where "new" is a symlink pointing to a non-existing
* file does succeed in both HP-UX and SunOs, but not in Solaris
* and in the old Linux semantics.
*/
/* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink
* semantics. See the comments in "open_namei" and "do_link" below.
*
* [10-Sep-98 Alan Modra] Another symlink change.
*/
/* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks:
* inside the path - always follow.
* in the last component in creation/removal/renaming - never follow.
* if LOOKUP_FOLLOW passed - follow.
* if the pathname has trailing slashes - follow.
* otherwise - don't follow.
* (applied in that order).
*
* [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT
* restored for 2.4. This is the last surviving part of old 4.2BSD bug.
* During the 2.4 we need to fix the userland stuff depending on it -
* hopefully we will be able to get rid of that wart in 2.5. So far only
* XEmacs seems to be relying on it...
*/
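/*
 * For example: after "ln -s target link", open("link", O_RDONLY) follows
 * the symlink, unlink("link") removes the link itself (removal never
 * follows the last component), and lstat("link/") follows it because of
 * the trailing slash even though lstat() normally does not follow links.
 */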
/*
* [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
* implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
* any extra contention...
*/
/* In order to reduce some races, while at the same time doing additional
* checking and hopefully speeding things up, we copy filenames to the
* kernel data space before using them..
*
* POSIX.1 2.4: an empty pathname is invalid (ENOENT).
* PATH_MAX includes the nul terminator --RR.
*/
#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
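/*
 * For example, with PATH_MAX of 4096 and iname being the flexible array
 * at the end of struct filename, all but the struct header of the
 * names_cache allocation is available for an embedded name; longer names
 * fall back to the separate allocation in getname_flags() below.
 */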
struct filename *
getname_flags(const char __user *filename, int flags, int *empty)
{
struct filename *result;
char *kname;
int len;
result = audit_reusename(filename);
if (result)
return result;
result = __getname();
if (unlikely(!result))
return ERR_PTR(-ENOMEM);
/*
* First, try to embed the struct filename inside the names_cache
* allocation
*/
kname = (char *)result->iname;
result->name = kname;
len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
if (unlikely(len < 0)) {
__putname(result);
return ERR_PTR(len);
}
/*
* Uh-oh. We have a name that's approaching PATH_MAX. Allocate a
* separate struct filename so we can dedicate the entire
* names_cache allocation for the pathname, and re-do the copy from
* userland.
*/
if (unlikely(len == EMBEDDED_NAME_MAX)) {
const size_t size = offsetof(struct filename, iname[1]);
kname = (char *)result;
		/*
		 * The size is chosen so that result->iname[0] stays within the
		 * same object and kname can't be equal to result->iname, no
		 * matter what.
		 */
result = kzalloc(size, GFP_KERNEL);
if (unlikely(!result)) {
__putname(kname);
return ERR_PTR(-ENOMEM);
}
result->name = kname;
len = strncpy_from_user(kname, filename, PATH_MAX);
if (unlikely(len < 0)) {
__putname(kname);
kfree(result);
return ERR_PTR(len);
}
if (unlikely(len == PATH_MAX)) {
__putname(kname);
kfree(result);
return ERR_PTR(-ENAMETOOLONG);
}
}
result->refcnt = 1;
/* The empty path is special. */
if (unlikely(!len)) {
if (empty)
*empty = 1;
if (!(flags & LOOKUP_EMPTY)) {
putname(result);
return ERR_PTR(-ENOENT);
}
}
result->uptr = filename;
result->aname = NULL;
audit_getname(result);
return result;
}
struct filename *
getname(const char __user * filename)
{
return getname_flags(filename, 0, NULL);
}
struct filename *
getname_kernel(const char * filename)
{
struct filename *result;
int len = strlen(filename) + 1;
result = __getname();
if (unlikely(!result))
return ERR_PTR(-ENOMEM);
if (len <= EMBEDDED_NAME_MAX) {
result->name = (char *)result->iname;
} else if (len <= PATH_MAX) {
struct filename *tmp;
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (unlikely(!tmp)) {
__putname(result);
return ERR_PTR(-ENOMEM);
}
tmp->name = (char *)result;
result = tmp;
} else {
__putname(result);
return ERR_PTR(-ENAMETOOLONG);
}
memcpy((char *)result->name, filename, len);
result->uptr = NULL;
result->aname = NULL;
result->refcnt = 1;
audit_getname(result);
return result;
}
void putname(struct filename *name)
{
BUG_ON(name->refcnt <= 0);
if (--name->refcnt > 0)
return;
if (name->name != name->iname) {
__putname(name->name);
kfree(name);
} else
__putname(name);
}
static int check_acl(struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *acl;
if (mask & MAY_NOT_BLOCK) {
acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
if (!acl)
return -EAGAIN;
/* no ->get_acl() calls in RCU mode... */
if (acl == ACL_NOT_CACHED)
return -ECHILD;
return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
}
acl = get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl) {
int error = posix_acl_permission(inode, acl, mask);
posix_acl_release(acl);
return error;
}
#endif
return -EAGAIN;
}
/*
* This does the basic permission checking
*/
static int acl_permission_check(struct inode *inode, int mask)
{
unsigned int mode = inode->i_mode;
if (likely(uid_eq(current_fsuid(), inode->i_uid)))
mode >>= 6;
else {
if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
int error = check_acl(inode, mask);
if (error != -EAGAIN)
return error;
}
if (in_group_p(inode->i_gid))
mode >>= 3;
}
/*
* If the DACs are ok we don't need any capability check.
*/
if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
return 0;
return -EACCES;
}
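/*
 * For example, with i_mode 0754 the owner is checked against 7 (rwx),
 * group members against 5 (r-x) and everyone else against 4 (r--).
 */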
/**
* generic_permission - check for access rights on a Posix-like filesystem
* @inode: inode to check access rights for
* @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
*
* Used to check for read/write/execute permissions on a file.
* We use "fsuid" for this, letting us set arbitrary permissions
* for filesystem access without changing the "normal" uids which
* are used for other things.
*
* generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
* request cannot be satisfied (eg. requires blocking or too much complexity).
* It would then be called again in ref-walk mode.
*/
int generic_permission(struct inode *inode, int mask)
{
int ret;
/*
* Do the basic permission checks.
*/
ret = acl_permission_check(inode, mask);
if (ret != -EACCES)
return ret;
if (S_ISDIR(inode->i_mode)) {
/* DACs are overridable for directories */
if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
return 0;
if (!(mask & MAY_WRITE))
if (capable_wrt_inode_uidgid(inode,
CAP_DAC_READ_SEARCH))
return 0;
return -EACCES;
}
/*
* Read/write DACs are always overridable.
* Executable DACs are overridable when there is
* at least one exec bit set.
*/
if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
return 0;
/*
* Searching includes executable on directories, else just read.
*/
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
if (mask == MAY_READ)
if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
return 0;
return -EACCES;
}
EXPORT_SYMBOL(generic_permission);
/*
* We _really_ want to just do "generic_permission()" without
* even looking at the inode->i_op values. So we keep a cache
* flag in inode->i_opflags, that says "this has not special
* permission function, use the fast case".
*/
static inline int do_inode_permission(struct inode *inode, int mask)
{
if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
if (likely(inode->i_op->permission))
return inode->i_op->permission(inode, mask);
/* This gets set once for the inode lifetime */
spin_lock(&inode->i_lock);
inode->i_opflags |= IOP_FASTPERM;
spin_unlock(&inode->i_lock);
}
return generic_permission(inode, mask);
}
/**
* __inode_permission - Check for access rights to a given inode
* @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Check for read/write/execute permissions on an inode.
*
* When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
*
* This does not check for a read-only file system. You probably want
* inode_permission().
*/
int __inode_permission(struct inode *inode, int mask)
{
int retval;
if (unlikely(mask & MAY_WRITE)) {
/*
* Nobody gets write access to an immutable file.
*/
if (IS_IMMUTABLE(inode))
return -EACCES;
}
retval = do_inode_permission(inode, mask);
if (retval)
return retval;
retval = devcgroup_inode_permission(inode, mask);
if (retval)
return retval;
return security_inode_permission(inode, mask);
}
EXPORT_SYMBOL(__inode_permission);
/**
* sb_permission - Check superblock-level permissions
* @sb: Superblock of inode to check permission on
* @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Separate out file-system wide checks from inode-specific permission checks.
*/
static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
{
if (unlikely(mask & MAY_WRITE)) {
umode_t mode = inode->i_mode;
/* Nobody gets write access to a read-only fs. */
if ((sb->s_flags & MS_RDONLY) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
}
return 0;
}
/**
* inode_permission - Check for access rights to a given inode
* @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Check for read/write/execute permissions on an inode. We use fs[ug]id for
* this, letting us set arbitrary permissions for filesystem access without
* changing the "normal" UIDs which are used for other things.
*
* When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
*/
int inode_permission(struct inode *inode, int mask)
{
int retval;
retval = sb_permission(inode->i_sb, inode, mask);
if (retval)
return retval;
return __inode_permission(inode, mask);
}
EXPORT_SYMBOL(inode_permission);
/**
* path_get - get a reference to a path
* @path: path to get the reference to
*
* Given a path increment the reference count to the dentry and the vfsmount.
*/
void path_get(const struct path *path)
{
mntget(path->mnt);
dget(path->dentry);
}
EXPORT_SYMBOL(path_get);
/**
* path_put - put a reference to a path
* @path: path to put the reference to
*
* Given a path decrement the reference count to the dentry and the vfsmount.
*/
void path_put(const struct path *path)
{
dput(path->dentry);
mntput(path->mnt);
}
EXPORT_SYMBOL(path_put);
#define EMBEDDED_LEVELS 2
struct nameidata {
struct path path;
struct qstr last;
struct path root;
struct inode *inode; /* path.dentry.d_inode */
unsigned int flags;
unsigned seq, m_seq;
int last_type;
unsigned depth;
int total_link_count;
struct saved {
struct path link;
void *cookie;
const char *name;
struct inode *inode;
unsigned seq;
} *stack, internal[EMBEDDED_LEVELS];
struct filename *name;
struct nameidata *saved;
unsigned root_seq;
int dfd;
};
static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
{
struct nameidata *old = current->nameidata;
p->stack = p->internal;
p->dfd = dfd;
p->name = name;
p->total_link_count = old ? old->total_link_count : 0;
p->saved = old;
current->nameidata = p;
}
static void restore_nameidata(void)
{
struct nameidata *now = current->nameidata, *old = now->saved;
current->nameidata = old;
if (old)
old->total_link_count = now->total_link_count;
if (now->stack != now->internal) {
kfree(now->stack);
now->stack = now->internal;
}
}
static int __nd_alloc_stack(struct nameidata *nd)
{
struct saved *p;
if (nd->flags & LOOKUP_RCU) {
		p = kmalloc(MAXSYMLINKS * sizeof(struct saved),
GFP_ATOMIC);
if (unlikely(!p))
return -ECHILD;
} else {
		p = kmalloc(MAXSYMLINKS * sizeof(struct saved),
GFP_KERNEL);
if (unlikely(!p))
return -ENOMEM;
}
memcpy(p, nd->internal, sizeof(nd->internal));
nd->stack = p;
return 0;
}
/**
* path_connected - Verify that a path->dentry is below path->mnt.mnt_root
* @path: nameidate to verify
*
* Rename can sometimes move a file or directory outside of a bind
* mount, path_connected allows those cases to be detected.
*/
static bool path_connected(const struct path *path)
{
struct vfsmount *mnt = path->mnt;
/* Only bind mounts can have disconnected paths */
if (mnt->mnt_root == mnt->mnt_sb->s_root)
return true;
return is_subdir(path->dentry, mnt->mnt_root);
}
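/*
 * For example, after "mount --bind /a/b /mnt", a rename of /a/b/x to
 * /a/y moves dentries that are still reachable under /mnt outside the
 * bind mount's root; is_subdir() detects exactly that situation.
 */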
static inline int nd_alloc_stack(struct nameidata *nd)
{
if (likely(nd->depth != EMBEDDED_LEVELS))
return 0;
if (likely(nd->stack != nd->internal))
return 0;
return __nd_alloc_stack(nd);
}
static void drop_links(struct nameidata *nd)
{
int i = nd->depth;
while (i--) {
struct saved *last = nd->stack + i;
struct inode *inode = last->inode;
if (last->cookie && inode->i_op->put_link) {
inode->i_op->put_link(inode, last->cookie);
last->cookie = NULL;
}
}
}
static void terminate_walk(struct nameidata *nd)
{
drop_links(nd);
if (!(nd->flags & LOOKUP_RCU)) {
int i;
path_put(&nd->path);
for (i = 0; i < nd->depth; i++)
path_put(&nd->stack[i].link);
if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
path_put(&nd->root);
nd->root.mnt = NULL;
}
} else {
nd->flags &= ~LOOKUP_RCU;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
rcu_read_unlock();
}
nd->depth = 0;
}
/* path_put is needed afterwards regardless of success or failure */
static bool legitimize_path(struct nameidata *nd,
struct path *path, unsigned seq)
{
int res = __legitimize_mnt(path->mnt, nd->m_seq);
if (unlikely(res)) {
if (res > 0)
path->mnt = NULL;
path->dentry = NULL;
return false;
}
if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) {
path->dentry = NULL;
return false;
}
return !read_seqcount_retry(&path->dentry->d_seq, seq);
}
static bool legitimize_links(struct nameidata *nd)
{
int i;
for (i = 0; i < nd->depth; i++) {
struct saved *last = nd->stack + i;
if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
drop_links(nd);
nd->depth = i + 1;
return false;
}
}
return true;
}
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
* continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
* normal reference counts on dentries and vfsmounts to transition to rcu-walk
* mode. Refcounts are grabbed at the last known good point before rcu-walk
* got stuck, so ref-walk may continue from there. If this is not successful
* (eg. a seqcount has changed), then failure is returned and it's up to caller
* to restart the path walk from the beginning in ref-walk mode.
*/
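/*
 * For example, a ->d_revalidate() instance that cannot run under RCU
 * returns -ECHILD, after which the walk grabs references via
 * unlazy_walk() and continues in ref-walk mode from the same point.
 */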
/**
* unlazy_walk - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
* @dentry: child of nd->path.dentry or NULL
* @seq: seq number to check dentry against
* Returns: 0 on success, -ECHILD on failure
*
* unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
* for ref-walk mode. @dentry must be a path found by a do_lookup call on
* @nd or NULL. Must be called from rcu-walk context.
* Nothing should touch nameidata between unlazy_walk() failure and
* terminate_walk().
*/
static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq)
{
struct dentry *parent = nd->path.dentry;
BUG_ON(!(nd->flags & LOOKUP_RCU));
nd->flags &= ~LOOKUP_RCU;
if (unlikely(!legitimize_links(nd)))
goto out2;
if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq)))
goto out2;
if (unlikely(!lockref_get_not_dead(&parent->d_lockref)))
goto out1;
/*
 * For a negative lookup, the lookup sequence point is the parent's
* sequence point, and it only needs to revalidate the parent dentry.
*
* For a positive lookup, we need to move both the parent and the
* dentry from the RCU domain to be properly refcounted. And the
* sequence number in the dentry validates *both* dentry counters,
* since we checked the sequence number of the parent after we got
* the child sequence number. So we know the parent must still
* be valid if the child sequence number is still valid.
*/
if (!dentry) {
if (read_seqcount_retry(&parent->d_seq, nd->seq))
goto out;
BUG_ON(nd->inode != parent->d_inode);
} else {
if (!lockref_get_not_dead(&dentry->d_lockref))
goto out;
if (read_seqcount_retry(&dentry->d_seq, seq))
goto drop_dentry;
}
/*
* Sequence counts matched. Now make sure that the root is
* still valid and get it if required.
*/
if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) {
rcu_read_unlock();
dput(dentry);
return -ECHILD;
}
}
rcu_read_unlock();
return 0;
drop_dentry:
rcu_read_unlock();
dput(dentry);
goto drop_root_mnt;
out2:
nd->path.mnt = NULL;
out1:
nd->path.dentry = NULL;
out:
rcu_read_unlock();
drop_root_mnt:
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
return -ECHILD;
}
static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq)
{
if (unlikely(!legitimize_path(nd, link, seq))) {
drop_links(nd);
nd->depth = 0;
nd->flags &= ~LOOKUP_RCU;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
rcu_read_unlock();
	} else if (likely(unlazy_walk(nd, NULL, 0) == 0)) {
return 0;
}
path_put(link);
return -ECHILD;
}
static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
{
return dentry->d_op->d_revalidate(dentry, flags);
}
/**
* complete_walk - successful completion of path walk
 * @nd: pointer to nameidata
*
* If we had been in RCU mode, drop out of it and legitimize nd->path.
* Revalidate the final result, unless we'd already done that during
* the path walk or the filesystem doesn't ask for it. Return 0 on
* success, -error on failure. In case of failure caller does not
* need to drop nd->path.
*/
static int complete_walk(struct nameidata *nd)
{
struct dentry *dentry = nd->path.dentry;
int status;
if (nd->flags & LOOKUP_RCU) {
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
if (unlikely(unlazy_walk(nd, NULL, 0)))
return -ECHILD;
}
if (likely(!(nd->flags & LOOKUP_JUMPED)))
return 0;
if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
return 0;
status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
if (status > 0)
return 0;
if (!status)
status = -ESTALE;
return status;
}
static void set_root(struct nameidata *nd)
{
get_fs_root(current->fs, &nd->root);
}
static void set_root_rcu(struct nameidata *nd)
{
struct fs_struct *fs = current->fs;
unsigned seq;
do {
seq = read_seqcount_begin(&fs->seq);
nd->root = fs->root;
nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
} while (read_seqcount_retry(&fs->seq, seq));
}
static void path_put_conditional(struct path *path, struct nameidata *nd)
{
dput(path->dentry);
if (path->mnt != nd->path.mnt)
mntput(path->mnt);
}
static inline void path_to_nameidata(const struct path *path,
struct nameidata *nd)
{
if (!(nd->flags & LOOKUP_RCU)) {
dput(nd->path.dentry);
if (nd->path.mnt != path->mnt)
mntput(nd->path.mnt);
}
nd->path.mnt = path->mnt;
nd->path.dentry = path->dentry;
}
/*
* Helper to directly jump to a known parsed path from ->follow_link,
* caller must have taken a reference to path beforehand.
*/
void nd_jump_link(struct path *path)
{
struct nameidata *nd = current->nameidata;
path_put(&nd->path);
nd->path = *path;
nd->inode = nd->path.dentry->d_inode;
nd->flags |= LOOKUP_JUMPED;
}
static inline void put_link(struct nameidata *nd)
{
struct saved *last = nd->stack + --nd->depth;
struct inode *inode = last->inode;
if (last->cookie && inode->i_op->put_link)
inode->i_op->put_link(inode, last->cookie);
if (!(nd->flags & LOOKUP_RCU))
path_put(&last->link);
}
int sysctl_protected_symlinks __read_mostly = 1;
int sysctl_protected_hardlinks __read_mostly = 1;
/**
* may_follow_link - Check symlink following for unsafe situations
* @nd: nameidata pathwalk data
*
* In the case of the sysctl_protected_symlinks sysctl being enabled,
* CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
* in a sticky world-writable directory. This is to protect privileged
* processes from failing races against path names that may change out
* from under them by way of other users creating malicious symlinks.
* It will permit symlinks to be followed only when outside a sticky
* world-writable directory, or when the uid of the symlink and follower
* match, or when the directory owner matches the symlink's owner.
*
* Returns 0 if following the symlink is allowed, -ve on error.
*/
static inline int may_follow_link(struct nameidata *nd)
{
const struct inode *inode;
const struct inode *parent;
kuid_t puid;
if (!sysctl_protected_symlinks)
return 0;
/* Allowed if owner and follower match. */
inode = nd->stack[0].inode;
if (uid_eq(current_cred()->fsuid, inode->i_uid))
return 0;
	/* Allowed if parent directory is not both sticky and world-writable. */
parent = nd->inode;
if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
return 0;
/* Allowed if parent directory and link owner match. */
puid = parent->i_uid;
if (uid_valid(puid) && uid_eq(puid, inode->i_uid))
return 0;
if (nd->flags & LOOKUP_RCU)
return -ECHILD;
audit_log_link_denied("follow_link", &nd->stack[0].link);
return -EACCES;
}
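/*
 * For example, with this sysctl enabled a root-owned process opening
 * /tmp/foo is refused when foo is a symlink created by another user:
 * /tmp is sticky and world-writable, and neither the follower's fsuid
 * nor the directory owner matches the symlink's owner.
 */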
/**
* safe_hardlink_source - Check for safe hardlink conditions
* @inode: the source inode to hardlink from
*
 * Return false if at least one of the following conditions holds:
* - inode is not a regular file
* - inode is setuid
* - inode is setgid and group-exec
* - access failure for read and write
*
* Otherwise returns true.
*/
static bool safe_hardlink_source(struct inode *inode)
{
umode_t mode = inode->i_mode;
/* Special files should not get pinned to the filesystem. */
if (!S_ISREG(mode))
return false;
/* Setuid files should not get pinned to the filesystem. */
if (mode & S_ISUID)
return false;
/* Executable setgid files should not get pinned to the filesystem. */
if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
return false;
/* Hardlinking to unreadable or unwritable sources is dangerous. */
if (inode_permission(inode, MAY_READ | MAY_WRITE))
return false;
return true;
}
/**
* may_linkat - Check permissions for creating a hardlink
* @link: the source to hardlink from
*
* Block hardlink when all of:
* - sysctl_protected_hardlinks enabled
* - fsuid does not match inode
* - hardlink source is unsafe (see safe_hardlink_source() above)
* - not CAP_FOWNER in a namespace with the inode owner uid mapped
*
* Returns 0 if successful, -ve on error.
*/
static int may_linkat(struct path *link)
{
struct inode *inode;
if (!sysctl_protected_hardlinks)
return 0;
inode = link->dentry->d_inode;
/* Source inode owner (or CAP_FOWNER) can hardlink all they like,
* otherwise, it must be a safe source.
*/
if (inode_owner_or_capable(inode) || safe_hardlink_source(inode))
return 0;
audit_log_link_denied("linkat", link);
return -EPERM;
}
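/*
 * For example, with sysctl_protected_hardlinks enabled an unprivileged
 * user cannot hardlink another user's setuid binary into a directory
 * they control: the source is unsafe and the user lacks ownership and
 * CAP_FOWNER.
 */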
static __always_inline
const char *get_link(struct nameidata *nd)
{
struct saved *last = nd->stack + nd->depth - 1;
struct dentry *dentry = last->link.dentry;
struct inode *inode = last->inode;
int error;
const char *res;
if (!(nd->flags & LOOKUP_RCU)) {
touch_atime(&last->link);
cond_resched();
} else if (atime_needs_update(&last->link, inode)) {
if (unlikely(unlazy_walk(nd, NULL, 0)))
return ERR_PTR(-ECHILD);
touch_atime(&last->link);
}
error = security_inode_follow_link(dentry, inode,
nd->flags & LOOKUP_RCU);
if (unlikely(error))
return ERR_PTR(error);
nd->last_type = LAST_BIND;
res = inode->i_link;
if (!res) {
if (nd->flags & LOOKUP_RCU) {
if (unlikely(unlazy_walk(nd, NULL, 0)))
return ERR_PTR(-ECHILD);
}
res = inode->i_op->follow_link(dentry, &last->cookie);
if (IS_ERR_OR_NULL(res)) {
last->cookie = NULL;
return res;
}
}
if (*res == '/') {
if (nd->flags & LOOKUP_RCU) {
struct dentry *d;
if (!nd->root.mnt)
set_root_rcu(nd);
nd->path = nd->root;
d = nd->path.dentry;
nd->inode = d->d_inode;
nd->seq = nd->root_seq;
if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq)))
return ERR_PTR(-ECHILD);
} else {
if (!nd->root.mnt)
set_root(nd);
path_put(&nd->path);
nd->path = nd->root;
path_get(&nd->root);
nd->inode = nd->path.dentry->d_inode;
}
nd->flags |= LOOKUP_JUMPED;
while (unlikely(*++res == '/'))
;
}
if (!*res)
res = NULL;
return res;
}
/*
* follow_up - Find the mountpoint of path's vfsmount
*
* Given a path, find the mountpoint of its source file system.
* Replace @path with the path of the mountpoint in the parent mount.
* Up is towards /.
*
* Return 1 if we went up a level and 0 if we were already at the
* root.
*/
int follow_up(struct path *path)
{
struct mount *mnt = real_mount(path->mnt);
struct mount *parent;
struct dentry *mountpoint;
read_seqlock_excl(&mount_lock);
parent = mnt->mnt_parent;
if (parent == mnt) {
read_sequnlock_excl(&mount_lock);
return 0;
}
mntget(&parent->mnt);
mountpoint = dget(mnt->mnt_mountpoint);
read_sequnlock_excl(&mount_lock);
dput(path->dentry);
path->dentry = mountpoint;
mntput(path->mnt);
path->mnt = &parent->mnt;
return 1;
}
EXPORT_SYMBOL(follow_up);
/*
* Perform an automount
* - return -EISDIR to tell follow_managed() to stop and return the path we
* were called with.
*/
static int follow_automount(struct path *path, struct nameidata *nd,
bool *need_mntput)
{
struct vfsmount *mnt;
const struct cred *old_cred;
int err;
if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
return -EREMOTE;
/* We don't want to mount if someone's just doing a stat -
* unless they're stat'ing a directory and appended a '/' to
* the name.
*
* We do, however, want to mount if someone wants to open or
* create a file of any type under the mountpoint, wants to
* traverse through the mountpoint or wants to open the
* mounted directory. Also, autofs may mark negative dentries
* as being automount points. These will need the attentions
* of the daemon to instantiate them before they can be used.
*/
if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
path->dentry->d_inode)
return -EISDIR;
if (path->dentry->d_sb->s_user_ns != &init_user_ns)
return -EACCES;
nd->total_link_count++;
if (nd->total_link_count >= 40)
return -ELOOP;
old_cred = override_creds(&init_cred);
mnt = path->dentry->d_op->d_automount(path);
revert_creds(old_cred);
if (IS_ERR(mnt)) {
/*
* The filesystem is allowed to return -EISDIR here to indicate
* it doesn't want to automount. For instance, autofs would do
* this so that its userspace daemon can mount on this dentry.
*
* However, we can only permit this if it's a terminal point in
* the path being looked up; if it wasn't then the remainder of
* the path is inaccessible and we should say so.
*/
if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT))
return -EREMOTE;
return PTR_ERR(mnt);
}
if (!mnt) /* mount collision */
return 0;
if (!*need_mntput) {
/* lock_mount() may release path->mnt on error */
mntget(path->mnt);
*need_mntput = true;
}
err = finish_automount(mnt, path);
switch (err) {
case -EBUSY:
/* Someone else made a mount here whilst we were busy */
return 0;
case 0:
path_put(path);
path->mnt = mnt;
path->dentry = dget(mnt->mnt_root);
return 0;
default:
return err;
}
}
/*
* Handle a dentry that is managed in some way.
* - Flagged for transit management (autofs)
* - Flagged as mountpoint
* - Flagged as automount point
*
* This may only be called in refwalk mode.
*
* Serialization is taken care of in namespace.c
*/
static int follow_managed(struct path *path, struct nameidata *nd)
{
struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
unsigned managed;
bool need_mntput = false;
int ret = 0;
/* Given that we're not holding a lock here, we retain the value in a
* local variable for each dentry as we look at it so that we don't see
* the components of that value change under us */
while (managed = ACCESS_ONCE(path->dentry->d_flags),
managed &= DCACHE_MANAGED_DENTRY,
unlikely(managed != 0)) {
/* Allow the filesystem to manage the transit without i_mutex
* being held. */
if (managed & DCACHE_MANAGE_TRANSIT) {
BUG_ON(!path->dentry->d_op);
BUG_ON(!path->dentry->d_op->d_manage);
ret = path->dentry->d_op->d_manage(path->dentry, false);
if (ret < 0)
break;
}
/* Transit to a mounted filesystem. */
if (managed & DCACHE_MOUNTED) {
struct vfsmount *mounted = lookup_mnt(path);
if (mounted) {
dput(path->dentry);
if (need_mntput)
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
need_mntput = true;
continue;
}
/* Something is mounted on this dentry in another
* namespace and/or whatever was mounted there in this
* namespace got unmounted before lookup_mnt() could
* get it */
}
/* Handle an automount point */
if (managed & DCACHE_NEED_AUTOMOUNT) {
ret = follow_automount(path, nd, &need_mntput);
if (ret < 0)
break;
continue;
}
/* We didn't change the current path point */
break;
}
if (need_mntput && path->mnt == mnt)
mntput(path->mnt);
if (ret == -EISDIR)
ret = 0;
if (need_mntput)
nd->flags |= LOOKUP_JUMPED;
if (unlikely(ret < 0))
path_put_conditional(path, nd);
return ret;
}
int follow_down_one(struct path *path)
{
struct vfsmount *mounted;
mounted = lookup_mnt(path);
if (mounted) {
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
return 1;
}
return 0;
}
EXPORT_SYMBOL(follow_down_one);
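/*
 * Example (illustrative sketch): the inverse of the follow_up() loop
 * above - descend through every mount stacked on a mountpoint, one
 * layer at a time, until nothing else is mounted there:
 *
 *	while (follow_down_one(&path))
 *		;
 */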
static inline int managed_dentry_rcu(struct dentry *dentry)
{
return (dentry->d_flags & DCACHE_MANAGE_TRANSIT) ?
dentry->d_op->d_manage(dentry, true) : 0;
}
/*
* Try to skip to top of mountpoint pile in rcuwalk mode. Fail if
* we meet a managed dentry that would need blocking.
*/
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
struct inode **inode, unsigned *seqp)
{
for (;;) {
struct mount *mounted;
/*
* Don't forget we might have a non-mountpoint managed dentry
* that wants to block transit.
*/
switch (managed_dentry_rcu(path->dentry)) {
case -ECHILD:
default:
return false;
case -EISDIR:
return true;
case 0:
break;
}
if (!d_mountpoint(path->dentry))
return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
mounted = __lookup_mnt(path->mnt, path->dentry);
if (!mounted)
break;
path->mnt = &mounted->mnt;
path->dentry = mounted->mnt.mnt_root;
nd->flags |= LOOKUP_JUMPED;
*seqp = read_seqcount_begin(&path->dentry->d_seq);
/*
* Update the inode too. We don't need to re-check the
* dentry sequence number here after this d_inode read,
* because a mount-point is always pinned.
*/
*inode = path->dentry->d_inode;
}
return !read_seqretry(&mount_lock, nd->m_seq) &&
!(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
}
static int follow_dotdot_rcu(struct nameidata *nd)
{
struct inode *inode = nd->inode;
if (!nd->root.mnt)
set_root_rcu(nd);
while (1) {
if (path_equal(&nd->path, &nd->root))
break;
if (nd->path.dentry != nd->path.mnt->mnt_root) {
struct dentry *old = nd->path.dentry;
struct dentry *parent = old->d_parent;
unsigned seq;
inode = parent->d_inode;
seq = read_seqcount_begin(&parent->d_seq);
if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
return -ECHILD;
nd->path.dentry = parent;
nd->seq = seq;
if (unlikely(!path_connected(&nd->path)))
return -ENOENT;
break;
} else {
struct mount *mnt = real_mount(nd->path.mnt);
struct mount *mparent = mnt->mnt_parent;
struct dentry *mountpoint = mnt->mnt_mountpoint;
struct inode *inode2 = mountpoint->d_inode;
unsigned seq = read_seqcount_begin(&mountpoint->d_seq);
if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
return -ECHILD;
if (&mparent->mnt == nd->path.mnt)
break;
/* we know that mountpoint was pinned */
nd->path.dentry = mountpoint;
nd->path.mnt = &mparent->mnt;
inode = inode2;
nd->seq = seq;
}
}
while (unlikely(d_mountpoint(nd->path.dentry))) {
struct mount *mounted;
mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
return -ECHILD;
if (!mounted)
break;
nd->path.mnt = &mounted->mnt;
nd->path.dentry = mounted->mnt.mnt_root;
inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
nd->inode = inode;
return 0;
}
/*
* Follow down to the covering mount currently visible to userspace. At each
* point, the filesystem owning that dentry may be queried as to whether the
* caller is permitted to proceed or not.
*/
int follow_down(struct path *path)
{
unsigned managed;
int ret;
while (managed = ACCESS_ONCE(path->dentry->d_flags),
unlikely(managed & DCACHE_MANAGED_DENTRY)) {
/* Allow the filesystem to manage the transit without i_mutex
* being held.
*
* We indicate to the filesystem if someone is trying to mount
* something here. This gives autofs the chance to deny anyone
* other than its daemon the right to mount on its
* superstructure.
*
* The filesystem may sleep at this point.
*/
if (managed & DCACHE_MANAGE_TRANSIT) {
BUG_ON(!path->dentry->d_op);
BUG_ON(!path->dentry->d_op->d_manage);
ret = path->dentry->d_op->d_manage(
path->dentry, false);
if (ret < 0)
return ret == -EISDIR ? 0 : ret;
}
/* Transit to a mounted filesystem. */
if (managed & DCACHE_MOUNTED) {
struct vfsmount *mounted = lookup_mnt(path);
if (!mounted)
break;
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
continue;
}
/* Don't handle automount points here */
break;
}
return 0;
}
EXPORT_SYMBOL(follow_down);
/*
* Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
*/
static void follow_mount(struct path *path)
{
while (d_mountpoint(path->dentry)) {
struct vfsmount *mounted = lookup_mnt(path);
if (!mounted)
break;
dput(path->dentry);
mntput(path->mnt);
path->mnt = mounted;
path->dentry = dget(mounted->mnt_root);
}
}
static int follow_dotdot(struct nameidata *nd)
{
if (!nd->root.mnt)
set_root(nd);
while(1) {
struct dentry *old = nd->path.dentry;
if (nd->path.dentry == nd->root.dentry &&
nd->path.mnt == nd->root.mnt) {
break;
}
if (nd->path.dentry != nd->path.mnt->mnt_root) {
/* rare case of legitimate dget_parent()... */
nd->path.dentry = dget_parent(nd->path.dentry);
dput(old);
if (unlikely(!path_connected(&nd->path)))
return -ENOENT;
break;
}
if (!follow_up(&nd->path))
break;
}
follow_mount(&nd->path);
nd->inode = nd->path.dentry->d_inode;
return 0;
}
/*
* This looks up the name in dcache, possibly revalidates the old dentry and
* allocates a new one if not found or not valid. In the need_lookup argument
* returns whether i_op->lookup is necessary.
*
* dir->d_inode->i_mutex must be held
*/
static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
unsigned int flags, bool *need_lookup)
{
struct dentry *dentry;
int error;
*need_lookup = false;
dentry = d_lookup(dir, name);
if (dentry) {
if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
error = d_revalidate(dentry, flags);
if (unlikely(error <= 0)) {
if (error < 0) {
dput(dentry);
return ERR_PTR(error);
} else {
d_invalidate(dentry);
dput(dentry);
dentry = NULL;
}
}
}
}
if (!dentry) {
dentry = d_alloc(dir, name);
if (unlikely(!dentry))
return ERR_PTR(-ENOMEM);
*need_lookup = true;
}
return dentry;
}
/*
* Call i_op->lookup on the dentry. The dentry must be negative and
* unhashed.
*
* dir->d_inode->i_mutex must be held
*/
static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct dentry *old;
/* Don't create child dentry for a dead directory. */
if (unlikely(IS_DEADDIR(dir))) {
dput(dentry);
return ERR_PTR(-ENOENT);
}
old = dir->i_op->lookup(dir, dentry, flags);
if (unlikely(old)) {
dput(dentry);
dentry = old;
}
return dentry;
}
static struct dentry *__lookup_hash(struct qstr *name,
struct dentry *base, unsigned int flags)
{
bool need_lookup;
struct dentry *dentry;
dentry = lookup_dcache(name, base, flags, &need_lookup);
if (!need_lookup)
return dentry;
return lookup_real(base->d_inode, dentry, flags);
}
/*
* It's more convoluted than I'd like it to be, but... it's still fairly
* small and for now I'd prefer to have fast path as straight as possible.
* It _is_ time-critical.
*/
static int lookup_fast(struct nameidata *nd,
struct path *path, struct inode **inode,
unsigned *seqp)
{
struct vfsmount *mnt = nd->path.mnt;
struct dentry *dentry, *parent = nd->path.dentry;
int need_reval = 1;
int status = 1;
int err;
/*
* Rename seqlock is not required here because in the off chance
* of a false negative due to a concurrent rename, we're going to
* do the non-racy lookup, below.
*/
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
bool negative;
dentry = __d_lookup_rcu(parent, &nd->last, &seq);
if (!dentry)
goto unlazy;
/*
* This sequence count validates that the inode matches
* the dentry name information from lookup.
*/
*inode = d_backing_inode(dentry);
negative = d_is_negative(dentry);
if (read_seqcount_retry(&dentry->d_seq, seq))
return -ECHILD;
/*
* This sequence count validates that the parent had no
* changes while we did the lookup of the dentry above.
*
* The memory barrier in read_seqcount_begin of child is
* enough, we can use __read_seqcount_retry here.
*/
if (__read_seqcount_retry(&parent->d_seq, nd->seq))
return -ECHILD;
*seqp = seq;
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
status = d_revalidate(dentry, nd->flags);
if (unlikely(status <= 0)) {
if (status != -ECHILD)
need_reval = 0;
goto unlazy;
}
}
/*
* Note: do negative dentry check after revalidation in
* case that drops it.
*/
if (negative)
return -ENOENT;
path->mnt = mnt;
path->dentry = dentry;
if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
return 0;
unlazy:
if (unlazy_walk(nd, dentry, seq))
return -ECHILD;
} else {
dentry = __d_lookup(parent, &nd->last);
}
if (unlikely(!dentry))
goto need_lookup;
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
status = d_revalidate(dentry, nd->flags);
if (unlikely(status <= 0)) {
if (status < 0) {
dput(dentry);
return status;
}
d_invalidate(dentry);
dput(dentry);
goto need_lookup;
}
if (unlikely(d_is_negative(dentry))) {
dput(dentry);
return -ENOENT;
}
path->mnt = mnt;
path->dentry = dentry;
err = follow_managed(path, nd);
if (likely(!err))
*inode = d_backing_inode(path->dentry);
return err;
need_lookup:
return 1;
}
/* Fast lookup failed, do it the slow way */
static int lookup_slow(struct nameidata *nd, struct path *path)
{
struct dentry *dentry, *parent;
parent = nd->path.dentry;
BUG_ON(nd->inode != parent->d_inode);
mutex_lock(&parent->d_inode->i_mutex);
dentry = __lookup_hash(&nd->last, parent, nd->flags);
mutex_unlock(&parent->d_inode->i_mutex);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
path->mnt = nd->path.mnt;
path->dentry = dentry;
return follow_managed(path, nd);
}
static inline int may_lookup(struct nameidata *nd)
{
if (nd->flags & LOOKUP_RCU) {
int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
if (err != -ECHILD)
return err;
if (unlazy_walk(nd, NULL, 0))
return -ECHILD;
}
return inode_permission(nd->inode, MAY_EXEC);
}
static inline int handle_dots(struct nameidata *nd, int type)
{
if (type == LAST_DOTDOT) {
if (nd->flags & LOOKUP_RCU) {
return follow_dotdot_rcu(nd);
} else
return follow_dotdot(nd);
}
return 0;
}
static int pick_link(struct nameidata *nd, struct path *link,
struct inode *inode, unsigned seq)
{
int error;
struct saved *last;
if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) {
path_to_nameidata(link, nd);
return -ELOOP;
}
if (!(nd->flags & LOOKUP_RCU)) {
if (link->mnt == nd->path.mnt)
mntget(link->mnt);
}
error = nd_alloc_stack(nd);
if (unlikely(error)) {
if (error == -ECHILD) {
if (unlikely(unlazy_link(nd, link, seq)))
return -ECHILD;
error = nd_alloc_stack(nd);
}
if (error) {
path_put(link);
return error;
}
}
last = nd->stack + nd->depth++;
last->link = *link;
last->cookie = NULL;
last->inode = inode;
last->seq = seq;
return 1;
}
/*
* Do we need to follow links? We _really_ want to be able
* to do this check without having to look at inode->i_op,
* so we keep a cache of "no, this doesn't need follow_link"
* for the common case.
*/
static inline int should_follow_link(struct nameidata *nd, struct path *link,
int follow,
struct inode *inode, unsigned seq)
{
if (likely(!d_is_symlink(link->dentry)))
return 0;
if (!follow)
return 0;
/* make sure that d_is_symlink above matches inode */
if (nd->flags & LOOKUP_RCU) {
if (read_seqcount_retry(&link->dentry->d_seq, seq))
return -ECHILD;
}
return pick_link(nd, link, inode, seq);
}
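/*
 * Descriptive note (inferred from the callers below): WALK_GET asks
 * walk_component() to pick up the component if it turns out to be a
 * symlink, so the caller can follow it; WALK_PUT asks it to drop the
 * symlink currently on top of nd->stack once the component has been
 * handled, since that link's body has been fully walked.
 */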
enum {WALK_GET = 1, WALK_PUT = 2};
static int walk_component(struct nameidata *nd, int flags)
{
struct path path;
struct inode *inode;
unsigned seq;
int err;
/*
* "." and ".." are special - ".." especially so because it has
* to be able to know about the current root directory and
* parent relationships.
*/
if (unlikely(nd->last_type != LAST_NORM)) {
err = handle_dots(nd, nd->last_type);
if (flags & WALK_PUT)
put_link(nd);
return err;
}
err = lookup_fast(nd, &path, &inode, &seq);
if (unlikely(err)) {
if (err < 0)
return err;
err = lookup_slow(nd, &path);
if (err < 0)
return err;
seq = 0; /* we are already out of RCU mode */
err = -ENOENT;
if (d_is_negative(path.dentry))
goto out_path_put;
inode = d_backing_inode(path.dentry);
}
if (flags & WALK_PUT)
put_link(nd);
err = should_follow_link(nd, &path, flags & WALK_GET, inode, seq);
if (unlikely(err))
return err;
path_to_nameidata(&path, nd);
nd->inode = inode;
nd->seq = seq;
return 0;
out_path_put:
path_to_nameidata(&path, nd);
return err;
}
/*
* We can do the critical dentry name comparison and hashing
* operations one word at a time, but we are limited to:
*
* - Architectures with fast unaligned word accesses. We could
* do a "get_unaligned()" if this helps and is sufficiently
* fast.
*
* - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
* do not trap on the (extremely unlikely) case of a page
 *   crossing operation).
*
* - Furthermore, we need an efficient 64-bit compile for the
* 64-bit case in order to generate the "number of bytes in
 *   the final mask". Again, that could be replaced with an
 *   efficient population count instruction or similar.
*/
#ifdef CONFIG_DCACHE_WORD_ACCESS
#include <asm/word-at-a-time.h>
#ifdef CONFIG_64BIT
static inline unsigned int fold_hash(unsigned long hash)
{
return hash_64(hash, 32);
}
#else /* 32-bit case */
#define fold_hash(x) (x)
#endif
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
unsigned long a, mask;
unsigned long hash = 0;
for (;;) {
a = load_unaligned_zeropad(name);
if (len < sizeof(unsigned long))
break;
hash += a;
hash *= 9;
name += sizeof(unsigned long);
len -= sizeof(unsigned long);
if (!len)
goto done;
}
mask = bytemask_from_count(len);
hash += mask & a;
done:
return fold_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
/*
* Calculate the length and hash of the path component, and
* return the "hash_len" as the result.
*/
static inline u64 hash_name(const char *name)
{
unsigned long a, b, adata, bdata, mask, hash, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
hash = a = 0;
len = -sizeof(unsigned long);
do {
hash = (hash + a) * 9;
len += sizeof(unsigned long);
a = load_unaligned_zeropad(name+len);
b = a ^ REPEAT_BYTE('/');
} while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
adata = prep_zero_mask(a, adata, &constants);
bdata = prep_zero_mask(b, bdata, &constants);
mask = create_zero_mask(adata | bdata);
hash += a & zero_bytemask(mask);
len += find_zero(mask);
return hashlen_create(fold_hash(hash), len);
}
#else
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
unsigned long hash = init_name_hash();
while (len--)
hash = partial_name_hash(*name++, hash);
return end_name_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
/*
* We know there's a real path component here of at least
* one character.
*/
static inline u64 hash_name(const char *name)
{
unsigned long hash = init_name_hash();
unsigned long len = 0, c;
c = (unsigned char)*name;
do {
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
} while (c && c != '/');
return hashlen_create(end_name_hash(hash), len);
}
#endif
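/*
 * Descriptive note (hashlen_create()/hashlen_len() are defined in
 * dcache.h): both hash_name() variants return the length and the hash
 * of a single component packed into one u64 - conventionally the
 * length in the upper 32 bits and the hash in the lower 32.  For the
 * input "usr/bin", hash_name() stops at the '/' and hashlen_len() of
 * the result is 3; link_path_walk() below then advances the name
 * pointer by exactly that many bytes.
 */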
/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect 'base' to be positive and a directory.
*
* Returns 0 and nd will have valid dentry and mnt on success.
* Returns error and drops reference to input namei data on failure.
*/
static int link_path_walk(const char *name, struct nameidata *nd)
{
int err;
while (*name=='/')
name++;
if (!*name)
return 0;
/* At this point we know we have a real path component. */
for(;;) {
u64 hash_len;
int type;
err = may_lookup(nd);
if (err)
return err;
hash_len = hash_name(name);
type = LAST_NORM;
if (name[0] == '.') switch (hashlen_len(hash_len)) {
case 2:
if (name[1] == '.') {
type = LAST_DOTDOT;
nd->flags |= LOOKUP_JUMPED;
}
break;
case 1:
type = LAST_DOT;
}
if (likely(type == LAST_NORM)) {
struct dentry *parent = nd->path.dentry;
nd->flags &= ~LOOKUP_JUMPED;
if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
struct qstr this = { { .hash_len = hash_len }, .name = name };
err = parent->d_op->d_hash(parent, &this);
if (err < 0)
return err;
hash_len = this.hash_len;
name = this.name;
}
}
nd->last.hash_len = hash_len;
nd->last.name = name;
nd->last_type = type;
name += hashlen_len(hash_len);
if (!*name)
goto OK;
/*
* If it wasn't NUL, we know it was '/'. Skip that
* slash, and continue until no more slashes.
*/
do {
name++;
} while (unlikely(*name == '/'));
if (unlikely(!*name)) {
OK:
/* pathname body, done */
if (!nd->depth)
return 0;
name = nd->stack[nd->depth - 1].name;
/* trailing symlink, done */
if (!name)
return 0;
/* last component of nested symlink */
err = walk_component(nd, WALK_GET | WALK_PUT);
} else {
err = walk_component(nd, WALK_GET);
}
if (err < 0)
return err;
if (err) {
const char *s = get_link(nd);
if (IS_ERR(s))
return PTR_ERR(s);
err = 0;
if (unlikely(!s)) {
/* jumped */
put_link(nd);
} else {
nd->stack[nd->depth - 1].name = name;
name = s;
continue;
}
}
if (unlikely(!d_can_lookup(nd->path.dentry))) {
if (nd->flags & LOOKUP_RCU) {
if (unlazy_walk(nd, NULL, 0))
return -ECHILD;
}
return -ENOTDIR;
}
}
}
static const char *path_init(struct nameidata *nd, unsigned flags)
{
int retval = 0;
const char *s = nd->name->name;
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
nd->depth = 0;
if (flags & LOOKUP_ROOT) {
struct dentry *root = nd->root.dentry;
struct inode *inode = root->d_inode;
if (*s) {
if (!d_can_lookup(root))
return ERR_PTR(-ENOTDIR);
retval = inode_permission(inode, MAY_EXEC);
if (retval)
return ERR_PTR(retval);
}
nd->path = nd->root;
nd->inode = inode;
if (flags & LOOKUP_RCU) {
rcu_read_lock();
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
nd->root_seq = nd->seq;
nd->m_seq = read_seqbegin(&mount_lock);
} else {
path_get(&nd->path);
}
return s;
}
nd->root.mnt = NULL;
nd->m_seq = read_seqbegin(&mount_lock);
if (*s == '/') {
if (flags & LOOKUP_RCU) {
rcu_read_lock();
set_root_rcu(nd);
nd->seq = nd->root_seq;
} else {
set_root(nd);
path_get(&nd->root);
}
nd->path = nd->root;
} else if (nd->dfd == AT_FDCWD) {
if (flags & LOOKUP_RCU) {
struct fs_struct *fs = current->fs;
unsigned seq;
rcu_read_lock();
do {
seq = read_seqcount_begin(&fs->seq);
nd->path = fs->pwd;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
} while (read_seqcount_retry(&fs->seq, seq));
} else {
get_fs_pwd(current->fs, &nd->path);
}
} else {
/* Caller must check execute permissions on the starting path component */
struct fd f = fdget_raw(nd->dfd);
struct dentry *dentry;
if (!f.file)
return ERR_PTR(-EBADF);
dentry = f.file->f_path.dentry;
if (*s) {
if (!d_can_lookup(dentry)) {
fdput(f);
return ERR_PTR(-ENOTDIR);
}
}
nd->path = f.file->f_path;
if (flags & LOOKUP_RCU) {
rcu_read_lock();
nd->inode = nd->path.dentry->d_inode;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
} else {
path_get(&nd->path);
nd->inode = nd->path.dentry->d_inode;
}
fdput(f);
return s;
}
nd->inode = nd->path.dentry->d_inode;
if (!(flags & LOOKUP_RCU))
return s;
if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
return s;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
rcu_read_unlock();
return ERR_PTR(-ECHILD);
}
static const char *trailing_symlink(struct nameidata *nd)
{
const char *s;
int error = may_follow_link(nd);
if (unlikely(error))
return ERR_PTR(error);
nd->flags |= LOOKUP_PARENT;
nd->stack[0].name = NULL;
s = get_link(nd);
return s ? s : "";
}
static inline int lookup_last(struct nameidata *nd)
{
if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
nd->flags &= ~LOOKUP_PARENT;
return walk_component(nd,
nd->flags & LOOKUP_FOLLOW
? nd->depth
? WALK_PUT | WALK_GET
: WALK_GET
: 0);
}
/* Returns 0 and nd will be valid on success; returns an error otherwise. */
static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
{
const char *s = path_init(nd, flags);
int err;
if (IS_ERR(s))
return PTR_ERR(s);
while (!(err = link_path_walk(s, nd))
&& ((err = lookup_last(nd)) > 0)) {
s = trailing_symlink(nd);
if (IS_ERR(s)) {
err = PTR_ERR(s);
break;
}
}
if (!err)
err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY)
if (!d_can_lookup(nd->path.dentry))
err = -ENOTDIR;
if (!err) {
*path = nd->path;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
}
terminate_walk(nd);
return err;
}
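/*
 * filename_lookup() below tries the walk in RCU mode first; -ECHILD
 * means the lazy walk had to bail out, so it is retried in ref-walk
 * mode, and -ESTALE triggers one more pass with LOOKUP_REVAL to force
 * revalidation.  The same three-stage fallback recurs in
 * filename_parentat(), filename_mountpoint() and do_filp_open().
 */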
static int filename_lookup(int dfd, struct filename *name, unsigned flags,
struct path *path, struct path *root)
{
int retval;
struct nameidata nd;
if (IS_ERR(name))
return PTR_ERR(name);
if (unlikely(root)) {
nd.root = *root;
flags |= LOOKUP_ROOT;
}
set_nameidata(&nd, dfd, name);
retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
if (unlikely(retval == -ECHILD))
retval = path_lookupat(&nd, flags, path);
if (unlikely(retval == -ESTALE))
retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
if (likely(!retval))
audit_inode(name, path->dentry, flags & LOOKUP_PARENT);
restore_nameidata();
putname(name);
return retval;
}
/* Returns 0 and nd will be valid on success; returns an error otherwise. */
static int path_parentat(struct nameidata *nd, unsigned flags,
struct path *parent)
{
const char *s = path_init(nd, flags);
int err;
if (IS_ERR(s))
return PTR_ERR(s);
err = link_path_walk(s, nd);
if (!err)
err = complete_walk(nd);
if (!err) {
*parent = nd->path;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
}
terminate_walk(nd);
return err;
}
static struct filename *filename_parentat(int dfd, struct filename *name,
unsigned int flags, struct path *parent,
struct qstr *last, int *type)
{
int retval;
struct nameidata nd;
if (IS_ERR(name))
return name;
set_nameidata(&nd, dfd, name);
retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
if (unlikely(retval == -ECHILD))
retval = path_parentat(&nd, flags, parent);
if (unlikely(retval == -ESTALE))
retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent);
if (likely(!retval)) {
*last = nd.last;
*type = nd.last_type;
audit_inode(name, parent->dentry, LOOKUP_PARENT);
} else {
putname(name);
name = ERR_PTR(retval);
}
restore_nameidata();
return name;
}
/* does lookup, returns the object with parent locked */
struct dentry *kern_path_locked(const char *name, struct path *path)
{
struct filename *filename;
struct dentry *d;
struct qstr last;
int type;
filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path,
&last, &type);
if (IS_ERR(filename))
return ERR_CAST(filename);
if (unlikely(type != LAST_NORM)) {
path_put(path);
putname(filename);
return ERR_PTR(-EINVAL);
}
mutex_lock_nested(&path->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
d = __lookup_hash(&last, path->dentry, 0);
if (IS_ERR(d)) {
mutex_unlock(&path->dentry->d_inode->i_mutex);
path_put(path);
}
putname(filename);
return d;
}
int kern_path(const char *name, unsigned int flags, struct path *path)
{
return filename_lookup(AT_FDCWD, getname_kernel(name),
flags, path, NULL);
}
EXPORT_SYMBOL(kern_path);
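/*
 * Example (illustrative sketch; the pathname is made up): resolve a
 * kernel-space pathname to a struct path and drop the reference when
 * done.
 *
 *	struct path p;
 *	int err = kern_path("/etc/fstab", LOOKUP_FOLLOW, &p);
 *	if (!err) {
 *		... use p.mnt / p.dentry ...
 *		path_put(&p);
 *	}
 */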
/**
* vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
* @dentry: pointer to dentry of the base directory
* @mnt: pointer to vfs mount of the base directory
* @name: pointer to file name
* @flags: lookup flags
* @path: pointer to struct path to fill
*/
int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
const char *name, unsigned int flags,
struct path *path)
{
struct path root = {.mnt = mnt, .dentry = dentry};
/* the first argument of filename_lookup() is ignored with root */
return filename_lookup(AT_FDCWD, getname_kernel(name),
flags , path, &root);
}
EXPORT_SYMBOL(vfs_path_lookup);
/**
* lookup_one_len - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
 * @len: length of the pathname component in @name (need not be NUL-terminated)
*
* Note that this routine is purely a helper for filesystem usage and should
 * not be called by generic code.  The caller must hold i_mutex on the
 * inode of @base.
*/
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
struct qstr this;
unsigned int c;
int err;
WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
this.name = name;
this.len = len;
this.hash = full_name_hash(name, len);
if (!len)
return ERR_PTR(-EACCES);
if (unlikely(name[0] == '.')) {
if (len < 2 || (len == 2 && name[1] == '.'))
return ERR_PTR(-EACCES);
}
while (len--) {
c = *(const unsigned char *)name++;
if (c == '/' || c == '\0')
return ERR_PTR(-EACCES);
}
/*
* See if the low-level filesystem might want
* to use its own hash..
*/
if (base->d_flags & DCACHE_OP_HASH) {
int err = base->d_op->d_hash(base, &this);
if (err < 0)
return ERR_PTR(err);
}
err = inode_permission(base->d_inode, MAY_EXEC);
if (err)
return ERR_PTR(err);
return __lookup_hash(&this, base, 0);
}
EXPORT_SYMBOL(lookup_one_len);
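/*
 * Example (illustrative sketch; the names are made up): a filesystem
 * looking up a known child of a directory it already holds locked.
 * The result may be a negative dentry and must be dropped with dput().
 *
 *	mutex_lock(&dir->d_inode->i_mutex);
 *	child = lookup_one_len("data", dir, 4);
 *	mutex_unlock(&dir->d_inode->i_mutex);
 *	if (!IS_ERR(child)) {
 *		... use child ...
 *		dput(child);
 *	}
 */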
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
struct path *path, int *empty)
{
return filename_lookup(dfd, getname_flags(name, flags, empty),
flags, path, NULL);
}
EXPORT_SYMBOL(user_path_at_empty);
/*
 * NB: most callers don't do anything directly with the reference to the
 * struct filename, but the nd->last pointer points into the name string
* allocated by getname. So we must hold the reference to it until all
* path-walking is complete.
*/
static inline struct filename *
user_path_parent(int dfd, const char __user *path,
struct path *parent,
struct qstr *last,
int *type,
unsigned int flags)
{
/* only LOOKUP_REVAL is allowed in extra flags */
return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL,
parent, last, type);
}
/**
* mountpoint_last - look up last component for umount
* @nd: pathwalk nameidata - currently pointing at parent directory of "last"
* @path: pointer to container for result
*
* This is a special lookup_last function just for umount. In this case, we
* need to resolve the path without doing any revalidation.
*
* The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
* mountpoints are always pinned in the dcache, their ancestors are too. Thus,
* in almost all cases, this lookup will be served out of the dcache. The only
* cases where it won't are if nd->last refers to a symlink or the path is
* bogus and it doesn't exist.
*
* Returns:
* -error: if there was an error during lookup. This includes -ENOENT if the
* lookup found a negative dentry. The nd->path reference will also be
* put in this case.
*
 *   0: if we successfully resolved nd->path and found it not to be a
* symlink that needs to be followed. "path" will also be populated.
* The nd->path reference will also be put.
*
* 1: if we successfully resolved nd->last and found it to be a symlink
* that needs to be followed. "path" will be populated with the path
* to the link, and nd->path will *not* be put.
*/
static int
mountpoint_last(struct nameidata *nd, struct path *path)
{
int error = 0;
struct dentry *dentry;
struct dentry *dir = nd->path.dentry;
/* If we're in rcuwalk, drop out of it to handle last component */
if (nd->flags & LOOKUP_RCU) {
if (unlazy_walk(nd, NULL, 0))
return -ECHILD;
}
nd->flags &= ~LOOKUP_PARENT;
if (unlikely(nd->last_type != LAST_NORM)) {
error = handle_dots(nd, nd->last_type);
if (error)
return error;
dentry = dget(nd->path.dentry);
goto done;
}
mutex_lock(&dir->d_inode->i_mutex);
dentry = d_lookup(dir, &nd->last);
if (!dentry) {
/*
* No cached dentry. Mounted dentries are pinned in the cache,
* so that means that this dentry is probably a symlink or the
* path doesn't actually point to a mounted dentry.
*/
dentry = d_alloc(dir, &nd->last);
if (!dentry) {
mutex_unlock(&dir->d_inode->i_mutex);
return -ENOMEM;
}
dentry = lookup_real(dir->d_inode, dentry, nd->flags);
if (IS_ERR(dentry)) {
mutex_unlock(&dir->d_inode->i_mutex);
return PTR_ERR(dentry);
}
}
mutex_unlock(&dir->d_inode->i_mutex);
done:
if (d_is_negative(dentry)) {
dput(dentry);
return -ENOENT;
}
if (nd->depth)
put_link(nd);
path->dentry = dentry;
path->mnt = nd->path.mnt;
error = should_follow_link(nd, path, nd->flags & LOOKUP_FOLLOW,
d_backing_inode(dentry), 0);
if (unlikely(error))
return error;
mntget(path->mnt);
follow_mount(path);
return 0;
}
/**
* path_mountpoint - look up a path to be umounted
* @nd: lookup context
* @flags: lookup flags
* @path: pointer to container for result
*
* Look up the given name, but don't attempt to revalidate the last component.
* Returns 0 and "path" will be valid on success; Returns error otherwise.
*/
static int
path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
{
const char *s = path_init(nd, flags);
int err;
if (IS_ERR(s))
return PTR_ERR(s);
while (!(err = link_path_walk(s, nd)) &&
(err = mountpoint_last(nd, path)) > 0) {
s = trailing_symlink(nd);
if (IS_ERR(s)) {
err = PTR_ERR(s);
break;
}
}
terminate_walk(nd);
return err;
}
static int
filename_mountpoint(int dfd, struct filename *name, struct path *path,
unsigned int flags)
{
struct nameidata nd;
int error;
if (IS_ERR(name))
return PTR_ERR(name);
set_nameidata(&nd, dfd, name);
error = path_mountpoint(&nd, flags | LOOKUP_RCU, path);
if (unlikely(error == -ECHILD))
error = path_mountpoint(&nd, flags, path);
if (unlikely(error == -ESTALE))
error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
if (likely(!error))
audit_inode(name, path->dentry, 0);
restore_nameidata();
putname(name);
return error;
}
/**
* user_path_mountpoint_at - lookup a path from userland in order to umount it
* @dfd: directory file descriptor
* @name: pathname from userland
* @flags: lookup flags
* @path: pointer to container to hold result
*
* A umount is a special case for path walking. We're not actually interested
* in the inode in this situation, and ESTALE errors can be a problem. We
 * simply want to track down the dentry and vfsmount attached at the mountpoint
* and avoid revalidating the last component.
*
* Returns 0 and populates "path" on success.
*/
int
user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
struct path *path)
{
return filename_mountpoint(dfd, getname(name), path, flags);
}
int
kern_path_mountpoint(int dfd, const char *name, struct path *path,
unsigned int flags)
{
return filename_mountpoint(dfd, getname_kernel(name), path, flags);
}
EXPORT_SYMBOL(kern_path_mountpoint);
int __check_sticky(struct inode *dir, struct inode *inode)
{
kuid_t fsuid = current_fsuid();
if (uid_eq(inode->i_uid, fsuid))
return 0;
if (uid_eq(dir->i_uid, fsuid))
return 0;
return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
}
EXPORT_SYMBOL(__check_sticky);
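/*
 * Worked example: in a mode-1777 directory such as /tmp, check_sticky()
 * lets a user unlink or rename a file only if they own the file, own
 * the directory, or are capable of CAP_FOWNER with respect to the
 * inode - so one user cannot remove another user's temporary files.
 */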
/*
* Check whether we can remove a link victim from directory dir, check
* whether the type of victim is right.
* 1. We can't do it if dir is read-only (done in permission())
* 2. We should have write and exec permissions on dir
* 3. We can't remove anything from append-only dir
* 4. We can't do anything with immutable dir (done in permission())
* 5. If the sticky bit on dir is set we should either
* a. be owner of dir, or
* b. be owner of victim, or
* c. have CAP_FOWNER capability
 * 6. If the victim is append-only or immutable we can't do anything with
* links pointing to it.
* 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
* 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
* 9. We can't remove a root or mountpoint.
* 10. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
{
struct inode *inode = d_backing_inode(victim);
int error;
if (d_is_negative(victim))
return -ENOENT;
BUG_ON(!inode);
BUG_ON(victim->d_parent->d_inode != dir);
audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
if (error)
return error;
if (IS_APPEND(dir))
return -EPERM;
if (check_sticky(dir, inode) || IS_APPEND(inode) ||
IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
return -EPERM;
if (isdir) {
if (!d_is_dir(victim))
return -ENOTDIR;
if (IS_ROOT(victim))
return -EBUSY;
} else if (d_is_dir(victim))
return -EISDIR;
if (IS_DEADDIR(dir))
return -ENOENT;
if (victim->d_flags & DCACHE_NFSFS_RENAMED)
return -EBUSY;
return 0;
}
/* Check whether we can create an object with dentry child in directory
* dir.
* 1. We can't do it if child already exists (open has special treatment for
* this case, but since we are inlined it's OK)
* 2. We can't do it if dir is read-only (done in permission())
* 3. We should have write and exec permissions on dir
* 4. We can't do it if dir is immutable (done in permission())
*/
static inline int may_create(struct inode *dir, struct dentry *child)
{
audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
if (child->d_inode)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
/*
* p1 and p2 should be directories on the same fs.
*/
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
struct dentry *p;
if (p1 == p2) {
mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
return NULL;
}
mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
p = d_ancestor(p2, p1);
if (p) {
mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
return p;
}
p = d_ancestor(p1, p2);
if (p) {
mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
return p;
}
mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT2);
return NULL;
}
EXPORT_SYMBOL(lock_rename);
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
mutex_unlock(&p1->d_inode->i_mutex);
if (p1 != p2) {
mutex_unlock(&p2->d_inode->i_mutex);
mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
}
}
EXPORT_SYMBOL(unlock_rename);
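/*
 * Example (illustrative sketch): the classic caller pattern, as used by
 * the rename path.  lock_rename() returns the ancestor dentry if one
 * directory contains the other (the "trap"); a source or target dentry
 * equal to the trap would move a directory into itself and must fail.
 *
 *	trap = lock_rename(new_dir, old_dir);
 *	... look up source and target dentries ...
 *	if (source == trap || target == trap)
 *		goto fail;	(would create a loop)
 *	... perform the rename ...
 *	unlock_rename(new_dir, old_dir);
 */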
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool want_excl)
{
int error = may_create(dir, dentry);
if (error)
return error;
if (!dir->i_op->create)
return -EACCES; /* shouldn't it be ENOSYS? */
mode &= S_IALLUGO;
mode |= S_IFREG;
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, want_excl);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_create);
static int may_open(struct path *path, int acc_mode, int flag)
{
struct dentry *dentry = path->dentry;
struct inode *inode = dentry->d_inode;
int error;
/* O_PATH? */
if (!acc_mode)
return 0;
if (!inode)
return -ENOENT;
switch (inode->i_mode & S_IFMT) {
case S_IFLNK:
return -ELOOP;
case S_IFDIR:
if (acc_mode & MAY_WRITE)
return -EISDIR;
break;
case S_IFBLK:
case S_IFCHR:
if (path->mnt->mnt_flags & MNT_NODEV)
return -EACCES;
/*FALLTHRU*/
case S_IFIFO:
case S_IFSOCK:
flag &= ~O_TRUNC;
break;
}
error = inode_permission(inode, acc_mode);
if (error)
return error;
/*
* An append-only file must be opened in append mode for writing.
*/
if (IS_APPEND(inode)) {
if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
return -EPERM;
if (flag & O_TRUNC)
return -EPERM;
}
/* O_NOATIME can only be set by the owner or superuser */
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
return 0;
}
static int handle_truncate(struct file *filp)
{
struct path *path = &filp->f_path;
struct inode *inode = path->dentry->d_inode;
int error = get_write_access(inode);
if (error)
return error;
/*
* Refuse to truncate files with mandatory locks held on them.
*/
error = locks_verify_locked(filp);
if (!error)
error = security_path_truncate(path);
if (!error) {
error = do_truncate(path->dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
filp);
}
put_write_access(inode);
return error;
}
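/*
 * Descriptive note (interpretation, not from the original file): the
 * low two bits of the open flags use open(2)'s accmode encoding, where
 * 3 is not a valid O_RDONLY/O_WRONLY/O_RDWR combination but has
 * historically been used by some drivers to mean "special" access.
 * Map it down to O_RDWR (2) so the permission checks during lookup
 * treat it as read-write.
 */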
static inline int open_to_namei_flags(int flag)
{
if ((flag & O_ACCMODE) == 3)
flag--;
return flag;
}
static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode)
{
int error = security_path_mknod(dir, dentry, mode, 0);
if (error)
return error;
error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
if (error)
return error;
return security_inode_create(dir->dentry->d_inode, dentry, mode);
}
/*
* Attempt to atomically look up, create and open a file from a negative
* dentry.
*
* Returns 0 if successful. The file will have been created and attached to
* @file by the filesystem calling finish_open().
*
* Returns 1 if the file was looked up only or didn't need creating. The
* caller will need to perform the open themselves. @path will have been
* updated to point to the new dentry. This may be negative.
*
* Returns an error code otherwise.
*/
static int atomic_open(struct nameidata *nd, struct dentry *dentry,
struct path *path, struct file *file,
const struct open_flags *op,
bool got_write, bool need_lookup,
int *opened)
{
struct inode *dir = nd->path.dentry->d_inode;
unsigned open_flag = open_to_namei_flags(op->open_flag);
umode_t mode;
int error;
int acc_mode;
int create_error = 0;
struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
bool excl;
BUG_ON(dentry->d_inode);
/* Don't create child dentry for a dead directory. */
if (unlikely(IS_DEADDIR(dir))) {
error = -ENOENT;
goto out;
}
mode = op->mode;
if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
mode &= ~current_umask();
excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT);
if (excl)
open_flag &= ~O_TRUNC;
/*
	 * Checking write permission is tricky, because we don't know if we are
* going to actually need it: O_CREAT opens should work as long as the
* file exists. But checking existence breaks atomicity. The trick is
* to check access and if not granted clear O_CREAT from the flags.
*
	 * Another problem is returning the "right" error value (e.g. for an
* O_EXCL open we want to return EEXIST not EROFS).
*/
if (((open_flag & (O_CREAT | O_TRUNC)) ||
(open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) {
if (!(open_flag & O_CREAT)) {
/*
				 * No O_CREAT -> atomicity not a requirement -> fall
* back to lookup + open
*/
goto no_open;
} else if (open_flag & (O_EXCL | O_TRUNC)) {
/* Fall back and fail with the right error */
create_error = -EROFS;
goto no_open;
} else {
/* No side effects, safe to clear O_CREAT */
create_error = -EROFS;
open_flag &= ~O_CREAT;
}
}
if (open_flag & O_CREAT) {
error = may_o_create(&nd->path, dentry, mode);
if (error) {
create_error = error;
if (open_flag & O_EXCL)
goto no_open;
open_flag &= ~O_CREAT;
}
}
if (nd->flags & LOOKUP_DIRECTORY)
open_flag |= O_DIRECTORY;
file->f_path.dentry = DENTRY_NOT_SET;
file->f_path.mnt = nd->path.mnt;
error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode,
opened);
if (error < 0) {
if (create_error && error == -ENOENT)
error = create_error;
goto out;
}
if (error) { /* returned 1, that is */
if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
error = -EIO;
goto out;
}
if (file->f_path.dentry) {
dput(dentry);
dentry = file->f_path.dentry;
}
if (*opened & FILE_CREATED)
fsnotify_create(dir, dentry);
if (!dentry->d_inode) {
WARN_ON(*opened & FILE_CREATED);
if (create_error) {
error = create_error;
goto out;
}
} else {
if (excl && !(*opened & FILE_CREATED)) {
error = -EEXIST;
goto out;
}
}
goto looked_up;
}
/*
* We didn't have the inode before the open, so check open permission
* here.
*/
acc_mode = op->acc_mode;
if (*opened & FILE_CREATED) {
WARN_ON(!(open_flag & O_CREAT));
fsnotify_create(dir, dentry);
acc_mode = MAY_OPEN;
}
error = may_open(&file->f_path, acc_mode, open_flag);
if (error)
fput(file);
out:
dput(dentry);
return error;
no_open:
if (need_lookup) {
dentry = lookup_real(dir, dentry, nd->flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
}
if (create_error && !dentry->d_inode) {
error = create_error;
goto out;
}
looked_up:
path->dentry = dentry;
path->mnt = nd->path.mnt;
return 1;
}
/*
* Look up and maybe create and open the last component.
*
* Must be called with i_mutex held on parent.
*
* Returns 0 if the file was successfully atomically created (if necessary) and
* opened. In this case the file will be returned attached to @file.
*
* Returns 1 if the file was not completely opened at this time, though lookups
* and creations will have been performed and the dentry returned in @path will
* be positive upon return if O_CREAT was specified. If O_CREAT wasn't
* specified then a negative dentry may be returned.
*
* An error code is returned otherwise.
*
 * FILE_CREATED will be set in @*opened if the dentry was created and will be
* cleared otherwise prior to returning.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
struct file *file,
const struct open_flags *op,
bool got_write, int *opened)
{
struct dentry *dir = nd->path.dentry;
struct inode *dir_inode = dir->d_inode;
struct dentry *dentry;
int error;
bool need_lookup;
*opened &= ~FILE_CREATED;
dentry = lookup_dcache(&nd->last, dir, nd->flags, &need_lookup);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
/* Cached positive dentry: will open in f_op->open */
if (!need_lookup && dentry->d_inode)
goto out_no_open;
if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) {
return atomic_open(nd, dentry, path, file, op, got_write,
need_lookup, opened);
}
if (need_lookup) {
BUG_ON(dentry->d_inode);
dentry = lookup_real(dir_inode, dentry, nd->flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
}
/* Negative dentry, just create the file */
if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
umode_t mode = op->mode;
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
* This write is needed to ensure that a
* rw->ro transition does not occur between
* the time when the file is created and when
* a permanent write count is taken through
* the 'struct file' in finish_open().
*/
if (!got_write) {
error = -EROFS;
goto out_dput;
}
*opened |= FILE_CREATED;
error = security_path_mknod(&nd->path, dentry, mode, 0);
if (error)
goto out_dput;
error = vfs_create(dir->d_inode, dentry, mode,
nd->flags & LOOKUP_EXCL);
if (error)
goto out_dput;
}
out_no_open:
path->dentry = dentry;
path->mnt = nd->path.mnt;
return 1;
out_dput:
dput(dentry);
return error;
}
/*
* Handle the last step of open()
*/
static int do_last(struct nameidata *nd,
struct file *file, const struct open_flags *op,
int *opened)
{
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
bool will_truncate = (open_flag & O_TRUNC) != 0;
bool got_write = false;
int acc_mode = op->acc_mode;
unsigned seq;
struct inode *inode;
struct path save_parent = { .dentry = NULL, .mnt = NULL };
struct path path;
bool retried = false;
int error;
nd->flags &= ~LOOKUP_PARENT;
nd->flags |= op->intent;
if (nd->last_type != LAST_NORM) {
error = handle_dots(nd, nd->last_type);
if (unlikely(error))
return error;
goto finish_open;
}
if (!(open_flag & O_CREAT)) {
if (nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
/* we _can_ be in RCU mode here */
error = lookup_fast(nd, &path, &inode, &seq);
if (likely(!error))
goto finish_lookup;
if (error < 0)
return error;
BUG_ON(nd->inode != dir->d_inode);
} else {
/* create side of things */
/*
* This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
* has been cleared when we got to the last component we are
* about to look up
*/
error = complete_walk(nd);
if (error)
return error;
audit_inode(nd->name, dir, LOOKUP_PARENT);
/* trailing slashes? */
if (unlikely(nd->last.name[nd->last.len]))
return -EISDIR;
}
retry_lookup:
if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
error = mnt_want_write(nd->path.mnt);
if (!error)
got_write = true;
/*
* do _not_ fail yet - we might not need that or fail with
* a different error; let lookup_open() decide; we'll be
* dropping this one anyway.
*/
}
mutex_lock(&dir->d_inode->i_mutex);
error = lookup_open(nd, &path, file, op, got_write, opened);
mutex_unlock(&dir->d_inode->i_mutex);
if (error <= 0) {
if (error)
goto out;
if ((*opened & FILE_CREATED) ||
!S_ISREG(file_inode(file)->i_mode))
will_truncate = false;
audit_inode(nd->name, file->f_path.dentry, 0);
goto opened;
}
if (*opened & FILE_CREATED) {
/* Don't check for write permission, don't truncate */
open_flag &= ~O_TRUNC;
will_truncate = false;
acc_mode = MAY_OPEN;
path_to_nameidata(&path, nd);
goto finish_open_created;
}
/*
* create/update audit record if it already exists.
*/
if (d_is_positive(path.dentry))
audit_inode(nd->name, path.dentry, 0);
/*
* If atomic_open() acquired write access it is dropped now due to
* possible mount and symlink following (this might be optimized away if
* necessary...)
*/
if (got_write) {
mnt_drop_write(nd->path.mnt);
got_write = false;
}
if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) {
path_to_nameidata(&path, nd);
return -EEXIST;
}
error = follow_managed(&path, nd);
if (unlikely(error < 0))
return error;
BUG_ON(nd->flags & LOOKUP_RCU);
seq = 0; /* out of RCU mode, so the value doesn't matter */
if (unlikely(d_is_negative(path.dentry))) {
path_to_nameidata(&path, nd);
return -ENOENT;
}
inode = d_backing_inode(path.dentry);
finish_lookup:
if (nd->depth)
put_link(nd);
error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW,
inode, seq);
if (unlikely(error))
return error;
if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
path_to_nameidata(&path, nd);
} else {
save_parent.dentry = nd->path.dentry;
save_parent.mnt = mntget(path.mnt);
nd->path.dentry = path.dentry;
}
nd->inode = inode;
nd->seq = seq;
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
finish_open:
error = complete_walk(nd);
if (error) {
path_put(&save_parent);
return error;
}
audit_inode(nd->name, nd->path.dentry, 0);
if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
error = -ELOOP;
goto out;
}
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
error = -ENOTDIR;
if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
goto out;
if (!d_is_reg(nd->path.dentry))
will_truncate = false;
if (will_truncate) {
error = mnt_want_write(nd->path.mnt);
if (error)
goto out;
got_write = true;
}
finish_open_created:
error = may_open(&nd->path, acc_mode, open_flag);
if (error)
goto out;
BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
error = vfs_open(&nd->path, file, current_cred());
if (!error) {
*opened |= FILE_OPENED;
} else {
if (error == -EOPENSTALE)
goto stale_open;
goto out;
}
opened:
error = open_check_o_direct(file);
if (error)
goto exit_fput;
error = ima_file_check(file, op->acc_mode, *opened);
if (error)
goto exit_fput;
if (will_truncate) {
error = handle_truncate(file);
if (error)
goto exit_fput;
}
out:
if (unlikely(error > 0)) {
WARN_ON(1);
error = -EINVAL;
}
if (got_write)
mnt_drop_write(nd->path.mnt);
path_put(&save_parent);
return error;
exit_fput:
fput(file);
goto out;
stale_open:
/* If no saved parent or already retried then can't retry */
if (!save_parent.dentry || retried)
goto out;
BUG_ON(save_parent.dentry != dir);
path_put(&nd->path);
nd->path = save_parent;
nd->inode = dir->d_inode;
save_parent.mnt = NULL;
save_parent.dentry = NULL;
if (got_write) {
mnt_drop_write(nd->path.mnt);
got_write = false;
}
retried = true;
goto retry_lookup;
}
static int do_tmpfile(struct nameidata *nd, unsigned flags,
const struct open_flags *op,
struct file *file, int *opened)
{
static const struct qstr name = QSTR_INIT("/", 1);
struct dentry *child;
struct inode *dir;
struct path path;
int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
if (unlikely(error))
return error;
error = mnt_want_write(path.mnt);
if (unlikely(error))
goto out;
dir = path.dentry->d_inode;
/* we want directory to be writable */
error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
if (error)
goto out2;
if (!dir->i_op->tmpfile) {
error = -EOPNOTSUPP;
goto out2;
}
child = d_alloc(path.dentry, &name);
if (unlikely(!child)) {
error = -ENOMEM;
goto out2;
}
dput(path.dentry);
path.dentry = child;
error = dir->i_op->tmpfile(dir, child, op->mode);
if (error)
goto out2;
audit_inode(nd->name, child, 0);
/* Don't check for other permissions, the inode was just created */
error = may_open(&path, MAY_OPEN, op->open_flag);
if (error)
goto out2;
file->f_path.mnt = path.mnt;
error = finish_open(file, child, NULL, opened);
if (error)
goto out2;
error = open_check_o_direct(file);
if (error) {
fput(file);
} else if (!(op->open_flag & O_EXCL)) {
struct inode *inode = file_inode(file);
spin_lock(&inode->i_lock);
inode->i_state |= I_LINKABLE;
spin_unlock(&inode->i_lock);
}
out2:
mnt_drop_write(path.mnt);
out:
path_put(&path);
return error;
}
static struct file *path_openat(struct nameidata *nd,
const struct open_flags *op, unsigned flags)
{
const char *s;
struct file *file;
int opened = 0;
int error;
file = get_empty_filp();
if (IS_ERR(file))
return file;
file->f_flags = op->open_flag;
if (unlikely(file->f_flags & __O_TMPFILE)) {
error = do_tmpfile(nd, flags, op, file, &opened);
goto out2;
}
s = path_init(nd, flags);
if (IS_ERR(s)) {
put_filp(file);
return ERR_CAST(s);
}
while (!(error = link_path_walk(s, nd)) &&
(error = do_last(nd, file, op, &opened)) > 0) {
nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
s = trailing_symlink(nd);
if (IS_ERR(s)) {
error = PTR_ERR(s);
break;
}
}
terminate_walk(nd);
out2:
if (!(opened & FILE_OPENED)) {
BUG_ON(!error);
put_filp(file);
}
if (unlikely(error)) {
if (error == -EOPENSTALE) {
if (flags & LOOKUP_RCU)
error = -ECHILD;
else
error = -ESTALE;
}
file = ERR_PTR(error);
}
return file;
}
struct file *do_filp_open(int dfd, struct filename *pathname,
const struct open_flags *op)
{
struct nameidata nd;
int flags = op->lookup_flags;
struct file *filp;
set_nameidata(&nd, dfd, pathname);
filp = path_openat(&nd, op, flags | LOOKUP_RCU);
if (unlikely(filp == ERR_PTR(-ECHILD)))
filp = path_openat(&nd, op, flags);
if (unlikely(filp == ERR_PTR(-ESTALE)))
filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
restore_nameidata();
return filp;
}
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
const char *name, const struct open_flags *op)
{
struct nameidata nd;
struct file *file;
struct filename *filename;
int flags = op->lookup_flags | LOOKUP_ROOT;
nd.root.mnt = mnt;
nd.root.dentry = dentry;
if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
return ERR_PTR(-ELOOP);
filename = getname_kernel(name);
if (IS_ERR(filename))
return ERR_CAST(filename);
set_nameidata(&nd, -1, filename);
file = path_openat(&nd, op, flags | LOOKUP_RCU);
if (unlikely(file == ERR_PTR(-ECHILD)))
file = path_openat(&nd, op, flags);
if (unlikely(file == ERR_PTR(-ESTALE)))
file = path_openat(&nd, op, flags | LOOKUP_REVAL);
restore_nameidata();
putname(filename);
return file;
}
static struct dentry *filename_create(int dfd, struct filename *name,
struct path *path, unsigned int lookup_flags)
{
struct dentry *dentry = ERR_PTR(-EEXIST);
struct qstr last;
int type;
int err2;
int error;
bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
/*
* Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
* other flags passed in are ignored!
*/
lookup_flags &= LOOKUP_REVAL;
name = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
if (IS_ERR(name))
return ERR_CAST(name);
/*
* Yucky last component or no last component at all?
* (foo/., foo/.., /////)
*/
if (unlikely(type != LAST_NORM))
goto out;
/* don't fail immediately if it's r/o, at least try to report other errors */
err2 = mnt_want_write(path->mnt);
/*
* Do the final lookup.
*/
lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
mutex_lock_nested(&path->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = __lookup_hash(&last, path->dentry, lookup_flags);
if (IS_ERR(dentry))
goto unlock;
error = -EEXIST;
if (d_is_positive(dentry))
goto fail;
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
* all is fine. Let's be bastards - you had / on the end, you've
* been asking for (non-existent) directory. -ENOENT for you.
*/
if (unlikely(!is_dir && last.name[last.len])) {
error = -ENOENT;
goto fail;
}
if (unlikely(err2)) {
error = err2;
goto fail;
}
putname(name);
return dentry;
fail:
dput(dentry);
dentry = ERR_PTR(error);
unlock:
mutex_unlock(&path->dentry->d_inode->i_mutex);
if (!err2)
mnt_drop_write(path->mnt);
out:
path_put(path);
putname(name);
return dentry;
}
struct dentry *kern_path_create(int dfd, const char *pathname,
struct path *path, unsigned int lookup_flags)
{
return filename_create(dfd, getname_kernel(pathname),
path, lookup_flags);
}
EXPORT_SYMBOL(kern_path_create);
void done_path_create(struct path *path, struct dentry *dentry)
{
dput(dentry);
mutex_unlock(&path->dentry->d_inode->i_mutex);
mnt_drop_write(path->mnt);
path_put(path);
}
EXPORT_SYMBOL(done_path_create);
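/*
 * Example (illustrative sketch; the pathname is made up): the usual
 * create-side pattern.  kern_path_create() resolves the parent, takes
 * write access on the mount and returns the still-negative child
 * dentry with the parent's i_mutex held; done_path_create() undoes all
 * of that in one call.
 *
 *	struct path path;
 *	struct dentry *dentry = kern_path_create(AT_FDCWD, "/tmp/node",
 *						 &path, 0);
 *	if (!IS_ERR(dentry)) {
 *		err = vfs_mknod(path.dentry->d_inode, dentry, mode, dev);
 *		done_path_create(&path, dentry);
 *	}
 */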
inline struct dentry *user_path_create(int dfd, const char __user *pathname,
struct path *path, unsigned int lookup_flags)
{
return filename_create(dfd, getname(pathname), path, lookup_flags);
}
EXPORT_SYMBOL(user_path_create);
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
if (error)
return error;
if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
return -EPERM;
if (!dir->i_op->mknod)
return -EPERM;
error = devcgroup_inode_mknod(mode, dev);
if (error)
return error;
error = security_inode_mknod(dir, dentry, mode, dev);
if (error)
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_mknod);
static int may_mknod(umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFREG:
case S_IFCHR:
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
case 0: /* zero mode translates to S_IFREG */
return 0;
case S_IFDIR:
return -EPERM;
default:
return -EINVAL;
}
}
SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
unsigned, dev)
{
struct dentry *dentry;
struct path path;
int error;
unsigned int lookup_flags = 0;
error = may_mknod(mode);
if (error)
return error;
retry:
dentry = user_path_create(dfd, filename, &path, lookup_flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
switch (mode & S_IFMT) {
case 0: case S_IFREG:
error = vfs_create(path.dentry->d_inode,dentry,mode,true);
break;
case S_IFCHR: case S_IFBLK:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,
new_decode_dev(dev));
break;
case S_IFIFO: case S_IFSOCK:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
break;
}
out:
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
}
SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
{
return sys_mknodat(AT_FDCWD, filename, mode, dev);
}
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int error = may_create(dir, dentry);
unsigned max_links = dir->i_sb->s_max_links;
if (error)
return error;
if (!dir->i_op->mkdir)
return -EPERM;
mode &= (S_IRWXUGO|S_ISVTX);
error = security_inode_mkdir(dir, dentry, mode);
if (error)
return error;
if (max_links && dir->i_nlink >= max_links)
return -EMLINK;
error = dir->i_op->mkdir(dir, dentry, mode);
if (!error)
fsnotify_mkdir(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_mkdir);
SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
struct dentry *dentry;
struct path path;
int error;
unsigned int lookup_flags = LOOKUP_DIRECTORY;
retry:
dentry = user_path_create(dfd, pathname, &path, lookup_flags);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
error = security_path_mkdir(&path, dentry, mode);
if (!error)
error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
}
SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
{
return sys_mkdirat(AT_FDCWD, pathname, mode);
}
/*
* The dentry_unhash() helper will try to drop the dentry early: we
* should have a usage count of 1 if we're the only user of this
* dentry, and if that is true (possibly after pruning the dcache),
* then we drop the dentry now.
*
 * A low-level filesystem can, if it chooses, legally
* do a
*
* if (!d_unhashed(dentry))
* return -EBUSY;
*
* if it cannot handle the case of removing a directory
 * that is still in use by something else.
*/
void dentry_unhash(struct dentry *dentry)
{
shrink_dcache_parent(dentry);
spin_lock(&dentry->d_lock);
if (dentry->d_lockref.count == 1)
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_unhash);
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
int error = may_delete(dir, dentry, 1);
if (error)
return error;
if (!dir->i_op->rmdir)
return -EPERM;
dget(dentry);
mutex_lock(&dentry->d_inode->i_mutex);
error = -EBUSY;
if (is_local_mountpoint(dentry))
goto out;
error = security_inode_rmdir(dir, dentry);
if (error)
goto out;
shrink_dcache_parent(dentry);
error = dir->i_op->rmdir(dir, dentry);
if (error)
goto out;
dentry->d_inode->i_flags |= S_DEAD;
dont_mount(dentry);
detach_mounts(dentry);
out:
mutex_unlock(&dentry->d_inode->i_mutex);
dput(dentry);
if (!error)
d_delete(dentry);
return error;
}
EXPORT_SYMBOL(vfs_rmdir);
static long do_rmdir(int dfd, const char __user *pathname)
{
int error = 0;
struct filename *name;
struct dentry *dentry;
struct path path;
struct qstr last;
int type;
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname,
&path, &last, &type, lookup_flags);
if (IS_ERR(name))
return PTR_ERR(name);
switch (type) {
case LAST_DOTDOT:
error = -ENOTEMPTY;
goto exit1;
case LAST_DOT:
error = -EINVAL;
goto exit1;
case LAST_ROOT:
error = -EBUSY;
goto exit1;
}
error = mnt_want_write(path.mnt);
if (error)
goto exit1;
mutex_lock_nested(&path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = __lookup_hash(&last, path.dentry, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit2;
if (!dentry->d_inode) {
error = -ENOENT;
goto exit3;
}
error = security_path_rmdir(&path, dentry);
if (error)
goto exit3;
error = vfs_rmdir(path.dentry->d_inode, dentry);
exit3:
dput(dentry);
exit2:
mutex_unlock(&path.dentry->d_inode->i_mutex);
mnt_drop_write(path.mnt);
exit1:
path_put(&path);
putname(name);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
}
SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
return do_rmdir(AT_FDCWD, pathname);
}
/**
* vfs_unlink - unlink a filesystem object
* @dir: parent directory
* @dentry: victim
* @delegated_inode: returns victim inode, if the inode is delegated.
*
* The caller must hold dir->i_mutex.
*
* If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
* return a reference to the inode in delegated_inode. The caller
* should then break the delegation on that inode and retry. Because
* breaking a delegation may take a long time, the caller should drop
* dir->i_mutex before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*/
int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode)
{
struct inode *target = dentry->d_inode;
int error = may_delete(dir, dentry, 0);
if (error)
return error;
if (!dir->i_op->unlink)
return -EPERM;
mutex_lock(&target->i_mutex);
if (is_local_mountpoint(dentry))
error = -EBUSY;
else {
error = security_inode_unlink(dir, dentry);
if (!error) {
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
error = dir->i_op->unlink(dir, dentry);
if (!error) {
dont_mount(dentry);
detach_mounts(dentry);
}
}
}
out:
mutex_unlock(&target->i_mutex);
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
fsnotify_link_count(target);
d_delete(dentry);
}
return error;
}
EXPORT_SYMBOL(vfs_unlink);
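/*
 * A minimal sketch of the delegation retry protocol described above
 * ('dir' and 'dentry' stand in for a parent inode and victim dentry the
 * caller holds; re-lookup of the victim after dropping the lock is
 * omitted for brevity -- do_unlinkat() below is the real in-tree caller):
 *
 *	struct inode *delegated_inode = NULL;
 *	int error;
 * retry:
 *	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
 *	error = vfs_unlink(dir, dentry, &delegated_inode);
 *	mutex_unlock(&dir->i_mutex);
 *	if (delegated_inode) {
 *		error = break_deleg_wait(&delegated_inode);
 *		if (!error)
 *			goto retry;
 *	}
 */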
/*
* Make sure that the actual truncation of the file will occur outside its
* directory's i_mutex. Truncate can take a long time if there is a lot of
* writeout happening, and we don't want to prevent access to the directory
* while waiting on the I/O.
*/
static long do_unlinkat(int dfd, const char __user *pathname)
{
int error;
struct filename *name;
struct dentry *dentry;
struct path path;
struct qstr last;
int type;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname,
&path, &last, &type, lookup_flags);
if (IS_ERR(name))
return PTR_ERR(name);
error = -EISDIR;
if (type != LAST_NORM)
goto exit1;
error = mnt_want_write(path.mnt);
if (error)
goto exit1;
retry_deleg:
mutex_lock_nested(&path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = __lookup_hash(&last, path.dentry, lookup_flags);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct error value */
if (last.name[last.len])
goto slashes;
inode = dentry->d_inode;
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
error = security_path_unlink(&path, dentry);
if (error)
goto exit2;
error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode);
exit2:
dput(dentry);
}
mutex_unlock(&path.dentry->d_inode->i_mutex);
if (inode)
iput(inode); /* truncate the inode here */
inode = NULL;
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
mnt_drop_write(path.mnt);
exit1:
path_put(&path);
putname(name);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
inode = NULL;
goto retry;
}
return error;
slashes:
if (d_is_negative(dentry))
error = -ENOENT;
else if (d_is_dir(dentry))
error = -EISDIR;
else
error = -ENOTDIR;
goto exit2;
}
SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
{
if ((flag & ~AT_REMOVEDIR) != 0)
return -EINVAL;
if (flag & AT_REMOVEDIR)
return do_rmdir(dfd, pathname);
return do_unlinkat(dfd, pathname);
}
SYSCALL_DEFINE1(unlink, const char __user *, pathname)
{
return do_unlinkat(AT_FDCWD, pathname);
}
int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
{
int error = may_create(dir, dentry);
if (error)
return error;
if (!dir->i_op->symlink)
return -EPERM;
error = security_inode_symlink(dir, dentry, oldname);
if (error)
return error;
error = dir->i_op->symlink(dir, dentry, oldname);
if (!error)
fsnotify_create(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_symlink);
SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
int, newdfd, const char __user *, newname)
{
int error;
struct filename *from;
struct dentry *dentry;
struct path path;
unsigned int lookup_flags = 0;
from = getname(oldname);
if (IS_ERR(from))
return PTR_ERR(from);
retry:
dentry = user_path_create(newdfd, newname, &path, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_putname;
error = security_path_symlink(&path, dentry, from->name);
if (!error)
error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
out_putname:
putname(from);
return error;
}
SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
{
return sys_symlinkat(oldname, AT_FDCWD, newname);
}
/**
* vfs_link - create a new link
* @old_dentry: object to be linked
* @dir: new parent
* @new_dentry: where to create the new link
* @delegated_inode: returns inode needing a delegation break
*
* The caller must hold dir->i_mutex
*
* If vfs_link discovers a delegation on the to-be-linked file in need
* of breaking, it will return -EWOULDBLOCK and return a reference to the
* inode in delegated_inode. The caller should then break the delegation
* and retry. Because breaking a delegation may take a long time, the
* caller should drop the i_mutex before doing so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*/
int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode)
{
struct inode *inode = old_dentry->d_inode;
unsigned max_links = dir->i_sb->s_max_links;
int error;
if (!inode)
return -ENOENT;
error = may_create(dir, new_dentry);
if (error)
return error;
if (dir->i_sb != inode->i_sb)
return -EXDEV;
/*
* A link to an append-only or immutable file cannot be created.
*/
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return -EPERM;
if (!dir->i_op->link)
return -EPERM;
if (S_ISDIR(inode->i_mode))
return -EPERM;
error = security_inode_link(old_dentry, dir, new_dentry);
if (error)
return error;
mutex_lock(&inode->i_mutex);
/* Make sure we don't allow creating hardlink to an unlinked file */
if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE))
error = -ENOENT;
else if (max_links && inode->i_nlink >= max_links)
error = -EMLINK;
else {
error = try_break_deleg(inode, delegated_inode);
if (!error)
error = dir->i_op->link(old_dentry, dir, new_dentry);
}
if (!error && (inode->i_state & I_LINKABLE)) {
spin_lock(&inode->i_lock);
inode->i_state &= ~I_LINKABLE;
spin_unlock(&inode->i_lock);
}
mutex_unlock(&inode->i_mutex);
if (!error)
fsnotify_link(dir, inode, new_dentry);
return error;
}
EXPORT_SYMBOL(vfs_link);
/*
* Hardlinks are often used in delicate situations. We avoid
* security-related surprises by not following symlinks on the
* newname. --KAB
*
* We don't follow them on the oldname either to be compatible
 * with Linux 2.0, and to avoid hard-linking to directories
* and other special files. --ADM
*/
SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname, int, flags)
{
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
int how = 0;
int error;
if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
return -EINVAL;
/*
 * To use null names we require CAP_DAC_READ_SEARCH.
 * This ensures that not everyone will be able to create
 * a hardlink using the passed file descriptor.
*/
if (flags & AT_EMPTY_PATH) {
if (!capable(CAP_DAC_READ_SEARCH))
return -ENOENT;
how = LOOKUP_EMPTY;
}
if (flags & AT_SYMLINK_FOLLOW)
how |= LOOKUP_FOLLOW;
retry:
error = user_path_at(olddfd, oldname, how, &old_path);
if (error)
return error;
new_dentry = user_path_create(newdfd, newname, &new_path,
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out;
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto out_dput;
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
error = security_path_link(old_path.dentry, &new_path, new_dentry);
if (error)
goto out_dput;
error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
out_dput:
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error) {
path_put(&old_path);
goto retry;
}
}
if (retry_estale(error, how)) {
path_put(&old_path);
how |= LOOKUP_REVAL;
goto retry;
}
out:
path_put(&old_path);
return error;
}
SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
{
return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}
/**
* vfs_rename - rename a filesystem object
* @old_dir: parent of source
* @old_dentry: source
* @new_dir: parent of destination
* @new_dentry: destination
* @delegated_inode: returns an inode needing a delegation break
* @flags: rename flags
*
 * The caller must hold multiple mutexes--see lock_rename().
*
* If vfs_rename discovers a delegation in need of breaking at either
* the source or destination, it will return -EWOULDBLOCK and return a
* reference to the inode in delegated_inode. The caller should then
* break the delegation and retry. Because breaking a delegation may
* take a long time, the caller should drop all locks before doing
* so.
*
* Alternatively, a caller may pass NULL for delegated_inode. This may
* be appropriate for callers that expect the underlying filesystem not
* to be NFS exported.
*
 * The worst of all namespace operations - renaming a directory. "Perverted"
* doesn't even start to describe it. Somebody in UCB had a heck of a trip...
* Problems:
* a) we can get into loop creation.
* b) race potential - two innocent renames can create a loop together.
* That's where 4.4 screws up. Current fix: serialization on
* sb->s_vfs_rename_mutex. We might be more accurate, but that's another
* story.
* c) we have to lock _four_ objects - parents and victim (if it exists),
* and source (if it is not a directory).
* And that - after we got ->i_mutex on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
* only under ->s_vfs_rename_mutex _and_ that parent of the object we
* move will be locked. Thus we can rank directories by the tree
* (ancestors first) and rank all non-directories after them.
* That works since everybody except rename does "lock parent, lookup,
* lock child" and rename is under ->s_vfs_rename_mutex.
* HOWEVER, it relies on the assumption that any object with ->lookup()
 * has no more than 1 dentry. If "hybrid" objects ever appear,
* we'd better make sure that there's no link(2) for them.
* d) conversion from fhandle to dentry may come in the wrong moment - when
* we are removing the target. Solution: we will have to grab ->i_mutex
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
* ->i_mutex on parents, which works but leads to some truly excessive
* locking].
*/
int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
struct inode **delegated_inode, unsigned int flags)
{
int error;
bool is_dir = d_is_dir(old_dentry);
const unsigned char *old_name;
struct inode *source = old_dentry->d_inode;
struct inode *target = new_dentry->d_inode;
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
/*
* Check source == target.
* On overlayfs need to look at underlying inodes.
*/
if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
return 0;
error = may_delete(old_dir, old_dentry, is_dir);
if (error)
return error;
if (!target) {
error = may_create(new_dir, new_dentry);
} else {
new_is_dir = d_is_dir(new_dentry);
if (!(flags & RENAME_EXCHANGE))
error = may_delete(new_dir, new_dentry, is_dir);
else
error = may_delete(new_dir, new_dentry, new_is_dir);
}
if (error)
return error;
if (!old_dir->i_op->rename && !old_dir->i_op->rename2)
return -EPERM;
if (flags && !old_dir->i_op->rename2)
return -EINVAL;
/*
* If we are going to change the parent - check write permissions,
* we'll need to flip '..'.
*/
if (new_dir != old_dir) {
if (is_dir) {
error = inode_permission(source, MAY_WRITE);
if (error)
return error;
}
if ((flags & RENAME_EXCHANGE) && new_is_dir) {
error = inode_permission(target, MAY_WRITE);
if (error)
return error;
}
}
error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry,
flags);
if (error)
return error;
old_name = fsnotify_oldname_init(old_dentry->d_name.name);
dget(new_dentry);
if (!is_dir || (flags & RENAME_EXCHANGE))
lock_two_nondirectories(source, target);
else if (target)
mutex_lock(&target->i_mutex);
error = -EBUSY;
if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry))
goto out;
if (max_links && new_dir != old_dir) {
error = -EMLINK;
if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links)
goto out;
if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir &&
old_dir->i_nlink >= max_links)
goto out;
}
if (is_dir && !(flags & RENAME_EXCHANGE) && target)
shrink_dcache_parent(new_dentry);
if (!is_dir) {
error = try_break_deleg(source, delegated_inode);
if (error)
goto out;
}
if (target && !new_is_dir) {
error = try_break_deleg(target, delegated_inode);
if (error)
goto out;
}
if (!old_dir->i_op->rename2) {
error = old_dir->i_op->rename(old_dir, old_dentry,
new_dir, new_dentry);
} else {
WARN_ON(old_dir->i_op->rename != NULL);
error = old_dir->i_op->rename2(old_dir, old_dentry,
new_dir, new_dentry, flags);
}
if (error)
goto out;
if (!(flags & RENAME_EXCHANGE) && target) {
if (is_dir)
target->i_flags |= S_DEAD;
dont_mount(new_dentry);
detach_mounts(new_dentry);
}
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) {
if (!(flags & RENAME_EXCHANGE))
d_move(old_dentry, new_dentry);
else
d_exchange(old_dentry, new_dentry);
}
out:
if (!is_dir || (flags & RENAME_EXCHANGE))
unlock_two_nondirectories(source, target);
else if (target)
mutex_unlock(&target->i_mutex);
dput(new_dentry);
if (!error) {
fsnotify_move(old_dir, new_dir, old_name, is_dir,
!(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
if (flags & RENAME_EXCHANGE) {
fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
new_is_dir, NULL, new_dentry);
}
}
fsnotify_oldname_free(old_name);
return error;
}
EXPORT_SYMBOL(vfs_rename);
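/*
 * A minimal sketch of the caller protocol described above (error
 * handling and trailing-slash checks omitted; sys_renameat2() below is
 * the real in-tree caller):
 *
 *	trap = lock_rename(new_parent, old_parent);
 *	old_dentry = __lookup_hash(&old_last, old_parent, 0);
 *	new_dentry = __lookup_hash(&new_last, new_parent, 0);
 *	if (old_dentry != trap && new_dentry != trap)
 *		error = vfs_rename(old_parent->d_inode, old_dentry,
 *				   new_parent->d_inode, new_dentry,
 *				   NULL, 0);
 *	unlock_rename(new_parent, old_parent);
 *
 * where 'trap' is the common ancestor returned by lock_rename(); a
 * lookup landing on it means the source would be an ancestor of the
 * target (or vice versa) and the rename must be refused.
 */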
SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname, unsigned int, flags)
{
struct dentry *old_dentry, *new_dentry;
struct dentry *trap;
struct path old_path, new_path;
struct qstr old_last, new_last;
int old_type, new_type;
struct inode *delegated_inode = NULL;
struct filename *from;
struct filename *to;
unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET;
bool should_retry = false;
int error;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) &&
(flags & RENAME_EXCHANGE))
return -EINVAL;
if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD))
return -EPERM;
if (flags & RENAME_EXCHANGE)
target_flags = 0;
retry:
from = user_path_parent(olddfd, oldname,
&old_path, &old_last, &old_type, lookup_flags);
if (IS_ERR(from)) {
error = PTR_ERR(from);
goto exit;
}
to = user_path_parent(newdfd, newname,
&new_path, &new_last, &new_type, lookup_flags);
if (IS_ERR(to)) {
error = PTR_ERR(to);
goto exit1;
}
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto exit2;
error = -EBUSY;
if (old_type != LAST_NORM)
goto exit2;
if (flags & RENAME_NOREPLACE)
error = -EEXIST;
if (new_type != LAST_NORM)
goto exit2;
error = mnt_want_write(old_path.mnt);
if (error)
goto exit2;
retry_deleg:
trap = lock_rename(new_path.dentry, old_path.dentry);
old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags);
error = PTR_ERR(old_dentry);
if (IS_ERR(old_dentry))
goto exit3;
/* source must exist */
error = -ENOENT;
if (d_is_negative(old_dentry))
goto exit4;
new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto exit4;
error = -EEXIST;
if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry))
goto exit5;
if (flags & RENAME_EXCHANGE) {
error = -ENOENT;
if (d_is_negative(new_dentry))
goto exit5;
if (!d_is_dir(new_dentry)) {
error = -ENOTDIR;
if (new_last.name[new_last.len])
goto exit5;
}
}
/* unless the source is a directory trailing slashes give -ENOTDIR */
if (!d_is_dir(old_dentry)) {
error = -ENOTDIR;
if (old_last.name[old_last.len])
goto exit5;
if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len])
goto exit5;
}
/* source should not be ancestor of target */
error = -EINVAL;
if (old_dentry == trap)
goto exit5;
/* target should not be an ancestor of source */
if (!(flags & RENAME_EXCHANGE))
error = -ENOTEMPTY;
if (new_dentry == trap)
goto exit5;
error = security_path_rename(&old_path, old_dentry,
&new_path, new_dentry, flags);
if (error)
goto exit5;
error = vfs_rename(old_path.dentry->d_inode, old_dentry,
new_path.dentry->d_inode, new_dentry,
&delegated_inode, flags);
exit5:
dput(new_dentry);
exit4:
dput(old_dentry);
exit3:
unlock_rename(new_path.dentry, old_path.dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
if (!error)
goto retry_deleg;
}
mnt_drop_write(old_path.mnt);
exit2:
if (retry_estale(error, lookup_flags))
should_retry = true;
path_put(&new_path);
putname(to);
exit1:
path_put(&old_path);
putname(from);
if (should_retry) {
should_retry = false;
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
exit:
return error;
}
SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
int, newdfd, const char __user *, newname)
{
return sys_renameat2(olddfd, oldname, newdfd, newname, 0);
}
SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}
int vfs_whiteout(struct inode *dir, struct dentry *dentry)
{
int error = may_create(dir, dentry);
if (error)
return error;
if (!dir->i_op->mknod)
return -EPERM;
return dir->i_op->mknod(dir, dentry,
S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
}
EXPORT_SYMBOL(vfs_whiteout);
int readlink_copy(char __user *buffer, int buflen, const char *link)
{
int len = PTR_ERR(link);
if (IS_ERR(link))
goto out;
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
if (copy_to_user(buffer, link, len))
len = -EFAULT;
out:
return len;
}
EXPORT_SYMBOL(readlink_copy);
/*
* A helper for ->readlink(). This should be used *ONLY* for symlinks that
* have ->follow_link() touching nd only in nd_set_link(). Using (or not
 * using) it for any given inode is up to the filesystem.
*/
int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
void *cookie;
struct inode *inode = d_inode(dentry);
const char *link = inode->i_link;
int res;
if (!link) {
link = inode->i_op->follow_link(dentry, &cookie);
if (IS_ERR(link))
return PTR_ERR(link);
}
res = readlink_copy(buffer, buflen, link);
if (inode->i_op->put_link)
inode->i_op->put_link(inode, cookie);
return res;
}
EXPORT_SYMBOL(generic_readlink);
/* get the link contents into pagecache */
static char *page_getlink(struct dentry * dentry, struct page **ppage)
{
char *kaddr;
struct page *page;
struct address_space *mapping = dentry->d_inode->i_mapping;
page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
return (char*)page;
*ppage = page;
kaddr = kmap(page);
nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1);
return kaddr;
}
int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
struct page *page = NULL;
int res = readlink_copy(buffer, buflen, page_getlink(dentry, &page));
if (page) {
kunmap(page);
page_cache_release(page);
}
return res;
}
EXPORT_SYMBOL(page_readlink);
const char *page_follow_link_light(struct dentry *dentry, void **cookie)
{
struct page *page = NULL;
char *res = page_getlink(dentry, &page);
if (!IS_ERR(res))
*cookie = page;
return res;
}
EXPORT_SYMBOL(page_follow_link_light);
void page_put_link(struct inode *unused, void *cookie)
{
struct page *page = cookie;
kunmap(page);
page_cache_release(page);
}
EXPORT_SYMBOL(page_put_link);
/*
* The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
*/
int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
int err;
char *kaddr;
unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
if (nofs)
flags |= AOP_FLAG_NOFS;
retry:
err = pagecache_write_begin(NULL, mapping, 0, len-1,
flags, &page, &fsdata);
if (err)
goto fail;
kaddr = kmap_atomic(page);
memcpy(kaddr, symname, len-1);
kunmap_atomic(kaddr);
err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
page, fsdata);
if (err < 0)
goto fail;
if (err < len-1)
goto retry;
mark_inode_dirty(inode);
return 0;
fail:
return err;
}
EXPORT_SYMBOL(__page_symlink);
int page_symlink(struct inode *inode, const char *symname, int len)
{
return __page_symlink(inode, symname, len,
!mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
}
EXPORT_SYMBOL(page_symlink);
const struct inode_operations page_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
};
EXPORT_SYMBOL(page_symlink_inode_operations);
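/*
 * A sketch of how a simple disk filesystem might wire the above
 * together in its ->symlink() method ('foofs_*' names are hypothetical):
 *
 *	static int foofs_symlink(struct inode *dir, struct dentry *dentry,
 *				 const char *symname)
 *	{
 *		struct inode *inode = foofs_new_inode(dir, S_IFLNK | 0777);
 *		int err;
 *
 *		inode->i_op = &page_symlink_inode_operations;
 *		inode->i_mapping->a_ops = &foofs_aops;
 *		err = page_symlink(inode, symname, strlen(symname) + 1);
 *		if (err)
 *			return err;
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */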
| gpl-2.0 |
FreeOptimusProject/android_kernel_lge_p970 | kernel/sched.c | 222998 | /*
* kernel/sched.c
*
* Kernel scheduler and related syscalls
*
* Copyright (C) 1991-2002 Linus Torvalds
*
* 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
* make semaphores SMP safe
* 1998-11-19 Implemented schedule_timeout() and related stuff
* by Andrea Arcangeli
* 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
* hybrid priority-list and round-robin design with
* an array-switch method of distributing timeslices
* and per-CPU runqueues. Cleanups and useful suggestions
* by Davide Libenzi, preemptible kernel bits by Robert Love.
* 2003-09-03 Interactivity tuning by Con Kolivas.
* 2004-04-02 Scheduler domains code by Nick Piggin
* 2007-04-15 Work begun on replacing all interactivity tuning with a
* fair scheduling design by Con Kolivas.
* 2007-05-05 Load balancing (smp-nice) and other improvements
* by Peter Williams
* 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
* 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
* 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
* Thomas Gleixner, Mike Kravetz
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/cpuacct.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include "sched_cpupri.h"
#ifdef CONFIG_LGE_DVFS
#include <asm/current.h>
#include <linux/dvs_suite.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#endif // CONFIG_LGE_DVFS
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
* and back.
*/
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
/*
* 'User priority' is the nice value converted to something we
* can work with better when scaling various scheduler parameters,
* it's a [ 0 ... 39 ] range.
*/
#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
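/*
 * Worked example of the conversions above, assuming the usual
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140 (defined elsewhere):
 *
 *	NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120,
 *	NICE_TO_PRIO(19) == 139, PRIO_TO_NICE(120) == 0,
 *	USER_PRIO(100) == 0 and MAX_USER_PRIO == 40.
 */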
/*
* Helpers for converting nanosecond timing to jiffy resolution
*/
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
#define NICE_0_LOAD SCHED_LOAD_SCALE
#define NICE_0_SHIFT SCHED_LOAD_SHIFT
/*
* These are the 'tuning knobs' of the scheduler:
*
* default timeslice is 100 msecs (used only for SCHED_RR tasks).
* Timeslices get refilled after they expire.
*/
#define DEF_TIMESLICE (100 * HZ / 1000)
/*
* single value that denotes runtime == period, ie unlimited time.
*/
#define RUNTIME_INF ((u64)~0ULL)
static inline int rt_policy(int policy)
{
if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
return 1;
return 0;
}
static inline int task_has_rt_policy(struct task_struct *p)
{
return rt_policy(p->policy);
}
/*
* This is the priority-queue data structure of the RT scheduling class:
*/
struct rt_prio_array {
DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
struct list_head queue[MAX_RT_PRIO];
};
struct rt_bandwidth {
/* nests inside the rq lock: */
raw_spinlock_t rt_runtime_lock;
ktime_t rt_period;
u64 rt_runtime;
struct hrtimer rt_period_timer;
};
static struct rt_bandwidth def_rt_bandwidth;
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
struct rt_bandwidth *rt_b =
container_of(timer, struct rt_bandwidth, rt_period_timer);
ktime_t now;
int overrun;
int idle = 0;
for (;;) {
now = hrtimer_cb_get_time(timer);
overrun = hrtimer_forward(timer, now, rt_b->rt_period);
if (!overrun)
break;
idle = do_sched_rt_period_timer(rt_b, overrun);
}
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
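/*
 * At boot, sched_init() initializes the default bandwidth pool from the
 * sysctl-derived values (see global_rt_period()/global_rt_runtime()
 * below), equivalent to:
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  global_rt_period(), global_rt_runtime());
 */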
static inline int rt_bandwidth_enabled(void)
{
return sysctl_sched_rt_runtime >= 0;
}
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
ktime_t now;
if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return;
if (hrtimer_active(&rt_b->rt_period_timer))
return;
raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
unsigned long delta;
ktime_t soft, hard;
if (hrtimer_active(&rt_b->rt_period_timer))
break;
now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
hard = hrtimer_get_expires(&rt_b->rt_period_timer);
delta = ktime_to_ns(ktime_sub(hard, soft));
__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
HRTIMER_MODE_ABS_PINNED, 0);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif
/*
* sched_domains_mutex serializes calls to arch_init_sched_domains,
* detach_destroy_domains and partition_sched_domains.
*/
static DEFINE_MUTEX(sched_domains_mutex);
#ifdef CONFIG_CGROUP_SCHED
#include <linux/cgroup.h>
struct cfs_rq;
static LIST_HEAD(task_groups);
/* task group related information */
struct task_group {
struct cgroup_subsys_state css;
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */
struct sched_entity **se;
/* runqueue "owned" by this group on each cpu */
struct cfs_rq **cfs_rq;
unsigned long shares;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
struct sched_rt_entity **rt_se;
struct rt_rq **rt_rq;
struct rt_bandwidth rt_bandwidth;
#endif
struct rcu_head rcu;
struct list_head list;
struct task_group *parent;
struct list_head siblings;
struct list_head children;
};
#define root_task_group init_task_group
/* task_group_lock serializes add/remove of task groups and also changes to
* a task group's cpu shares.
*/
static DEFINE_SPINLOCK(task_group_lock);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
static int root_task_group_empty(void)
{
return list_empty(&root_task_group.children);
}
#endif
# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
* (The default weight is 1024 - so there's no practical
* limitation from this.)
*/
#define MIN_SHARES 2
#define MAX_SHARES (1UL << 18)
static int init_task_group_load = INIT_TASK_GROUP_LOAD;
#endif
/* Default task group.
 * Every task in the system belongs to this group at bootup.
*/
struct task_group init_task_group;
#endif /* CONFIG_CGROUP_SCHED */
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
unsigned long nr_running;
u64 exec_clock;
u64 min_vruntime;
struct rb_root tasks_timeline;
struct rb_node *rb_leftmost;
struct list_head tasks;
struct list_head *balance_iterator;
/*
* 'curr' points to currently running entity on this cfs_rq.
* It is set to NULL otherwise (i.e when none are currently running).
*/
struct sched_entity *curr, *next, *last;
unsigned int nr_spread_over;
#ifdef CONFIG_FAIR_GROUP_SCHED
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
/*
* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities
* (like users, containers etc.)
*
* leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
* list is used during load balance.
*/
struct list_head leaf_cfs_rq_list;
struct task_group *tg; /* group that "owns" this runqueue */
#ifdef CONFIG_SMP
/*
* the part of load.weight contributed by tasks
*/
unsigned long task_weight;
/*
* h_load = weight * f(tg)
*
* Where f(tg) is the recursive weight fraction assigned to
* this group.
*/
unsigned long h_load;
/*
* this cpu's part of tg->shares
*/
unsigned long shares;
/*
* load.weight at the time we set shares
*/
unsigned long rq_weight;
#endif
#endif
};
/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
struct rt_prio_array active;
unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
struct {
int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
int next; /* next highest */
#endif
} highest_prio;
#endif
#ifdef CONFIG_SMP
unsigned long rt_nr_migratory;
unsigned long rt_nr_total;
int overloaded;
struct plist_head pushable_tasks;
#endif
int rt_throttled;
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
raw_spinlock_t rt_runtime_lock;
#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;
struct rq *rq;
struct list_head leaf_rt_rq_list;
struct task_group *tg;
#endif
};
#ifdef CONFIG_SMP
/*
* We add the notion of a root-domain which will be used to define per-domain
* variables. Each exclusive cpuset essentially defines an island domain by
* fully partitioning the member cpus from any other cpuset. Whenever a new
* exclusive cpuset is created, we also create and attach a new root-domain
* object.
*
*/
struct root_domain {
atomic_t refcount;
cpumask_var_t span;
cpumask_var_t online;
/*
* The "RT overload" flag: it gets set if a CPU has more than
* one runnable RT task.
*/
cpumask_var_t rto_mask;
atomic_t rto_count;
#ifdef CONFIG_SMP
struct cpupri cpupri;
#endif
};
/*
* By default the system creates a single root-domain with all cpus as
* members (mimicking the global state we have today).
*/
static struct root_domain def_root_domain;
#endif
/*
* This is the main, per-CPU runqueue data structure.
*
 * Locking rule: in places that need to lock multiple runqueues
 * (such as the load balancing or the thread migration code), the
 * lock acquire operations must be ordered by ascending runqueue
 * address.
*/
struct rq {
/* runqueue lock: */
raw_spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
* remote CPUs use both these fields when doing load calculation.
*/
unsigned long nr_running;
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ
u64 nohz_stamp;
unsigned char in_nohz_recently;
#endif
unsigned int skip_clock_update;
/* capture load from *all* tasks on this cpu: */
struct load_weight load;
unsigned long nr_load_updates;
u64 nr_switches;
struct cfs_rq cfs;
struct rt_rq rt;
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
struct list_head leaf_rt_rq_list;
#endif
/*
* This is part of a global counter where only the total sum
* over all CPUs matters. A task can increase this counter on
* one CPU and if it got migrated afterwards it may decrease
* it on another CPU. Always updated under the runqueue lock:
*/
unsigned long nr_uninterruptible;
struct task_struct *curr, *idle;
unsigned long next_balance;
struct mm_struct *prev_mm;
u64 clock;
atomic_t nr_iowait;
#ifdef CONFIG_SMP
struct root_domain *rd;
struct sched_domain *sd;
unsigned long cpu_power;
unsigned char idle_at_tick;
/* For active balancing */
int post_schedule;
int active_balance;
int push_cpu;
struct cpu_stop_work active_balance_work;
/* cpu of this runqueue: */
int cpu;
int online;
unsigned long avg_load_per_task;
u64 rt_avg;
u64 age_stamp;
u64 idle_stamp;
u64 avg_idle;
#endif
/* calc_load related fields */
unsigned long calc_load_update;
long calc_load_active;
#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
int hrtick_csd_pending;
struct call_single_data hrtick_csd;
#endif
struct hrtimer hrtick_timer;
#endif
#ifdef CONFIG_SCHEDSTATS
/* latency stats */
struct sched_info rq_sched_info;
unsigned long long rq_cpu_time;
/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
/* sys_sched_yield() stats */
unsigned int yld_count;
/* schedule() stats */
unsigned int sched_switch;
unsigned int sched_count;
unsigned int sched_goidle;
/* try_to_wake_up() stats */
unsigned int ttwu_count;
unsigned int ttwu_local;
/* BKL stats */
unsigned int bkl_count;
#endif
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
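/*
 * A sketch of the ascending-address rule from the locking comment
 * above; the file's double-runqueue lock helper follows this scheme
 * (with an extra rq1 == rq2 special case):
 *
 *	if (rq1 < rq2) {
 *		raw_spin_lock(&rq1->lock);
 *		raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 *	} else {
 *		raw_spin_lock(&rq2->lock);
 *		raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 *	}
 */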
static inline
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
rq->curr->sched_class->check_preempt_curr(rq, p, flags);
/*
* A queue event has occurred, and we're going to schedule. In
* this case, we can save a useless back to back clock update.
*/
if (test_tsk_need_resched(p))
rq->skip_clock_update = 1;
}
static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
return rq->cpu;
#else
return 0;
#endif
}
#define rcu_dereference_check_sched_domain(p) \
rcu_dereference_check((p), \
rcu_read_lock_sched_held() || \
lockdep_is_held(&sched_domains_mutex))
/*
* The domain tree (rq->sd) is protected by RCU's quiescent state transition.
* See detach_destroy_domains: synchronize_sched for details.
*
* The domain tree of any CPU may only be accessed from within
* preempt-disabled sections.
*/
#define for_each_domain(cpu, __sd) \
for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
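/*
 * Typical traversal (a sketch; per the rule above the walk must run
 * with preemption disabled):
 *
 *	struct sched_domain *sd;
 *	int cpu;
 *
 *	preempt_disable();
 *	cpu = smp_processor_id();
 *	for_each_domain(cpu, sd) {
 *		if (sd->flags & SD_LOAD_BALANCE)
 *			break;
 *	}
 *	preempt_enable();
 */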
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() (&__get_cpu_var(runqueues))
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() (&__raw_get_cpu_var(runqueues))
#ifdef CONFIG_CGROUP_SCHED
/*
 * Return the group to which this task belongs.
*
* We use task_subsys_state_check() and extend the RCU verification
* with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
* holds that lock for each task it moves into the cgroup. Therefore
* by holding that lock, we pin the task to the current cgroup.
*/
static inline struct task_group *task_group(struct task_struct *p)
{
struct cgroup_subsys_state *css;
css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
lockdep_is_held(&task_rq(p)->lock));
return container_of(css, struct task_group, css);
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
p->se.parent = task_group(p)->se[cpu];
#endif
#ifdef CONFIG_RT_GROUP_SCHED
p->rt.rt_rq = task_group(p)->rt_rq[cpu];
p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}
#else /* CONFIG_CGROUP_SCHED */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
return NULL;
}
#endif /* CONFIG_CGROUP_SCHED */
inline void update_rq_clock(struct rq *rq)
{
if (!rq->skip_clock_update)
rq->clock = sched_clock_cpu(cpu_of(rq));
}
/*
* Tunables that become constants when CONFIG_SCHED_DEBUG is off:
*/
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif
/**
* runqueue_is_locked
* @cpu: the processor in question.
*
* Returns true if the current cpu runqueue is locked.
* This interface allows printk to be called with the runqueue lock
* held and know whether or not it is OK to wake up the klogd.
*/
int runqueue_is_locked(int cpu)
{
return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}
/*
* Debugging: various feature bits
*/
#define SCHED_FEAT(name, enabled) \
__SCHED_FEAT_##name ,
enum {
#include "sched_features.h"
};
#undef SCHED_FEAT
#define SCHED_FEAT(name, enabled) \
(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
0;
#undef SCHED_FEAT
#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled) \
#name ,
static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
NULL
};
#undef SCHED_FEAT
static int sched_feat_show(struct seq_file *m, void *v)
{
int i;
for (i = 0; sched_feat_names[i]; i++) {
if (!(sysctl_sched_features & (1UL << i)))
seq_puts(m, "NO_");
seq_printf(m, "%s ", sched_feat_names[i]);
}
seq_puts(m, "\n");
return 0;
}
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
char *cmp = buf;
int neg = 0;
int i;
if (cnt > 63)
cnt = 63;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
if (strncmp(buf, "NO_", 3) == 0) {
neg = 1;
cmp += 3;
}
for (i = 0; sched_feat_names[i]; i++) {
int len = strlen(sched_feat_names[i]);
if (strncmp(cmp, sched_feat_names[i], len) == 0) {
if (neg)
sysctl_sched_features &= ~(1UL << i);
else
sysctl_sched_features |= (1UL << i);
break;
}
}
if (!sched_feat_names[i])
return -EINVAL;
*ppos += cnt;
return cnt;
}
static int sched_feat_open(struct inode *inode, struct file *filp)
{
return single_open(filp, sched_feat_show, NULL);
}
static const struct file_operations sched_feat_fops = {
.open = sched_feat_open,
.write = sched_feat_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static __init int sched_init_debug(void)
{
debugfs_create_file("sched_features", 0644, NULL, NULL,
&sched_feat_fops);
return 0;
}
late_initcall(sched_init_debug);
#endif
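/*
 * With CONFIG_SCHED_DEBUG the feature bits can be toggled at runtime
 * through the file created above, e.g. (debugfs is typically mounted
 * at /sys/kernel/debug):
 *
 *	echo NO_HRTICK > /sys/kernel/debug/sched_features
 */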
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
/*
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
const_debug unsigned int sysctl_sched_nr_migrate = 32;
/*
* ratelimit for updating the group shares.
* default: 0.25ms
*/
unsigned int sysctl_sched_shares_ratelimit = 250000;
unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
/*
 * Inject some fuzziness into changing the per-cpu group shares
* this avoids remote rq-locks at the expense of fairness.
* default: 4
*/
unsigned int sysctl_sched_shares_thresh = 4;
/*
* period over which we average the RT time consumption, measured
* in ms.
*
* default: 1s
*/
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
/*
* period over which we measure -rt task cpu usage in us.
* default: 1s
*/
unsigned int sysctl_sched_rt_period = 1000000;
static __read_mostly int scheduler_running;
/*
* part of the period that we allow rt tasks to run in us.
* default: 0.95s
*/
int sysctl_sched_rt_runtime = 950000;
static inline u64 global_rt_period(void)
{
return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}
static inline u64 global_rt_runtime(void)
{
if (sysctl_sched_rt_runtime < 0)
return RUNTIME_INF;
return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev) do { } while (0)
#endif
static inline int task_current(struct rq *rq, struct task_struct *p)
{
return rq->curr == p;
}
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
return task_current(rq, p);
}
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
rq->lock.owner = current;
#endif
/*
* If we are tracking spinlock dependencies then we have to
* fix up the runqueue lock - which gets 'carried over' from
* prev into current:
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
raw_spin_unlock_irq(&rq->lock);
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
return p->oncpu;
#else
return task_current(rq, p);
#endif
}
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
/*
* We can optimise this out completely for !SMP, because the
* SMP rebalancing from interrupt is the only thing that cares
* here.
*/
next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
raw_spin_unlock_irq(&rq->lock);
#else
raw_spin_unlock(&rq->lock);
#endif
}
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
/*
* After ->oncpu is cleared, the task can be moved to a different CPU.
* We must ensure this doesn't happen until the switch is completely
* finished.
*/
smp_wmb();
prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
/*
 * Check whether the task is waking; we use this to synchronize ->cpus_allowed
* against ttwu().
*/
static inline int task_is_waking(struct task_struct *p)
{
return unlikely(p->state == TASK_WAKING);
}
/*
* __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called with interrupts disabled.
*/
static inline struct rq *__task_rq_lock(struct task_struct *p)
__acquires(rq->lock)
{
struct rq *rq;
for (;;) {
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
raw_spin_unlock(&rq->lock);
}
}
/*
* task_rq_lock - lock the runqueue a given task resides on and disable
* interrupts. Note the ordering: we can safely lookup the task_rq without
* explicitly disabling preemption.
*/
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
__acquires(rq->lock)
{
struct rq *rq;
for (;;) {
local_irq_save(*flags);
rq = task_rq(p);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
}
static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
raw_spin_unlock(&rq->lock);
}
static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
__releases(rq->lock)
{
raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
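/*
 * Typical pairing (sketch):
 *
 *	unsigned long flags;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &flags);
 *	... p cannot migrate off rq while the lock is held ...
 *	task_rq_unlock(rq, &flags);
 */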
/*
* this_rq_lock - lock this runqueue and disable interrupts.
*/
static struct rq *this_rq_lock(void)
__acquires(rq->lock)
{
struct rq *rq;
local_irq_disable();
rq = this_rq();
raw_spin_lock(&rq->lock);
return rq;
}
#ifdef CONFIG_SCHED_HRTICK
/*
* Use HR-timers to deliver accurate preemption points.
*
 * It's all a bit involved since we cannot program an hrtimer while holding
 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
* reschedule event.
*
* When we get rescheduled we reprogram the hrtick_timer outside of the
* rq->lock.
*/
/*
* Use hrtick when:
* - enabled by features
* - hrtimer is actually high res
*/
static inline int hrtick_enabled(struct rq *rq)
{
if (!sched_feat(HRTICK))
return 0;
if (!cpu_active(cpu_of(rq)))
return 0;
return hrtimer_is_hres_active(&rq->hrtick_timer);
}
static void hrtick_clear(struct rq *rq)
{
if (hrtimer_active(&rq->hrtick_timer))
hrtimer_cancel(&rq->hrtick_timer);
}
/*
* High-resolution timer tick.
* Runs from hardirq context with interrupts disabled.
*/
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
struct rq *rq = container_of(timer, struct rq, hrtick_timer);
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
raw_spin_unlock(&rq->lock);
return HRTIMER_NORESTART;
}
#ifdef CONFIG_SMP
/*
* called from hardirq (IPI) context
*/
static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
raw_spin_lock(&rq->lock);
hrtimer_restart(&rq->hrtick_timer);
rq->hrtick_csd_pending = 0;
raw_spin_unlock(&rq->lock);
}
/*
* Called to set the hrtick timer state.
*
* called with rq->lock held and irqs disabled
*/
static void hrtick_start(struct rq *rq, u64 delay)
{
struct hrtimer *timer = &rq->hrtick_timer;
ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
hrtimer_set_expires(timer, time);
if (rq == this_rq()) {
hrtimer_restart(timer);
} else if (!rq->hrtick_csd_pending) {
__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
rq->hrtick_csd_pending = 1;
}
}
static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int cpu = (int)(long)hcpu;
switch (action) {
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
hrtick_clear(cpu_rq(cpu));
return NOTIFY_OK;
}
return NOTIFY_DONE;
}
static __init void init_hrtick(void)
{
hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
* Called to set the hrtick timer state.
*
* called with rq->lock held and irqs disabled
*/
static void hrtick_start(struct rq *rq, u64 delay)
{
__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
HRTIMER_MODE_REL_PINNED, 0);
}
static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */
static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
rq->hrtick_csd_pending = 0;
rq->hrtick_csd.flags = 0;
rq->hrtick_csd.func = __hrtick_start;
rq->hrtick_csd.info = rq;
#endif
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}
static inline void init_rq_hrtick(struct rq *rq)
{
}
static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */
/*
* resched_task - mark a task 'to be rescheduled now'.
*
* On UP this means the setting of the need_resched flag, on SMP it
* might also involve a cross-CPU call to trigger the scheduler on
* the target CPU.
*/
#ifdef CONFIG_SMP
#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif
static void resched_task(struct task_struct *p)
{
int cpu;
assert_raw_spin_locked(&task_rq(p)->lock);
if (test_tsk_need_resched(p))
return;
set_tsk_need_resched(p);
cpu = task_cpu(p);
if (cpu == smp_processor_id())
return;
/* NEED_RESCHED must be visible before we test polling */
smp_mb();
if (!tsk_is_polling(p))
smp_send_reschedule(cpu);
}
static void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
if (!raw_spin_trylock_irqsave(&rq->lock, flags))
return;
resched_task(cpu_curr(cpu));
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_NO_HZ
/*
* When add_timer_on() enqueues a timer into the timer wheel of an
* idle CPU then this timer might expire before the next timer event
* which is scheduled to wake up that CPU. In case of a completely
* idle system the next event might even be infinite time into the
* future. wake_up_idle_cpu() ensures that the CPU is woken up and
* leaves the inner idle loop so the newly added timer is taken into
* account when the CPU goes back to idle and evaluates the timer
* wheel for the next timer event.
*/
void wake_up_idle_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
if (cpu == smp_processor_id())
return;
/*
* This is safe, as this function is called with the timer
* wheel base lock of (cpu) held. When the CPU is on the way
* to idle and has not yet set rq->curr to idle then it will
* be serialized on the timer wheel base lock and take the new
* timer into account automatically.
*/
if (rq->curr != rq->idle)
return;
/*
* We can set TIF_RESCHED on the idle task of the other CPU
* lockless. The worst case is that the other CPU runs the
* idle task through an additional NOOP schedule()
*/
set_tsk_need_resched(rq->idle);
/* NEED_RESCHED must be visible before we test polling */
smp_mb();
if (!tsk_is_polling(rq->idle))
smp_send_reschedule(cpu);
}
#endif /* CONFIG_NO_HZ */
static u64 sched_avg_period(void)
{
return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}
static void sched_avg_update(struct rq *rq)
{
s64 period = sched_avg_period();
while ((s64)(rq->clock - rq->age_stamp) > period) {
/*
* Inline assembly required to prevent the compiler
* optimising this loop into a divmod call.
* See __iter_div_u64_rem() for another example of this.
*/
asm("" : "+rm" (rq->age_stamp));
rq->age_stamp += period;
rq->rt_avg /= 2;
}
}
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
rq->rt_avg += rt_delta;
sched_avg_update(rq);
}
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
assert_raw_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}
#endif /* CONFIG_SMP */
#if BITS_PER_LONG == 32
# define WMULT_CONST (~0UL)
#else
# define WMULT_CONST (1UL << 32)
#endif
#define WMULT_SHIFT 32
/*
* Shift right and round:
*/
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
/*
* delta *= weight / lw
*/
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
struct load_weight *lw)
{
u64 tmp;
if (!lw->inv_weight) {
if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
lw->inv_weight = 1;
else
lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
/ (lw->weight+1);
}
tmp = (u64)delta_exec * weight;
/*
* Check whether we'd overflow the 64-bit multiplication:
*/
if (unlikely(tmp > WMULT_CONST))
tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
WMULT_SHIFT/2);
else
tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
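/*
 * A worked example of the fixed-point math above: with the precomputed
 * nice-0 inverse (inv_weight == 4194304 == 2^32/1024, see
 * prio_to_wmult[] below), a delta_exec of 1000000ns against a nice-0
 * load comes back unchanged:
 *
 *	tmp = 1000000 * 1024;			(< WMULT_CONST)
 *	SRR(tmp * 4194304, 32) == 1000000
 *
 * A heavier lw scales the delta down proportionally, a lighter one up.
 */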
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
lw->inv_weight = 0;
}
static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
lw->weight -= dec;
lw->inv_weight = 0;
}
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
* of tasks with abnormal "nice" values across CPUs the contribution that
* each task makes to its run queue's load is weighted according to its
* scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
* scaled version of the new time slice allocation that they receive on time
* slice expiry etc.
*/
#define WEIGHT_IDLEPRIO 3
#define WMULT_IDLEPRIO 1431655765
/*
* Nice levels are multiplicative, with a gentle 10% change for every
* nice level changed. I.e. when a CPU-bound task goes from nice 0 to
* nice 1, it will get ~10% less CPU time than another CPU-bound task
* that remained on nice 0.
*
* The "10% effect" is relative and cumulative: from _any_ nice level,
* if you go up 1 level, it's -10% CPU usage, if you go down 1 level
* it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
* If a task goes up by ~10% and another task goes down by ~10% then
* the relative distance between them is ~25%.)
*/
static const int prio_to_weight[40] = {
/* -20 */ 88761, 71755, 56483, 46273, 36291,
/* -15 */ 29154, 23254, 18705, 14949, 11916,
/* -10 */ 9548, 7620, 6100, 4904, 3906,
/* -5 */ 3121, 2501, 1991, 1586, 1277,
/* 0 */ 1024, 820, 655, 526, 423,
/* 5 */ 335, 272, 215, 172, 137,
/* 10 */ 110, 87, 70, 56, 45,
/* 15 */ 36, 29, 23, 18, 15,
};
/*
* Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
*
* In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
* into multiplications:
*/
static const u32 prio_to_wmult[40] = {
/* -20 */ 48388, 59856, 76040, 92818, 118348,
/* -15 */ 147320, 184698, 229616, 287308, 360437,
/* -10 */ 449829, 563644, 704093, 875809, 1099582,
/* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
/* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
/* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
/* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
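/*
 * Sanity check relating the two tables: prio_to_weight[21] == 820
 * (nice 1) and prio_to_wmult[21] == 5237765 ~= 2^32/820. The ~1.25
 * step between adjacent weights (e.g. 1024/820) is what produces the
 * ~10% CPU-time change per nice level described above.
 */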
/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
CPUACCT_STAT_USER, /* ... user mode */
CPUACCT_STAT_SYSTEM, /* ... kernel mode */
CPUACCT_STAT_NSTATS,
};
#ifdef CONFIG_CGROUP_CPUACCT
static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
static void cpuacct_update_stats(struct task_struct *tsk,
enum cpuacct_stat_index idx, cputime_t val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_update_stats(struct task_struct *tsk,
enum cpuacct_stat_index idx, cputime_t val) {}
#endif
static inline void inc_cpu_load(struct rq *rq, unsigned long load)
{
update_load_add(&rq->load, load);
}
static inline void dec_cpu_load(struct rq *rq, unsigned long load)
{
update_load_sub(&rq->load, load);
}
#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
typedef int (*tg_visitor)(struct task_group *, void *);
/*
* Iterate the full tree, calling @down when first entering a node and @up when
* leaving it for the final time.
*/
static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
struct task_group *parent, *child;
int ret;
rcu_read_lock();
parent = &root_task_group;
down:
ret = (*down)(parent, data);
if (ret)
goto out_unlock;
list_for_each_entry_rcu(child, &parent->children, siblings) {
parent = child;
goto down;
up:
continue;
}
ret = (*up)(parent, data);
if (ret)
goto out_unlock;
child = parent;
parent = parent->parent;
if (parent)
goto up;
out_unlock:
rcu_read_unlock();
return ret;
}
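/*
* Both orderings are used below: walk_tg_tree(tg_nop, tg_shares_up, sd)
* does its work in @up, i.e. bottom-up, while
* walk_tg_tree(tg_load_down, tg_nop, (void *)cpu) does its work in
* @down, i.e. top-down.
*/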
static int tg_nop(struct task_group *tg, void *data)
{
return 0;
}
#endif
#ifdef CONFIG_SMP
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
return cpu_rq(cpu)->load.weight;
}
/*
* Return a low guess at the load of a migration-source cpu weighted
* according to the scheduling class and "nice" value.
*
* We want to under-estimate the load of migration sources, to
* balance conservatively.
*/
static unsigned long source_load(int cpu, int type)
{
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
if (type == 0 || !sched_feat(LB_BIAS))
return total;
return min(rq->cpu_load[type-1], total);
}
/*
* Return a high guess at the load of a migration-target cpu weighted
* according to the scheduling class and "nice" value.
*/
static unsigned long target_load(int cpu, int type)
{
struct rq *rq = cpu_rq(cpu);
unsigned long total = weighted_cpuload(cpu);
if (type == 0 || !sched_feat(LB_BIAS))
return total;
return max(rq->cpu_load[type-1], total);
}
static unsigned long power_of(int cpu)
{
return cpu_rq(cpu)->cpu_power;
}
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
if (nr_running)
rq->avg_load_per_task = rq->load.weight / nr_running;
else
rq->avg_load_per_task = 0;
return rq->avg_load_per_task;
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static __read_mostly unsigned long __percpu *update_shares_data;
static void __set_se_shares(struct sched_entity *se, unsigned long shares);
/*
* Calculate and set the cpu's group shares.
*/
static void update_group_shares_cpu(struct task_group *tg, int cpu,
unsigned long sd_shares,
unsigned long sd_rq_weight,
unsigned long *usd_rq_weight)
{
unsigned long shares, rq_weight;
int boost = 0;
rq_weight = usd_rq_weight[cpu];
if (!rq_weight) {
boost = 1;
rq_weight = NICE_0_LOAD;
}
/*
*              \Sum_j shares_j * rq_weight_i
* shares_i =  -----------------------------
*                   \Sum_j rq_weight_j
*/
shares = (sd_shares * rq_weight) / sd_rq_weight;
shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
if (abs(shares - tg->se[cpu]->load.weight) >
sysctl_sched_shares_thresh) {
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
__set_se_shares(tg->se[cpu], shares);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
}
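/*
* A worked example of the formula above: with sd_shares == 1024 and
* two CPUs whose rq_weights are 3072 and 1024 (sd_rq_weight == 4096),
* the CPUs are assigned 1024*3072/4096 == 768 and 1024*1024/4096 ==
* 256 shares, i.e. the group's shares follow the distribution of its
* runnable load.
*/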
/*
* Re-compute a task group's per-cpu shares over the given domain.
* This needs to be done in a bottom-up fashion because the rq weight of a
* parent group depends on the shares of its child groups.
*/
static int tg_shares_up(struct task_group *tg, void *data)
{
unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
unsigned long *usd_rq_weight;
struct sched_domain *sd = data;
unsigned long flags;
int i;
if (!tg->se[0])
return 0;
local_irq_save(flags);
usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
for_each_cpu(i, sched_domain_span(sd)) {
weight = tg->cfs_rq[i]->load.weight;
usd_rq_weight[i] = weight;
rq_weight += weight;
/*
* If there are currently no tasks on the cpu, pretend there
* is one of average load so that when a new task gets to
* run here it will not get delayed by group starvation.
*/
if (!weight)
weight = NICE_0_LOAD;
sum_weight += weight;
shares += tg->cfs_rq[i]->shares;
}
if (!rq_weight)
rq_weight = sum_weight;
if ((!shares && rq_weight) || shares > tg->shares)
shares = tg->shares;
if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
shares = tg->shares;
for_each_cpu(i, sched_domain_span(sd))
update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
local_irq_restore(flags);
return 0;
}
/*
* Compute the cpu's hierarchical load factor for each task group.
* This needs to be done in a top-down fashion because the load of a child
* group is a fraction of its parents load.
*/
static int tg_load_down(struct task_group *tg, void *data)
{
unsigned long load;
long cpu = (long)data;
if (!tg->parent) {
load = cpu_rq(cpu)->load.weight;
} else {
load = tg->parent->cfs_rq[cpu]->h_load;
load *= tg->cfs_rq[cpu]->shares;
load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
}
tg->cfs_rq[cpu]->h_load = load;
return 0;
}
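/*
* A worked example: if the root rq carries load.weight 2048 and a
* child group holds shares 512 of its parent cfs_rq's weight 1024,
* its h_load is 2048 * 512 / (1024 + 1) ~= 1023, i.e. roughly half
* of the CPU's load is attributed to that group.
*/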
static void update_shares(struct sched_domain *sd)
{
s64 elapsed;
u64 now;
if (root_task_group_empty())
return;
now = cpu_clock(raw_smp_processor_id());
elapsed = now - sd->last_update;
if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
sd->last_update = now;
walk_tg_tree(tg_nop, tg_shares_up, sd);
}
}
static void update_h_load(long cpu)
{
walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
}
#else
static inline void update_shares(struct sched_domain *sd)
{
}
#endif
#ifdef CONFIG_PREEMPT
static void double_rq_lock(struct rq *rq1, struct rq *rq2);
/*
* fair double_lock_balance: Safely acquires both rq->locks in a fair
* way at the expense of forcing extra atomic operations in all
* invocations. This assures that the double_lock is acquired using the
* same underlying policy as the spinlock_t on this architecture, which
* reduces latency compared to the unfair variant below. However, it
* also adds more overhead and therefore may reduce throughput.
*/
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__releases(this_rq->lock)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
raw_spin_unlock(&this_rq->lock);
double_rq_lock(this_rq, busiest);
return 1;
}
#else
/*
* Unfair double_lock_balance: Optimizes throughput at the expense of
* latency by eliminating extra atomic operations when the locks are
* already in proper order on entry. This favors lower cpu-ids and will
* grant the double lock to lower cpus over higher ids under contention,
* regardless of entry order into the function.
*/
static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__releases(this_rq->lock)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
int ret = 0;
if (unlikely(!raw_spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
raw_spin_unlock(&this_rq->lock);
raw_spin_lock(&busiest->lock);
raw_spin_lock_nested(&this_rq->lock,
SINGLE_DEPTH_NESTING);
ret = 1;
} else
raw_spin_lock_nested(&busiest->lock,
SINGLE_DEPTH_NESTING);
}
return ret;
}
#endif /* CONFIG_PREEMPT */
/*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
*/
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work well under rq->lock */
raw_spin_unlock(&this_rq->lock);
BUG_ON(1);
}
return _double_lock_balance(this_rq, busiest);
}
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
raw_spin_unlock(&busiest->lock);
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
/*
* double_rq_lock - safely lock two runqueues
*
* Note this does not disable interrupts like task_rq_lock,
* you need to do so manually before calling.
*/
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
__acquires(rq1->lock)
__acquires(rq2->lock)
{
BUG_ON(!irqs_disabled());
if (rq1 == rq2) {
raw_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
if (rq1 < rq2) {
raw_spin_lock(&rq1->lock);
raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
} else {
raw_spin_lock(&rq2->lock);
raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
}
}
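/*
* The address ordering above is what prevents an AB-BA deadlock: if
* one CPU runs double_rq_lock(rq_a, rq_b) while another runs
* double_rq_lock(rq_b, rq_a), both take the lower-addressed lock
* first, so one of them simply waits instead of deadlocking.
*/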
/*
* double_rq_unlock - safely unlock two runqueues
*
* Note this does not restore interrupts like task_rq_unlock,
* you need to do so manually after calling.
*/
static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
raw_spin_unlock(&rq1->lock);
if (rq1 != rq2)
raw_spin_unlock(&rq2->lock);
else
__release(rq2->lock);
}
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
{
#ifdef CONFIG_SMP
cfs_rq->shares = shares;
#endif
}
#endif
static void calc_load_account_idle(struct rq *this_rq);
static void update_sysctl(void);
static int get_update_sysctl_factor(void);
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
set_task_rq(p, cpu);
#ifdef CONFIG_SMP
/*
* After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
* successfully executed on another CPU. We must ensure that updates of
* per-task data have been completed by this moment.
*/
smp_wmb();
task_thread_info(p)->cpu = cpu;
#endif
}
static const struct sched_class rt_sched_class;
#define sched_class_highest (&rt_sched_class)
#define for_each_class(class) \
for (class = sched_class_highest; class; class = class->next)
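/*
* With the scheduler classes included below (sched_fair.c, sched_rt.c,
* sched_idletask.c) the iteration order is rt, then fair, then idle,
* following the ->next links from sched_class_highest.
*/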
#include "sched_stats.h"
static void inc_nr_running(struct rq *rq)
{
rq->nr_running++;
}
static void dec_nr_running(struct rq *rq)
{
rq->nr_running--;
}
static void set_load_weight(struct task_struct *p)
{
if (task_has_rt_policy(p)) {
p->se.load.weight = 0;
p->se.load.inv_weight = WMULT_CONST;
return;
}
/*
* SCHED_IDLE tasks get minimal weight:
*/
if (p->policy == SCHED_IDLE) {
p->se.load.weight = WEIGHT_IDLEPRIO;
p->se.load.inv_weight = WMULT_IDLEPRIO;
return;
}
p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
}
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
update_rq_clock(rq);
sched_info_queued(p);
p->sched_class->enqueue_task(rq, p, flags);
p->se.on_rq = 1;
}
static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
update_rq_clock(rq);
sched_info_dequeued(p);
p->sched_class->dequeue_task(rq, p, flags);
p->se.on_rq = 0;
}
/*
* activate_task - move a task to the runqueue.
*/
static void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
enqueue_task(rq, p, flags);
inc_nr_running(rq);
}
/*
* deactivate_task - remove a task from the runqueue.
*/
static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
dequeue_task(rq, p, flags);
dec_nr_running(rq);
}
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif
/*
* __normal_prio - return the priority that is based on the static prio
*/
static inline int __normal_prio(struct task_struct *p)
{
return p->static_prio;
}
/*
* Calculate the expected normal priority: i.e. priority
* without taking RT-inheritance into account. Might be
* boosted by interactivity modifiers. Changes upon fork,
* setprio syscalls, and whenever the interactivity
* estimator recalculates.
*/
static inline int normal_prio(struct task_struct *p)
{
int prio;
if (task_has_rt_policy(p))
prio = MAX_RT_PRIO-1 - p->rt_priority;
else
prio = __normal_prio(p);
return prio;
}
/*
* Calculate the current priority, i.e. the priority
* taken into account by the scheduler. This value might
* be boosted by RT tasks, or might be boosted by
* interactivity modifiers. Will be RT if the task got
* RT-boosted. If not then it returns p->normal_prio.
*/
static int effective_prio(struct task_struct *p)
{
p->normal_prio = normal_prio(p);
/*
* If we are RT tasks or we were boosted to RT priority,
* keep the priority unchanged. Otherwise, update priority
* to the normal priority:
*/
if (!rt_prio(p->prio))
return p->normal_prio;
return p->prio;
}
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
*/
inline int task_curr(const struct task_struct *p)
{
return cpu_curr(task_cpu(p)) == p;
}
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
const struct sched_class *prev_class,
int oldprio, int running)
{
if (prev_class != p->sched_class) {
if (prev_class->switched_from)
prev_class->switched_from(rq, p, running);
p->sched_class->switched_to(rq, p, running);
} else
p->sched_class->prio_changed(rq, p, oldprio, running);
}
#ifdef CONFIG_SMP
/*
* Is this task likely cache-hot:
*/
static int
task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
s64 delta;
if (p->sched_class != &fair_sched_class)
return 0;
/*
* Buddy candidates are cache hot:
*/
if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
(&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last))
return 1;
if (sysctl_sched_migration_cost == -1)
return 1;
if (sysctl_sched_migration_cost == 0)
return 0;
delta = now - p->se.exec_start;
return delta < (s64)sysctl_sched_migration_cost;
}
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
/*
* We should never call set_task_cpu() on a blocked task;
* ttwu() will sort out the placement.
*/
WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
!(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
#endif
trace_sched_migrate_task(p, new_cpu);
if (task_cpu(p) != new_cpu) {
p->se.nr_migrations++;
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
}
__set_task_cpu(p, new_cpu);
}
struct migration_arg {
struct task_struct *task;
int dest_cpu;
};
static int migration_cpu_stop(void *data);
/*
* The task's runqueue lock must be held.
* Returns true if you have to wait for migration thread.
*/
static bool migrate_task(struct task_struct *p, int dest_cpu)
{
struct rq *rq = task_rq(p);
/*
* If the task is not on a runqueue (and not running), then
* the next wake-up will properly place the task.
*/
return p->se.on_rq || task_running(rq, p);
}
/*
* wait_task_inactive - wait for a thread to unschedule.
*
* If @match_state is nonzero, it's the @p->state value just checked and
* not expected to change. If it changes, i.e. @p might have woken up,
* then return zero. When we succeed in waiting for @p to be off its CPU,
* we return a positive number (its total switch count). If a second call
* a short while later returns the same number, the caller can be sure that
* @p has remained unscheduled the whole time.
*
* The caller must ensure that the task *will* unschedule sometime soon,
* else this function might spin for a *long* time. This function can't
* be called with interrupts off, or it may introduce deadlock with
* smp_call_function() if an IPI is sent by the same process we are
* waiting to become inactive.
*/
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
unsigned long flags;
int running, on_rq;
unsigned long ncsw;
struct rq *rq;
for (;;) {
/*
* We do the initial early heuristics without holding
* any task-queue locks at all. We'll only try to get
* the runqueue lock when things look like they will
* work out!
*/
rq = task_rq(p);
/*
* If the task is actively running on another CPU
* still, just relax and busy-wait without holding
* any locks.
*
* NOTE! Since we don't hold any locks, there's no
* guarantee that "rq" stays the right runqueue!
* But we don't care, since "task_running()" will
* return false if the runqueue has changed and p
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
if (match_state && unlikely(p->state != match_state))
return 0;
cpu_relax();
}
/*
* Ok, time to look more closely! We need the rq
* lock now, to be *sure*. If we're wrong, we'll
* just go back and repeat.
*/
rq = task_rq_lock(p, &flags);
trace_sched_wait_task(p);
running = task_running(rq, p);
on_rq = p->se.on_rq;
ncsw = 0;
if (!match_state || p->state == match_state)
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, &flags);
/*
* If it changed from the expected state, bail out now.
*/
if (unlikely(!ncsw))
break;
/*
* Was it really running after all now that we
* checked with the proper locks actually held?
*
* Oops. Go back and try again..
*/
if (unlikely(running)) {
cpu_relax();
continue;
}
/*
* It's not enough that it's not actively running,
* it must be off the runqueue _entirely_, and not
* preempted!
*
* So if it was still runnable (but just not actively
* running right now), it's preempted, and we should
* yield - it could be a while.
*/
if (unlikely(on_rq)) {
schedule_timeout_uninterruptible(1);
continue;
}
/*
* Ahh, all good. It wasn't running, and it wasn't
* runnable, which means that it will never become
* running in the future either. We're all done!
*/
break;
}
return ncsw;
}
/***
* kick_process - kick a running thread to enter/exit the kernel
* @p: the to-be-kicked thread
*
* Cause a process which is running on another CPU to enter
* kernel-mode, without any delay. (to get signals handled.)
*
* NOTE: this function doesn't have to take the runqueue lock,
* because all it wants to ensure is that the remote task enters
* the kernel. If the IPI races and the task has been migrated
* to another CPU then no harm is done and the purpose has been
* achieved as well.
*/
void kick_process(struct task_struct *p)
{
int cpu;
preempt_disable();
cpu = task_cpu(p);
if ((cpu != smp_processor_id()) && task_curr(p))
smp_send_reschedule(cpu);
preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);
#endif /* CONFIG_SMP */
/**
* task_oncpu_function_call - call a function on the cpu on which a task runs
* @p: the task to evaluate
* @func: the function to be called
* @info: the function call argument
*
* Calls the function @func when the task is currently running. This
* may be on the current CPU, in which case the function is called
* directly.
*/
void task_oncpu_function_call(struct task_struct *p,
void (*func) (void *info), void *info)
{
int cpu;
preempt_disable();
cpu = task_cpu(p);
if (task_curr(p))
smp_call_function_single(cpu, func, info, 1);
preempt_enable();
}
#ifdef CONFIG_SMP
/*
* ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
*/
static int select_fallback_rq(int cpu, struct task_struct *p)
{
int dest_cpu;
const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
/* Look for allowed, online CPU in same node. */
for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
return dest_cpu;
/* Any allowed, online CPU? */
dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
if (dest_cpu < nr_cpu_ids)
return dest_cpu;
/* No more Mr. Nice Guy. */
if (unlikely(dest_cpu >= nr_cpu_ids)) {
dest_cpu = cpuset_cpus_allowed_fallback(p);
/*
* Don't tell them about moving exiting tasks or
* kernel threads (both mm NULL), since they never
* leave kernel.
*/
if (p->mm && printk_ratelimit()) {
printk(KERN_INFO "process %d (%s) no "
"longer affine to cpu%d\n",
task_pid_nr(p), p->comm, cpu);
}
}
return dest_cpu;
}
/*
* The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
*/
static inline
int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
{
int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
/*
* In order not to call set_task_cpu() on a blocking task we need
* to rely on ttwu() to place the task on a valid ->cpus_allowed
* cpu.
*
* Since this is common to all placement strategies, this lives here.
*
* [ this allows ->select_task_rq() to simply return task_cpu(p) and
* not worry about this generic constraint ]
*/
if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
!cpu_online(cpu)))
cpu = select_fallback_rq(task_cpu(p), p);
return cpu;
}
static void update_avg(u64 *avg, u64 sample)
{
s64 diff = sample - *avg;
*avg += diff >> 3;
}
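/*
* This is an exponentially weighted average giving new samples a 1/8
* weight: starting from *avg == 0, repeated samples of 800 move the
* average through 100, 187, 263, ... converging towards 800.
*/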
#endif
/***
* try_to_wake_up - wake up a thread
* @p: the to-be-woken-up thread
* @state: the mask of task states that can be woken
* @wake_flags: wake modifier flags (WF_*)
*
* Put it on the run-queue if it's not already there. The "current"
* thread is always on the run-queue (except when the actual
* re-schedule is in progress), and as such you're allowed to do
* the simpler "current->state = TASK_RUNNING" to mark yourself
* runnable without the overhead of this.
*
* returns failure only if the task is already active.
*/
static int try_to_wake_up(struct task_struct *p, unsigned int state,
int wake_flags)
{
int cpu, orig_cpu, this_cpu, success = 0;
unsigned long flags;
unsigned long en_flags = ENQUEUE_WAKEUP;
struct rq *rq;
this_cpu = get_cpu();
smp_wmb();
rq = task_rq_lock(p, &flags);
if (!(p->state & state))
goto out;
if (p->se.on_rq)
goto out_running;
cpu = task_cpu(p);
orig_cpu = cpu;
#ifdef CONFIG_SMP
if (unlikely(task_running(rq, p)))
goto out_activate;
/*
* In order to handle concurrent wakeups and release the rq->lock
* we put the task in TASK_WAKING state.
*
* First fix up the nr_uninterruptible count:
*/
if (task_contributes_to_load(p)) {
if (likely(cpu_online(orig_cpu)))
rq->nr_uninterruptible--;
else
this_rq()->nr_uninterruptible--;
}
p->state = TASK_WAKING;
if (p->sched_class->task_waking) {
p->sched_class->task_waking(rq, p);
en_flags |= ENQUEUE_WAKING;
}
cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
if (cpu != orig_cpu)
set_task_cpu(p, cpu);
__task_rq_unlock(rq);
rq = cpu_rq(cpu);
raw_spin_lock(&rq->lock);
/*
* We migrated the task without holding either rq->lock, however
* since the task is not on the task list itself, nobody else
* will try and migrate the task, hence the rq should match the
* cpu we just moved it to.
*/
WARN_ON(task_cpu(p) != cpu);
WARN_ON(p->state != TASK_WAKING);
#ifdef CONFIG_SCHEDSTATS
schedstat_inc(rq, ttwu_count);
if (cpu == this_cpu)
schedstat_inc(rq, ttwu_local);
else {
struct sched_domain *sd;
for_each_domain(this_cpu, sd) {
if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
schedstat_inc(sd, ttwu_wake_remote);
break;
}
}
}
#endif /* CONFIG_SCHEDSTATS */
out_activate:
#endif /* CONFIG_SMP */
schedstat_inc(p, se.statistics.nr_wakeups);
if (wake_flags & WF_SYNC)
schedstat_inc(p, se.statistics.nr_wakeups_sync);
if (orig_cpu != cpu)
schedstat_inc(p, se.statistics.nr_wakeups_migrate);
if (cpu == this_cpu)
schedstat_inc(p, se.statistics.nr_wakeups_local);
else
schedstat_inc(p, se.statistics.nr_wakeups_remote);
activate_task(rq, p, en_flags);
success = 1;
out_running:
trace_sched_wakeup(p, success);
check_preempt_curr(rq, p, wake_flags);
p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
if (unlikely(rq->idle_stamp)) {
u64 delta = rq->clock - rq->idle_stamp;
u64 max = 2*sysctl_sched_migration_cost;
if (delta > max)
rq->avg_idle = max;
else
update_avg(&rq->avg_idle, delta);
rq->idle_stamp = 0;
}
#endif
out:
task_rq_unlock(rq, &flags);
put_cpu();
return success;
}
/**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
* Attempt to wake up the nominated process and move it to the set of runnable
* processes. Returns 1 if the process was woken up, 0 if it was already
* running.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
int wake_up_process(struct task_struct *p)
{
return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
}
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
*
* __sched_fork() is basic setup used by init_idle() too:
*/
static void __sched_fork(struct task_struct *p)
{
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
INIT_LIST_HEAD(&p->rt.run_list);
p->se.on_rq = 0;
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
}
/*
* fork()/clone()-time setup:
*/
void sched_fork(struct task_struct *p, int clone_flags)
{
int cpu = get_cpu();
__sched_fork(p);
/*
* We mark the process as running here. This guarantees that
* nobody will actually run it, and a signal or other external
* event cannot wake it up and insert it on the runqueue either.
*/
p->state = TASK_RUNNING;
/*
* Revert to default priority/policy on fork if requested.
*/
if (unlikely(p->sched_reset_on_fork)) {
if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
p->policy = SCHED_NORMAL;
p->normal_prio = p->static_prio;
}
if (PRIO_TO_NICE(p->static_prio) < 0) {
p->static_prio = NICE_TO_PRIO(0);
p->normal_prio = p->static_prio;
set_load_weight(p);
}
/*
* We don't need the reset flag anymore after the fork. It has
* fulfilled its duty:
*/
p->sched_reset_on_fork = 0;
}
/*
* Make sure we do not leak PI boosting priority to the child.
*/
p->prio = current->normal_prio;
if (!rt_prio(p->prio))
p->sched_class = &fair_sched_class;
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
/*
* The child is not yet in the pid-hash so no cgroup attach races,
* and the cgroup is pinned to this child because cgroup_fork()
* runs before sched_fork().
*
* Silence PROVE_RCU.
*/
rcu_read_lock();
set_task_cpu(p, cpu);
rcu_read_unlock();
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
if (likely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
p->oncpu = 0;
#endif
#ifdef CONFIG_PREEMPT
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
#endif
plist_node_init(&p->pushable_tasks, MAX_PRIO);
put_cpu();
}
/*
* wake_up_new_task - wake up a newly created task for the first time.
*
* This function will do some initial scheduler statistics housekeeping
* that must be done for every newly created context, then puts the task
* on the runqueue and wakes it.
*/
void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
unsigned long flags;
struct rq *rq;
int cpu __maybe_unused = get_cpu();
#ifdef CONFIG_SMP
rq = task_rq_lock(p, &flags);
p->state = TASK_WAKING;
/*
* Fork balancing, do it here and not earlier because:
* - cpus_allowed can change in the fork path
* - any previously selected cpu might disappear through hotplug
*
* We set TASK_WAKING so that select_task_rq() can drop rq->lock
* without people poking at ->cpus_allowed.
*/
cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
set_task_cpu(p, cpu);
p->state = TASK_RUNNING;
task_rq_unlock(rq, &flags);
#endif
rq = task_rq_lock(p, &flags);
activate_task(rq, p, 0);
trace_sched_wakeup_new(p, 1);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
#endif
task_rq_unlock(rq, &flags);
put_cpu();
}
#ifdef CONFIG_PREEMPT_NOTIFIERS
/**
* preempt_notifier_register - tell me when current is being preempted & rescheduled
* @notifier: notifier struct to register
*/
void preempt_notifier_register(struct preempt_notifier *notifier)
{
hlist_add_head(¬ifier->link, ¤t->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);
/**
* preempt_notifier_unregister - no longer interested in preemption notifications
* @notifier: notifier struct to unregister
*
* This is safe to call from within a preemption notifier.
*/
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
hlist_del(¬ifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
struct preempt_notifier *notifier;
struct hlist_node *node;
hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
notifier->ops->sched_in(notifier, raw_smp_processor_id());
}
static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
struct task_struct *next)
{
struct preempt_notifier *notifier;
struct hlist_node *node;
hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
notifier->ops->sched_out(notifier, next);
}
#else /* !CONFIG_PREEMPT_NOTIFIERS */
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}
static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
struct task_struct *next)
{
}
#endif /* CONFIG_PREEMPT_NOTIFIERS */
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
* @prev: the current task that is being switched out
* @next: the task we are going to switch to.
*
* This is called with the rq lock held and interrupts off. It must
* be paired with a subsequent finish_task_switch after the context
* switch.
*
* prepare_task_switch sets up locking and calls architecture specific
* hooks.
*/
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
fire_sched_out_preempt_notifiers(prev, next);
prepare_lock_switch(rq, next);
prepare_arch_switch(next);
}
/**
* finish_task_switch - clean up after a task-switch
* @rq: runqueue associated with task-switch
* @prev: the thread we just switched away from.
*
* finish_task_switch must be called after the context switch, paired
* with a prepare_task_switch call before the context switch.
* finish_task_switch will reconcile locking set up by prepare_task_switch,
* and do any other architecture-specific cleanup actions.
*
* Note that we may have delayed dropping an mm in context_switch(). If
* so, we finish that here outside of the runqueue lock. (Doing it
* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
__releases(rq->lock)
{
struct mm_struct *mm = rq->prev_mm;
long prev_state;
rq->prev_mm = NULL;
/*
* A task struct has one reference for the use as "current".
* If a task dies, then it sets TASK_DEAD in tsk->state and calls
* schedule one last time. The schedule call will never return, and
* the scheduled task must drop that reference.
* The test for TASK_DEAD must occur while the runqueue locks are
* still held, otherwise prev could be scheduled on another cpu, die
* there before we look at prev->state, and then the reference would
* be dropped twice.
* Manfred Spraul <[email protected]>
*/
prev_state = prev->state;
finish_arch_switch(prev);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
local_irq_disable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
perf_event_task_sched_in(current);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
finish_lock_switch(rq, prev);
fire_sched_in_preempt_notifiers(current);
if (mm)
mmdrop(mm);
if (unlikely(prev_state == TASK_DEAD)) {
/*
* Remove function-return probe instances associated with this
* task and put them back on the free list.
*/
kprobe_flush_task(prev);
put_task_struct(prev);
}
}
#ifdef CONFIG_SMP
/* assumes rq->lock is held */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
if (prev->sched_class->pre_schedule)
prev->sched_class->pre_schedule(rq, prev);
}
/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
if (rq->post_schedule) {
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->curr->sched_class->post_schedule)
rq->curr->sched_class->post_schedule(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq->post_schedule = 0;
}
}
#else
static inline void pre_schedule(struct rq *rq, struct task_struct *p)
{
}
static inline void post_schedule(struct rq *rq)
{
}
#endif
/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
asmlinkage void schedule_tail(struct task_struct *prev)
__releases(rq->lock)
{
struct rq *rq = this_rq();
finish_task_switch(rq, prev);
/*
* FIXME: do we need to worry about rq being invalidated by the
* task_switch?
*/
post_schedule(rq);
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
/* In this case, finish_task_switch does not reenable preemption */
preempt_enable();
#endif
if (current->set_child_tid)
put_user(task_pid_vnr(current), current->set_child_tid);
}
/*
* context_switch - switch to the new MM and the new
* thread's register state.
*/
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
struct mm_struct *mm, *oldmm;
#ifdef CONFIG_LGE_DVFS
int ds_cpu = smp_processor_id();
#endif // CONFIG_LGE_DVFS
prepare_task_switch(rq, prev, next);
trace_sched_switch(prev, next);
mm = next->mm;
oldmm = prev->active_mm;
/*
* For paravirt, this is coupled with an exit in switch_to to
* combine the page table reload and the switch backend into
* one hypercall.
*/
arch_start_context_switch(prev);
if (likely(!mm)) {
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next);
} else
switch_mm(oldmm, mm, next);
if (likely(!prev->mm)) {
prev->active_mm = NULL;
rq->prev_mm = oldmm;
}
/*
* The runqueue lock will be released by the next task (which is an
* invalid locking op, but in the case of the scheduler it's an
* obvious special case), so we do an early lockdep release here:
*/
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif
#ifdef CONFIG_LGE_DVFS
if(ds_control.on_dvs == 1)
{
ds_parameter.entry_type = DS_ENTRY_SWITCH_TO;
ds_parameter.prev_p = prev;
ds_parameter.next_p = next;
ld_do_dvs_suite(ds_cpu);
}
#endif // CONFIG_LGE_DVFS
/* Here we just switch the register state and the stack. */
switch_to(prev, next, prev);
barrier();
/*
* this_rq must be evaluated again because prev may have moved
* CPUs since it called schedule(), thus the 'rq' on its stack
* frame will be invalid.
*/
finish_task_switch(this_rq(), prev);
}
/*
* nr_running, nr_uninterruptible and nr_context_switches:
*
* externally visible scheduler statistics: current number of runnable
* threads, current number of uninterruptible-sleeping threads, total
* number of context switches performed since bootup.
*/
unsigned long nr_running(void)
{
unsigned long i, sum = 0;
for_each_online_cpu(i)
sum += cpu_rq(i)->nr_running;
return sum;
}
unsigned long nr_uninterruptible(void)
{
unsigned long i, sum = 0;
for_each_possible_cpu(i)
sum += cpu_rq(i)->nr_uninterruptible;
/*
* Since we read the counters lockless, it might be slightly
* inaccurate. Do not allow it to go below zero though:
*/
if (unlikely((long)sum < 0))
sum = 0;
return sum;
}
unsigned long long nr_context_switches(void)
{
int i;
unsigned long long sum = 0;
for_each_possible_cpu(i)
sum += cpu_rq(i)->nr_switches;
return sum;
}
unsigned long nr_iowait(void)
{
unsigned long i, sum = 0;
for_each_possible_cpu(i)
sum += atomic_read(&cpu_rq(i)->nr_iowait);
return sum;
}
unsigned long nr_iowait_cpu(int cpu)
{
struct rq *this = cpu_rq(cpu);
return atomic_read(&this->nr_iowait);
}
unsigned long this_cpu_load(void)
{
struct rq *this = this_rq();
return this->cpu_load[0];
}
/* Variables and functions for calc_load */
static atomic_long_t calc_load_tasks;
static unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun);
static long calc_load_fold_active(struct rq *this_rq)
{
long nr_active, delta = 0;
nr_active = this_rq->nr_running;
nr_active += (long) this_rq->nr_uninterruptible;
if (nr_active != this_rq->calc_load_active) {
delta = nr_active - this_rq->calc_load_active;
this_rq->calc_load_active = nr_active;
}
return delta;
}
#ifdef CONFIG_NO_HZ
/*
* For NO_HZ we delay the active fold to the next LOAD_FREQ update.
*
* When making the ILB scale, we should try to pull this in as well.
*/
static atomic_long_t calc_load_tasks_idle;
static void calc_load_account_idle(struct rq *this_rq)
{
long delta;
delta = calc_load_fold_active(this_rq);
if (delta)
atomic_long_add(delta, &calc_load_tasks_idle);
}
static long calc_load_fold_idle(void)
{
long delta = 0;
/*
* It's racy, but we don't care...
*/
if (atomic_long_read(&calc_load_tasks_idle))
delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
return delta;
}
#else
static void calc_load_account_idle(struct rq *this_rq)
{
}
static inline long calc_load_fold_idle(void)
{
return 0;
}
#endif
/**
* get_avenrun - get the load average array
* @loads: pointer to dest load array
* @offset: offset to add
* @shift: shift count to shift the result left
*
* These values are estimates at best, so no need for locking.
*/
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
loads[0] = (avenrun[0] + offset) << shift;
loads[1] = (avenrun[1] + offset) << shift;
loads[2] = (avenrun[2] + offset) << shift;
}
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
load *= exp;
load += active * (FIXED_1 - exp);
return load >> FSHIFT;
}
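/*
* A worked example, assuming the usual FSHIFT == 11 (FIXED_1 == 2048)
* and EXP_1 == 1884: with one runnable task (active == 2048) and a
* previous 1-minute estimate of 0, the new value is
* (0 * 1884 + 2048 * (2048 - 1884)) >> 11 == 164, i.e. about 0.08,
* so the displayed load climbs towards 1.0 by roughly 8% per update.
*/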
/*
* calc_global_load - update the avenrun load estimates 10 ticks after the
* CPUs have updated calc_load_tasks.
*/
void calc_global_load(void)
{
unsigned long upd = calc_load_update + 10;
long active;
if (time_before(jiffies, upd))
return;
active = atomic_long_read(&calc_load_tasks);
active = active > 0 ? active * FIXED_1 : 0;
avenrun[0] = calc_load(avenrun[0], EXP_1, active);
avenrun[1] = calc_load(avenrun[1], EXP_5, active);
avenrun[2] = calc_load(avenrun[2], EXP_15, active);
calc_load_update += LOAD_FREQ;
}
/*
* Called from update_cpu_load() to periodically update this CPU's
* active count.
*/
static void calc_load_account_active(struct rq *this_rq)
{
long delta;
if (time_before(jiffies, this_rq->calc_load_update))
return;
delta = calc_load_fold_active(this_rq);
delta += calc_load_fold_idle();
if (delta)
atomic_long_add(delta, &calc_load_tasks);
this_rq->calc_load_update += LOAD_FREQ;
}
/*
* Update rq->cpu_load[] statistics. This function is usually called every
* scheduler tick (TICK_NSEC).
*/
static void update_cpu_load(struct rq *this_rq)
{
unsigned long this_load = this_rq->load.weight;
int i, scale;
this_rq->nr_load_updates++;
/* Update our load: */
for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
unsigned long old_load, new_load;
/* scale is effectively 1 << i now, and >> i divides by scale */
old_load = this_rq->cpu_load[i];
new_load = this_load;
/*
* Round up the averaging division if load is increasing. This
* prevents us from getting stuck on 9 if the load is 10, for
* example.
*/
if (new_load > old_load)
new_load += scale-1;
this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
}
calc_load_account_active(this_rq);
}
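/*
* Example of the decay above: for i == 1 (scale == 2) the update is
* (old + new) / 2, for i == 2 (scale == 4) it is (3*old + new) / 4,
* so higher indices track longer-term history. The round-up matters
* on the way up: with old == 9 and this_load == 10 at i == 1, the
* result is (9 + 10 + 1) >> 1 == 10 instead of sticking at 9.
*/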
#ifdef CONFIG_SMP
/*
* sched_exec - execve() is a valuable balancing opportunity, because at
* this point the task has the smallest effective memory and cache footprint.
*/
void sched_exec(void)
{
struct task_struct *p = current;
unsigned long flags;
struct rq *rq;
int dest_cpu;
rq = task_rq_lock(p, &flags);
dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
if (dest_cpu == smp_processor_id())
goto unlock;
/*
* select_task_rq() can race against ->cpus_allowed
*/
if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) {
struct migration_arg arg = { p, dest_cpu };
task_rq_unlock(rq, &flags);
stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
return;
}
unlock:
task_rq_unlock(rq, &flags);
}
#endif
DEFINE_PER_CPU(struct kernel_stat, kstat);
EXPORT_PER_CPU_SYMBOL(kstat);
/*
* Return any ns on the sched_clock that have not yet been accounted
* to @p, in case that task is currently running.
*
* Called with task_rq_lock() held on @rq.
*/
static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
{
u64 ns = 0;
if (task_current(rq, p)) {
update_rq_clock(rq);
ns = rq->clock - p->se.exec_start;
if ((s64)ns < 0)
ns = 0;
}
return ns;
}
unsigned long long task_delta_exec(struct task_struct *p)
{
unsigned long flags;
struct rq *rq;
u64 ns = 0;
rq = task_rq_lock(p, &flags);
ns = do_task_delta_exec(p, rq);
task_rq_unlock(rq, &flags);
return ns;
}
/*
* Return accounted runtime for the task.
* In case the task is currently running, return the runtime plus
* current's pending runtime that has not been accounted yet.
*/
unsigned long long task_sched_runtime(struct task_struct *p)
{
unsigned long flags;
struct rq *rq;
u64 ns = 0;
rq = task_rq_lock(p, &flags);
ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
task_rq_unlock(rq, &flags);
return ns;
}
/*
* Return sum_exec_runtime for the thread group.
* In case the task is currently running, return the sum plus current's
* pending runtime that have not been accounted yet.
*
* Note that the thread group might have other running tasks as well,
* so the return value does not include other pending runtime that other
* running tasks might have.
*/
unsigned long long thread_group_sched_runtime(struct task_struct *p)
{
struct task_cputime totals;
unsigned long flags;
struct rq *rq;
u64 ns;
rq = task_rq_lock(p, &flags);
thread_group_cputime(p, &totals);
ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
task_rq_unlock(rq, &flags);
return ns;
}
/*
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in user space since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/
void account_user_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp;
/* Add user time to process. */
p->utime = cputime_add(p->utime, cputime);
p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
account_group_user_time(p, cputime);
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
if (TASK_NICE(p) > 0)
cpustat->nice = cputime64_add(cpustat->nice, tmp);
else
cpustat->user = cputime64_add(cpustat->user, tmp);
cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
/* Account for user time used */
acct_update_integrals(p);
}
/*
* Account guest cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in virtual machine since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/
static void account_guest_time(struct task_struct *p, cputime_t cputime,
cputime_t cputime_scaled)
{
cputime64_t tmp;
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
tmp = cputime_to_cputime64(cputime);
/* Add guest time to process. */
p->utime = cputime_add(p->utime, cputime);
p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
account_group_user_time(p, cputime);
p->gtime = cputime_add(p->gtime, cputime);
/* Add guest time to cpustat. */
if (TASK_NICE(p) > 0) {
cpustat->nice = cputime64_add(cpustat->nice, tmp);
cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
} else {
cpustat->user = cputime64_add(cpustat->user, tmp);
cpustat->guest = cputime64_add(cpustat->guest, tmp);
}
}
/*
* Account system cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
* @cputime: the cpu time spent in kernel space since the last update
* @cputime_scaled: cputime scaled by cpu frequency
*/
void account_system_time(struct task_struct *p, int hardirq_offset,
cputime_t cputime, cputime_t cputime_scaled)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp;
if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
account_guest_time(p, cputime, cputime_scaled);
return;
}
/* Add system time to process. */
p->stime = cputime_add(p->stime, cputime);
p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
account_group_system_time(p, cputime);
/* Add system time to cpustat. */
tmp = cputime_to_cputime64(cputime);
if (hardirq_count() - hardirq_offset)
cpustat->irq = cputime64_add(cpustat->irq, tmp);
else if (softirq_count())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
else
cpustat->system = cputime64_add(cpustat->system, tmp);
cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
/* Account for system time used */
acct_update_integrals(p);
}
/*
* Account for involuntary wait time.
* @cputime: the cpu time spent in involuntary wait
*/
void account_steal_time(cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t cputime64 = cputime_to_cputime64(cputime);
cpustat->steal = cputime64_add(cpustat->steal, cputime64);
}
/*
* Account for idle time.
* @cputime: the cpu time spent in idle wait
*/
void account_idle_time(cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t cputime64 = cputime_to_cputime64(cputime);
struct rq *rq = this_rq();
if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
else
cpustat->idle = cputime64_add(cpustat->idle, cputime64);
}
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
/*
* Account a single tick of cpu time.
* @p: the process that the cpu time gets accounted to
* @user_tick: indicates if the tick is a user or a system tick
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
struct rq *rq = this_rq();
if (user_tick)
account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
one_jiffy_scaled);
else
account_idle_time(cputime_one_jiffy);
}
/*
* Account multiple ticks of steal time.
* @ticks: number of stolen ticks
*/
void account_steal_ticks(unsigned long ticks)
{
account_steal_time(jiffies_to_cputime(ticks));
}
/*
* Account multiple ticks of idle time.
* @ticks: number of idle ticks
*/
void account_idle_ticks(unsigned long ticks)
{
account_idle_time(jiffies_to_cputime(ticks));
}
#endif
/*
* Use precise platform statistics if available:
*/
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
*ut = p->utime;
*st = p->stime;
}
void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
struct task_cputime cputime;
thread_group_cputime(p, &cputime);
*ut = cputime.utime;
*st = cputime.stime;
}
#else
#ifndef nsecs_to_cputime
# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
#endif
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
/*
* Use CFS's precise accounting:
*/
rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
if (total) {
u64 temp = rtime;
temp *= utime;
do_div(temp, total);
utime = (cputime_t)temp;
} else
utime = rtime;
/*
* Compare with previous values, to keep monotonicity:
*/
p->prev_utime = max(p->prev_utime, utime);
p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
*ut = p->prev_utime;
*st = p->prev_stime;
}
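/*
* A worked example of the split above, ignoring the monotonicity
* clamping: with rtime == 100, utime == 30 and stime == 90
* (total == 120), the precise runtime is divided as
* ut == 100 * 30 / 120 == 25 and st == 100 - 25 == 75, so utime and
* stime always sum to the CFS-accounted runtime.
*/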
/*
* Must be called with siglock held.
*/
void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
struct signal_struct *sig = p->signal;
struct task_cputime cputime;
cputime_t rtime, utime, total;
thread_group_cputime(p, &cputime);
total = cputime_add(cputime.utime, cputime.stime);
rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
if (total) {
u64 temp = rtime;
temp *= cputime.utime;
do_div(temp, total);
utime = (cputime_t)temp;
} else
utime = rtime;
sig->prev_utime = max(sig->prev_utime, utime);
sig->prev_stime = max(sig->prev_stime,
cputime_sub(rtime, sig->prev_utime));
*ut = sig->prev_utime;
*st = sig->prev_stime;
}
#endif
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
*
* It also gets called by the fork code, when changing the parent's
* timeslices.
*/
void scheduler_tick(void)
{
int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu);
struct task_struct *curr = rq->curr;
#ifdef CONFIG_LGE_DVFS
if(ds_control.on_dvs == 1){
ds_update_time_counter(cpu);
ds_parameter.entry_type = DS_ENTRY_TIMER_IRQ;
ds_parameter.prev_p = current;
ds_parameter.next_p = current;
do_dvs_suite(cpu);
if(ds_control.flag_run_dvs == 1){
if(per_cpu(ds_cpu_status, cpu).target_cpu_op_index !=
per_cpu(ds_cpu_status, cpu).current_cpu_op_index)
{
queue_work_on(cpu, dvs_suite_wq, &dvs_suite_work);
}
}
}
#endif // CONFIG_LGE_DVFS
sched_clock_tick();
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
update_cpu_load(rq);
curr->sched_class->task_tick(rq, curr, 0);
raw_spin_unlock(&rq->lock);
perf_event_task_tick(curr);
#ifdef CONFIG_SMP
rq->idle_at_tick = idle_cpu(cpu);
trigger_load_balance(rq, cpu);
#endif
}
notrace unsigned long get_parent_ip(unsigned long addr)
{
if (in_lock_functions(addr)) {
addr = CALLER_ADDR2;
if (in_lock_functions(addr))
addr = CALLER_ADDR3;
}
return addr;
}
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_PREEMPT_TRACER))
void __kprobes add_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
*/
if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
return;
#endif
preempt_count() += val;
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Spinlock count overflowing soon?
*/
DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
PREEMPT_MASK - 10);
#endif
if (preempt_count() == val)
trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(add_preempt_count);
void __kprobes sub_preempt_count(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
*/
if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
return;
/*
* Is the spinlock portion underflowing?
*/
if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
!(preempt_count() & PREEMPT_MASK)))
return;
#endif
if (preempt_count() == val)
trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
preempt_count() -= val;
}
EXPORT_SYMBOL(sub_preempt_count);
#endif
/*
* Print scheduling while atomic bug:
*/
static noinline void __schedule_bug(struct task_struct *prev)
{
struct pt_regs *regs = get_irq_regs();
printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
prev->comm, prev->pid, preempt_count());
debug_show_held_locks(prev);
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
if (regs)
show_regs(regs);
else
dump_stack();
}
/*
* Various schedule()-time debugging checks and statistics:
*/
static inline void schedule_debug(struct task_struct *prev)
{
/*
* Test if we are atomic. Since do_exit() needs to call into
* schedule() atomically, we ignore that path for now.
* Otherwise, whine if we are scheduling when we should not be.
*/
if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
__schedule_bug(prev);
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
schedstat_inc(this_rq(), sched_count);
#ifdef CONFIG_SCHEDSTATS
if (unlikely(prev->lock_depth >= 0)) {
schedstat_inc(this_rq(), bkl_count);
schedstat_inc(prev, sched_info.bkl_count);
}
#endif
}
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
if (prev->se.on_rq)
update_rq_clock(rq);
rq->skip_clock_update = 0;
prev->sched_class->put_prev_task(rq, prev);
}
/*
* Pick up the highest-prio task:
*/
static inline struct task_struct *
pick_next_task(struct rq *rq)
{
const struct sched_class *class;
struct task_struct *p;
/*
* Optimization: we know that if all tasks are in
* the fair class we can call that function directly:
*/
if (likely(rq->nr_running == rq->cfs.nr_running)) {
p = fair_sched_class.pick_next_task(rq);
if (likely(p))
return p;
}
class = sched_class_highest;
for ( ; ; ) {
p = class->pick_next_task(rq);
if (p)
return p;
/*
* Will never be NULL as the idle class always
* returns a non-NULL p:
*/
class = class->next;
}
}
/*
* schedule() is the main scheduler function.
*/
asmlinkage void __sched schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
struct rq *rq;
int cpu;
#ifdef CONFIG_LGE_DVFS
int ds_cpu = smp_processor_id();
if(ds_control.on_dvs == 1)
{
per_cpu(ds_cpu_status, ds_cpu).cpu_mode = DS_CPU_MODE_SCHEDULE;
}
#endif // CONFIG_LGE_DVFS
need_resched:
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
rcu_note_context_switch(cpu);
prev = rq->curr;
switch_count = &prev->nivcsw;
release_kernel_lock(prev);
need_resched_nonpreemptible:
schedule_debug(prev);
if (sched_feat(HRTICK))
hrtick_clear(rq);
raw_spin_lock_irq(&rq->lock);
clear_tsk_need_resched(prev);
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev)))
prev->state = TASK_RUNNING;
else
deactivate_task(rq, prev, DEQUEUE_SLEEP);
switch_count = &prev->nvcsw;
}
pre_schedule(rq, prev);
if (unlikely(!rq->nr_running))
idle_balance(cpu, rq);
put_prev_task(rq, prev);
next = pick_next_task(rq);
if (likely(prev != next)) {
sched_info_switch(prev, next);
perf_event_task_sched_out(prev, next);
rq->nr_switches++;
rq->curr = next;
++*switch_count;
context_switch(rq, prev, next); /* unlocks the rq */
/*
* the context switch might have flipped the stack from under
* us, hence refresh the local variables.
*/
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
raw_spin_unlock_irq(&rq->lock);
post_schedule(rq);
if (unlikely(reacquire_kernel_lock(current) < 0)) {
prev = rq->curr;
switch_count = &prev->nivcsw;
goto need_resched_nonpreemptible;
}
preempt_enable_no_resched();
if (need_resched())
goto need_resched;
#ifdef CONFIG_LGE_DVFS
if(ds_control.on_dvs == 1)
{
if(next->pid == 0)
per_cpu(ds_cpu_status, ds_cpu).cpu_mode = DS_CPU_MODE_IDLE;
else
per_cpu(ds_cpu_status, ds_cpu).cpu_mode = DS_CPU_MODE_TASK;
}
#endif // CONFIG_LGE_DVFS
}
EXPORT_SYMBOL(schedule);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
* Look out! "owner" is an entirely speculative pointer
* access and not reliable.
*/
int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
{
unsigned int cpu;
struct rq *rq;
if (!sched_feat(OWNER_SPIN))
return 0;
#ifdef CONFIG_DEBUG_PAGEALLOC
/*
* Need to access the cpu field knowing that
* DEBUG_PAGEALLOC could have unmapped it if
* the mutex owner just released it and exited.
*/
if (probe_kernel_address(&owner->cpu, cpu))
return 0;
#else
cpu = owner->cpu;
#endif
/*
* Even if the access succeeded (likely case),
* the cpu field may no longer be valid.
*/
if (cpu >= nr_cpumask_bits)
return 0;
/*
* We need to validate that we can do a
* get_cpu() and that we have the percpu area.
*/
if (!cpu_online(cpu))
return 0;
rq = cpu_rq(cpu);
for (;;) {
/*
* Owner changed, break to re-assess state.
*/
if (lock->owner != owner) {
/*
* If the lock has switched to a different owner,
* we likely have heavy contention. Return 0 to quit
* optimistic spinning and not contend further:
*/
if (lock->owner)
return 0;
break;
}
/*
* Is that owner really running on that cpu?
*/
if (task_thread_info(rq->curr) != owner || need_resched())
return 0;
cpu_relax();
}
return 1;
}
#endif
#ifdef CONFIG_PREEMPT
/*
* This is the entry point to schedule() for in-kernel preemption off
* of preempt_enable. Kernel preemption off of the return-from-interrupt
* path goes through preempt_schedule_irq() below instead, which calls
* schedule() directly.
*/
asmlinkage void __sched preempt_schedule(void)
{
struct thread_info *ti = current_thread_info();
/*
* If there is a non-zero preempt_count or interrupts are disabled,
* we do not want to preempt the current task. Just return..
*/
if (likely(ti->preempt_count || irqs_disabled()))
return;
do {
add_preempt_count(PREEMPT_ACTIVE);
schedule();
sub_preempt_count(PREEMPT_ACTIVE);
/*
* Check again in case we missed a preemption opportunity
* between schedule and now.
*/
barrier();
} while (need_resched());
}
EXPORT_SYMBOL(preempt_schedule);
/*
* This is the entry point to schedule() from kernel preemption off of
* irq context.
* Note that this is called and returns with irqs disabled; this
* protects us against recursive calls from irq context.
*/
asmlinkage void __sched preempt_schedule_irq(void)
{
struct thread_info *ti = current_thread_info();
/* Catch callers which need to be fixed */
BUG_ON(ti->preempt_count || !irqs_disabled());
do {
add_preempt_count(PREEMPT_ACTIVE);
local_irq_enable();
schedule();
local_irq_disable();
sub_preempt_count(PREEMPT_ACTIVE);
/*
* Check again in case we missed a preemption opportunity
* between schedule and now.
*/
barrier();
} while (need_resched());
}
#endif /* CONFIG_PREEMPT */
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
void *key)
{
return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);
/*
* The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
* wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
* number) then we wake all the non-exclusive tasks and one exclusive task.
*
* There are circumstances in which we can try to wake a task which has already
* started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, int wake_flags, void *key)
{
wait_queue_t *curr, *next;
list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
unsigned flags = curr->flags;
if (curr->func(curr, mode, wake_flags, key) &&
(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
break;
}
}
/**
* __wake_up - wake up threads blocked on a waitqueue.
* @q: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: is directly passed to the wakeup function
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
void __wake_up(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
__wake_up_common(q, mode, nr_exclusive, 0, key);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(__wake_up);
/*
* Same as __wake_up but called with the spinlock in wait_queue_head_t held.
*/
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
{
__wake_up_common(q, mode, 1, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
__wake_up_common(q, mode, 1, 0, key);
}
/**
* __wake_up_sync_key - wake up threads blocked on a waitqueue.
* @q: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: opaque value to be passed to wakeup targets
*
 * The sync wakeup differs in that the waker knows that it will schedule
* away soon, so while the target thread will be woken up, it will not
* be migrated to another CPU - ie. the two threads are 'synchronized'
* with each other. This can prevent needless bouncing between CPUs.
*
* On UP it can prevent extra preemption.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
{
unsigned long flags;
int wake_flags = WF_SYNC;
if (unlikely(!q))
return;
if (unlikely(!nr_exclusive))
wake_flags = 0;
spin_lock_irqsave(&q->lock, flags);
__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
/*
* __wake_up_sync - see __wake_up_sync_key()
*/
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
__wake_up_sync_key(q, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
/**
* complete: - signals a single thread waiting on this completion
* @x: holds the state of this particular completion
*
* This will wake up a single thread waiting on this completion. Threads will be
* awakened in the same order in which they were queued.
*
* See also complete_all(), wait_for_completion() and related routines.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
void complete(struct completion *x)
{
unsigned long flags;
spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete);
/**
* complete_all: - signals all threads waiting on this completion
* @x: holds the state of this particular completion
*
* This will wake up all threads waiting on this particular completion event.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
*/
void complete_all(struct completion *x)
{
unsigned long flags;
spin_lock_irqsave(&x->wait.lock, flags);
x->done += UINT_MAX/2;
__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
spin_unlock_irqrestore(&x->wait.lock, flags);
}
EXPORT_SYMBOL(complete_all);
static inline long __sched
do_wait_for_common(struct completion *x, long timeout, int state)
{
if (!x->done) {
DECLARE_WAITQUEUE(wait, current);
__add_wait_queue_tail_exclusive(&x->wait, &wait);
do {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
}
__set_current_state(state);
spin_unlock_irq(&x->wait.lock);
timeout = schedule_timeout(timeout);
spin_lock_irq(&x->wait.lock);
} while (!x->done && timeout);
__remove_wait_queue(&x->wait, &wait);
if (!x->done)
return timeout;
}
x->done--;
return timeout ?: 1;
}
static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
might_sleep();
spin_lock_irq(&x->wait.lock);
timeout = do_wait_for_common(x, timeout, state);
spin_unlock_irq(&x->wait.lock);
return timeout;
}
/**
* wait_for_completion: - waits for completion of a task
* @x: holds the state of this particular completion
*
* This waits to be signaled for completion of a specific task. It is NOT
* interruptible and there is no timeout.
*
* See also similar routines (i.e. wait_for_completion_timeout()) with timeout
* and interrupt capability. Also see complete().
*/
void __sched wait_for_completion(struct completion *x)
{
wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion);
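/*
 * Usage sketch (illustrative only, not part of this file; my_done,
 * my_worker and do_work are hypothetical names): a typical pairing of
 * complete() with wait_for_completion() looks like
 *
 *	static DECLARE_COMPLETION(my_done);
 *
 *	static int my_worker(void *unused)
 *	{
 *		do_work();
 *		complete(&my_done);
 *		return 0;
 *	}
 *
 * while the waiting side simply calls wait_for_completion(&my_done)
 * and blocks, uninterruptibly and without a timeout, until the worker
 * has signalled.
 */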
/**
* wait_for_completion_timeout: - waits for completion of a task (w/timeout)
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
/*
 * The unsigned long to signed long conversion below may yield a
 * negative value, so clamp the timeout at LONG_MAX.
 */
if (timeout >= LONG_MAX)
	timeout = LONG_MAX;
return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_timeout);
/**
* wait_for_completion_interruptible: - waits for completion of a task (w/intr)
* @x: holds the state of this particular completion
*
* This waits for completion of a specific task to be signaled. It is
* interruptible.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
if (t == -ERESTARTSYS)
return t;
return 0;
}
EXPORT_SYMBOL(wait_for_completion_interruptible);
/**
* wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
*/
unsigned long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
unsigned long timeout)
{
return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
/**
* wait_for_completion_killable: - waits for completion of a task (killable)
* @x: holds the state of this particular completion
*
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
if (t == -ERESTARTSYS)
return t;
return 0;
}
EXPORT_SYMBOL(wait_for_completion_killable);
/**
* wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
* @x: holds the state of this particular completion
* @timeout: timeout value in jiffies
*
* This waits for either a completion of a specific task to be
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
*/
unsigned long __sched
wait_for_completion_killable_timeout(struct completion *x,
unsigned long timeout)
{
return wait_for_common(x, timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL(wait_for_completion_killable_timeout);
/**
* try_wait_for_completion - try to decrement a completion without blocking
* @x: completion structure
*
* Returns: 0 if a decrement cannot be done without blocking
* 1 if a decrement succeeded.
*
* If a completion is being used as a counting completion,
* attempt to decrement the counter without blocking. This
* enables us to avoid waiting if the resource the completion
* is protecting is not available.
*/
bool try_wait_for_completion(struct completion *x)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
else
x->done--;
spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
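/*
 * Usage sketch (illustrative; resource_ready is a hypothetical
 * completion): when a completion counts available resources, a fast
 * path can try to claim one without sleeping and fall back to
 * blocking only on failure:
 *
 *	if (!try_wait_for_completion(&resource_ready))
 *		wait_for_completion(&resource_ready);
 */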
/**
* completion_done - Test to see if a completion has any waiters
* @x: completion structure
*
* Returns: 0 if there are waiters (wait_for_completion() in progress)
* 1 if there are no waiters.
*
*/
bool completion_done(struct completion *x)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(completion_done);
static long __sched
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
unsigned long flags;
wait_queue_t wait;
init_waitqueue_entry(&wait, current);
__set_current_state(state);
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue(q, &wait);
spin_unlock(&q->lock);
timeout = schedule_timeout(timeout);
spin_lock_irq(&q->lock);
__remove_wait_queue(q, &wait);
spin_unlock_irqrestore(&q->lock, flags);
return timeout;
}
void __sched interruptible_sleep_on(wait_queue_head_t *q)
{
sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(interruptible_sleep_on);
long __sched
interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(interruptible_sleep_on_timeout);
void __sched sleep_on(wait_queue_head_t *q)
{
sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(sleep_on);
long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
}
EXPORT_SYMBOL(sleep_on_timeout);
#ifdef CONFIG_RT_MUTEXES
/*
* rt_mutex_setprio - set the current priority of a task
* @p: task
* @prio: prio value (kernel-internal form)
*
* This function changes the 'effective' priority of a task. It does
* not touch ->normal_prio like __setscheduler().
*
* Used by the rt_mutex code to implement priority inheritance logic.
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
unsigned long flags;
int oldprio, on_rq, running;
struct rq *rq;
const struct sched_class *prev_class;
BUG_ON(prio < 0 || prio > MAX_PRIO);
rq = task_rq_lock(p, &flags);
oldprio = p->prio;
prev_class = p->sched_class;
on_rq = p->se.on_rq;
running = task_current(rq, p);
if (on_rq)
dequeue_task(rq, p, 0);
if (running)
p->sched_class->put_prev_task(rq, p);
if (rt_prio(prio))
p->sched_class = &rt_sched_class;
else
p->sched_class = &fair_sched_class;
p->prio = prio;
if (running)
p->sched_class->set_curr_task(rq);
if (on_rq) {
enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
check_class_changed(rq, p, prev_class, oldprio, running);
}
task_rq_unlock(rq, &flags);
}
#endif
void set_user_nice(struct task_struct *p, long nice)
{
int old_prio, delta, on_rq;
unsigned long flags;
struct rq *rq;
if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
return;
/*
* We have to be careful, if called from sys_setpriority(),
* the task might be in the middle of scheduling on another CPU.
*/
rq = task_rq_lock(p, &flags);
/*
 * The RT priorities are set via sched_setscheduler(), but we still
 * allow the 'normal' nice value to be set - but as expected
 * it won't have any effect on scheduling while the task has an
 * RT policy (SCHED_FIFO/SCHED_RR):
 */
if (task_has_rt_policy(p)) {
p->static_prio = NICE_TO_PRIO(nice);
goto out_unlock;
}
on_rq = p->se.on_rq;
if (on_rq)
dequeue_task(rq, p, 0);
p->static_prio = NICE_TO_PRIO(nice);
set_load_weight(p);
old_prio = p->prio;
p->prio = effective_prio(p);
delta = p->prio - old_prio;
if (on_rq) {
enqueue_task(rq, p, 0);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
*/
if (delta < 0 || (delta > 0 && task_running(rq, p)))
resched_task(rq->curr);
}
out_unlock:
task_rq_unlock(rq, &flags);
}
EXPORT_SYMBOL(set_user_nice);
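/*
 * Usage sketch (illustrative; my_fn and the thread name are
 * hypothetical): de-prioritize a background kernel thread right after
 * creating it, before it first runs:
 *
 *	struct task_struct *tsk = kthread_create(my_fn, NULL, "my_bg");
 *
 *	if (!IS_ERR(tsk)) {
 *		set_user_nice(tsk, 19);
 *		wake_up_process(tsk);
 *	}
 */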
/*
* can_nice - check if a task can reduce its nice value
* @p: task
* @nice: nice value
*/
int can_nice(const struct task_struct *p, const int nice)
{
/* convert nice value [19,-20] to rlimit style value [1,40] */
int nice_rlim = 20 - nice;
return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
capable(CAP_SYS_NICE));
}
#ifdef __ARCH_WANT_SYS_NICE
/*
* sys_nice - change the priority of the current process.
* @increment: priority increment
*
* sys_setpriority is a more generic, but much slower function that
* does similar things.
*/
SYSCALL_DEFINE1(nice, int, increment)
{
long nice, retval;
/*
* Setpriority might change our priority at the same moment.
* We don't have to worry. Conceptually one call occurs first
* and we have a single winner.
*/
if (increment < -40)
increment = -40;
if (increment > 40)
increment = 40;
nice = TASK_NICE(current) + increment;
if (nice < -20)
nice = -20;
if (nice > 19)
nice = 19;
if (increment < 0 && !can_nice(current, nice))
return -EPERM;
retval = security_task_setnice(current, nice);
if (retval)
return retval;
set_user_nice(current, nice);
return 0;
}
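/*
 * Worked example of the clamping above: for a task at nice 10, a call
 * of nice(100) first clamps the increment to +40, computes 10 + 40 = 50,
 * then clamps the result into the valid range, leaving a final nice
 * value of 19.
 */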
#endif
/**
* task_prio - return the priority value of a given task.
* @p: the task in question.
*
* This is the priority value as seen by users in /proc.
 * RT tasks map to the range [-100, -1]; normal tasks map to
 * [0, 39], with a nice value of 0 showing up as 20.
*/
int task_prio(const struct task_struct *p)
{
return p->prio - MAX_RT_PRIO;
}
/**
* task_nice - return the nice value of a given task.
* @p: the task in question.
*/
int task_nice(const struct task_struct *p)
{
return TASK_NICE(p);
}
EXPORT_SYMBOL(task_nice);
/**
* idle_cpu - is a given cpu idle currently?
* @cpu: the processor in question.
*/
int idle_cpu(int cpu)
{
return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}
/**
* idle_task - return the idle task for a given cpu.
* @cpu: the processor in question.
*/
struct task_struct *idle_task(int cpu)
{
return cpu_rq(cpu)->idle;
}
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
*/
static struct task_struct *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_vpid(pid) : current;
}
/* Actually do priority change: must hold rq lock. */
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
{
BUG_ON(p->se.on_rq);
p->policy = policy;
p->rt_priority = prio;
p->normal_prio = normal_prio(p);
/* we are holding p->pi_lock already */
p->prio = rt_mutex_getprio(p);
if (rt_prio(p->prio))
p->sched_class = &rt_sched_class;
else
p->sched_class = &fair_sched_class;
set_load_weight(p);
}
/*
 * check whether the target process has a UID that matches the current process's
*/
static bool check_same_owner(struct task_struct *p)
{
const struct cred *cred = current_cred(), *pcred;
bool match;
rcu_read_lock();
pcred = __task_cred(p);
match = (cred->euid == pcred->euid ||
cred->euid == pcred->uid);
rcu_read_unlock();
return match;
}
static int __sched_setscheduler(struct task_struct *p, int policy,
struct sched_param *param, bool user)
{
int retval, oldprio, oldpolicy = -1, on_rq, running;
unsigned long flags;
const struct sched_class *prev_class;
struct rq *rq;
int reset_on_fork;
/* may grab non-irq protected spin_locks */
BUG_ON(in_interrupt());
recheck:
/* double check policy once rq lock held */
if (policy < 0) {
reset_on_fork = p->sched_reset_on_fork;
policy = oldpolicy = p->policy;
} else {
reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
policy &= ~SCHED_RESET_ON_FORK;
if (policy != SCHED_FIFO && policy != SCHED_RR &&
policy != SCHED_NORMAL && policy != SCHED_BATCH &&
policy != SCHED_IDLE)
return -EINVAL;
}
/*
* Valid priorities for SCHED_FIFO and SCHED_RR are
* 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
* SCHED_BATCH and SCHED_IDLE is 0.
*/
if (param->sched_priority < 0 ||
(p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
(!p->mm && param->sched_priority > MAX_RT_PRIO-1))
return -EINVAL;
if (rt_policy(policy) != (param->sched_priority != 0))
return -EINVAL;
/*
* Allow unprivileged RT tasks to decrease priority:
*/
if (user && !capable(CAP_SYS_NICE)) {
if (rt_policy(policy)) {
unsigned long rlim_rtprio;
if (!lock_task_sighand(p, &flags))
return -ESRCH;
rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
unlock_task_sighand(p, &flags);
/* can't set/change the rt policy */
if (policy != p->policy && !rlim_rtprio)
return -EPERM;
/* can't increase priority */
if (param->sched_priority > p->rt_priority &&
param->sched_priority > rlim_rtprio)
return -EPERM;
}
/*
 * Like positive nice levels, don't allow tasks to
* move out of SCHED_IDLE either:
*/
if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
return -EPERM;
/* can't change other user's priorities */
if (!check_same_owner(p))
return -EPERM;
/* Normal users shall not reset the sched_reset_on_fork flag */
if (p->sched_reset_on_fork && !reset_on_fork)
return -EPERM;
}
if (user) {
retval = security_task_setscheduler(p, policy, param);
if (retval)
return retval;
}
/*
* make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
/*
 * To be able to change p->policy safely, the appropriate
* runqueue lock must be held.
*/
rq = __task_rq_lock(p);
#ifdef CONFIG_RT_GROUP_SCHED
if (user) {
/*
* Do not allow realtime tasks into groups that have no runtime
* assigned.
*/
if (rt_bandwidth_enabled() && rt_policy(policy) &&
task_group(p)->rt_bandwidth.rt_runtime == 0) {
__task_rq_unlock(rq);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
return -EPERM;
}
}
#endif
/* recheck policy now with rq lock held */
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
__task_rq_unlock(rq);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
on_rq = p->se.on_rq;
running = task_current(rq, p);
if (on_rq)
deactivate_task(rq, p, 0);
if (running)
p->sched_class->put_prev_task(rq, p);
p->sched_reset_on_fork = reset_on_fork;
oldprio = p->prio;
prev_class = p->sched_class;
__setscheduler(rq, p, policy, param->sched_priority);
if (running)
p->sched_class->set_curr_task(rq);
if (on_rq) {
activate_task(rq, p, 0);
check_class_changed(rq, p, prev_class, oldprio, running);
}
__task_rq_unlock(rq);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
rt_mutex_adjust_pi(p);
return 0;
}
/**
* sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
* @p: the task in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
*
 * NOTE that the task may already be dead.
*/
int sched_setscheduler(struct task_struct *p, int policy,
struct sched_param *param)
{
return __sched_setscheduler(p, policy, param, true);
}
EXPORT_SYMBOL_GPL(sched_setscheduler);
/**
* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
* @p: the task in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
*
* Just like sched_setscheduler, only don't bother checking if the
* current context has permission. For example, this is needed in
* stop_machine(): we create temporary high priority worker threads,
* but our caller might not have that capability.
*/
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
struct sched_param *param)
{
return __sched_setscheduler(p, policy, param, false);
}
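/*
 * Usage sketch (illustrative; tsk is a hypothetical kernel-thread
 * pointer): promote a worker to a real-time policy without a
 * permission check, much as stop_machine()-style code does:
 *
 *	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 *
 *	sched_setscheduler_nocheck(tsk, SCHED_FIFO, &param);
 */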
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
struct sched_param lparam;
struct task_struct *p;
int retval;
if (!param || pid < 0)
return -EINVAL;
if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
return -EFAULT;
rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
if (p != NULL)
retval = sched_setscheduler(p, policy, &lparam);
rcu_read_unlock();
return retval;
}
/**
* sys_sched_setscheduler - set/change the scheduler policy and RT priority
* @pid: the pid in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
*/
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
struct sched_param __user *, param)
{
/* negative values for policy are not valid */
if (policy < 0)
return -EINVAL;
return do_sched_setscheduler(pid, policy, param);
}
/**
* sys_sched_setparam - set/change the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the new RT priority.
*/
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
return do_sched_setscheduler(pid, -1, param);
}
/**
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
*/
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
struct task_struct *p;
int retval;
if (pid < 0)
return -EINVAL;
retval = -ESRCH;
rcu_read_lock();
p = find_process_by_pid(pid);
if (p) {
retval = security_task_getscheduler(p);
if (!retval)
retval = p->policy
| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
}
rcu_read_unlock();
return retval;
}
/**
* sys_sched_getparam - get the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the RT priority.
*/
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
struct sched_param lp;
struct task_struct *p;
int retval;
if (!param || pid < 0)
return -EINVAL;
rcu_read_lock();
p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
lp.sched_priority = p->rt_priority;
rcu_read_unlock();
/*
 * This one might sleep, so we cannot do it with a spinlock held ...
*/
retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
return retval;
out_unlock:
rcu_read_unlock();
return retval;
}
long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
cpumask_var_t cpus_allowed, new_mask;
struct task_struct *p;
int retval;
get_online_cpus();
rcu_read_lock();
p = find_process_by_pid(pid);
if (!p) {
rcu_read_unlock();
put_online_cpus();
return -ESRCH;
}
/* Prevent p going away */
get_task_struct(p);
rcu_read_unlock();
if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_put_task;
}
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_free_cpus_allowed;
}
retval = -EPERM;
if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
retval = security_task_setscheduler(p, 0, NULL);
if (retval)
goto out_unlock;
cpuset_cpus_allowed(p, cpus_allowed);
cpumask_and(new_mask, in_mask, cpus_allowed);
again:
retval = set_cpus_allowed_ptr(p, new_mask);
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
if (!cpumask_subset(new_mask, cpus_allowed)) {
/*
* We must have raced with a concurrent cpuset
* update. Just reset the cpus_allowed to the
* cpuset's cpus_allowed
*/
cpumask_copy(new_mask, cpus_allowed);
goto again;
}
}
out_unlock:
free_cpumask_var(new_mask);
out_free_cpus_allowed:
free_cpumask_var(cpus_allowed);
out_put_task:
put_task_struct(p);
put_online_cpus();
return retval;
}
static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
struct cpumask *new_mask)
{
if (len < cpumask_size())
cpumask_clear(new_mask);
else if (len > cpumask_size())
len = cpumask_size();
return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}
/**
* sys_sched_setaffinity - set the cpu affinity of a process
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to the new cpu mask
*/
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
{
cpumask_var_t new_mask;
int retval;
if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
return -ENOMEM;
retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
if (retval == 0)
retval = sched_setaffinity(pid, new_mask);
free_cpumask_var(new_mask);
return retval;
}
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
struct task_struct *p;
unsigned long flags;
struct rq *rq;
int retval;
get_online_cpus();
rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
rq = task_rq_lock(p, &flags);
cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
task_rq_unlock(rq, &flags);
out_unlock:
rcu_read_unlock();
put_online_cpus();
return retval;
}
/**
* sys_sched_getaffinity - get the cpu affinity of a process
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to hold the current cpu mask
*/
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
{
int ret;
cpumask_var_t mask;
if ((len * BITS_PER_BYTE) < nr_cpu_ids)
return -EINVAL;
if (len & (sizeof(unsigned long)-1))
return -EINVAL;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
if (ret == 0) {
size_t retlen = min_t(size_t, len, cpumask_size());
if (copy_to_user(user_mask_ptr, mask, retlen))
ret = -EFAULT;
else
ret = retlen;
}
free_cpumask_var(mask);
return ret;
}
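/*
 * Note for callers: on success this syscall returns the number of bytes
 * copied out, not zero; the glibc wrapper hides this by returning 0.  A
 * raw invocation (a sketch assuming a userspace with syscall(2)) is
 *
 *	unsigned long mask[1024 / (8 * sizeof(long))];
 *	long n = syscall(__NR_sched_getaffinity, 0, sizeof(mask), mask);
 *
 * where a positive n is the size in bytes of the kernel's cpumask.
 */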
/**
* sys_sched_yield - yield the current processor to other threads.
*
* This function yields the current CPU to other tasks. If there are no
* other threads running on this CPU then this function will return.
*/
SYSCALL_DEFINE0(sched_yield)
{
struct rq *rq = this_rq_lock();
schedstat_inc(rq, yld_count);
current->sched_class->yield_task(rq);
/*
* Since we are going to call schedule() anyway, there's
* no need to preempt or enable interrupts:
*/
__release(rq->lock);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
do_raw_spin_unlock(&rq->lock);
preempt_enable_no_resched();
schedule();
return 0;
}
static inline int should_resched(void)
{
return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
}
static void __cond_resched(void)
{
add_preempt_count(PREEMPT_ACTIVE);
schedule();
sub_preempt_count(PREEMPT_ACTIVE);
}
int __sched _cond_resched(void)
{
if (should_resched()) {
__cond_resched();
return 1;
}
return 0;
}
EXPORT_SYMBOL(_cond_resched);
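/*
 * Usage sketch (illustrative; process_item and nr_items are
 * hypothetical): long-running kernel loops call cond_resched() to
 * voluntarily yield when a reschedule is pending, which keeps latency
 * down on non-preemptible kernels:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */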
/*
* __cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
*
* This works OK both with and without CONFIG_PREEMPT. We do strange low-level
* operations here to prevent schedule() from being called twice (once via
* spin_unlock(), once by hand).
*/
int __cond_resched_lock(spinlock_t *lock)
{
int resched = should_resched();
int ret = 0;
lockdep_assert_held(lock);
if (spin_needbreak(lock) || resched) {
spin_unlock(lock);
if (resched)
__cond_resched();
else
cpu_relax();
ret = 1;
spin_lock(lock);
}
return ret;
}
EXPORT_SYMBOL(__cond_resched_lock);
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
if (should_resched()) {
local_bh_enable();
__cond_resched();
local_bh_disable();
return 1;
}
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
/**
* yield - yield the current processor to other threads.
*
* This is a shortcut for kernel-space yielding - it marks the
* thread runnable and calls sys_sched_yield().
*/
void __sched yield(void)
{
set_current_state(TASK_RUNNING);
sys_sched_yield();
}
EXPORT_SYMBOL(yield);
/*
* This task is about to go to sleep on IO. Increment rq->nr_iowait so
* that process accounting knows that this is a task in IO wait state.
*/
void __sched io_schedule(void)
{
struct rq *rq = raw_rq();
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
current->in_iowait = 1;
schedule();
current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
}
EXPORT_SYMBOL(io_schedule);
long __sched io_schedule_timeout(long timeout)
{
struct rq *rq = raw_rq();
long ret;
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
current->in_iowait = 1;
ret = schedule_timeout(timeout);
current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
return ret;
}
/**
* sys_sched_get_priority_max - return maximum RT priority.
* @policy: scheduling class.
*
* this syscall returns the maximum rt_priority that can be used
* by a given scheduling class.
*/
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
int ret = -EINVAL;
switch (policy) {
case SCHED_FIFO:
case SCHED_RR:
ret = MAX_USER_RT_PRIO-1;
break;
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
ret = 0;
break;
}
return ret;
}
/**
* sys_sched_get_priority_min - return minimum RT priority.
* @policy: scheduling class.
*
* this syscall returns the minimum rt_priority that can be used
* by a given scheduling class.
*/
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
int ret = -EINVAL;
switch (policy) {
case SCHED_FIFO:
case SCHED_RR:
ret = 1;
break;
case SCHED_NORMAL:
case SCHED_BATCH:
case SCHED_IDLE:
ret = 0;
}
return ret;
}
/**
* sys_sched_rr_get_interval - return the default timeslice of a process.
* @pid: pid of the process.
* @interval: userspace pointer to the timeslice value.
*
* this syscall writes the default timeslice value of a given process
* into the user-space timespec buffer. A value of '0' means infinity.
*/
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
struct timespec __user *, interval)
{
struct task_struct *p;
unsigned int time_slice;
unsigned long flags;
struct rq *rq;
int retval;
struct timespec t;
if (pid < 0)
return -EINVAL;
retval = -ESRCH;
rcu_read_lock();
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
rq = task_rq_lock(p, &flags);
time_slice = p->sched_class->get_rr_interval(rq, p);
task_rq_unlock(rq, &flags);
rcu_read_unlock();
jiffies_to_timespec(time_slice, &t);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
return retval;
out_unlock:
rcu_read_unlock();
return retval;
}
static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
void sched_show_task(struct task_struct *p)
{
unsigned long free = 0;
unsigned state;
state = p->state ? __ffs(p->state) + 1 : 0;
printk(KERN_INFO "%-15.15s %c", p->comm,
state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
if (state == TASK_RUNNING)
printk(KERN_CONT " running ");
else
printk(KERN_CONT " %08lx ", thread_saved_pc(p));
#else
if (state == TASK_RUNNING)
printk(KERN_CONT " running task ");
else
printk(KERN_CONT " %016lx ", thread_saved_pc(p));
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
#endif
printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
task_pid_nr(p), task_pid_nr(p->real_parent),
(unsigned long)task_thread_info(p)->flags);
show_stack(p, NULL);
}
void show_state_filter(unsigned long state_filter)
{
struct task_struct *g, *p;
#if BITS_PER_LONG == 32
printk(KERN_INFO
" task PC stack pid father\n");
#else
printk(KERN_INFO
" task PC stack pid father\n");
#endif
read_lock(&tasklist_lock);
do_each_thread(g, p) {
/*
 * reset the NMI-timeout; listing all tasks on a slow
 * console might take a lot of time:
*/
touch_nmi_watchdog();
if (!state_filter || (p->state & state_filter))
sched_show_task(p);
} while_each_thread(g, p);
touch_all_softlockup_watchdogs();
#ifdef CONFIG_SCHED_DEBUG
sysrq_sched_debug_show();
#endif
read_unlock(&tasklist_lock);
/*
* Only show locks if all tasks are dumped:
*/
if (!state_filter)
debug_show_all_locks();
}
void __cpuinit init_idle_bootup_task(struct task_struct *idle)
{
idle->sched_class = &idle_sched_class;
}
/**
* init_idle - set up an idle thread for a given CPU
* @idle: task in question
* @cpu: cpu the idle task belongs to
*
* NOTE: this function does not set the idle thread's NEED_RESCHED
* flag, to make booting more robust.
*/
void __cpuinit init_idle(struct task_struct *idle, int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
__sched_fork(idle);
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
__set_task_cpu(idle, cpu);
rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
idle->oncpu = 1;
#endif
raw_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT)
task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
#else
task_thread_info(idle)->preempt_count = 0;
#endif
/*
* The idle tasks have their own, simple scheduling class:
*/
idle->sched_class = &idle_sched_class;
ftrace_graph_init_task(idle);
}
/*
* In a system that switches off the HZ timer nohz_cpu_mask
* indicates which cpus entered this state. This is used
 * in the rcu update to wait only for active cpus. For systems
* which do not switch off the HZ timer nohz_cpu_mask should
* always be CPU_BITS_NONE.
*/
cpumask_var_t nohz_cpu_mask;
/*
* Increase the granularity value when there are more CPUs,
* because with more CPUs the 'effective latency' as visible
* to users decreases. But the relationship is not linear,
* so pick a second-best guess by going with the log2 of the
* number of CPUs.
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
static int get_update_sysctl_factor(void)
{
unsigned int cpus = min_t(int, num_online_cpus(), 8);
unsigned int factor;
switch (sysctl_sched_tunable_scaling) {
case SCHED_TUNABLESCALING_NONE:
factor = 1;
break;
case SCHED_TUNABLESCALING_LINEAR:
factor = cpus;
break;
case SCHED_TUNABLESCALING_LOG:
default:
factor = 1 + ilog2(cpus);
break;
}
return factor;
}
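/*
 * Worked example: with 4 online CPUs and the default logarithmic
 * scaling, the factor is 1 + ilog2(4) = 3, so every normalized tunable
 * below is tripled.  Since cpus is capped at 8, the factor never
 * exceeds 1 + ilog2(8) = 4.
 */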
static void update_sysctl(void)
{
unsigned int factor = get_update_sysctl_factor();
#define SET_SYSCTL(name) \
(sysctl_##name = (factor) * normalized_sysctl_##name)
SET_SYSCTL(sched_min_granularity);
SET_SYSCTL(sched_latency);
SET_SYSCTL(sched_wakeup_granularity);
SET_SYSCTL(sched_shares_ratelimit);
#undef SET_SYSCTL
}
static inline void sched_init_granularity(void)
{
update_sysctl();
}
#ifdef CONFIG_SMP
/*
* This is how migration works:
*
* 1) we invoke migration_cpu_stop() on the target CPU using
* stop_one_cpu().
* 2) stopper starts to run (implicitly forcing the migrated thread
* off the CPU)
* 3) it checks whether the migrated task is still in the wrong runqueue.
* 4) if it's in the wrong runqueue then the migration thread removes
* it and puts it into the right queue.
* 5) stopper completes and stop_one_cpu() returns and the migration
* is done.
*/
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
* is removed from the allowed bitmask.
*
* NOTE: the caller must have a valid reference to the task, the
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
unsigned long flags;
struct rq *rq;
unsigned int dest_cpu;
int ret = 0;
/*
 * Serialize against TASK_WAKING so that try_to_wake_up() (ttwu) and
 * wake_up_new_task() (wunt) can drop the rq->lock and still rely on
 * ->cpus_allowed.
 */
again:
while (task_is_waking(p))
cpu_relax();
rq = task_rq_lock(p, &flags);
if (task_is_waking(p)) {
task_rq_unlock(rq, &flags);
goto again;
}
if (!cpumask_intersects(new_mask, cpu_active_mask)) {
ret = -EINVAL;
goto out;
}
if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
!cpumask_equal(&p->cpus_allowed, new_mask))) {
ret = -EINVAL;
goto out;
}
if (p->sched_class->set_cpus_allowed)
p->sched_class->set_cpus_allowed(p, new_mask);
else {
cpumask_copy(&p->cpus_allowed, new_mask);
p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
}
/* Can the task run on the task's current CPU? If so, we're done */
if (cpumask_test_cpu(task_cpu(p), new_mask))
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
if (migrate_task(p, dest_cpu)) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
task_rq_unlock(rq, &flags);
stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
return 0;
}
out:
task_rq_unlock(rq, &flags);
return ret;
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
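/*
 * Usage sketch (illustrative; tsk is a hypothetical task pointer): pin
 * a task to CPU 3, letting the stopper thread migrate it if it is
 * currently running elsewhere:
 *
 *	int err = set_cpus_allowed_ptr(tsk, cpumask_of(3));
 *
 *	if (err)
 *		pr_err("could not pin task: %d\n", err);
 */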
/*
 * Move a (non-current) task off this cpu, onto the dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * moved it away from this CPU, or the CPU is going down), or because
 * we're attempting to rebalance this task on exec (sched_exec).
*
* So we race with normal scheduler movements, but that's OK, as long
* as the task is no longer on this CPU.
*
* Returns non-zero if task was successfully migrated.
*/
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
struct rq *rq_dest, *rq_src;
int ret = 0;
if (unlikely(!cpu_active(dest_cpu)))
return ret;
rq_src = cpu_rq(src_cpu);
rq_dest = cpu_rq(dest_cpu);
double_rq_lock(rq_src, rq_dest);
/* Already moved. */
if (task_cpu(p) != src_cpu)
goto done;
/* Affinity changed (again). */
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
goto fail;
/*
* If we're not on a rq, the next wake-up will ensure we're
* placed properly.
*/
if (p->se.on_rq) {
deactivate_task(rq_src, p, 0);
set_task_cpu(p, dest_cpu);
activate_task(rq_dest, p, 0);
check_preempt_curr(rq_dest, p, 0);
}
done:
ret = 1;
fail:
double_rq_unlock(rq_src, rq_dest);
return ret;
}
/*
 * migration_cpu_stop - this will be executed by a high-prio stopper thread
 * and performs thread migration by bumping the thread off its CPU and
 * then 'pushing' it onto another runqueue.
*/
static int migration_cpu_stop(void *data)
{
struct migration_arg *arg = data;
/*
* The original target cpu might have gone down and we might
* be on another cpu but it doesn't matter.
*/
local_irq_disable();
__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
local_irq_enable();
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* Figure out where task on dead CPU should go, use force if necessary.
*/
void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
{
struct rq *rq = cpu_rq(dead_cpu);
int needs_cpu, uninitialized_var(dest_cpu);
unsigned long flags;
local_irq_save(flags);
raw_spin_lock(&rq->lock);
needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
if (needs_cpu)
dest_cpu = select_fallback_rq(dead_cpu, p);
raw_spin_unlock(&rq->lock);
/*
 * It can only fail if we race with set_cpus_allowed(), in which
 * case the racer should migrate the task anyway.
*/
if (needs_cpu)
__migrate_task(p, dead_cpu, dest_cpu);
local_irq_restore(flags);
}
/*
* While a dead CPU has no uninterruptible tasks queued at this point,
* it might still have a nonzero ->nr_uninterruptible counter, because
 * for performance reasons the counter is not strictly tracking tasks to
* their home CPUs. So we just add the counter to another CPU's counter,
* to keep the global sum constant after CPU-down:
*/
static void migrate_nr_uninterruptible(struct rq *rq_src)
{
struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
unsigned long flags;
local_irq_save(flags);
double_rq_lock(rq_src, rq_dest);
rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
rq_src->nr_uninterruptible = 0;
double_rq_unlock(rq_src, rq_dest);
local_irq_restore(flags);
}
/* Run through task list and migrate tasks from the dead cpu. */
static void migrate_live_tasks(int src_cpu)
{
struct task_struct *p, *t;
read_lock(&tasklist_lock);
do_each_thread(t, p) {
if (p == current)
continue;
if (task_cpu(p) == src_cpu)
move_task_off_dead_cpu(src_cpu, p);
} while_each_thread(t, p);
read_unlock(&tasklist_lock);
}
/*
* Schedules idle task to be the next runnable task on current CPU.
* It does so by boosting its priority to highest possible.
* Used by CPU offline code.
*/
void sched_idle_next(void)
{
int this_cpu = smp_processor_id();
struct rq *rq = cpu_rq(this_cpu);
struct task_struct *p = rq->idle;
unsigned long flags;
/* cpu has to be offline */
BUG_ON(cpu_online(this_cpu));
/*
 * Strictly not necessary since the rest of the CPUs are stopped by now
* and interrupts disabled on the current cpu.
*/
raw_spin_lock_irqsave(&rq->lock, flags);
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
activate_task(rq, p, 0);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
*/
void idle_task_exit(void)
{
struct mm_struct *mm = current->active_mm;
BUG_ON(cpu_online(smp_processor_id()));
if (mm != &init_mm)
switch_mm(mm, &init_mm, current);
mmdrop(mm);
}
/* called under rq->lock with disabled interrupts */
static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
{
struct rq *rq = cpu_rq(dead_cpu);
/* Must be exiting, otherwise would be on tasklist. */
BUG_ON(!p->exit_state);
/* Cannot have done final schedule yet: would have vanished. */
BUG_ON(p->state == TASK_DEAD);
get_task_struct(p);
/*
* Drop lock around migration; if someone else moves it,
* that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
raw_spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
raw_spin_lock_irq(&rq->lock);
put_task_struct(p);
}
/* release_task() removes task from tasklist, so we won't find dead tasks. */
static void migrate_dead_tasks(unsigned int dead_cpu)
{
struct rq *rq = cpu_rq(dead_cpu);
struct task_struct *next;
for (;;) {
if (!rq->nr_running)
break;
next = pick_next_task(rq);
if (!next)
break;
next->sched_class->put_prev_task(rq, next);
migrate_dead(dead_cpu, next);
}
}
/*
* remove the tasks which were accounted by rq from calc_load_tasks.
*/
static void calc_global_load_remove(struct rq *rq)
{
atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
rq->calc_load_active = 0;
}
#endif /* CONFIG_HOTPLUG_CPU */
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
static struct ctl_table sd_ctl_dir[] = {
{
.procname = "sched_domain",
.mode = 0555,
},
{}
};
static struct ctl_table sd_ctl_root[] = {
{
.procname = "kernel",
.mode = 0555,
.child = sd_ctl_dir,
},
{}
};
static struct ctl_table *sd_alloc_ctl_entry(int n)
{
struct ctl_table *entry =
kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
return entry;
}
static void sd_free_ctl_entry(struct ctl_table **tablep)
{
struct ctl_table *entry;
/*
* In the intermediate directories, both the child directory and
* procname are dynamically allocated and could fail but the mode
* will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
for (entry = *tablep; entry->mode; entry++) {
if (entry->child)
sd_free_ctl_entry(&entry->child);
if (entry->proc_handler == NULL)
kfree(entry->procname);
}
kfree(*tablep);
*tablep = NULL;
}
static void
set_table_entry(struct ctl_table *entry,
const char *procname, void *data, int maxlen,
mode_t mode, proc_handler *proc_handler)
{
entry->procname = procname;
entry->data = data;
entry->maxlen = maxlen;
entry->mode = mode;
entry->proc_handler = proc_handler;
}
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
struct ctl_table *table = sd_alloc_ctl_entry(13);
if (table == NULL)
return NULL;
set_table_entry(&table[0], "min_interval", &sd->min_interval,
sizeof(long), 0644, proc_doulongvec_minmax);
set_table_entry(&table[1], "max_interval", &sd->max_interval,
sizeof(long), 0644, proc_doulongvec_minmax);
set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[9], "cache_nice_tries",
&sd->cache_nice_tries,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[10], "flags", &sd->flags,
sizeof(int), 0644, proc_dointvec_minmax);
set_table_entry(&table[11], "name", sd->name,
CORENAME_MAX_SIZE, 0444, proc_dostring);
/* &table[12] is terminator */
return table;
}
static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
struct ctl_table *entry, *table;
struct sched_domain *sd;
int domain_num = 0, i;
char buf[32];
for_each_domain(cpu, sd)
domain_num++;
entry = table = sd_alloc_ctl_entry(domain_num + 1);
if (table == NULL)
return NULL;
i = 0;
for_each_domain(cpu, sd) {
snprintf(buf, 32, "domain%d", i);
entry->procname = kstrdup(buf, GFP_KERNEL);
entry->mode = 0555;
entry->child = sd_alloc_ctl_domain_table(sd);
entry++;
i++;
}
return table;
}
static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
int i, cpu_num = num_possible_cpus();
struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
char buf[32];
WARN_ON(sd_ctl_dir[0].child);
sd_ctl_dir[0].child = entry;
if (entry == NULL)
return;
for_each_possible_cpu(i) {
snprintf(buf, 32, "cpu%d", i);
entry->procname = kstrdup(buf, GFP_KERNEL);
entry->mode = 0555;
entry->child = sd_alloc_ctl_cpu_table(i);
entry++;
}
WARN_ON(sd_sysctl_header);
sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
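/*
 * The resulting tree - one directory per possible CPU, one
 * subdirectory per attached domain level - appears under
 * /proc/sys/kernel/sched_domain/, e.g.
 * /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval.
 */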
/* may be called multiple times per register */
static void unregister_sched_domain_sysctl(void)
{
if (sd_sysctl_header)
unregister_sysctl_table(sd_sysctl_header);
sd_sysctl_header = NULL;
if (sd_ctl_dir[0].child)
sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#else
static void register_sched_domain_sysctl(void)
{
}
static void unregister_sched_domain_sysctl(void)
{
}
#endif
static void set_rq_online(struct rq *rq)
{
if (!rq->online) {
const struct sched_class *class;
cpumask_set_cpu(rq->cpu, rq->rd->online);
rq->online = 1;
for_each_class(class) {
if (class->rq_online)
class->rq_online(rq);
}
}
}
static void set_rq_offline(struct rq *rq)
{
if (rq->online) {
const struct sched_class *class;
for_each_class(class) {
if (class->rq_offline)
class->rq_offline(rq);
}
cpumask_clear_cpu(rq->cpu, rq->rd->online);
rq->online = 0;
}
}
/*
* migration_call - callback that gets triggered when a CPU is added.
* Here we can start up the necessary migration thread for the new CPU.
*/
static int __cpuinit
migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int cpu = (long)hcpu;
unsigned long flags;
struct rq *rq = cpu_rq(cpu);
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
rq->calc_load_update = calc_load_update;
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
/* Update our root-domain */
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
migrate_live_tasks(cpu);
/* Idle task back to normal (off runqueue, low prio) */
raw_spin_lock_irq(&rq->lock);
deactivate_task(rq, rq->idle, 0);
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
raw_spin_unlock_irq(&rq->lock);
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
calc_global_load_remove(rq);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
/* Update our root-domain */
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#endif
}
return NOTIFY_OK;
}
/*
* Register at high priority so that task migration (migrate_all_tasks)
* happens before everything else. This has to be lower priority than
* the notifier in the perf_event subsystem, though.
*/
static struct notifier_block __cpuinitdata migration_notifier = {
.notifier_call = migration_call,
.priority = 10
};
static int __init migration_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
int err;
/* Start one for the boot CPU: */
err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
BUG_ON(err == NOTIFY_BAD);
migration_call(&migration_notifier, CPU_ONLINE, cpu);
register_cpu_notifier(&migration_notifier);
return 0;
}
early_initcall(migration_init);
#endif
#ifdef CONFIG_SMP
#ifdef CONFIG_SCHED_DEBUG
static __read_mostly int sched_domain_debug_enabled;
static int __init sched_domain_debug_setup(char *str)
{
sched_domain_debug_enabled = 1;
return 0;
}
early_param("sched_debug", sched_domain_debug_setup);
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
struct cpumask *groupmask)
{
struct sched_group *group = sd->groups;
char str[256];
cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
cpumask_clear(groupmask);
printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
if (!(sd->flags & SD_LOAD_BALANCE)) {
printk("does not load-balance\n");
if (sd->parent)
printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
" has parent");
return -1;
}
printk(KERN_CONT "span %s level %s\n", str, sd->name);
if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
printk(KERN_ERR "ERROR: domain->span does not contain "
"CPU%d\n", cpu);
}
if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
printk(KERN_ERR "ERROR: domain->groups does not contain"
" CPU%d\n", cpu);
}
printk(KERN_DEBUG "%*s groups:", level + 1, "");
do {
if (!group) {
printk("\n");
printk(KERN_ERR "ERROR: group is NULL\n");
break;
}
if (!group->cpu_power) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_power not "
"set\n");
break;
}
if (!cpumask_weight(sched_group_cpus(group))) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: empty group\n");
break;
}
if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: repeated CPUs\n");
break;
}
cpumask_or(groupmask, groupmask, sched_group_cpus(group));
cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
printk(KERN_CONT " %s", str);
if (group->cpu_power != SCHED_LOAD_SCALE) {
printk(KERN_CONT " (cpu_power = %d)",
group->cpu_power);
}
group = group->next;
} while (group != sd->groups);
printk(KERN_CONT "\n");
if (!cpumask_equal(sched_domain_span(sd), groupmask))
printk(KERN_ERR "ERROR: groups don't span domain->span\n");
if (sd->parent &&
!cpumask_subset(groupmask, sched_domain_span(sd->parent)))
printk(KERN_ERR "ERROR: parent span is not a superset "
"of domain->span\n");
return 0;
}
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
cpumask_var_t groupmask;
int level = 0;
if (!sched_domain_debug_enabled)
return;
if (!sd) {
printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
return;
}
printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
return;
}
for (;;) {
if (sched_domain_debug_one(sd, cpu, level, groupmask))
break;
level++;
sd = sd->parent;
if (!sd)
break;
}
free_cpumask_var(groupmask);
}
#else /* !CONFIG_SCHED_DEBUG */
# define sched_domain_debug(sd, cpu) do { } while (0)
#endif /* CONFIG_SCHED_DEBUG */
static int sd_degenerate(struct sched_domain *sd)
{
if (cpumask_weight(sched_domain_span(sd)) == 1)
return 1;
/* Following flags need at least 2 groups */
if (sd->flags & (SD_LOAD_BALANCE |
SD_BALANCE_NEWIDLE |
SD_BALANCE_FORK |
SD_BALANCE_EXEC |
SD_SHARE_CPUPOWER |
SD_SHARE_PKG_RESOURCES)) {
if (sd->groups != sd->groups->next)
return 0;
}
/* Following flags don't use groups */
if (sd->flags & (SD_WAKE_AFFINE))
return 0;
return 1;
}
static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
unsigned long cflags = sd->flags, pflags = parent->flags;
if (sd_degenerate(parent))
return 1;
if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
return 0;
/* Flags needing groups don't count if only 1 group in parent */
if (parent->groups == parent->groups->next) {
pflags &= ~(SD_LOAD_BALANCE |
SD_BALANCE_NEWIDLE |
SD_BALANCE_FORK |
SD_BALANCE_EXEC |
SD_SHARE_CPUPOWER |
SD_SHARE_PKG_RESOURCES);
if (nr_node_ids == 1)
pflags &= ~SD_SERIALIZE;
}
if (~cflags & pflags)
return 0;
return 1;
}
static void free_rootdomain(struct root_domain *rd)
{
synchronize_sched();
cpupri_cleanup(&rd->cpupri);
free_cpumask_var(rd->rto_mask);
free_cpumask_var(rd->online);
free_cpumask_var(rd->span);
kfree(rd);
}
static void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
struct root_domain *old_rd = NULL;
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
old_rd = rq->rd;
if (cpumask_test_cpu(rq->cpu, old_rd->online))
set_rq_offline(rq);
cpumask_clear_cpu(rq->cpu, old_rd->span);
/*
 * If we don't want to free the old_rd yet then
* set old_rd to NULL to skip the freeing later
* in this function:
*/
if (!atomic_dec_and_test(&old_rd->refcount))
old_rd = NULL;
}
atomic_inc(&rd->refcount);
rq->rd = rd;
cpumask_set_cpu(rq->cpu, rd->span);
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
if (old_rd)
free_rootdomain(old_rd);
}
static int init_rootdomain(struct root_domain *rd, bool bootmem)
{
gfp_t gfp = GFP_KERNEL;
memset(rd, 0, sizeof(*rd));
if (bootmem)
gfp = GFP_NOWAIT;
if (!alloc_cpumask_var(&rd->span, gfp))
goto out;
if (!alloc_cpumask_var(&rd->online, gfp))
goto free_span;
if (!alloc_cpumask_var(&rd->rto_mask, gfp))
goto free_online;
if (cpupri_init(&rd->cpupri, bootmem) != 0)
goto free_rto_mask;
return 0;
free_rto_mask:
free_cpumask_var(rd->rto_mask);
free_online:
free_cpumask_var(rd->online);
free_span:
free_cpumask_var(rd->span);
out:
return -ENOMEM;
}
static void init_defrootdomain(void)
{
init_rootdomain(&def_root_domain, true);
atomic_set(&def_root_domain.refcount, 1);
}
static struct root_domain *alloc_rootdomain(void)
{
struct root_domain *rd;
rd = kmalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return NULL;
if (init_rootdomain(rd, false) != 0) {
kfree(rd);
return NULL;
}
return rd;
}
/*
* Attach the domain 'sd' to 'cpu' as its base domain. Callers must
* hold the hotplug lock.
*/
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
for (tmp = sd; tmp; tmp = tmp->parent)
tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
/* Remove the sched domains which do not contribute to scheduling. */
for (tmp = sd; tmp; ) {
struct sched_domain *parent = tmp->parent;
if (!parent)
break;
if (sd_parent_degenerate(tmp, parent)) {
tmp->parent = parent->parent;
if (parent->parent)
parent->parent->child = tmp;
} else
tmp = tmp->parent;
}
if (sd && sd_degenerate(sd)) {
sd = sd->parent;
if (sd)
sd->child = NULL;
}
sched_domain_debug(sd, cpu);
rq_attach_root(rq, rd);
rcu_assign_pointer(rq->sd, sd);
}
/* cpus with isolated domains */
static cpumask_var_t cpu_isolated_map;
/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
alloc_bootmem_cpumask_var(&cpu_isolated_map);
cpulist_parse(str, cpu_isolated_map);
return 1;
}
__setup("isolcpus=", isolated_cpu_setup);
/*
 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
 * to a function which identifies what group (along with its sched group) a
 * CPU belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
 * (due to the fact that we keep track of groups covered with a struct cpumask).
*
* init_sched_build_groups will build a circular linked list of the groups
* covered by the given span, and will set each group's ->cpumask correctly,
* and ->cpu_power to 0.
*/
static void
init_sched_build_groups(const struct cpumask *span,
const struct cpumask *cpu_map,
int (*group_fn)(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg,
struct cpumask *tmpmask),
struct cpumask *covered, struct cpumask *tmpmask)
{
struct sched_group *first = NULL, *last = NULL;
int i;
cpumask_clear(covered);
for_each_cpu(i, span) {
struct sched_group *sg;
int group = group_fn(i, cpu_map, &sg, tmpmask);
int j;
if (cpumask_test_cpu(i, covered))
continue;
cpumask_clear(sched_group_cpus(sg));
sg->cpu_power = 0;
for_each_cpu(j, span) {
if (group_fn(j, cpu_map, NULL, tmpmask) != group)
continue;
cpumask_set_cpu(j, covered);
cpumask_set_cpu(j, sched_group_cpus(sg));
}
if (!first)
first = sg;
if (last)
last->next = sg;
last = sg;
}
last->next = first;
}
#define SD_NODES_PER_DOMAIN 16
#ifdef CONFIG_NUMA
/**
* find_next_best_node - find the next node to include in a sched_domain
* @node: node whose sched_domain we're building
* @used_nodes: nodes already in the sched_domain
*
* Find the next node to include in a given scheduling domain. Simply
* finds the closest node not already in the @used_nodes map.
*
* Should use nodemask_t.
*/
static int find_next_best_node(int node, nodemask_t *used_nodes)
{
int i, n, val, min_val, best_node = 0;
min_val = INT_MAX;
for (i = 0; i < nr_node_ids; i++) {
/* Start at @node */
n = (node + i) % nr_node_ids;
if (!nr_cpus_node(n))
continue;
/* Skip already used nodes */
if (node_isset(n, *used_nodes))
continue;
/* Simple min distance search */
val = node_distance(node, n);
if (val < min_val) {
min_val = val;
best_node = n;
}
}
node_set(best_node, *used_nodes);
return best_node;
}
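/*
 * Worked example (hypothetical distances): with nodes 0..3 and
 * node_distance(0, {1,2,3}) = {20, 10, 30}, a first call with only
 * node 0 in @used_nodes returns 2 (distance 10) and marks it used;
 * subsequent calls return 1, then 3. The domain thus grows outward
 * from @node in order of increasing distance.
 */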
/**
* sched_domain_node_span - get a cpumask for a node's sched_domain
* @node: node whose cpumask we're constructing
* @span: resulting cpumask
*
* Given a node, construct a good cpumask for its sched_domain to span. It
* should be one that prevents unnecessary balancing, but also spreads tasks
* out optimally.
*/
static void sched_domain_node_span(int node, struct cpumask *span)
{
nodemask_t used_nodes;
int i;
cpumask_clear(span);
nodes_clear(used_nodes);
cpumask_or(span, span, cpumask_of_node(node));
node_set(node, used_nodes);
for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
int next_node = find_next_best_node(node, &used_nodes);
cpumask_or(span, span, cpumask_of_node(next_node));
}
}
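/*
 * Example: with SD_NODES_PER_DOMAIN = 16, a node's span covers its own
 * CPUs plus those of its 15 nearest neighbours by node_distance(). On
 * machines with more than 16 nodes the NODE level is deliberately
 * partial; the ALLNODES level above it covers the remainder.
 */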
#endif /* CONFIG_NUMA */
int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
/*
* The cpus mask in sched_group and sched_domain hangs off the end.
*
 * ( See the comments in include/linux/sched.h:struct sched_group
* and struct sched_domain. )
*/
struct static_sched_group {
struct sched_group sg;
DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
};
struct static_sched_domain {
struct sched_domain sd;
DECLARE_BITMAP(span, CONFIG_NR_CPUS);
};
struct s_data {
#ifdef CONFIG_NUMA
int sd_allnodes;
cpumask_var_t domainspan;
cpumask_var_t covered;
cpumask_var_t notcovered;
#endif
cpumask_var_t nodemask;
cpumask_var_t this_sibling_map;
cpumask_var_t this_core_map;
cpumask_var_t send_covered;
cpumask_var_t tmpmask;
struct sched_group **sched_group_nodes;
struct root_domain *rd;
};
enum s_alloc {
sa_sched_groups = 0,
sa_rootdomain,
sa_tmpmask,
sa_send_covered,
sa_this_core_map,
sa_this_sibling_map,
sa_nodemask,
sa_sched_group_nodes,
#ifdef CONFIG_NUMA
sa_notcovered,
sa_covered,
sa_domainspan,
#endif
sa_none,
};
/*
* SMT sched-domains:
*/
#ifdef CONFIG_SCHED_SMT
static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
static int
cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg, struct cpumask *unused)
{
if (sg)
*sg = &per_cpu(sched_groups, cpu).sg;
return cpu;
}
#endif /* CONFIG_SCHED_SMT */
/*
* multi-core sched-domains:
*/
#ifdef CONFIG_SCHED_MC
static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
#endif /* CONFIG_SCHED_MC */
#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
static int
cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg, struct cpumask *mask)
{
int group;
cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
group = cpumask_first(mask);
if (sg)
*sg = &per_cpu(sched_group_core, group).sg;
return group;
}
#elif defined(CONFIG_SCHED_MC)
static int
cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg, struct cpumask *unused)
{
if (sg)
*sg = &per_cpu(sched_group_core, cpu).sg;
return cpu;
}
#endif
static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
static int
cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg, struct cpumask *mask)
{
int group;
#ifdef CONFIG_SCHED_MC
cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
group = cpumask_first(mask);
#else
group = cpu;
#endif
if (sg)
*sg = &per_cpu(sched_group_phys, group).sg;
return group;
}
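/*
 * Worked example (assumed topology: two packages with two cores each,
 * CONFIG_SCHED_MC set): for cpu 2, cpu_coregroup_mask(2) = {2,3}, so
 * group = cpumask_first({2,3} & cpu_map) = 2 and *sg points at
 * per_cpu(sched_group_phys, 2).sg. CPUs 2 and 3 therefore share one
 * physical-level group, keyed by the first CPU of their package.
 */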
#ifdef CONFIG_NUMA
/*
 * init_sched_build_groups() can't handle what we want to do with node
 * groups, so roll our own. Each node gets its own list of groups, which
 * is dynamically allocated.
 */
static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
static struct sched_group ***sched_group_nodes_bycpu;
static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg,
struct cpumask *nodemask)
{
int group;
cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
group = cpumask_first(nodemask);
if (sg)
*sg = &per_cpu(sched_group_allnodes, group).sg;
return group;
}
static void init_numa_sched_groups_power(struct sched_group *group_head)
{
struct sched_group *sg = group_head;
int j;
if (!sg)
return;
do {
for_each_cpu(j, sched_group_cpus(sg)) {
struct sched_domain *sd;
sd = &per_cpu(phys_domains, j).sd;
if (j != group_first_cpu(sd->groups)) {
/*
* Only add "power" once for each
* physical package.
*/
continue;
}
sg->cpu_power += sd->groups->cpu_power;
}
sg = sg->next;
} while (sg != group_head);
}
static int build_numa_sched_groups(struct s_data *d,
const struct cpumask *cpu_map, int num)
{
struct sched_domain *sd;
struct sched_group *sg, *prev;
int n, j;
cpumask_clear(d->covered);
cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
if (cpumask_empty(d->nodemask)) {
d->sched_group_nodes[num] = NULL;
goto out;
}
sched_domain_node_span(num, d->domainspan);
cpumask_and(d->domainspan, d->domainspan, cpu_map);
sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, num);
if (!sg) {
printk(KERN_WARNING "Can not alloc domain group for node %d\n",
num);
return -ENOMEM;
}
d->sched_group_nodes[num] = sg;
for_each_cpu(j, d->nodemask) {
sd = &per_cpu(node_domains, j).sd;
sd->groups = sg;
}
sg->cpu_power = 0;
cpumask_copy(sched_group_cpus(sg), d->nodemask);
sg->next = sg;
cpumask_or(d->covered, d->covered, d->nodemask);
prev = sg;
for (j = 0; j < nr_node_ids; j++) {
n = (num + j) % nr_node_ids;
cpumask_complement(d->notcovered, d->covered);
cpumask_and(d->tmpmask, d->notcovered, cpu_map);
cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
if (cpumask_empty(d->tmpmask))
break;
cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
if (cpumask_empty(d->tmpmask))
continue;
sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
GFP_KERNEL, num);
if (!sg) {
printk(KERN_WARNING
"Can not alloc domain group for node %d\n", j);
return -ENOMEM;
}
sg->cpu_power = 0;
cpumask_copy(sched_group_cpus(sg), d->tmpmask);
sg->next = prev->next;
cpumask_or(d->covered, d->covered, d->tmpmask);
prev->next = sg;
prev = sg;
}
out:
return 0;
}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_NUMA
/* Free memory allocated for various sched_group structures */
static void free_sched_groups(const struct cpumask *cpu_map,
struct cpumask *nodemask)
{
int cpu, i;
for_each_cpu(cpu, cpu_map) {
struct sched_group **sched_group_nodes
= sched_group_nodes_bycpu[cpu];
if (!sched_group_nodes)
continue;
for (i = 0; i < nr_node_ids; i++) {
struct sched_group *oldsg, *sg = sched_group_nodes[i];
cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
if (cpumask_empty(nodemask))
continue;
if (sg == NULL)
continue;
sg = sg->next;
next_sg:
oldsg = sg;
sg = sg->next;
kfree(oldsg);
if (oldsg != sched_group_nodes[i])
goto next_sg;
}
kfree(sched_group_nodes);
sched_group_nodes_bycpu[cpu] = NULL;
}
}
#else /* !CONFIG_NUMA */
static void free_sched_groups(const struct cpumask *cpu_map,
struct cpumask *nodemask)
{
}
#endif /* CONFIG_NUMA */
/*
* Initialize sched groups cpu_power.
*
 * cpu_power indicates the capacity of a sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically, cpu_power will be the same for all groups in a sched domain
 * unless there are asymmetries in the topology. If there are asymmetries,
 * the group with more cpu_power will pick up more load than the group with
 * less cpu_power.
*/
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
struct sched_domain *child;
struct sched_group *group;
long power;
int weight;
WARN_ON(!sd || !sd->groups);
if (cpu != group_first_cpu(sd->groups))
return;
child = sd->child;
sd->groups->cpu_power = 0;
if (!child) {
power = SCHED_LOAD_SCALE;
weight = cpumask_weight(sched_domain_span(sd));
/*
* SMT siblings share the power of a single core.
* Usually multiple threads get a better yield out of
* that one core than a single thread would have,
* reflect that in sd->smt_gain.
*/
if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
power *= sd->smt_gain;
power /= weight;
power >>= SCHED_LOAD_SHIFT;
}
sd->groups->cpu_power += power;
return;
}
/*
* Add cpu_power of each child group to this groups cpu_power.
*/
group = child->groups;
do {
sd->groups->cpu_power += group->cpu_power;
group = group->next;
} while (group != child->groups);
}
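/*
 * Worked example (assuming the usual smt_gain of 1178, ~15% over a
 * single thread): with SCHED_LOAD_SCALE = 1024 and a two-thread SMT
 * domain (weight == 2), each sibling group ends up with
 *
 *	power = (1024 * 1178 / 2) >> SCHED_LOAD_SHIFT = 589
 *
 * so both siblings together advertise ~1178 rather than 2048: the
 * threads share a core's capacity instead of doubling it.
 */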
/*
* Initializers for schedule domains
* Non-inlined to reduce accumulated stack pressure in build_sched_domains()
*/
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(sd, type) sd->name = #type
#else
# define SD_INIT_NAME(sd, type) do { } while (0)
#endif
#define SD_INIT(sd, type) sd_init_##type(sd)
#define SD_INIT_FUNC(type) \
static noinline void sd_init_##type(struct sched_domain *sd) \
{ \
memset(sd, 0, sizeof(*sd)); \
*sd = SD_##type##_INIT; \
sd->level = SD_LV_##type; \
SD_INIT_NAME(sd, type); \
}
SD_INIT_FUNC(CPU)
#ifdef CONFIG_NUMA
SD_INIT_FUNC(ALLNODES)
SD_INIT_FUNC(NODE)
#endif
#ifdef CONFIG_SCHED_SMT
SD_INIT_FUNC(SIBLING)
#endif
#ifdef CONFIG_SCHED_MC
SD_INIT_FUNC(MC)
#endif
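/*
 * For reference, SD_INIT(sd, CPU) expands to sd_init_CPU(sd), which is
 * equivalent to:
 *
 *	memset(sd, 0, sizeof(*sd));
 *	*sd = SD_CPU_INIT;
 *	sd->level = SD_LV_CPU;
 *	sd->name = "CPU";	(only with CONFIG_SCHED_DEBUG)
 */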
static int default_relax_domain_level = -1;
static int __init setup_relax_domain_level(char *str)
{
unsigned long val;
val = simple_strtoul(str, NULL, 0);
if (val < SD_LV_MAX)
default_relax_domain_level = val;
return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);
static void set_domain_attribute(struct sched_domain *sd,
struct sched_domain_attr *attr)
{
int request;
if (!attr || attr->relax_domain_level < 0) {
if (default_relax_domain_level < 0)
return;
else
request = default_relax_domain_level;
} else
request = attr->relax_domain_level;
if (request < sd->level) {
/* turn off idle balance on this domain */
sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
} else {
/* turn on idle balance on this domain */
sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
}
}
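/*
 * Example (with the usual sched_domain_level enum, where SD_LV_MC == 2):
 * booting with "relax_domain_level=2" means the sibling and MC domains
 * (level <= 2) keep SD_BALANCE_WAKE and SD_BALANCE_NEWIDLE, while the
 * CPU and NUMA levels (level > 2) have them cleared: wakeups and newly
 * idle CPUs then only search nearby.
 */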
static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
const struct cpumask *cpu_map)
{
switch (what) {
case sa_sched_groups:
free_sched_groups(cpu_map, d->tmpmask); /* fall through */
d->sched_group_nodes = NULL;
case sa_rootdomain:
free_rootdomain(d->rd); /* fall through */
case sa_tmpmask:
free_cpumask_var(d->tmpmask); /* fall through */
case sa_send_covered:
free_cpumask_var(d->send_covered); /* fall through */
case sa_this_core_map:
free_cpumask_var(d->this_core_map); /* fall through */
case sa_this_sibling_map:
free_cpumask_var(d->this_sibling_map); /* fall through */
case sa_nodemask:
free_cpumask_var(d->nodemask); /* fall through */
case sa_sched_group_nodes:
#ifdef CONFIG_NUMA
kfree(d->sched_group_nodes); /* fall through */
case sa_notcovered:
free_cpumask_var(d->notcovered); /* fall through */
case sa_covered:
free_cpumask_var(d->covered); /* fall through */
case sa_domainspan:
free_cpumask_var(d->domainspan); /* fall through */
#endif
case sa_none:
break;
}
}
static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
const struct cpumask *cpu_map)
{
#ifdef CONFIG_NUMA
if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
return sa_none;
if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
return sa_domainspan;
if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
return sa_covered;
/* Allocate the per-node list of sched groups */
d->sched_group_nodes = kcalloc(nr_node_ids,
sizeof(struct sched_group *), GFP_KERNEL);
if (!d->sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
return sa_notcovered;
}
sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
#endif
if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
return sa_sched_group_nodes;
if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
return sa_nodemask;
if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
return sa_this_sibling_map;
if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
return sa_this_core_map;
if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
return sa_send_covered;
d->rd = alloc_rootdomain();
if (!d->rd) {
printk(KERN_WARNING "Cannot alloc root domain\n");
return sa_tmpmask;
}
return sa_rootdomain;
}
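/*
 * The two functions above are a staged-allocation pair: the returned
 * enum s_alloc records how far allocation got, and the fall-through
 * switch in __free_domain_allocs() unwinds from exactly that point.
 * For example, if alloc_cpumask_var(&d->tmpmask) fails, the caller
 * gets sa_send_covered and the teardown frees send_covered,
 * this_core_map, this_sibling_map, nodemask and the NUMA allocations,
 * but not tmpmask, which was never allocated.
 */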
static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
{
struct sched_domain *sd = NULL;
#ifdef CONFIG_NUMA
struct sched_domain *parent;
d->sd_allnodes = 0;
if (cpumask_weight(cpu_map) >
SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
sd = &per_cpu(allnodes_domains, i).sd;
SD_INIT(sd, ALLNODES);
set_domain_attribute(sd, attr);
cpumask_copy(sched_domain_span(sd), cpu_map);
cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
d->sd_allnodes = 1;
}
parent = sd;
sd = &per_cpu(node_domains, i).sd;
SD_INIT(sd, NODE);
set_domain_attribute(sd, attr);
sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
sd->parent = parent;
if (parent)
parent->child = sd;
cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
#endif
return sd;
}
static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
const struct cpumask *cpu_map, struct sched_domain_attr *attr,
struct sched_domain *parent, int i)
{
struct sched_domain *sd;
sd = &per_cpu(phys_domains, i).sd;
SD_INIT(sd, CPU);
set_domain_attribute(sd, attr);
cpumask_copy(sched_domain_span(sd), d->nodemask);
sd->parent = parent;
if (parent)
parent->child = sd;
cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
return sd;
}
static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
const struct cpumask *cpu_map, struct sched_domain_attr *attr,
struct sched_domain *parent, int i)
{
struct sched_domain *sd = parent;
#ifdef CONFIG_SCHED_MC
sd = &per_cpu(core_domains, i).sd;
SD_INIT(sd, MC);
set_domain_attribute(sd, attr);
cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
sd->parent = parent;
parent->child = sd;
cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
#endif
return sd;
}
static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
const struct cpumask *cpu_map, struct sched_domain_attr *attr,
struct sched_domain *parent, int i)
{
struct sched_domain *sd = parent;
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i).sd;
SD_INIT(sd, SIBLING);
set_domain_attribute(sd, attr);
cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
sd->parent = parent;
parent->child = sd;
cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
#endif
return sd;
}
static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
const struct cpumask *cpu_map, int cpu)
{
switch (l) {
#ifdef CONFIG_SCHED_SMT
case SD_LV_SIBLING: /* set up CPU (sibling) groups */
cpumask_and(d->this_sibling_map, cpu_map,
topology_thread_cpumask(cpu));
if (cpu == cpumask_first(d->this_sibling_map))
init_sched_build_groups(d->this_sibling_map, cpu_map,
&cpu_to_cpu_group,
d->send_covered, d->tmpmask);
break;
#endif
#ifdef CONFIG_SCHED_MC
case SD_LV_MC: /* set up multi-core groups */
cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
if (cpu == cpumask_first(d->this_core_map))
init_sched_build_groups(d->this_core_map, cpu_map,
&cpu_to_core_group,
d->send_covered, d->tmpmask);
break;
#endif
case SD_LV_CPU: /* set up physical groups */
cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
if (!cpumask_empty(d->nodemask))
init_sched_build_groups(d->nodemask, cpu_map,
&cpu_to_phys_group,
d->send_covered, d->tmpmask);
break;
#ifdef CONFIG_NUMA
case SD_LV_ALLNODES:
init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
d->send_covered, d->tmpmask);
break;
#endif
default:
break;
}
}
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
*/
static int __build_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_attr *attr)
{
enum s_alloc alloc_state = sa_none;
struct s_data d;
struct sched_domain *sd;
int i;
#ifdef CONFIG_NUMA
d.sd_allnodes = 0;
#endif
alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
if (alloc_state != sa_rootdomain)
goto error;
alloc_state = sa_sched_groups;
/*
* Set up domains for cpus specified by the cpu_map.
*/
for_each_cpu(i, cpu_map) {
cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
cpu_map);
sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
}
for_each_cpu(i, cpu_map) {
build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
build_sched_groups(&d, SD_LV_MC, cpu_map, i);
}
/* Set up physical groups */
for (i = 0; i < nr_node_ids; i++)
build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
#ifdef CONFIG_NUMA
/* Set up node groups */
if (d.sd_allnodes)
build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
for (i = 0; i < nr_node_ids; i++)
if (build_numa_sched_groups(&d, cpu_map, i))
goto error;
#endif
/* Calculate CPU power for physical packages and nodes */
#ifdef CONFIG_SCHED_SMT
for_each_cpu(i, cpu_map) {
sd = &per_cpu(cpu_domains, i).sd;
init_sched_groups_power(i, sd);
}
#endif
#ifdef CONFIG_SCHED_MC
for_each_cpu(i, cpu_map) {
sd = &per_cpu(core_domains, i).sd;
init_sched_groups_power(i, sd);
}
#endif
for_each_cpu(i, cpu_map) {
sd = &per_cpu(phys_domains, i).sd;
init_sched_groups_power(i, sd);
}
#ifdef CONFIG_NUMA
for (i = 0; i < nr_node_ids; i++)
init_numa_sched_groups_power(d.sched_group_nodes[i]);
if (d.sd_allnodes) {
struct sched_group *sg;
cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
d.tmpmask);
init_numa_sched_groups_power(sg);
}
#endif
/* Attach the domains */
for_each_cpu(i, cpu_map) {
#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i).sd;
#elif defined(CONFIG_SCHED_MC)
sd = &per_cpu(core_domains, i).sd;
#else
sd = &per_cpu(phys_domains, i).sd;
#endif
cpu_attach_domain(sd, d.rd, i);
}
d.sched_group_nodes = NULL; /* don't free this we still need it */
__free_domain_allocs(&d, sa_tmpmask, cpu_map);
return 0;
error:
__free_domain_allocs(&d, alloc_state, cpu_map);
return -ENOMEM;
}
static int build_sched_domains(const struct cpumask *cpu_map)
{
return __build_sched_domains(cpu_map, NULL);
}
static cpumask_var_t *doms_cur; /* current sched domains */
static int ndoms_cur; /* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
/* attributes of custom domains in 'doms_cur' */
/*
 * Special case: If a kmalloc of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
*/
static cpumask_var_t fallback_doms;
/*
* arch_update_cpu_topology lets virtualized architectures update the
* cpu core maps. It is supposed to return 1 if the topology changed
* or 0 if it stayed the same.
*/
int __attribute__((weak)) arch_update_cpu_topology(void)
{
return 0;
}
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
int i;
cpumask_var_t *doms;
doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
if (!doms)
return NULL;
for (i = 0; i < ndoms; i++) {
if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
free_sched_domains(doms, i);
return NULL;
}
}
return doms;
}
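/*
 * Minimal usage sketch (assumption; mirrors how cpusets drive this
 * API, with the hotplug lock held and hypothetical masks maskA/maskB):
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *	if (doms) {
 *		cpumask_copy(doms[0], maskA);
 *		cpumask_copy(doms[1], maskB);
 *		partition_sched_domains(2, doms, NULL);
 *	}
 *
 * partition_sched_domains() takes ownership and frees the array once
 * it is replaced by a later partitioning.
 */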
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
unsigned int i;
for (i = 0; i < ndoms; i++)
free_cpumask_var(doms[i]);
kfree(doms);
}
/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
* For now this just excludes isolated cpus, but could be used to
* exclude other special cases in the future.
*/
static int arch_init_sched_domains(const struct cpumask *cpu_map)
{
int err;
arch_update_cpu_topology();
ndoms_cur = 1;
doms_cur = alloc_sched_domains(ndoms_cur);
if (!doms_cur)
doms_cur = &fallback_doms;
cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
dattr_cur = NULL;
err = build_sched_domains(doms_cur[0]);
register_sched_domain_sysctl();
return err;
}
static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
struct cpumask *tmpmask)
{
free_sched_groups(cpu_map, tmpmask);
}
/*
* Detach sched domains from a group of cpus specified in cpu_map
* These cpus will now be attached to the NULL domain
*/
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
/* A static bitmap is safe here because the hotplug lock is held. */
static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
int i;
for_each_cpu(i, cpu_map)
cpu_attach_domain(NULL, &def_root_domain, i);
synchronize_sched();
arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
}
/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
struct sched_domain_attr *new, int idx_new)
{
struct sched_domain_attr tmp;
/* fast path */
if (!new && !cur)
return 1;
tmp = SD_ATTR_INIT;
return !memcmp(cur ? (cur + idx_cur) : &tmp,
new ? (new + idx_new) : &tmp,
sizeof(struct sched_domain_attr));
}
/*
* Partition sched domains as specified by the 'ndoms_new'
* cpumasks in the array doms_new[] of cpumasks. This compares
* doms_new[] to the current sched domain partitioning, doms_cur[].
* It destroys each deleted domain and builds each new domain.
*
* 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap); we set up one
* sched domain for each mask. CPUs not in any of the cpumasks will
* not be load balanced. If the same cpumask appears both in the
* current 'doms_cur' domains and in the new 'doms_new', we can leave
* it as it is.
*
* The passed in 'doms_new' should be allocated using
* alloc_sched_domains. This routine takes ownership of it and will
* free_sched_domains it when done with it. If the caller failed the
* alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; this also forces the domains to be rebuilt.
*
* If doms_new == NULL it will be replaced with cpu_online_mask.
* ndoms_new == 0 is a special case for destroying existing domains,
* and it will not create the default domain.
*
* Call with hotplug lock held
*/
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
int i, j, n;
int new_topology;
mutex_lock(&sched_domains_mutex);
/* always unregister in case we don't destroy any domains */
unregister_sched_domain_sysctl();
/* Let architecture update cpu core mappings. */
new_topology = arch_update_cpu_topology();
n = doms_new ? ndoms_new : 0;
/* Destroy deleted domains */
for (i = 0; i < ndoms_cur; i++) {
for (j = 0; j < n && !new_topology; j++) {
if (cpumask_equal(doms_cur[i], doms_new[j])
&& dattrs_equal(dattr_cur, i, dattr_new, j))
goto match1;
}
/* no match - a current sched domain not in new doms_new[] */
detach_destroy_domains(doms_cur[i]);
match1:
;
}
if (doms_new == NULL) {
ndoms_cur = 0;
doms_new = &fallback_doms;
cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
WARN_ON_ONCE(dattr_new);
}
/* Build new domains */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < ndoms_cur && !new_topology; j++) {
if (cpumask_equal(doms_new[i], doms_cur[j])
&& dattrs_equal(dattr_new, i, dattr_cur, j))
goto match2;
}
/* no match - add a new doms_new */
__build_sched_domains(doms_new[i],
dattr_new ? dattr_new + i : NULL);
match2:
;
}
/* Remember the new sched domains */
if (doms_cur != &fallback_doms)
free_sched_domains(doms_cur, ndoms_cur);
kfree(dattr_cur); /* kfree(NULL) is safe */
doms_cur = doms_new;
dattr_cur = dattr_new;
ndoms_cur = ndoms_new;
register_sched_domain_sysctl();
mutex_unlock(&sched_domains_mutex);
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
static void arch_reinit_sched_domains(void)
{
get_online_cpus();
/* Destroy domains first to force the rebuild */
partition_sched_domains(0, NULL, NULL);
rebuild_sched_domains();
put_online_cpus();
}
static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
{
unsigned int level = 0;
if (sscanf(buf, "%u", &level) != 1)
return -EINVAL;
/*
 * level is always positive, so don't check for
 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
 * What happens on a 0 or 1 byte write? We may need to
 * check count as well.
 */
if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
return -EINVAL;
if (smt)
sched_smt_power_savings = level;
else
sched_mc_power_savings = level;
arch_reinit_sched_domains();
return count;
}
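/*
 * Example (path as registered on the cpu sysdev class in this era):
 * "echo 1 > /sys/devices/system/cpu/sched_mc_power_savings" lands here
 * with smt == 0, sets sched_mc_power_savings = 1 and rebuilds all
 * sched domains so the new balancing policy takes effect immediately.
 */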
#ifdef CONFIG_SCHED_MC
static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
struct sysdev_class_attribute *attr,
char *page)
{
return sprintf(page, "%u\n", sched_mc_power_savings);
}
static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
struct sysdev_class_attribute *attr,
const char *buf, size_t count)
{
return sched_power_savings_store(buf, count, 0);
}
static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
sched_mc_power_savings_show,
sched_mc_power_savings_store);
#endif
#ifdef CONFIG_SCHED_SMT
static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
struct sysdev_class_attribute *attr,
char *page)
{
return sprintf(page, "%u\n", sched_smt_power_savings);
}
static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
struct sysdev_class_attribute *attr,
const char *buf, size_t count)
{
return sched_power_savings_store(buf, count, 1);
}
static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
sched_smt_power_savings_show,
sched_smt_power_savings_store);
#endif
int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
{
int err = 0;
#ifdef CONFIG_SCHED_SMT
if (smt_capable())
err = sysfs_create_file(&cls->kset.kobj,
&attr_sched_smt_power_savings.attr);
#endif
#ifdef CONFIG_SCHED_MC
if (!err && mc_capable())
err = sysfs_create_file(&cls->kset.kobj,
&attr_sched_mc_power_savings.attr);
#endif
return err;
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
#ifndef CONFIG_CPUSETS
/*
* Add online and remove offline CPUs from the scheduler domains.
* When cpusets are enabled they take over this function.
*/
static int update_sched_domains(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
partition_sched_domains(1, NULL, NULL);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
#endif
static int update_runtime(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
int cpu = (int)(long)hcpu;
switch (action) {
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
disable_runtime(cpu_rq(cpu));
return NOTIFY_OK;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
enable_runtime(cpu_rq(cpu));
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
void __init sched_init_smp(void)
{
cpumask_var_t non_isolated_cpus;
alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
#if defined(CONFIG_NUMA)
sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
GFP_KERNEL);
BUG_ON(sched_group_nodes_bycpu == NULL);
#endif
get_online_cpus();
mutex_lock(&sched_domains_mutex);
arch_init_sched_domains(cpu_active_mask);
cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
if (cpumask_empty(non_isolated_cpus))
cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
mutex_unlock(&sched_domains_mutex);
put_online_cpus();
#ifndef CONFIG_CPUSETS
/* XXX: Theoretical race here - CPU may be hotplugged now */
hotcpu_notifier(update_sched_domains, 0);
#endif
/* RT runtime code needs to handle some hotplug events */
hotcpu_notifier(update_runtime, 0);
init_hrtick();
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
BUG();
sched_init_granularity();
free_cpumask_var(non_isolated_cpus);
init_sched_rt_class();
}
#else
void __init sched_init_smp(void)
{
sched_init_granularity();
}
#endif /* CONFIG_SMP */
const_debug unsigned int sysctl_timer_migration = 1;
int in_sched_functions(unsigned long addr)
{
return in_lock_functions(addr) ||
(addr >= (unsigned long)__sched_text_start
&& addr < (unsigned long)__sched_text_end);
}
static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
{
cfs_rq->tasks_timeline = RB_ROOT;
INIT_LIST_HEAD(&cfs_rq->tasks);
#ifdef CONFIG_FAIR_GROUP_SCHED
cfs_rq->rq = rq;
#endif
cfs_rq->min_vruntime = (u64)(-(1LL << 20));
}
static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
struct rt_prio_array *array;
int i;
array = &rt_rq->active;
for (i = 0; i < MAX_RT_PRIO; i++) {
INIT_LIST_HEAD(array->queue + i);
__clear_bit(i, array->bitmap);
}
/* delimiter for bitsearch: */
__set_bit(MAX_RT_PRIO, array->bitmap);
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
rt_rq->highest_prio.curr = MAX_RT_PRIO;
#ifdef CONFIG_SMP
rt_rq->highest_prio.next = MAX_RT_PRIO;
#endif
#endif
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
#endif
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
raw_spin_lock_init(&rt_rq->rt_runtime_lock);
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->rt_nr_boosted = 0;
rt_rq->rq = rq;
#endif
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu, int add,
struct sched_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
tg->cfs_rq[cpu] = cfs_rq;
init_cfs_rq(cfs_rq, rq);
cfs_rq->tg = tg;
if (add)
list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
tg->se[cpu] = se;
/* se could be NULL for init_task_group */
if (!se)
return;
if (!parent)
se->cfs_rq = &rq->cfs;
else
se->cfs_rq = parent->my_q;
se->my_q = cfs_rq;
se->load.weight = tg->shares;
se->load.inv_weight = 0;
se->parent = parent;
}
#endif
#ifdef CONFIG_RT_GROUP_SCHED
static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
struct sched_rt_entity *rt_se, int cpu, int add,
struct sched_rt_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
tg->rt_rq[cpu] = rt_rq;
init_rt_rq(rt_rq, rq);
rt_rq->tg = tg;
rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
if (add)
list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
tg->rt_se[cpu] = rt_se;
if (!rt_se)
return;
if (!parent)
rt_se->rt_rq = &rq->rt;
else
rt_se->rt_rq = parent->my_q;
rt_se->my_q = rt_rq;
rt_se->parent = parent;
INIT_LIST_HEAD(&rt_se->run_list);
}
#endif
void __init sched_init(void)
{
int i, j;
unsigned long alloc_size = 0, ptr;
#ifdef CONFIG_FAIR_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
alloc_size += num_possible_cpus() * cpumask_size();
#endif
if (alloc_size) {
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
#ifdef CONFIG_FAIR_GROUP_SCHED
init_task_group.se = (struct sched_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
init_task_group.cfs_rq = (struct cfs_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
init_task_group.rt_se = (struct sched_rt_entity **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
init_task_group.rt_rq = (struct rt_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
for_each_possible_cpu(i) {
per_cpu(load_balance_tmpmask, i) = (void *)ptr;
ptr += cpumask_size();
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
}
#ifdef CONFIG_SMP
init_defrootdomain();
#endif
init_rt_bandwidth(&def_rt_bandwidth,
global_rt_period(), global_rt_runtime());
#ifdef CONFIG_RT_GROUP_SCHED
init_rt_bandwidth(&init_task_group.rt_bandwidth,
global_rt_period(), global_rt_runtime());
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED
list_add(&init_task_group.list, &task_groups);
INIT_LIST_HEAD(&init_task_group.children);
#endif /* CONFIG_CGROUP_SCHED */
#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
__alignof__(unsigned long));
#endif
for_each_possible_cpu(i) {
struct rq *rq;
rq = cpu_rq(i);
raw_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
init_cfs_rq(&rq->cfs, rq);
init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
init_task_group.shares = init_task_group_load;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
#ifdef CONFIG_CGROUP_SCHED
/*
* How much cpu bandwidth does init_task_group get?
*
 * In the case of task-groups formed through the cgroup filesystem, it
* gets 100% of the cpu resources in the system. This overall
* system cpu resource is divided among the tasks of
* init_task_group and its child task-groups in a fair manner,
* based on each entity's (task or task-group's) weight
* (se->load.weight).
*
 * In other words, if init_task_group has 10 tasks of weight
 * 1024 and two child groups A0 and A1 (of weight 1024 each),
* then A0's share of the cpu resource is:
*
* A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
*
* We achieve this by letting init_task_group's tasks sit
 * directly in rq->cfs (i.e. init_task_group->se[] = NULL).
*/
init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
#endif
#endif /* CONFIG_FAIR_GROUP_SCHED */
rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
#ifdef CONFIG_RT_GROUP_SCHED
INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
#ifdef CONFIG_CGROUP_SCHED
init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
#endif
#endif
for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
rq->cpu_load[j] = 0;
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
rq->cpu_power = SCHED_LOAD_SCALE;
rq->post_schedule = 0;
rq->active_balance = 0;
rq->next_balance = jiffies;
rq->push_cpu = 0;
rq->cpu = i;
rq->online = 0;
rq->idle_stamp = 0;
rq->avg_idle = 2*sysctl_sched_migration_cost;
rq_attach_root(rq, &def_root_domain);
#endif
init_rq_hrtick(rq);
atomic_set(&rq->nr_iowait, 0);
}
set_load_weight(&init_task);
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif
#ifdef CONFIG_SMP
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
#endif
#ifdef CONFIG_RT_MUTEXES
plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
#endif
/*
* The boot idle thread does lazy MMU switching as well:
*/
atomic_inc(&init_mm.mm_count);
enter_lazy_tlb(&init_mm, current);
/*
 * Make us the idle thread. Technically, schedule() should not be
 * called from this thread; however, somewhere below it might be.
 * Because we are the idle thread, we just pick up running again
 * when this runqueue becomes "idle".
*/
init_idle(current, smp_processor_id());
calc_load_update = jiffies + LOAD_FREQ;
/*
* During early bootup we pretend to be a normal task:
*/
current->sched_class = &fair_sched_class;
/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ
zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
#endif
/* May be allocated at isolcpus cmdline parse time */
if (cpu_isolated_map == NULL)
zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */
perf_event_init();
scheduler_running = 1;
}
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
}
static int __might_sleep_init_called;
int __init __might_sleep_init(void)
{
__might_sleep_init_called = 1;
return 0;
}
early_initcall(__might_sleep_init);
void __might_sleep(const char *file, int line, int preempt_offset)
{
#ifdef in_atomic
static unsigned long prev_jiffy; /* ratelimiting */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
oops_in_progress)
return;
if (system_state != SYSTEM_RUNNING &&
(!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
prev_jiffy = jiffies;
printk(KERN_ERR
"BUG: sleeping function called from invalid context at %s:%d\n",
file, line);
printk(KERN_ERR
"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
in_atomic(), irqs_disabled(),
current->pid, current->comm);
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
dump_stack();
#endif
}
EXPORT_SYMBOL(__might_sleep);
#endif
#ifdef CONFIG_MAGIC_SYSRQ
static void normalize_task(struct rq *rq, struct task_struct *p)
{
int on_rq;
on_rq = p->se.on_rq;
if (on_rq)
deactivate_task(rq, p, 0);
__setscheduler(rq, p, SCHED_NORMAL, 0);
if (on_rq) {
activate_task(rq, p, 0);
resched_task(rq->curr);
}
}
void normalize_rt_tasks(void)
{
struct task_struct *g, *p;
unsigned long flags;
struct rq *rq;
read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, p) {
/*
* Only normalize user tasks:
*/
if (!p->mm)
continue;
p->se.exec_start = 0;
#ifdef CONFIG_SCHEDSTATS
p->se.statistics.wait_start = 0;
p->se.statistics.sleep_start = 0;
p->se.statistics.block_start = 0;
#endif
if (!rt_task(p)) {
/*
* Renice negative nice level userspace
* tasks back to 0:
*/
if (TASK_NICE(p) < 0 && p->mm)
set_user_nice(p, 0);
continue;
}
raw_spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
normalize_task(rq, p);
__task_rq_unlock(rq);
raw_spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
read_unlock_irqrestore(&tasklist_lock, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ */
#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
* These functions are only useful for the IA64 MCA handling, or kdb.
*
* They can only be called when the whole system has been
* stopped - every CPU needs to be quiescent, and no scheduling
* activity can take place. Using them for anything else would
* be a serious bug, and as a result, they aren't even visible
* under any other configuration.
*/
/**
* curr_task - return the current task for a given cpu.
* @cpu: the processor in question.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
*/
struct task_struct *curr_task(int cpu)
{
return cpu_curr(cpu);
}
#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
#ifdef CONFIG_IA64
/**
* set_curr_task - set the current task for a given cpu.
* @cpu: the processor in question.
* @p: the task pointer to set.
*
* Description: This function must only be used when non-maskable interrupts
* are serviced on a separate stack. It allows the architecture to switch the
* notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see
 * curr_task() above) and restore that value before reenabling interrupts and
 * restarting the system.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
*/
void set_curr_task(int cpu, struct task_struct *p)
{
cpu_curr(cpu) = p;
}
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
static void free_fair_sched_group(struct task_group *tg)
{
int i;
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
if (tg->se)
kfree(tg->se[i]);
}
kfree(tg->cfs_rq);
kfree(tg->se);
}
static
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se;
struct rq *rq;
int i;
tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
if (!tg->se)
goto err;
tg->shares = NICE_0_LOAD;
for_each_possible_cpu(i) {
rq = cpu_rq(i);
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)
goto err;
se = kzalloc_node(sizeof(struct sched_entity),
GFP_KERNEL, cpu_to_node(i));
if (!se)
goto err_free_rq;
init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
}
return 1;
err_free_rq:
kfree(cfs_rq);
err:
return 0;
}
static inline void register_fair_sched_group(struct task_group *tg, int cpu)
{
list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
&cpu_rq(cpu)->leaf_cfs_rq_list);
}
static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
}
#else /* !CONFIG_FAIR_GROUP_SCHED */
static inline void free_fair_sched_group(struct task_group *tg)
{
}
static inline
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
static inline void register_fair_sched_group(struct task_group *tg, int cpu)
{
}
static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static void free_rt_sched_group(struct task_group *tg)
{
int i;
destroy_rt_bandwidth(&tg->rt_bandwidth);
for_each_possible_cpu(i) {
if (tg->rt_rq)
kfree(tg->rt_rq[i]);
if (tg->rt_se)
kfree(tg->rt_se[i]);
}
kfree(tg->rt_rq);
kfree(tg->rt_se);
}
static
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
struct rt_rq *rt_rq;
struct sched_rt_entity *rt_se;
struct rq *rq;
int i;
tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_rq)
goto err;
tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
if (!tg->rt_se)
goto err;
init_rt_bandwidth(&tg->rt_bandwidth,
ktime_to_ns(def_rt_bandwidth.rt_period), 0);
for_each_possible_cpu(i) {
rq = cpu_rq(i);
rt_rq = kzalloc_node(sizeof(struct rt_rq),
GFP_KERNEL, cpu_to_node(i));
if (!rt_rq)
goto err;
rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
GFP_KERNEL, cpu_to_node(i));
if (!rt_se)
goto err_free_rq;
init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
}
return 1;
err_free_rq:
kfree(rt_rq);
err:
return 0;
}
static inline void register_rt_sched_group(struct task_group *tg, int cpu)
{
list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
&cpu_rq(cpu)->leaf_rt_rq_list);
}
static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
{
list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
}
#else /* !CONFIG_RT_GROUP_SCHED */
static inline void free_rt_sched_group(struct task_group *tg)
{
}
static inline
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
static inline void register_rt_sched_group(struct task_group *tg, int cpu)
{
}
static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
{
}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED
static void free_sched_group(struct task_group *tg)
{
free_fair_sched_group(tg);
free_rt_sched_group(tg);
kfree(tg);
}
/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(struct task_group *parent)
{
struct task_group *tg;
unsigned long flags;
int i;
tg = kzalloc(sizeof(*tg), GFP_KERNEL);
if (!tg)
return ERR_PTR(-ENOMEM);
if (!alloc_fair_sched_group(tg, parent))
goto err;
if (!alloc_rt_sched_group(tg, parent))
goto err;
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i) {
register_fair_sched_group(tg, i);
register_rt_sched_group(tg, i);
}
list_add_rcu(&tg->list, &task_groups);
WARN_ON(!parent); /* root should already exist */
tg->parent = parent;
INIT_LIST_HEAD(&tg->children);
list_add_rcu(&tg->siblings, &parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
return tg;
err:
free_sched_group(tg);
return ERR_PTR(-ENOMEM);
}
/* rcu callback to free various structures associated with a task group */
static void free_sched_group_rcu(struct rcu_head *rhp)
{
/* now it should be safe to free those cfs_rqs */
free_sched_group(container_of(rhp, struct task_group, rcu));
}
/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
unsigned long flags;
int i;
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i) {
unregister_fair_sched_group(tg, i);
unregister_rt_sched_group(tg, i);
}
list_del_rcu(&tg->list);
list_del_rcu(&tg->siblings);
spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for possible concurrent references to cfs_rqs to complete */
call_rcu(&tg->rcu, free_sched_group_rcu);
}
/*
 * Change a task's runqueue when it moves between groups.
 *
 * The caller of this function should have put the task in its new group
 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
 * reflect its new group.
 */
void sched_move_task(struct task_struct *tsk)
{
int on_rq, running;
unsigned long flags;
struct rq *rq;
rq = task_rq_lock(tsk, &flags);
running = task_current(rq, tsk);
on_rq = tsk->se.on_rq;
if (on_rq)
dequeue_task(rq, tsk, 0);
if (unlikely(running))
tsk->sched_class->put_prev_task(rq, tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->prep_move_group)
tsk->sched_class->prep_move_group(tsk, on_rq);
#endif
set_task_rq(tsk, task_cpu(tsk));
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->moved_group)
tsk->sched_class->moved_group(tsk, on_rq);
#endif
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
if (on_rq)
enqueue_task(rq, tsk, 0);
task_rq_unlock(rq, &flags);
}
#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
static void __set_se_shares(struct sched_entity *se, unsigned long shares)
{
struct cfs_rq *cfs_rq = se->cfs_rq;
int on_rq;
on_rq = se->on_rq;
if (on_rq)
dequeue_entity(cfs_rq, se, 0);
se->load.weight = shares;
se->load.inv_weight = 0;
if (on_rq)
enqueue_entity(cfs_rq, se, 0);
}
static void set_se_shares(struct sched_entity *se, unsigned long shares)
{
struct cfs_rq *cfs_rq = se->cfs_rq;
struct rq *rq = cfs_rq->rq;
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
__set_se_shares(se, shares);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static DEFINE_MUTEX(shares_mutex);
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int i;
unsigned long flags;
/*
* We can't change the weight of the root cgroup.
*/
if (!tg->se[0])
return -EINVAL;
if (shares < MIN_SHARES)
shares = MIN_SHARES;
else if (shares > MAX_SHARES)
shares = MAX_SHARES;
mutex_lock(&shares_mutex);
if (tg->shares == shares)
goto done;
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i)
unregister_fair_sched_group(tg, i);
list_del_rcu(&tg->siblings);
spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for any ongoing reference to this group to finish */
synchronize_sched();
/*
* Now we are free to modify the group's share on each cpu
* w/o tripping rebalance_share or load_balance_fair.
*/
tg->shares = shares;
for_each_possible_cpu(i) {
/*
* force a rebalance
*/
cfs_rq_set_shares(tg->cfs_rq[i], 0);
set_se_shares(tg->se[i], shares);
}
/*
* Enable load balance activity on this group, by inserting it back on
* each cpu's rq->leaf_cfs_rq_list.
*/
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i)
register_fair_sched_group(tg, i);
list_add_rcu(&tg->siblings, &tg->parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
done:
mutex_unlock(&shares_mutex);
return 0;
}
unsigned long sched_group_shares(struct task_group *tg)
{
return tg->shares;
}
#endif
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Ensure that the real time constraints are schedulable.
*/
static DEFINE_MUTEX(rt_constraints_mutex);
static unsigned long to_ratio(u64 period, u64 runtime)
{
if (runtime == RUNTIME_INF)
return 1ULL << 20;
return div64_u64(runtime << 20, period);
}
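/*
 * Worked example: the ratio is fixed point with 20 fractional bits.
 * For the default global limits (period 1s, runtime 0.95s):
 *
 *	to_ratio(1000000000, 950000000)
 *		= (950000000 << 20) / 1000000000
 *		= 996147		(~0.95 * (1 << 20))
 *
 * and RUNTIME_INF maps to exactly 1 << 20, i.e. a full CPU.
 */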
/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
struct task_struct *g, *p;
do_each_thread(g, p) {
if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
return 1;
} while_each_thread(g, p);
return 0;
}
struct rt_schedulable_data {
struct task_group *tg;
u64 rt_period;
u64 rt_runtime;
};
static int tg_schedulable(struct task_group *tg, void *data)
{
struct rt_schedulable_data *d = data;
struct task_group *child;
unsigned long total, sum = 0;
u64 period, runtime;
period = ktime_to_ns(tg->rt_bandwidth.rt_period);
runtime = tg->rt_bandwidth.rt_runtime;
if (tg == d->tg) {
period = d->rt_period;
runtime = d->rt_runtime;
}
/*
* Cannot have more runtime than the period.
*/
if (runtime > period && runtime != RUNTIME_INF)
return -EINVAL;
/*
* Ensure we don't starve existing RT tasks.
*/
if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
return -EBUSY;
total = to_ratio(period, runtime);
/*
* Nobody can have more than the global setting allows.
*/
if (total > to_ratio(global_rt_period(), global_rt_runtime()))
return -EINVAL;
/*
* The sum of our children's runtime should not exceed our own.
*/
list_for_each_entry_rcu(child, &tg->children, siblings) {
period = ktime_to_ns(child->rt_bandwidth.rt_period);
runtime = child->rt_bandwidth.rt_runtime;
if (child == d->tg) {
period = d->rt_period;
runtime = d->rt_runtime;
}
sum += to_ratio(period, runtime);
}
if (sum > total)
return -EINVAL;
return 0;
}
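/*
 * Example: a group asking for 0.5s runtime per 1s period has
 * total = 0.5 << 20. That passes the global check (0.5 < 0.95 with the
 * defaults), but its children's ratios must then sum to at most
 * 0.5 << 20, so two children each wanting 0.3s/1s (summing to
 * 0.6 << 20) would be rejected with -EINVAL.
 */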
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
struct rt_schedulable_data data = {
.tg = tg,
.rt_period = period,
.rt_runtime = runtime,
};
return walk_tg_tree(tg_schedulable, tg_nop, &data);
}
static int tg_set_bandwidth(struct task_group *tg,
u64 rt_period, u64 rt_runtime)
{
int i, err = 0;
mutex_lock(&rt_constraints_mutex);
read_lock(&tasklist_lock);
err = __rt_schedulable(tg, rt_period, rt_runtime);
if (err)
goto unlock;
raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
return err;
}
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
u64 rt_runtime, rt_period;
rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
if (rt_runtime_us < 0)
rt_runtime = RUNTIME_INF;
return tg_set_bandwidth(tg, rt_period, rt_runtime);
}
long sched_group_rt_runtime(struct task_group *tg)
{
u64 rt_runtime_us;
if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
return -1;
rt_runtime_us = tg->rt_bandwidth.rt_runtime;
do_div(rt_runtime_us, NSEC_PER_USEC);
return rt_runtime_us;
}
int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
u64 rt_runtime, rt_period;
rt_period = (u64)rt_period_us * NSEC_PER_USEC;
rt_runtime = tg->rt_bandwidth.rt_runtime;
if (rt_period == 0)
return -EINVAL;
return tg_set_bandwidth(tg, rt_period, rt_runtime);
}
long sched_group_rt_period(struct task_group *tg)
{
u64 rt_period_us;
rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
do_div(rt_period_us, NSEC_PER_USEC);
return rt_period_us;
}
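/*
 * Illustrative sketch (assumption; the cgroup cpu controller below
 * drives these setters in microseconds):
 *
 *	sched_group_set_rt_period(tg, 1000000);		1s period
 *	sched_group_set_rt_runtime(tg, 950000);		0.95s runtime
 *
 * A negative runtime argument maps to RUNTIME_INF, i.e. unlimited.
 */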
static int sched_rt_global_constraints(void)
{
u64 runtime, period;
int ret = 0;
if (sysctl_sched_rt_period <= 0)
return -EINVAL;
runtime = global_rt_runtime();
period = global_rt_period();
/*
* Sanity check on the sysctl variables.
*/
if (runtime > period && runtime != RUNTIME_INF)
return -EINVAL;
mutex_lock(&rt_constraints_mutex);
read_lock(&tasklist_lock);
ret = __rt_schedulable(NULL, 0, 0);
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
return ret;
}
int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
/* Don't accept realtime tasks when there is no way for them to run */
if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
return 0;
return 1;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
unsigned long flags;
int i;
if (sysctl_sched_rt_period <= 0)
return -EINVAL;
/*
 * There are always some RT tasks in the root group
 * -- migration, kstopmachine etc.
*/
if (sysctl_sched_rt_runtime == 0)
return -EBUSY;
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */
int sched_rt_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
int old_period, old_runtime;
static DEFINE_MUTEX(mutex);
mutex_lock(&mutex);
old_period = sysctl_sched_rt_period;
old_runtime = sysctl_sched_rt_runtime;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
ret = sched_rt_global_constraints();
if (ret) {
sysctl_sched_rt_period = old_period;
sysctl_sched_rt_runtime = old_runtime;
} else {
def_rt_bandwidth.rt_runtime = global_rt_runtime();
def_rt_bandwidth.rt_period =
ns_to_ktime(global_rt_period());
}
}
mutex_unlock(&mutex);
return ret;
}
#ifdef CONFIG_CGROUP_SCHED
/* return corresponding task_group object of a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
struct task_group, css);
}
static struct cgroup_subsys_state *
cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct task_group *tg, *parent;
if (!cgrp->parent) {
/* This is early initialization for the top cgroup */
return &init_task_group.css;
}
parent = cgroup_tg(cgrp->parent);
tg = sched_create_group(parent);
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
return &tg->css;
}
static void
cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct task_group *tg = cgroup_tg(cgrp);
sched_destroy_group(tg);
}
static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
if ((current != tsk) && (!capable(CAP_SYS_NICE))) {
const struct cred *cred = current_cred(), *tcred;
tcred = __task_cred(tsk);
if (cred->euid != tcred->uid && cred->euid != tcred->suid)
return -EPERM;
}
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
return -EINVAL;
#else
/* We don't support RT-tasks being in separate groups */
if (tsk->sched_class != &fair_sched_class)
return -EINVAL;
#endif
return 0;
}
static int
cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct task_struct *tsk, bool threadgroup)
{
int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
if (retval)
return retval;
if (threadgroup) {
struct task_struct *c;
rcu_read_lock();
list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
retval = cpu_cgroup_can_attach_task(cgrp, c);
if (retval) {
rcu_read_unlock();
return retval;
}
}
rcu_read_unlock();
}
return 0;
}
static void
cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
struct cgroup *old_cont, struct task_struct *tsk,
bool threadgroup)
{
sched_move_task(tsk);
if (threadgroup) {
struct task_struct *c;
rcu_read_lock();
list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
sched_move_task(c);
}
rcu_read_unlock();
}
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
u64 shareval)
{
return sched_group_set_shares(cgroup_tg(cgrp), shareval);
}
static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
struct task_group *tg = cgroup_tg(cgrp);
return (u64) tg->shares;
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
s64 val)
{
return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
}
static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
{
return sched_group_rt_runtime(cgroup_tg(cgrp));
}
static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
u64 rt_period_us)
{
return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
}
static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
{
return sched_group_rt_period(cgroup_tg(cgrp));
}
#endif /* CONFIG_RT_GROUP_SCHED */
static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "shares",
.read_u64 = cpu_shares_read_u64,
.write_u64 = cpu_shares_write_u64,
},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
{
.name = "rt_runtime_us",
.read_s64 = cpu_rt_runtime_read,
.write_s64 = cpu_rt_runtime_write,
},
{
.name = "rt_period_us",
.read_u64 = cpu_rt_period_read_uint,
.write_u64 = cpu_rt_period_write_uint,
},
#endif
};
static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}
struct cgroup_subsys cpu_cgroup_subsys = {
.name = "cpu",
.create = cpu_cgroup_create,
.destroy = cpu_cgroup_destroy,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
.populate = cpu_cgroup_populate,
.subsys_id = cpu_cgroup_subsys_id,
.early_init = 1,
};
#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_CGROUP_CPUACCT
/*
* CPU accounting code for task groups.
*
* Based on the work by Paul Menage ([email protected]) and Balbir Singh
* ([email protected]).
*/
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
struct cgroup_subsys_state css;
/* cpuusage holds pointer to a u64-type object on every cpu */
u64 __percpu *cpuusage;
struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
struct cpuacct *parent;
struct cpuacct_charge_calls *cpufreq_fn;
void *cpuacct_data;
};
static struct cpuacct *cpuacct_root;
/* Default calls for cpufreq accounting */
static struct cpuacct_charge_calls *cpuacct_cpufreq;
int cpuacct_register_cpufreq(struct cpuacct_charge_calls *fn)
{
cpuacct_cpufreq = fn;
/*
	 * Root node is created before the platform can register
	 * callbacks; initialize it here.
*/
if (cpuacct_root && fn) {
cpuacct_root->cpufreq_fn = fn;
if (fn->init)
fn->init(&cpuacct_root->cpuacct_data);
}
return 0;
}
struct cgroup_subsys cpuacct_subsys;
/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
struct cpuacct, css);
}
/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
struct cpuacct, css);
}
/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(
struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
int i;
if (!ca)
goto out;
ca->cpuusage = alloc_percpu(u64);
if (!ca->cpuusage)
goto out_free_ca;
for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
if (percpu_counter_init(&ca->cpustat[i], 0))
goto out_free_counters;
ca->cpufreq_fn = cpuacct_cpufreq;
	/* If available, have platform code initialize the cpu frequency table */
if (ca->cpufreq_fn && ca->cpufreq_fn->init)
ca->cpufreq_fn->init(&ca->cpuacct_data);
if (cgrp->parent)
ca->parent = cgroup_ca(cgrp->parent);
else
cpuacct_root = ca;
return &ca->css;
out_free_counters:
while (--i >= 0)
percpu_counter_destroy(&ca->cpustat[i]);
free_percpu(ca->cpuusage);
out_free_ca:
kfree(ca);
out:
return ERR_PTR(-ENOMEM);
}
/* destroy an existing cpu accounting group */
static void
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct cpuacct *ca = cgroup_ca(cgrp);
int i;
for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
percpu_counter_destroy(&ca->cpustat[i]);
free_percpu(ca->cpuusage);
kfree(ca);
}
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
u64 data;
#ifndef CONFIG_64BIT
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
raw_spin_lock_irq(&cpu_rq(cpu)->lock);
data = *cpuusage;
raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
data = *cpuusage;
#endif
return data;
}
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
#ifndef CONFIG_64BIT
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
raw_spin_lock_irq(&cpu_rq(cpu)->lock);
*cpuusage = val;
raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
*cpuusage = val;
#endif
}
/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
struct cpuacct *ca = cgroup_ca(cgrp);
u64 totalcpuusage = 0;
int i;
for_each_present_cpu(i)
totalcpuusage += cpuacct_cpuusage_read(ca, i);
return totalcpuusage;
}
static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
u64 reset)
{
struct cpuacct *ca = cgroup_ca(cgrp);
int err = 0;
int i;
if (reset) {
err = -EINVAL;
goto out;
}
for_each_present_cpu(i)
cpuacct_cpuusage_write(ca, i, 0);
out:
return err;
}
static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
struct seq_file *m)
{
struct cpuacct *ca = cgroup_ca(cgroup);
u64 percpu;
int i;
for_each_present_cpu(i) {
percpu = cpuacct_cpuusage_read(ca, i);
seq_printf(m, "%llu ", (unsigned long long) percpu);
}
seq_printf(m, "\n");
return 0;
}
static const char *cpuacct_stat_desc[] = {
[CPUACCT_STAT_USER] = "user",
[CPUACCT_STAT_SYSTEM] = "system",
};
static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct cpuacct *ca = cgroup_ca(cgrp);
int i;
for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
s64 val = percpu_counter_read(&ca->cpustat[i]);
val = cputime64_to_clock_t(val);
cb->fill(cb, cpuacct_stat_desc[i], val);
}
return 0;
}
static int cpuacct_cpufreq_show(struct cgroup *cgrp, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct cpuacct *ca = cgroup_ca(cgrp);
if (ca->cpufreq_fn && ca->cpufreq_fn->cpufreq_show)
ca->cpufreq_fn->cpufreq_show(ca->cpuacct_data, cb);
return 0;
}
/* return total cpu power usage (in milliwatt-seconds) of a group */
static u64 cpuacct_powerusage_read(struct cgroup *cgrp, struct cftype *cft)
{
int i;
struct cpuacct *ca = cgroup_ca(cgrp);
u64 totalpower = 0;
if (ca->cpufreq_fn && ca->cpufreq_fn->power_usage)
for_each_present_cpu(i) {
totalpower += ca->cpufreq_fn->power_usage(
ca->cpuacct_data);
}
return totalpower;
}
static struct cftype files[] = {
{
.name = "usage",
.read_u64 = cpuusage_read,
.write_u64 = cpuusage_write,
},
{
.name = "usage_percpu",
.read_seq_string = cpuacct_percpu_seq_read,
},
{
.name = "stat",
.read_map = cpuacct_stats_show,
},
{
.name = "cpufreq",
.read_map = cpuacct_cpufreq_show,
},
{
.name = "power",
.read_u64 = cpuacct_powerusage_read
},
};
static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
}
/*
* charge this task's execution time to its accounting group.
*
* called with rq->lock held.
*/
static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
struct cpuacct *ca;
int cpu;
if (unlikely(!cpuacct_subsys.active))
return;
cpu = task_cpu(tsk);
rcu_read_lock();
ca = task_ca(tsk);
for (; ca; ca = ca->parent) {
u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
*cpuusage += cputime;
/* Call back into platform code to account for CPU speeds */
if (ca->cpufreq_fn && ca->cpufreq_fn->charge)
ca->cpufreq_fn->charge(ca->cpuacct_data, cputime, cpu);
}
rcu_read_unlock();
}
/*
* When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
* in cputime_t units. As a result, cpuacct_update_stats calls
* percpu_counter_add with values large enough to always overflow the
* per cpu batch limit causing bad SMP scalability.
*
* To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
* batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
* and enabled. We cap it at INT_MAX which is the largest allowed batch value.
*/
#ifdef CONFIG_SMP
#define CPUACCT_BATCH \
min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
#else
#define CPUACCT_BATCH 0
#endif
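/*
 * Editor's illustrative sketch (hypothetical numbers, not part of the
 * original source): if percpu_counter_batch is 32 and cputime_one_jiffy
 * is 10000, the effective batch is
 *
 *	min_t(long, 32 * 10000, INT_MAX) = 320000
 *
 * so the per-cpu counters still fold into the global sum after roughly
 * 32 jiffies' worth of cputime, matching the behaviour seen when one
 * jiffy equals one cputime unit.
 */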
/*
* Charge the system/user time to the task's accounting group.
*/
static void cpuacct_update_stats(struct task_struct *tsk,
enum cpuacct_stat_index idx, cputime_t val)
{
struct cpuacct *ca;
int batch = CPUACCT_BATCH;
if (unlikely(!cpuacct_subsys.active))
return;
rcu_read_lock();
ca = task_ca(tsk);
do {
__percpu_counter_add(&ca->cpustat[idx], val, batch);
ca = ca->parent;
} while (ca);
rcu_read_unlock();
}
struct cgroup_subsys cpuacct_subsys = {
.name = "cpuacct",
.create = cpuacct_create,
.destroy = cpuacct_destroy,
.populate = cpuacct_populate,
.subsys_id = cpuacct_subsys_id,
};
#endif /* CONFIG_CGROUP_CPUACCT */
#ifndef CONFIG_SMP
void synchronize_sched_expedited(void)
{
barrier();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
#else /* #ifndef CONFIG_SMP */
static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
static int synchronize_sched_expedited_cpu_stop(void *data)
{
/*
* There must be a full memory barrier on each affected CPU
* between the time that try_stop_cpus() is called and the
* time that it returns.
*
* In the current initial implementation of cpu_stop, the
* above condition is already met when the control reaches
* this point and the following smp_mb() is not strictly
* necessary. Do smp_mb() anyway for documentation and
* robustness against future implementation changes.
*/
smp_mb(); /* See above comment block. */
return 0;
}
/*
* Wait for an rcu-sched grace period to elapse, but use "big hammer"
* approach to force grace period to end quickly. This consumes
* significant time on all CPUs, and is thus not recommended for
* any sort of common-case code.
*
* Note that it is illegal to call this function while holding any
* lock that is acquired by a CPU-hotplug notifier. Failing to
* observe this restriction will result in deadlock.
*/
void synchronize_sched_expedited(void)
{
int snap, trycount = 0;
smp_mb(); /* ensure prior mod happens before capturing snap. */
snap = atomic_read(&synchronize_sched_expedited_count) + 1;
get_online_cpus();
while (try_stop_cpus(cpu_online_mask,
synchronize_sched_expedited_cpu_stop,
NULL) == -EAGAIN) {
put_online_cpus();
if (trycount++ < 10)
udelay(trycount * num_online_cpus());
else {
synchronize_sched();
return;
}
if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
smp_mb(); /* ensure test happens before caller kfree */
return;
}
get_online_cpus();
}
atomic_inc(&synchronize_sched_expedited_count);
smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
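/*
 * Editor's illustrative usage sketch (not part of the original source;
 * gp, gp_lock and the surrounding types are hypothetical): a typical
 * caller publishes a new version of an RCU-sched protected pointer and
 * waits for the expedited grace period before freeing the old version.
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new_p);
 *	synchronize_sched_expedited();
 *	kfree(old);
 */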
#endif /* #else #ifndef CONFIG_SMP */
| gpl-2.0 |
ajf8/transmission-remote-gtk | src/util.c | 15717 | /*
* transmission-remote-gtk - A GTK RPC client to Transmission
* Copyright (C) 2011-2013 Alan Fitton
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Many of these functions are taken from the Transmission Project. */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <limits.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <glib/gi18n.h>
#include <glib-object.h>
#include <curl/curl.h>
#include <json-glib/json-glib.h>
#include <glib.h>
#include <glib/gprintf.h>
#include <gtk/gtk.h>
#include "util.h"
/***
**** The code for formatting size and speeds, taken from Transmission.
***/
const int disk_K = 1024;
const char *disk_K_str = N_("KiB");
const char *disk_M_str = N_("MiB");
const char *disk_G_str = N_("GiB");
const char *disk_T_str = N_("TiB");
const int speed_K = 1024;
const char *speed_K_str = N_("KiB/s");
const char *speed_M_str = N_("MiB/s");
const char *speed_G_str = N_("GiB/s");
const char *speed_T_str = N_("TiB/s");
struct formatter_unit {
char *name;
gint64 value;
};
struct formatter_units {
struct formatter_unit units[4];
};
enum { TR_FMT_KB, TR_FMT_MB, TR_FMT_GB, TR_FMT_TB };
static void
formatter_init(struct formatter_units *units,
unsigned int kilo,
const char *kb, const char *mb,
const char *gb, const char *tb)
{
guint64 value = kilo;
units->units[TR_FMT_KB].name = g_strdup(kb);
units->units[TR_FMT_KB].value = value;
value *= kilo;
units->units[TR_FMT_MB].name = g_strdup(mb);
units->units[TR_FMT_MB].value = value;
value *= kilo;
units->units[TR_FMT_GB].name = g_strdup(gb);
units->units[TR_FMT_GB].value = value;
value *= kilo;
units->units[TR_FMT_TB].name = g_strdup(tb);
units->units[TR_FMT_TB].value = value;
}
static char *formatter_get_size_str(const struct formatter_units *u,
char *buf, gint64 bytes, size_t buflen)
{
int precision;
double value;
const char *units;
const struct formatter_unit *unit;
if (bytes < u->units[1].value)
unit = &u->units[0];
else if (bytes < u->units[2].value)
unit = &u->units[1];
else if (bytes < u->units[3].value)
unit = &u->units[2];
else
unit = &u->units[3];
value = (double) bytes / unit->value;
units = unit->name;
if (unit->value == 1)
precision = 0;
else if (value < 100)
precision = 2;
else
precision = 1;
g_snprintf(buf, buflen, "%.*f %s", precision, value, units);
return buf;
}
static struct formatter_units size_units;
void
tr_formatter_size_init(unsigned int kilo,
const char *kb, const char *mb,
const char *gb, const char *tb)
{
formatter_init(&size_units, kilo, kb, mb, gb, tb);
}
char *tr_formatter_size_B(char *buf, gint64 bytes, size_t buflen)
{
return formatter_get_size_str(&size_units, buf, bytes, buflen);
}
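/* Editor's illustrative usage sketch (not part of the original source;
 * assumes tr_formatter_size_init(disk_K, disk_K_str, disk_M_str,
 * disk_G_str, disk_T_str) has already run, so kilo == 1024):
 *
 *     char buf[32];
 *     tr_formatter_size_B(buf, 1536, sizeof(buf));      -> "1.50 KiB"
 *     tr_formatter_size_B(buf, 157286400, sizeof(buf)); -> "150.0 MiB"
 */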
static struct formatter_units speed_units;
unsigned int tr_speed_K = 0u;
void
tr_formatter_speed_init(unsigned int kilo,
const char *kb, const char *mb,
const char *gb, const char *tb)
{
tr_speed_K = kilo;
formatter_init(&speed_units, kilo, kb, mb, gb, tb);
}
char *tr_formatter_speed_KBps(char *buf, double KBps, size_t buflen)
{
const double K = speed_units.units[TR_FMT_KB].value;
double speed = KBps;
if (speed <= 999.95) /* 0.0 KB to 999.9 KB */
g_snprintf(buf, buflen, "%d %s", (int) speed,
speed_units.units[TR_FMT_KB].name);
else {
speed /= K;
if (speed <= 99.995) /* 0.98 MB to 99.99 MB */
g_snprintf(buf, buflen, "%.2f %s", speed,
speed_units.units[TR_FMT_MB].name);
else if (speed <= 999.95) /* 100.0 MB to 999.9 MB */
g_snprintf(buf, buflen, "%.1f %s", speed,
speed_units.units[TR_FMT_MB].name);
else {
speed /= K;
g_snprintf(buf, buflen, "%.1f %s", speed,
speed_units.units[TR_FMT_GB].name);
}
}
return buf;
}
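/* Editor's illustrative sketch of the thresholds above (hypothetical
 * inputs, not part of the original source; assumes speed_K == 1024):
 *
 *     tr_formatter_speed_KBps(buf, 900.0, sizeof(buf));    -> "900 KiB/s"
 *     tr_formatter_speed_KBps(buf, 2048.0, sizeof(buf));   -> "2.00 MiB/s"
 *     tr_formatter_speed_KBps(buf, 150000.0, sizeof(buf)); -> "146.5 MiB/s"
 */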
/* URL checkers. */
gboolean is_magnet(const gchar * string)
{
return g_str_has_prefix(string, "magnet:");
}
gboolean is_url(const gchar * string)
{
/* return g_regex_match_simple ("^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?", string, 0, 0); */
return g_regex_match_simple("^http[s]?://", string, 0, 0);
}
/*
* Glib-ish Utility functions.
*/
gchar *trg_base64encode(const gchar * filename)
{
GError *error = NULL;
GMappedFile *mf = g_mapped_file_new(filename, FALSE, &error);
gchar *b64out = NULL;
    if (error) {
        g_error("%s", error->message);
        g_error_free(error);
    } else {
        b64out =
            g_base64_encode((guchar *) g_mapped_file_get_contents(mf),
                            g_mapped_file_get_length(mf));
        /* only unref on success; mf is NULL when the mapping failed */
        g_mapped_file_unref(mf);
    }
return b64out;
}
gchar *trg_gregex_get_first(GRegex * rx, const gchar * src)
{
GMatchInfo *mi = NULL;
gchar *dst = NULL;
g_regex_match(rx, src, 0, &mi);
if (mi) {
dst = g_match_info_fetch(mi, 1);
g_match_info_free(mi);
}
return dst;
}
GRegex *trg_uri_host_regex_new(void)
{
return
g_regex_new
("^[^:/?#]+:?//(?:www\\.|torrent\\.|torrents\\.|tracker\\.|\\d+\\.)?([^/?#:]*)",
G_REGEX_OPTIMIZE, 0, NULL);
}
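/* Editor's illustrative sketch (hypothetical URL, not part of the
 * original source): combined with trg_gregex_get_first() above, the
 * regex strips the scheme, common tracker prefixes and the port:
 *
 *     GRegex *rx = trg_uri_host_regex_new();
 *     gchar *host = trg_gregex_get_first(rx,
 *         "http://tracker.example.com:6969/announce");
 *     -> host == "example.com"
 *     g_free(host);
 *     g_regex_unref(rx);
 */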
void g_str_slist_free(GSList * list)
{
g_slist_foreach(list, (GFunc) g_free, NULL);
g_slist_free(list);
}
void rm_trailing_slashes(gchar * str)
{
int i, len;
if (!str)
return;
if ((len = strlen(str)) < 1)
return;
    /* stop at index 0 so an all-slash string can't read before str[0] */
    for (i = len - 1; i >= 0; i--) {
if (str[i] == '/')
str[i] = '\0';
else
return;
}
}
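/* Editor's illustrative sketch (not part of the original source):
 *
 *     gchar path[] = "/mnt/data///";
 *     rm_trailing_slashes(path);   -> path == "/mnt/data"
 *
 * A string consisting only of slashes is reduced to "".
 */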
/* Working with torrents.. */
void add_file_id_to_array(JsonObject * args, const gchar * key, gint index)
{
JsonArray *array;
if (json_object_has_member(args, key)) {
array = json_object_get_array_member(args, key);
} else {
array = json_array_new();
json_object_set_array_member(args, key, array);
}
json_array_add_int_element(array, index);
}
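/* Editor's illustrative sketch (hypothetical key, not part of the
 * original source): calling this twice with the same key accumulates
 * indexes in a single JSON array,
 *
 *     add_file_id_to_array(args, "files-wanted", 0);
 *     add_file_id_to_array(args, "files-wanted", 2);
 *
 * leaving args with "files-wanted": [0, 2].
 */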
/* GTK utilities. */
GtkWidget *gtr_combo_box_new_enum(const char *text_1, ...)
{
GtkWidget *w;
GtkCellRenderer *r;
GtkListStore *store;
va_list vl;
const char *text;
va_start(vl, text_1);
store = gtk_list_store_new(2, G_TYPE_INT, G_TYPE_STRING);
text = text_1;
if (text != NULL)
do {
const int val = va_arg(vl, int);
gtk_list_store_insert_with_values(store, NULL, INT_MAX, 0, val,
1, text, -1);
text = va_arg(vl, const char *);
}
while (text != NULL);
w = gtk_combo_box_new_with_model(GTK_TREE_MODEL(store));
r = gtk_cell_renderer_text_new();
gtk_cell_layout_pack_start(GTK_CELL_LAYOUT(w), r, TRUE);
gtk_cell_layout_set_attributes(GTK_CELL_LAYOUT(w), r, "text", 1, NULL);
/* cleanup */
g_object_unref(store);
return w;
}
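/* Editor's illustrative usage sketch (hypothetical labels, not part of
 * the original source): arguments are (label, value) pairs terminated
 * by a NULL label,
 *
 *     GtkWidget *combo = gtr_combo_box_new_enum(_("Off"), 0,
 *                                               _("On"), 1,
 *                                               NULL);
 *
 * which builds a two-row model with the int value in column 0 and the
 * visible text in column 1.
 */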
GtkWidget *my_scrolledwin_new(GtkWidget * child)
{
GtkWidget *scrolled_win = gtk_scrolled_window_new(NULL, NULL);
gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_win),
GTK_POLICY_AUTOMATIC,
GTK_POLICY_AUTOMATIC);
gtk_scrolled_window_set_shadow_type (GTK_SCROLLED_WINDOW(scrolled_win),
GTK_SHADOW_ETCHED_IN);
gtk_container_add(GTK_CONTAINER(scrolled_win), child);
return scrolled_win;
}
/* gtk_widget_set_visible() was introduced in 2.18; this wrapper keeps the
 * minimum GTK+ requirement at 2.16. */
void trg_widget_set_visible(GtkWidget * w, gboolean visible)
{
if (visible)
gtk_widget_show(w);
else
gtk_widget_hide(w);
}
void trg_error_dialog(GtkWindow * parent, trg_response * response)
{
gchar *msg = make_error_message(response->obj, response->status);
GtkWidget *dialog = gtk_message_dialog_new(parent,
GTK_DIALOG_MODAL,
GTK_MESSAGE_ERROR,
GTK_BUTTONS_OK, "%s",
msg);
gtk_window_set_title(GTK_WINDOW(dialog), _("Error"));
gtk_dialog_run(GTK_DIALOG(dialog));
gtk_widget_destroy(dialog);
g_free(msg);
}
gchar *make_error_message(JsonObject * response, int status)
{
if (status == FAIL_JSON_DECODE) {
return g_strdup(_("JSON decoding error."));
} else if (response && status == FAIL_RESPONSE_UNSUCCESSFUL) {
const gchar *resultStr =
json_object_get_string_member(response, "result");
if (resultStr == NULL)
return g_strdup(_("Server responded, but with no result."));
else
return g_strdup(resultStr);
} else if (status <= -100) {
return g_strdup_printf(_("Request failed with HTTP code %d"),
-(status + 100));
} else {
return g_strdup(curl_easy_strerror(status));
}
}
/* Formatters and Transmission basic utility functions.. */
char *tr_strlpercent(char *buf, double x, size_t buflen)
{
int precision;
if (x < 10.0)
precision = 2;
else if (x < 100.0)
precision = 1;
else
precision = 0;
g_snprintf(buf, buflen, "%.*f%%", precision, tr_truncd(x, precision));
return buf;
}
double tr_truncd(double x, int decimal_places)
{
const int i = (int) pow(10, decimal_places);
double x2 = (int) (x * i);
return x2 / i;
}
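/* Editor's illustrative sketch (not part of the original source):
 * tr_truncd(3.14159, 2) scales by 10^2, truncates 314.159 to 314 and
 * returns 3.14, i.e. it truncates rather than rounds. */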
char *tr_strratio(char *buf, size_t buflen, double ratio,
const char *infinity)
{
if ((int) ratio == TR_RATIO_NA)
g_strlcpy(buf, _("None"), buflen);
else if ((int) ratio == TR_RATIO_INF)
g_strlcpy(buf, infinity, buflen);
else if (ratio < 10.0)
g_snprintf(buf, buflen, "%.2f", tr_truncd(ratio, 2));
else if (ratio < 100.0)
g_snprintf(buf, buflen, "%.1f", tr_truncd(ratio, 1));
else
g_snprintf(buf, buflen, "%'.0f", ratio);
return buf;
}
char *tr_strlratio(char *buf, double ratio, size_t buflen)
{
return tr_strratio(buf, buflen, ratio, "\xE2\x88\x9E");
}
char *tr_strltime_short(char *buf, long seconds, size_t buflen)
{
int hours, minutes;
if (seconds < 0)
seconds = 0;
hours = seconds / 3600;
minutes = (seconds % 3600) / 60;
seconds = (seconds % 3600) % 60;
g_snprintf(buf, buflen, "%02d:%02d:%02ld", hours, minutes, seconds);
return buf;
}
char *tr_strltime_long(char *buf, long seconds, size_t buflen)
{
int days, hours, minutes;
char d[128], h[128], m[128], s[128];
if (seconds < 0)
seconds = 0;
days = seconds / 86400;
hours = (seconds % 86400) / 3600;
minutes = (seconds % 3600) / 60;
seconds = (seconds % 3600) % 60;
g_snprintf(d, sizeof(d), ngettext("%d day", "%d days", days), days);
g_snprintf(h, sizeof(h), ngettext("%d hour", "%d hours", hours),
hours);
g_snprintf(m, sizeof(m), ngettext("%d minute", "%d minutes", minutes),
minutes);
g_snprintf(s, sizeof(s),
ngettext("%ld second", "%ld seconds", seconds), seconds);
if (days) {
if (days >= 4 || !hours) {
g_strlcpy(buf, d, buflen);
} else {
g_snprintf(buf, buflen, "%s, %s", d, h);
}
} else if (hours) {
if (hours >= 4 || !minutes) {
g_strlcpy(buf, h, buflen);
} else {
g_snprintf(buf, buflen, "%s, %s", h, m);
}
} else if (minutes) {
if (minutes >= 4 || !seconds) {
g_strlcpy(buf, m, buflen);
} else {
g_snprintf(buf, buflen, "%s, %s", m, s);
}
} else {
g_strlcpy(buf, s, buflen);
}
return buf;
}
char *gtr_localtime(time_t time)
{
const struct tm tm = *localtime(&time);
char buf[256], *eoln;
g_strlcpy(buf, asctime(&tm), sizeof(buf));
if ((eoln = strchr(buf, '\n')))
*eoln = '\0';
return g_locale_to_utf8(buf, -1, NULL, NULL, NULL);
}
char *gtr_localtime2(char *buf, time_t time, size_t buflen)
{
char *tmp = gtr_localtime(time);
g_strlcpy(buf, tmp, buflen);
g_free(tmp);
return buf;
}
gchar *epoch_to_string(gint64 epoch)
{
if(epoch == 0)
return g_strdup(_("N/A"));
GDateTime *dt = g_date_time_new_from_unix_local(epoch);
gchar *timestring = g_date_time_format(dt, "%F %H:%M:%S");
g_date_time_unref(dt);
return timestring;
}
/* Wrap the first http(s) URL in the text with a pango-markup hyperlink.
 * Whether or not a link is found, a newly allocated string is returned. */
gchar *add_links_to_text(const gchar * original)
{
/* return if original already contains links */
if (g_regex_match_simple("<a\\s.*>", original, 0, 0)) {
return g_strdup(original);
}
gchar *newText, *url, *link;
GMatchInfo *match_info;
GRegex *regex =
g_regex_new("(https?://[a-zA-Z0-9_\\-\\./?=&]+)", 0, 0, NULL);
    /* extract the url and build an escaped link */
g_regex_match(regex, original, 0, &match_info);
url = g_match_info_fetch(match_info, 1);
if(url) {
link = g_markup_printf_escaped("<a href='%s'>%s</a>", url, url);
newText = g_regex_replace(regex, original, -1, 0, link,
0, NULL);
g_free(url);
g_free(link);
} else {
newText = g_strdup(original);
}
g_regex_unref(regex);
g_match_info_unref(match_info);
return newText;
}
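/* Editor's illustrative sketch (hypothetical input, not part of the
 * original source):
 *
 *     add_links_to_text("See https://example.com/faq for details")
 *
 * returns a newly allocated
 *
 *     "See <a href='https://example.com/faq'>https://example.com/faq</a>
 *      for details"
 *
 * while input that already contains an <a ...> tag is returned as an
 * unmodified copy.
 */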
char *tr_strlsize(char *buf, guint64 bytes, size_t buflen)
{
if (!bytes)
g_strlcpy(buf, Q_("None"), buflen);
else
tr_formatter_size_B(buf, bytes, buflen);
return buf;
}
gboolean is_minimised_arg(const gchar * arg)
{
return !g_strcmp0(arg, "-m")
|| !g_strcmp0(arg, "--minimized") || !g_strcmp0(arg, "/m");
}
gboolean should_be_minimised(int argc, char *argv[])
{
int i;
for (i = 1; i < argc; i++)
if (is_minimised_arg(argv[i]))
return TRUE;
return FALSE;
}
GtkWidget *trg_hbox_new(gboolean homogeneous, gint spacing)
{
GtkWidget *box;
box = gtk_box_new(GTK_ORIENTATION_HORIZONTAL, spacing);
gtk_box_set_homogeneous(GTK_BOX(box), homogeneous);
return box;
}
GtkWidget *trg_vbox_new(gboolean homogeneous, gint spacing)
{
GtkWidget *box;
box = gtk_box_new(GTK_ORIENTATION_VERTICAL, spacing);
gtk_box_set_homogeneous(GTK_BOX(box), homogeneous);
return box;
}
#ifdef WIN32
gchar *trg_win32_support_path(gchar * file)
{
gchar *moddir =
g_win32_get_package_installation_directory_of_module(NULL);
gchar *path = g_build_filename(moddir, file, NULL);
g_free(moddir);
return path;
}
#endif
gboolean is_unity(void)
{
return g_strcmp0(g_getenv("XDG_CURRENT_DESKTOP"), "Unity") == 0;
}
| gpl-2.0 |
meebey/smuxi-head-mirror | lib/db4o-net/Db4objects.Db4o/native/Sharpen/Lang/IdentityHashCodeProvider.cs | 1240 | /* Copyright (C) 2005 Versant Inc. http://www.db4o.com */
using System;
using System.Reflection;
namespace Sharpen.Lang
{
public class IdentityHashCodeProvider
{
#if !CF
public static int IdentityHashCode(object obj)
{
return System.Runtime.CompilerServices.RuntimeHelpers.GetHashCode(obj);
}
#else
public static int IdentityHashCode(object obj)
{
if (obj == null) return 0;
return (int) _hashMethod.Invoke(null, new object[] { obj });
}
private static MethodInfo _hashMethod = GetIdentityHashCodeMethod();
private static MethodInfo GetIdentityHashCodeMethod()
{
Assembly assembly = typeof(object).Assembly;
try
{
Type t = assembly.GetType("System.PInvoke.EE");
return t.GetMethod(
"Object_GetHashCode",
BindingFlags.Public |
BindingFlags.NonPublic |
BindingFlags.Static);
}
			catch (Exception)
{
}
// We may be running the CF app on .NET Framework 1.1
// for profiling, let's give that a chance
try
{
Type t = assembly.GetType(
"System.Runtime.CompilerServices.RuntimeHelpers");
return t.GetMethod(
"GetHashCode",
BindingFlags.Public |
BindingFlags.Static);
}
catch
{
}
return null;
}
#endif
}
} | gpl-2.0 |
chidelmun/server | storage/innobase/btr/btr0sea.cc | 53277 | /*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2017, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
*****************************************************************************/
/********************************************************************//**
@file btr/btr0sea.cc
The index tree adaptive search
Created 2/17/1996 Heikki Tuuri
*************************************************************************/
#include "btr0sea.h"
#ifdef BTR_CUR_HASH_ADAPT
#include "buf0buf.h"
#include "page0page.h"
#include "page0cur.h"
#include "btr0cur.h"
#include "btr0pcur.h"
#include "btr0btr.h"
#include "ha0ha.h"
#include "srv0mon.h"
#include "sync0sync.h"
/** Is search system enabled.
Search system is protected by array of latches. */
char btr_search_enabled = true;
/** Number of adaptive hash index partition. */
ulong btr_ahi_parts = 8;
#ifdef UNIV_SEARCH_PERF_STAT
/** Number of successful adaptive hash index lookups */
ulint btr_search_n_succ = 0;
/** Number of failed adaptive hash index lookups */
ulint btr_search_n_hash_fail = 0;
#endif /* UNIV_SEARCH_PERF_STAT */
/** padding to prevent other memory update
hotspots from residing on the same memory
cache line as btr_search_latches */
UNIV_INTERN byte btr_sea_pad1[CACHE_LINE_SIZE];
/** The latches protecting the adaptive search system: these latches protect
(1) the positions of records on those pages where a hash index has been built.
NOTE: they do not protect values of non-ordering fields within a record from
being updated in-place! We can use fact (1) to perform unique searches on
indexes. We allocate the latches from dynamic memory to get them onto the
same DRAM page as other hotspot semaphores. */
rw_lock_t** btr_search_latches;
/** padding to prevent other memory update hotspots from residing on
the same memory cache line */
UNIV_INTERN byte btr_sea_pad2[CACHE_LINE_SIZE];
/** The adaptive hash index */
btr_search_sys_t* btr_search_sys;
/** If the number of records on the page divided by this parameter
would have been successfully accessed using a hash index, the index
is then built on the page, assuming the global limit has been reached */
#define BTR_SEARCH_PAGE_BUILD_LIMIT 16
/** The global limit for consecutive potentially successful hash searches,
before hash index building is started */
#define BTR_SEARCH_BUILD_LIMIT 100
/** Determine the number of accessed key fields.
@param[in] n_fields number of complete fields
@param[in] n_bytes number of bytes in an incomplete last field
@return number of complete or incomplete fields */
inline MY_ATTRIBUTE((warn_unused_result))
ulint
btr_search_get_n_fields(
ulint n_fields,
ulint n_bytes)
{
return(n_fields + (n_bytes > 0 ? 1 : 0));
}
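/* Editor's illustrative note (not part of the original source): for
example, n_fields = 2 complete fields plus n_bytes = 3 bytes of the next
field count as 3 accessed fields; with n_bytes = 0 the count stays 2. */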
/** Determine the number of accessed key fields.
@param[in] cursor b-tree cursor
@return number of complete or incomplete fields */
inline MY_ATTRIBUTE((warn_unused_result))
ulint
btr_search_get_n_fields(
const btr_cur_t* cursor)
{
return(btr_search_get_n_fields(cursor->n_fields, cursor->n_bytes));
}
/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
dict_index_t* index, /*!< in: index for which to build, or NULL if
not known */
buf_block_t* block, /*!< in: index page, s- or x-latched */
ulint n_fields,/*!< in: hash this many full fields */
ulint n_bytes,/*!< in: hash this many bytes from the next
field */
ibool left_side);/*!< in: hash for searches from left side? */
/** This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. Checks that
there is a free buffer frame allocated for the hash table heap in the btr
search system. If not, allocates a free frame for the heap. This check makes
it probable that, when we have reserved the btr search system latch and need to
allocate a new node to the hash table, it will succeed. However, the check
will not guarantee success.
@param[in] index index handler */
static
void
btr_search_check_free_space_in_heap(dict_index_t* index)
{
hash_table_t* table;
mem_heap_t* heap;
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S));
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
table = btr_get_search_table(index);
heap = table->heap;
/* Note that we peek the value of heap->free_block without reserving
the latch: this is ok, because we will not guarantee that there will
be enough free space in the hash table. */
if (heap->free_block == NULL) {
buf_block_t* block = buf_block_alloc(NULL);
btr_search_x_lock(index);
if (btr_search_enabled
&& heap->free_block == NULL) {
heap->free_block = block;
} else {
buf_block_free(block);
}
btr_search_x_unlock(index);
}
}
/** Creates and initializes the adaptive search system at a database start.
@param[in] hash_size hash table size. */
void
btr_search_sys_create(ulint hash_size)
{
/* Search System is divided into n parts.
Each part controls access to distinct set of hash buckets from
hash table through its own latch. */
/* Step-1: Allocate latches (1 per part). */
btr_search_latches = reinterpret_cast<rw_lock_t**>(
ut_malloc(sizeof(rw_lock_t*) * btr_ahi_parts, mem_key_ahi));
for (ulint i = 0; i < btr_ahi_parts; ++i) {
btr_search_latches[i] = reinterpret_cast<rw_lock_t*>(
ut_malloc(sizeof(rw_lock_t), mem_key_ahi));
rw_lock_create(btr_search_latch_key,
btr_search_latches[i], SYNC_SEARCH_SYS);
}
	/* Step-2: Allocate hash tables. */
btr_search_sys = reinterpret_cast<btr_search_sys_t*>(
ut_malloc(sizeof(btr_search_sys_t), mem_key_ahi));
btr_search_sys->hash_tables = reinterpret_cast<hash_table_t**>(
ut_malloc(sizeof(hash_table_t*) * btr_ahi_parts, mem_key_ahi));
for (ulint i = 0; i < btr_ahi_parts; ++i) {
btr_search_sys->hash_tables[i] =
ib_create((hash_size / btr_ahi_parts),
LATCH_ID_HASH_TABLE_MUTEX,
0, MEM_HEAP_FOR_BTR_SEARCH);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
btr_search_sys->hash_tables[i]->adaptive = TRUE;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
}
}
/** Resize hash index hash table.
@param[in] hash_size hash index hash table size */
void
btr_search_sys_resize(ulint hash_size)
{
/* Step-1: Lock all search latches in exclusive mode. */
btr_search_x_lock_all();
if (btr_search_enabled) {
btr_search_x_unlock_all();
ib::error() << "btr_search_sys_resize failed because"
" hash index hash table is not empty.";
ut_ad(0);
return;
}
/* Step-2: Recreate hash tables with new size. */
for (ulint i = 0; i < btr_ahi_parts; ++i) {
mem_heap_free(btr_search_sys->hash_tables[i]->heap);
hash_table_free(btr_search_sys->hash_tables[i]);
btr_search_sys->hash_tables[i] =
ib_create((hash_size / btr_ahi_parts),
LATCH_ID_HASH_TABLE_MUTEX,
0, MEM_HEAP_FOR_BTR_SEARCH);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
btr_search_sys->hash_tables[i]->adaptive = TRUE;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
}
/* Step-3: Unlock all search latches from exclusive mode. */
btr_search_x_unlock_all();
}
/** Frees the adaptive search system at a database shutdown. */
void
btr_search_sys_free()
{
ut_ad(btr_search_sys != NULL && btr_search_latches != NULL);
/* Step-1: Release the hash tables. */
for (ulint i = 0; i < btr_ahi_parts; ++i) {
mem_heap_free(btr_search_sys->hash_tables[i]->heap);
hash_table_free(btr_search_sys->hash_tables[i]);
}
ut_free(btr_search_sys->hash_tables);
ut_free(btr_search_sys);
btr_search_sys = NULL;
	/* Step-2: Release all allocated latches. */
for (ulint i = 0; i < btr_ahi_parts; ++i) {
rw_lock_free(btr_search_latches[i]);
ut_free(btr_search_latches[i]);
}
ut_free(btr_search_latches);
btr_search_latches = NULL;
}
/** Set index->ref_count = 0 on all indexes of a table.
@param[in,out] table table handler */
static
void
btr_search_disable_ref_count(
dict_table_t* table)
{
dict_index_t* index;
ut_ad(mutex_own(&dict_sys->mutex));
for (index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
ut_ad(rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
index->search_info->ref_count = 0;
}
}
/** Disable the adaptive hash search system and empty the index.
@param[in] need_mutex need to acquire dict_sys->mutex */
void
btr_search_disable(
bool need_mutex)
{
dict_table_t* table;
if (need_mutex) {
mutex_enter(&dict_sys->mutex);
}
ut_ad(mutex_own(&dict_sys->mutex));
btr_search_x_lock_all();
if (!btr_search_enabled) {
if (need_mutex) {
mutex_exit(&dict_sys->mutex);
}
btr_search_x_unlock_all();
return;
}
btr_search_enabled = false;
/* Clear the index->search_info->ref_count of every index in
the data dictionary cache. */
for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table;
table = UT_LIST_GET_NEXT(table_LRU, table)) {
btr_search_disable_ref_count(table);
}
for (table = UT_LIST_GET_FIRST(dict_sys->table_non_LRU); table;
table = UT_LIST_GET_NEXT(table_LRU, table)) {
btr_search_disable_ref_count(table);
}
if (need_mutex) {
mutex_exit(&dict_sys->mutex);
}
/* Set all block->index = NULL. */
buf_pool_clear_hash_index();
/* Clear the adaptive hash index. */
for (ulint i = 0; i < btr_ahi_parts; ++i) {
hash_table_clear(btr_search_sys->hash_tables[i]);
mem_heap_empty(btr_search_sys->hash_tables[i]->heap);
}
btr_search_x_unlock_all();
}
/** Enable the adaptive hash search system. */
void
btr_search_enable()
{
buf_pool_mutex_enter_all();
if (srv_buf_pool_old_size != srv_buf_pool_size) {
buf_pool_mutex_exit_all();
return;
}
buf_pool_mutex_exit_all();
btr_search_x_lock_all();
btr_search_enabled = true;
btr_search_x_unlock_all();
}
/** Returns the value of ref_count. The value is protected by latch.
@param[in] info search info
@param[in] index index identifier
@return ref_count value. */
ulint
btr_search_info_get_ref_count(
btr_search_t* info,
dict_index_t* index)
{
ulint ret = 0;
if (!btr_search_enabled) {
return(ret);
}
ut_ad(info);
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S));
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
btr_search_s_lock(index);
ret = info->ref_count;
btr_search_s_unlock(index);
return(ret);
}
/** Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its fields
are consistent.
@param[in,out] info search info
@param[in] cursor cursor which was just positioned */
static
void
btr_search_info_update_hash(
btr_search_t* info,
btr_cur_t* cursor)
{
dict_index_t* index = cursor->index;
ulint n_unique;
int cmp;
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S));
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
if (dict_index_is_ibuf(index)) {
/* So many deletes are performed on an insert buffer tree
that we do not consider a hash index useful on it: */
return;
}
n_unique = dict_index_get_n_unique_in_tree(index);
if (info->n_hash_potential == 0) {
goto set_new_recomm;
}
/* Test if the search would have succeeded using the recommended
hash prefix */
if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {
increment_potential:
info->n_hash_potential++;
return;
}
cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
cursor->low_match, cursor->low_bytes);
if (info->left_side ? cmp <= 0 : cmp > 0) {
goto set_new_recomm;
}
cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
cursor->up_match, cursor->up_bytes);
if (info->left_side ? cmp <= 0 : cmp > 0) {
goto increment_potential;
}
set_new_recomm:
/* We have to set a new recommendation; skip the hash analysis
for a while to avoid unnecessary CPU time usage when there is no
chance for success */
info->hash_analysis = 0;
cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
cursor->low_match, cursor->low_bytes);
if (cmp == 0) {
info->n_hash_potential = 0;
/* For extra safety, we set some sensible values here */
info->n_fields = 1;
info->n_bytes = 0;
info->left_side = TRUE;
} else if (cmp > 0) {
info->n_hash_potential = 1;
if (cursor->up_match >= n_unique) {
info->n_fields = n_unique;
info->n_bytes = 0;
} else if (cursor->low_match < cursor->up_match) {
info->n_fields = cursor->low_match + 1;
info->n_bytes = 0;
} else {
info->n_fields = cursor->low_match;
info->n_bytes = cursor->low_bytes + 1;
}
info->left_side = TRUE;
} else {
info->n_hash_potential = 1;
if (cursor->low_match >= n_unique) {
info->n_fields = n_unique;
info->n_bytes = 0;
} else if (cursor->low_match > cursor->up_match) {
info->n_fields = cursor->up_match + 1;
info->n_bytes = 0;
} else {
info->n_fields = cursor->up_match;
info->n_bytes = cursor->up_bytes + 1;
}
info->left_side = FALSE;
}
}
/** Update the block search info on hash successes. NOTE that info and
block->n_hash_helps, n_fields, n_bytes, left_side are NOT protected by any
semaphore, to save CPU time! Do not assume the fields are consistent.
@return TRUE if building a (new) hash index on the block is recommended
@param[in,out] info search info
@param[in,out] block buffer block
@param[in] cursor cursor */
static
ibool
btr_search_update_block_hash_info(
btr_search_t* info,
buf_block_t* block,
const btr_cur_t* cursor)
{
ut_ad(!rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_S));
ut_ad(!rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X));
ut_ad(rw_lock_own(&block->lock, RW_LOCK_S)
|| rw_lock_own(&block->lock, RW_LOCK_X));
info->last_hash_succ = FALSE;
ut_a(buf_block_state_valid(block));
ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);
if ((block->n_hash_helps > 0)
&& (info->n_hash_potential > 0)
&& (block->n_fields == info->n_fields)
&& (block->n_bytes == info->n_bytes)
&& (block->left_side == info->left_side)) {
if ((block->index)
&& (block->curr_n_fields == info->n_fields)
&& (block->curr_n_bytes == info->n_bytes)
&& (block->curr_left_side == info->left_side)) {
/* The search would presumably have succeeded using
the hash index */
info->last_hash_succ = TRUE;
}
block->n_hash_helps++;
} else {
block->n_hash_helps = 1;
block->n_fields = info->n_fields;
block->n_bytes = info->n_bytes;
block->left_side = info->left_side;
}
if ((block->n_hash_helps > page_get_n_recs(block->frame)
/ BTR_SEARCH_PAGE_BUILD_LIMIT)
&& (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {
if ((!block->index)
|| (block->n_hash_helps
> 2 * page_get_n_recs(block->frame))
|| (block->n_fields != block->curr_n_fields)
|| (block->n_bytes != block->curr_n_bytes)
|| (block->left_side != block->curr_left_side)) {
/* Build a new hash index on the page */
return(TRUE);
}
}
return(FALSE);
}
/** Updates a hash node reference when it has been unsuccessfully used in a
search which could have succeeded with the used hash parameters. This can
happen because when building a hash index for a page, we do not check
what happens at page boundaries, and therefore there can be misleading
hash nodes. Also, collisions in the fold value can lead to misleading
references. This function lazily fixes these imperfections in the hash
index.
@param[in] info search info
@param[in] block buffer block where cursor positioned
@param[in] cursor cursor */
static
void
btr_search_update_hash_ref(
const btr_search_t* info,
buf_block_t* block,
const btr_cur_t* cursor)
{
dict_index_t* index;
ulint fold;
rec_t* rec;
ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
ut_ad(rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X));
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_S)
|| rw_lock_own(&(block->lock), RW_LOCK_X));
ut_ad(page_align(btr_cur_get_rec(cursor))
== buf_block_get_frame(block));
assert_block_ahi_valid(block);
index = block->index;
if (!index) {
return;
}
ut_ad(block->page.id.space() == index->space);
ut_a(index == cursor->index);
ut_a(!dict_index_is_ibuf(index));
if ((info->n_hash_potential > 0)
&& (block->curr_n_fields == info->n_fields)
&& (block->curr_n_bytes == info->n_bytes)
&& (block->curr_left_side == info->left_side)) {
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
rec = btr_cur_get_rec(cursor);
if (!page_rec_is_user_rec(rec)) {
return;
}
fold = rec_fold(rec,
rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap),
block->curr_n_fields,
block->curr_n_bytes, index->id);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
ut_ad(rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
ha_insert_for_fold(btr_get_search_table(index), fold,
block, rec);
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
}
}
/** Updates the search info.
@param[in,out] info search info
@param[in] cursor cursor which was just positioned */
void
btr_search_info_update_slow(
btr_search_t* info,
btr_cur_t* cursor)
{
buf_block_t* block;
ibool build_index;
ut_ad(!rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_S));
ut_ad(!rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X));
block = btr_cur_get_block(cursor);
/* NOTE that the following two function calls do NOT protect
info or block->n_fields etc. with any semaphore, to save CPU time!
We cannot assume the fields are consistent when we return from
those functions! */
btr_search_info_update_hash(info, cursor);
build_index = btr_search_update_block_hash_info(info, block, cursor);
if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) {
btr_search_check_free_space_in_heap(cursor->index);
}
if (cursor->flag == BTR_CUR_HASH_FAIL) {
/* Update the hash node reference, if appropriate */
#ifdef UNIV_SEARCH_PERF_STAT
btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */
btr_search_x_lock(cursor->index);
btr_search_update_hash_ref(info, block, cursor);
btr_search_x_unlock(cursor->index);
}
if (build_index) {
/* Note that since we did not protect block->n_fields etc.
with any semaphore, the values can be inconsistent. We have
to check inside the function call that they make sense. */
btr_search_build_page_hash_index(cursor->index, block,
block->n_fields,
block->n_bytes,
block->left_side);
}
}
/** Checks if a guessed position for a tree cursor is right. Note that if
mode is PAGE_CUR_LE, which is used in inserts, and the function returns
TRUE, then cursor->up_match and cursor->low_match both have sensible values.
@param[in,out] cursor guess cursor position
@param[in] can_only_compare_to_cursor_rec
if we do not have a latch on the page of cursor,
but a latch corresponding search system, then
ONLY the columns of the record UNDER the cursor
are protected, not the next or previous record
in the chain: we cannot look at the next or
previous record to check our guess!
@param[in] tuple data tuple
@param[in] mode PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G, PAGE_CUR_GE
@param[in] mtr mini transaction
@return TRUE if success */
static
ibool
btr_search_check_guess(
btr_cur_t* cursor,
ibool can_only_compare_to_cursor_rec,
const dtuple_t* tuple,
ulint mode,
mtr_t* mtr)
{
rec_t* rec;
ulint n_unique;
ulint match;
int cmp;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
ibool success = FALSE;
rec_offs_init(offsets_);
n_unique = dict_index_get_n_unique_in_tree(cursor->index);
rec = btr_cur_get_rec(cursor);
ut_ad(page_rec_is_user_rec(rec));
match = 0;
offsets = rec_get_offsets(rec, cursor->index, offsets,
n_unique, &heap);
cmp = cmp_dtuple_rec_with_match(tuple, rec, offsets, &match);
if (mode == PAGE_CUR_GE) {
if (cmp > 0) {
goto exit_func;
}
cursor->up_match = match;
if (match >= n_unique) {
success = TRUE;
goto exit_func;
}
} else if (mode == PAGE_CUR_LE) {
if (cmp < 0) {
goto exit_func;
}
cursor->low_match = match;
} else if (mode == PAGE_CUR_G) {
if (cmp >= 0) {
goto exit_func;
}
} else if (mode == PAGE_CUR_L) {
if (cmp <= 0) {
goto exit_func;
}
}
if (can_only_compare_to_cursor_rec) {
/* Since we could not determine if our guess is right just by
looking at the record under the cursor, return FALSE */
goto exit_func;
}
match = 0;
if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
rec_t* prev_rec;
ut_ad(!page_rec_is_infimum(rec));
prev_rec = page_rec_get_prev(rec);
if (page_rec_is_infimum(prev_rec)) {
success = btr_page_get_prev(page_align(prev_rec), mtr)
== FIL_NULL;
goto exit_func;
}
offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
n_unique, &heap);
cmp = cmp_dtuple_rec_with_match(
tuple, prev_rec, offsets, &match);
if (mode == PAGE_CUR_GE) {
success = cmp > 0;
} else {
success = cmp >= 0;
}
goto exit_func;
} else {
rec_t* next_rec;
ut_ad(!page_rec_is_supremum(rec));
next_rec = page_rec_get_next(rec);
if (page_rec_is_supremum(next_rec)) {
if (btr_page_get_next(page_align(next_rec), mtr)
== FIL_NULL) {
cursor->up_match = 0;
success = TRUE;
}
goto exit_func;
}
offsets = rec_get_offsets(next_rec, cursor->index, offsets,
n_unique, &heap);
cmp = cmp_dtuple_rec_with_match(
tuple, next_rec, offsets, &match);
if (mode == PAGE_CUR_LE) {
success = cmp < 0;
cursor->up_match = match;
} else {
success = cmp <= 0;
}
}
exit_func:
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
return(success);
}
static
void
btr_search_failure(btr_search_t* info, btr_cur_t* cursor)
{
cursor->flag = BTR_CUR_HASH_FAIL;
#ifdef UNIV_SEARCH_PERF_STAT
++info->n_hash_fail;
if (info->n_hash_succ > 0) {
--info->n_hash_succ;
}
#endif /* UNIV_SEARCH_PERF_STAT */
info->last_hash_succ = FALSE;
}
/** Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values.
@param[in,out] index index
@param[in,out] info index search info
@param[in] tuple logical record
@param[in] mode PAGE_CUR_L, ....
@param[in] latch_mode BTR_SEARCH_LEAF, ...;
NOTE that only if has_search_latch is 0, we will
have a latch set on the cursor page, otherwise
we assume the caller uses his search latch
to protect the record!
@param[out] cursor tree cursor
@param[in] has_search_latch
latch mode the caller currently has on
search system: RW_S/X_LATCH or 0
@param[in] mtr mini transaction
@return TRUE if succeeded */
ibool
btr_search_guess_on_hash(
dict_index_t* index,
btr_search_t* info,
const dtuple_t* tuple,
ulint mode,
ulint latch_mode,
btr_cur_t* cursor,
ulint has_search_latch,
mtr_t* mtr)
{
const rec_t* rec;
ulint fold;
index_id_t index_id;
#ifdef notdefined
btr_cur_t cursor2;
btr_pcur_t pcur;
#endif
if (!btr_search_enabled) {
return(FALSE);
}
ut_ad(index && info && tuple && cursor && mtr);
ut_ad(!dict_index_is_ibuf(index));
ut_ad((latch_mode == BTR_SEARCH_LEAF)
|| (latch_mode == BTR_MODIFY_LEAF));
/* Not supported for spatial index */
ut_ad(!dict_index_is_spatial(index));
/* Note that, for efficiency, the struct info may not be protected by
any latch here! */
if (info->n_hash_potential == 0) {
return(FALSE);
}
cursor->n_fields = info->n_fields;
cursor->n_bytes = info->n_bytes;
if (dtuple_get_n_fields(tuple) < btr_search_get_n_fields(cursor)) {
return(FALSE);
}
index_id = index->id;
#ifdef UNIV_SEARCH_PERF_STAT
info->n_hash_succ++;
#endif
fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id);
cursor->fold = fold;
cursor->flag = BTR_CUR_HASH;
if (!has_search_latch) {
btr_search_s_lock(index);
if (!btr_search_enabled) {
btr_search_s_unlock(index);
btr_search_failure(info, cursor);
return(FALSE);
}
}
ut_ad(rw_lock_get_writer(btr_get_search_latch(index)) != RW_LOCK_X);
ut_ad(rw_lock_get_reader_count(btr_get_search_latch(index)) > 0);
rec = (rec_t*) ha_search_and_get_data(
btr_get_search_table(index), fold);
if (rec == NULL) {
if (!has_search_latch) {
btr_search_s_unlock(index);
}
btr_search_failure(info, cursor);
return(FALSE);
}
buf_block_t* block = buf_block_from_ahi(rec);
if (!has_search_latch) {
if (!buf_page_get_known_nowait(
latch_mode, block, BUF_MAKE_YOUNG,
__FILE__, __LINE__, mtr)) {
if (!has_search_latch) {
btr_search_s_unlock(index);
}
btr_search_failure(info, cursor);
return(FALSE);
}
btr_search_s_unlock(index);
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
}
if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);
if (!has_search_latch) {
btr_leaf_page_release(block, latch_mode, mtr);
}
btr_search_failure(info, cursor);
return(FALSE);
}
ut_ad(page_rec_is_user_rec(rec));
btr_cur_position(index, (rec_t*) rec, block, cursor);
/* Check the validity of the guess within the page */
/* If we only have the latch on search system, not on the
page, it only protects the columns of the record the cursor
is positioned on. We cannot look at the next of the previous
record to determine if our guess for the cursor position is
right. */
if (index_id != btr_page_get_index_id(block->frame)
|| !btr_search_check_guess(cursor,
has_search_latch,
tuple, mode, mtr)) {
if (!has_search_latch) {
btr_leaf_page_release(block, latch_mode, mtr);
}
btr_search_failure(info, cursor);
return(FALSE);
}
if (info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5) {
info->n_hash_potential++;
}
#ifdef notdefined
/* These lines of code can be used in a debug version to check
the correctness of the searched cursor position: */
info->last_hash_succ = FALSE;
/* Currently, does not work if the following fails: */
ut_ad(!has_search_latch);
btr_leaf_page_release(block, latch_mode, mtr);
btr_cur_search_to_nth_level(
index, 0, tuple, mode, latch_mode, &cursor2, 0, mtr);
if (mode == PAGE_CUR_GE
&& page_rec_is_supremum(btr_cur_get_rec(&cursor2))) {
/* If mode is PAGE_CUR_GE, then the binary search
in the index tree may actually take us to the supremum
of the previous page */
info->last_hash_succ = FALSE;
btr_pcur_open_on_user_rec(
index, tuple, mode, latch_mode, &pcur, mtr);
ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
} else {
ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
}
/* NOTE that it is theoretically possible that the above assertions
fail if the page of the cursor gets removed from the buffer pool
meanwhile! Thus it might not be a bug. */
#endif
info->last_hash_succ = TRUE;
#ifdef UNIV_SEARCH_PERF_STAT
btr_search_n_succ++;
#endif
if (!has_search_latch && buf_page_peek_if_too_old(&block->page)) {
buf_page_make_young(&block->page);
}
/* Increment the page get statistics though we did not really
fix the page: for user info only */
{
buf_pool_t* buf_pool = buf_pool_from_bpage(&block->page);
++buf_pool->stat.n_page_gets;
}
return(TRUE);
}
/** Drop any adaptive hash index entries that point to an index page.
@param[in,out] block block containing index page, s- or x-latched, or an
index page for which we know that
block->buf_fix_count == 0 or it is an index page which
has already been removed from the buf_pool->page_hash
i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
void
btr_search_drop_page_hash_index(buf_block_t* block)
{
ulint n_fields;
ulint n_bytes;
const page_t* page;
const rec_t* rec;
ulint fold;
ulint prev_fold;
ulint n_cached;
ulint n_recs;
ulint* folds;
ulint i;
mem_heap_t* heap;
const dict_index_t* index;
ulint* offsets;
rw_lock_t* latch;
btr_search_t* info;
retry:
/* Do a dirty check on block->index, return if the block is
not in the adaptive hash index. */
index = block->index;
/* This debug check uses a dirty read that could theoretically cause
false positives while buf_pool_clear_hash_index() is executing. */
assert_block_ahi_valid(block);
if (index == NULL) {
return;
}
ut_ad(block->page.buf_fix_count == 0
|| buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH
|| rw_lock_own(&block->lock, RW_LOCK_S)
|| rw_lock_own(&block->lock, RW_LOCK_X));
/* We must not dereference index here, because it could be freed
if (index->table->n_ref_count == 0 && !mutex_own(&dict_sys->mutex)).
Determine the ahi_slot based on the block contents. */
const index_id_t index_id
= btr_page_get_index_id(block->frame);
const ulint ahi_slot
= ut_fold_ulint_pair(static_cast<ulint>(index_id),
static_cast<ulint>(block->page.id.space()))
% btr_ahi_parts;
latch = btr_search_latches[ahi_slot];
ut_ad(!btr_search_own_any(RW_LOCK_S));
ut_ad(!btr_search_own_any(RW_LOCK_X));
rw_lock_s_lock(latch);
assert_block_ahi_valid(block);
if (block->index == NULL) {
rw_lock_s_unlock(latch);
return;
}
/* The index associated with a block must remain the
same, because we are holding block->lock or the block is
not accessible by other threads (BUF_BLOCK_REMOVE_HASH),
or the index is not accessible to other threads
(buf_fix_count == 0 when DROP TABLE or similar is executing
buf_LRU_drop_page_hash_for_tablespace()). */
ut_a(index == block->index);
#ifdef MYSQL_INDEX_DISABLE_AHI
ut_ad(!index->disable_ahi);
#endif
ut_ad(btr_search_enabled);
ut_ad(block->page.id.space() == index->space);
ut_a(index_id == index->id);
ut_a(!dict_index_is_ibuf(index));
#ifdef UNIV_DEBUG
switch (dict_index_get_online_status(index)) {
case ONLINE_INDEX_CREATION:
/* The index is being created (bulk loaded). */
case ONLINE_INDEX_COMPLETE:
/* The index has been published. */
case ONLINE_INDEX_ABORTED:
/* Either the index creation was aborted due to an
error observed by InnoDB (in which case there should
not be any adaptive hash index entries), or it was
completed and then flagged aborted in
rollback_inplace_alter_table(). */
break;
case ONLINE_INDEX_ABORTED_DROPPED:
/* The index should have been dropped from the tablespace
already, and the adaptive hash index entries should have
been dropped as well. */
ut_error;
}
#endif /* UNIV_DEBUG */
n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
/* NOTE: The AHI fields of block must not be accessed after
releasing search latch, as the index page might only be s-latched! */
rw_lock_s_unlock(latch);
ut_a(n_fields > 0 || n_bytes > 0);
page = block->frame;
n_recs = page_get_n_recs(page);
/* Calculate and cache fold values into an array for fast deletion
from the hash index */
folds = (ulint*) ut_malloc_nokey(n_recs * sizeof(ulint));
n_cached = 0;
rec = page_get_infimum_rec(page);
rec = page_rec_get_next_low(rec, page_is_comp(page));
prev_fold = 0;
heap = NULL;
offsets = NULL;
while (!page_rec_is_supremum(rec)) {
offsets = rec_get_offsets(
rec, index, offsets,
btr_search_get_n_fields(n_fields, n_bytes),
&heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
if (fold == prev_fold && prev_fold != 0) {
goto next_rec;
}
/* Remove all hash nodes pointing to this page from the
hash chain */
folds[n_cached] = fold;
n_cached++;
next_rec:
rec = page_rec_get_next_low(rec, page_rec_is_comp(rec));
prev_fold = fold;
}
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
rw_lock_x_lock(latch);
if (UNIV_UNLIKELY(!block->index)) {
/* Someone else has meanwhile dropped the hash index */
goto cleanup;
}
ut_a(block->index == index);
if (block->curr_n_fields != n_fields
|| block->curr_n_bytes != n_bytes) {
/* Someone else has meanwhile built a new hash index on the
page, with different parameters */
rw_lock_x_unlock(latch);
ut_free(folds);
goto retry;
}
for (i = 0; i < n_cached; i++) {
ha_remove_all_nodes_to_page(
btr_search_sys->hash_tables[ahi_slot],
folds[i], page);
}
info = btr_search_get_info(block->index);
ut_a(info->ref_count > 0);
info->ref_count--;
block->index = NULL;
MONITOR_INC(MONITOR_ADAPTIVE_HASH_PAGE_REMOVED);
MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_REMOVED, n_cached);
cleanup:
assert_block_ahi_valid(block);
rw_lock_x_unlock(latch);
ut_free(folds);
}
/** Drop any adaptive hash index entries that may point to an index
page that may be in the buffer pool, when a page is evicted from the
buffer pool or freed in a file segment.
@param[in] page_id page id
@param[in] page_size page size */
void
btr_search_drop_page_hash_when_freed(
const page_id_t& page_id,
const page_size_t& page_size)
{
buf_block_t* block;
mtr_t mtr;
dberr_t err = DB_SUCCESS;
ut_d(export_vars.innodb_ahi_drop_lookups++);
mtr_start(&mtr);
/* If the caller has a latch on the page, then the caller must
have a x-latch on the page and it must have already dropped
the hash index for the page. Because of the x-latch that we
are possibly holding, we cannot s-latch the page, but must
(recursively) x-latch it, even though we are only reading. */
block = buf_page_get_gen(page_id, page_size, RW_X_LATCH, NULL,
BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__,
&mtr, &err);
if (block) {
/* If AHI is still valid, page can't be in free state.
AHI is dropped when page is freed. */
ut_ad(!block->page.file_page_was_freed);
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
dict_index_t* index = block->index;
if (index != NULL) {
/* In all our callers, the table handle should
be open, or we should be in the process of
dropping the table (preventing eviction). */
ut_ad(index->table->n_ref_count > 0
|| mutex_own(&dict_sys->mutex));
btr_search_drop_page_hash_index(block);
}
}
mtr_commit(&mtr);
}
/** Build a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible, and does not build a hash index if not.
@param[in,out] index index for which to build.
@param[in,out] block index page, s-/x- latched.
@param[in] n_fields hash this many full fields
@param[in] n_bytes hash this many bytes of the next field
@param[in] left_side hash for searches from left side */
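/* Illustration (not from the original source): with n_fields=2 and
n_bytes=1, each record's fold hashes its first two complete fields plus
the first byte of the third field, i.e. btr_search_get_n_fields(2, 1)
== 3 columns are fetched by rec_get_offsets() before rec_fold(). */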
static
void
btr_search_build_page_hash_index(
dict_index_t* index,
buf_block_t* block,
ulint n_fields,
ulint n_bytes,
ibool left_side)
{
hash_table_t* table;
page_t* page;
rec_t* rec;
rec_t* next_rec;
ulint fold;
ulint next_fold;
ulint n_cached;
ulint n_recs;
ulint* folds;
rec_t** recs;
ulint i;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
#ifdef MYSQL_INDEX_DISABLE_AHI
if (index->disable_ahi) return;
#endif
if (!btr_search_enabled) {
return;
}
rec_offs_init(offsets_);
ut_ad(index);
ut_ad(block->page.id.space() == index->space);
ut_a(!dict_index_is_ibuf(index));
ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X));
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_S)
|| rw_lock_own(&(block->lock), RW_LOCK_X));
btr_search_s_lock(index);
table = btr_get_search_table(index);
page = buf_block_get_frame(block);
if (block->index && ((block->curr_n_fields != n_fields)
|| (block->curr_n_bytes != n_bytes)
|| (block->curr_left_side != left_side))) {
btr_search_s_unlock(index);
btr_search_drop_page_hash_index(block);
} else {
btr_search_s_unlock(index);
}
/* Check that the values for hash index build are sensible */
if (n_fields == 0 && n_bytes == 0) {
return;
}
if (dict_index_get_n_unique_in_tree(index)
< btr_search_get_n_fields(n_fields, n_bytes)) {
return;
}
n_recs = page_get_n_recs(page);
if (n_recs == 0) {
return;
}
/* Calculate and cache fold values and corresponding records into
an array for fast insertion to the hash index */
folds = (ulint*) ut_malloc_nokey(n_recs * sizeof(ulint));
recs = (rec_t**) ut_malloc_nokey(n_recs * sizeof(rec_t*));
n_cached = 0;
ut_a(index->id == btr_page_get_index_id(page));
rec = page_rec_get_next(page_get_infimum_rec(page));
offsets = rec_get_offsets(
rec, index, offsets,
btr_search_get_n_fields(n_fields, n_bytes),
&heap);
ut_ad(page_rec_is_supremum(rec)
|| n_fields + (n_bytes > 0) == rec_offs_n_fields(offsets));
fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
if (left_side) {
folds[n_cached] = fold;
recs[n_cached] = rec;
n_cached++;
}
for (;;) {
next_rec = page_rec_get_next(rec);
if (page_rec_is_supremum(next_rec)) {
if (!left_side) {
folds[n_cached] = fold;
recs[n_cached] = rec;
n_cached++;
}
break;
}
offsets = rec_get_offsets(
next_rec, index, offsets,
btr_search_get_n_fields(n_fields, n_bytes), &heap);
next_fold = rec_fold(next_rec, offsets, n_fields,
n_bytes, index->id);
if (fold != next_fold) {
/* Insert an entry into the hash index */
if (left_side) {
folds[n_cached] = next_fold;
recs[n_cached] = next_rec;
n_cached++;
} else {
folds[n_cached] = fold;
recs[n_cached] = rec;
n_cached++;
}
}
rec = next_rec;
fold = next_fold;
}
btr_search_check_free_space_in_heap(index);
btr_search_x_lock(index);
if (!btr_search_enabled) {
goto exit_func;
}
if (block->index && ((block->curr_n_fields != n_fields)
|| (block->curr_n_bytes != n_bytes)
|| (block->curr_left_side != left_side))) {
goto exit_func;
}
/* This counter is decremented every time we drop page
hash index entries and is incremented here. Since we can
rebuild hash index for a page that is already hashed, we
have to take care not to increment the counter in that
case. */
if (!block->index) {
assert_block_ahi_empty(block);
index->search_info->ref_count++;
}
block->n_hash_helps = 0;
block->curr_n_fields = unsigned(n_fields);
block->curr_n_bytes = unsigned(n_bytes);
block->curr_left_side = unsigned(left_side);
block->index = index;
for (i = 0; i < n_cached; i++) {
ha_insert_for_fold(table, folds[i], block, recs[i]);
}
MONITOR_INC(MONITOR_ADAPTIVE_HASH_PAGE_ADDED);
MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_ADDED, n_cached);
exit_func:
assert_block_ahi_valid(block);
btr_search_x_unlock(index);
ut_free(folds);
ut_free(recs);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
}
/** Moves or deletes hash entries for moved records. If new_page is already
hashed, then the hash index for page, if any, is dropped. If new_page is not
hashed, and page is hashed, then a new hash index is built to new_page with the
same parameters as page (this often happens when a page is split).
@param[in,out] new_block records are copied to this page.
@param[in,out]	block	index page from which records are copied, and the
copied records will be deleted from this page.
@param[in,out] index record descriptor */
void
btr_search_move_or_delete_hash_entries(
buf_block_t* new_block,
buf_block_t* block,
dict_index_t* index)
{
#ifdef MYSQL_INDEX_DISABLE_AHI
if (index->disable_ahi) return;
#endif
if (!btr_search_enabled) {
return;
}
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_X));
ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_X));
btr_search_s_lock(index);
ut_a(!new_block->index || new_block->index == index);
ut_a(!block->index || block->index == index);
ut_a(!(new_block->index || block->index)
|| !dict_index_is_ibuf(index));
assert_block_ahi_valid(block);
assert_block_ahi_valid(new_block);
if (new_block->index) {
btr_search_s_unlock(index);
btr_search_drop_page_hash_index(block);
return;
}
if (block->index) {
ulint n_fields = block->curr_n_fields;
ulint n_bytes = block->curr_n_bytes;
ibool left_side = block->curr_left_side;
new_block->n_fields = block->curr_n_fields;
new_block->n_bytes = block->curr_n_bytes;
new_block->left_side = left_side;
btr_search_s_unlock(index);
ut_a(n_fields > 0 || n_bytes > 0);
btr_search_build_page_hash_index(
index, new_block, n_fields, n_bytes, left_side);
ut_ad(n_fields == block->curr_n_fields);
ut_ad(n_bytes == block->curr_n_bytes);
ut_ad(left_side == block->curr_left_side);
return;
}
btr_search_s_unlock(index);
}
/** Updates the page hash index when a single record is deleted from a page.
@param[in] cursor cursor which was positioned on the record to delete
using btr_cur_search_...; the record is not yet deleted. */
void
btr_search_update_hash_on_delete(btr_cur_t* cursor)
{
hash_table_t* table;
buf_block_t* block;
const rec_t* rec;
ulint fold;
dict_index_t* index;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
mem_heap_t* heap = NULL;
rec_offs_init(offsets_);
#ifdef MYSQL_INDEX_DISABLE_AHI
if (cursor->index->disable_ahi) return;
#endif
if (!btr_search_enabled) {
return;
}
block = btr_cur_get_block(cursor);
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_X));
assert_block_ahi_valid(block);
index = block->index;
if (!index) {
return;
}
ut_ad(block->page.id.space() == index->space);
ut_a(index == cursor->index);
ut_a(block->curr_n_fields > 0 || block->curr_n_bytes > 0);
ut_a(!dict_index_is_ibuf(index));
table = btr_get_search_table(index);
rec = btr_cur_get_rec(cursor);
fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap),
block->curr_n_fields, block->curr_n_bytes, index->id);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
btr_search_x_lock(index);
assert_block_ahi_valid(block);
if (block->index) {
ut_a(block->index == index);
if (ha_search_and_delete_if_found(table, fold, rec)) {
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_REMOVED);
} else {
MONITOR_INC(
MONITOR_ADAPTIVE_HASH_ROW_REMOVE_NOT_FOUND);
}
assert_block_ahi_valid(block);
}
btr_search_x_unlock(index);
}
/** Updates the page hash index when a single record is inserted on a page.
@param[in] cursor cursor which was positioned to the place to insert
using btr_cur_search_..., and the new record has been
inserted next to the cursor. */
void
btr_search_update_hash_node_on_insert(btr_cur_t* cursor)
{
hash_table_t* table;
buf_block_t* block;
dict_index_t* index;
rec_t* rec;
#ifdef MYSQL_INDEX_DISABLE_AHI
if (cursor->index->disable_ahi) return;
#endif
if (!btr_search_enabled) {
return;
}
rec = btr_cur_get_rec(cursor);
block = btr_cur_get_block(cursor);
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_X));
index = block->index;
if (!index) {
return;
}
ut_a(cursor->index == index);
ut_a(!dict_index_is_ibuf(index));
btr_search_x_lock(index);
if (!block->index) {
goto func_exit;
}
ut_a(block->index == index);
if ((cursor->flag == BTR_CUR_HASH)
&& (cursor->n_fields == block->curr_n_fields)
&& (cursor->n_bytes == block->curr_n_bytes)
&& !block->curr_left_side) {
table = btr_get_search_table(index);
if (ha_search_and_update_if_found(
table, cursor->fold, rec, block,
page_rec_get_next(rec))) {
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_UPDATED);
}
func_exit:
assert_block_ahi_valid(block);
btr_search_x_unlock(index);
} else {
btr_search_x_unlock(index);
btr_search_update_hash_on_insert(cursor);
}
}
/** Updates the page hash index when a single record is inserted on a page.
@param[in,out] cursor cursor which was positioned to the
place to insert using btr_cur_search_...,
and the new record has been inserted next
to the cursor */
void
btr_search_update_hash_on_insert(btr_cur_t* cursor)
{
hash_table_t* table;
buf_block_t* block;
dict_index_t* index;
const rec_t* rec;
const rec_t* ins_rec;
const rec_t* next_rec;
ulint fold;
ulint ins_fold;
ulint next_fold = 0; /* remove warning (??? bug ???) */
ulint n_fields;
ulint n_bytes;
ibool left_side;
ibool locked = FALSE;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
rec_offs_init(offsets_);
#ifdef MYSQL_INDEX_DISABLE_AHI
if (cursor->index->disable_ahi) return;
#endif
if (!btr_search_enabled) {
return;
}
block = btr_cur_get_block(cursor);
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_X));
assert_block_ahi_valid(block);
index = block->index;
if (!index) {
return;
}
ut_ad(block->page.id.space() == index->space);
btr_search_check_free_space_in_heap(index);
table = btr_get_search_table(index);
rec = btr_cur_get_rec(cursor);
#ifdef MYSQL_INDEX_DISABLE_AHI
ut_a(!index->disable_ahi);
#endif
ut_a(index == cursor->index);
ut_a(!dict_index_is_ibuf(index));
n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
left_side = block->curr_left_side;
ins_rec = page_rec_get_next_const(rec);
next_rec = page_rec_get_next_const(ins_rec);
offsets = rec_get_offsets(ins_rec, index, offsets,
ULINT_UNDEFINED, &heap);
ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id);
if (!page_rec_is_supremum(next_rec)) {
offsets = rec_get_offsets(
next_rec, index, offsets,
btr_search_get_n_fields(n_fields, n_bytes), &heap);
next_fold = rec_fold(next_rec, offsets, n_fields,
n_bytes, index->id);
}
if (!page_rec_is_infimum(rec)) {
offsets = rec_get_offsets(
rec, index, offsets,
btr_search_get_n_fields(n_fields, n_bytes), &heap);
fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
} else {
if (left_side) {
btr_search_x_lock(index);
locked = TRUE;
if (!btr_search_enabled) {
goto function_exit;
}
ha_insert_for_fold(table, ins_fold, block, ins_rec);
}
goto check_next_rec;
}
if (fold != ins_fold) {
if (!locked) {
btr_search_x_lock(index);
locked = TRUE;
if (!btr_search_enabled) {
goto function_exit;
}
}
if (!left_side) {
ha_insert_for_fold(table, fold, block, rec);
} else {
ha_insert_for_fold(table, ins_fold, block, ins_rec);
}
}
check_next_rec:
if (page_rec_is_supremum(next_rec)) {
if (!left_side) {
if (!locked) {
btr_search_x_lock(index);
locked = TRUE;
if (!btr_search_enabled) {
goto function_exit;
}
}
ha_insert_for_fold(table, ins_fold, block, ins_rec);
}
goto function_exit;
}
if (ins_fold != next_fold) {
if (!locked) {
btr_search_x_lock(index);
locked = TRUE;
if (!btr_search_enabled) {
goto function_exit;
}
}
if (!left_side) {
ha_insert_for_fold(table, ins_fold, block, ins_rec);
} else {
ha_insert_for_fold(table, next_fold, block, next_rec);
}
}
function_exit:
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
if (locked) {
btr_search_x_unlock(index);
}
}
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Validates the search system for given hash table.
@param[in] hash_table_id hash table to validate
@return TRUE if ok */
static
ibool
btr_search_hash_table_validate(ulint hash_table_id)
{
ha_node_t* node;
ibool ok = TRUE;
ulint i;
ulint cell_count;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
if (!btr_search_enabled) {
return(TRUE);
}
/* How many cells to check before temporarily releasing
search latches. */
ulint chunk_size = 10000;
rec_offs_init(offsets_);
btr_search_x_lock_all();
buf_pool_mutex_enter_all();
cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
for (i = 0; i < cell_count; i++) {
/* We release search latches every once in a while to
give other queries a chance to run. */
if ((i != 0) && ((i % chunk_size) == 0)) {
buf_pool_mutex_exit_all();
btr_search_x_unlock_all();
os_thread_yield();
btr_search_x_lock_all();
buf_pool_mutex_enter_all();
ulint curr_cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
if (cell_count != curr_cell_count) {
cell_count = curr_cell_count;
if (i >= cell_count) {
break;
}
}
}
node = (ha_node_t*) hash_get_nth_cell(
btr_search_sys->hash_tables[hash_table_id], i)->node;
for (; node != NULL; node = node->next) {
const buf_block_t* block
= buf_block_from_ahi((byte*) node->data);
const buf_block_t* hash_block;
buf_pool_t* buf_pool;
index_id_t page_index_id;
buf_pool = buf_pool_from_bpage((buf_page_t*) block);
if (UNIV_LIKELY(buf_block_get_state(block)
== BUF_BLOCK_FILE_PAGE)) {
/* The space and offset are only valid
for file blocks. It is possible that
the block is being freed
(BUF_BLOCK_REMOVE_HASH, see the
assertion and the comment below) */
hash_block = buf_block_hash_get(
buf_pool,
block->page.id);
} else {
hash_block = NULL;
}
if (hash_block) {
ut_a(hash_block == block);
} else {
/* When a block is being freed,
buf_LRU_search_and_free_block() first
removes the block from
buf_pool->page_hash by calling
buf_LRU_block_remove_hashed_page().
After that, it invokes
btr_search_drop_page_hash_index() to
remove the block from
btr_search_sys->hash_tables[i]. */
ut_a(buf_block_get_state(block)
== BUF_BLOCK_REMOVE_HASH);
}
ut_a(!dict_index_is_ibuf(block->index));
ut_ad(block->page.id.space() == block->index->space);
page_index_id = btr_page_get_index_id(block->frame);
offsets = rec_get_offsets(
node->data, block->index, offsets,
btr_search_get_n_fields(block->curr_n_fields,
block->curr_n_bytes),
&heap);
const ulint fold = rec_fold(
node->data, offsets,
block->curr_n_fields,
block->curr_n_bytes,
page_index_id);
if (node->fold != fold) {
const page_t* page = block->frame;
ok = FALSE;
ib::error() << "Error in an adaptive hash"
<< " index pointer to page "
<< page_id_t(page_get_space_id(page),
page_get_page_no(page))
<< ", ptr mem address "
<< reinterpret_cast<const void*>(
node->data)
<< ", index id " << page_index_id
<< ", node fold " << node->fold
<< ", rec fold " << fold;
fputs("InnoDB: Record ", stderr);
rec_print_new(stderr, node->data, offsets);
fprintf(stderr, "\nInnoDB: on that page."
" Page mem address %p, is hashed %p,"
" n fields %lu\n"
"InnoDB: side %lu\n",
(void*) page, (void*) block->index,
(ulong) block->curr_n_fields,
(ulong) block->curr_left_side);
ut_ad(0);
}
}
}
for (i = 0; i < cell_count; i += chunk_size) {
/* We release search latches every once in a while to
give other queries a chance to run. */
if (i != 0) {
buf_pool_mutex_exit_all();
btr_search_x_unlock_all();
os_thread_yield();
btr_search_x_lock_all();
buf_pool_mutex_enter_all();
ulint curr_cell_count = hash_get_n_cells(
btr_search_sys->hash_tables[hash_table_id]);
if (cell_count != curr_cell_count) {
cell_count = curr_cell_count;
if (i >= cell_count) {
break;
}
}
}
ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);
if (!ha_validate(btr_search_sys->hash_tables[hash_table_id],
i, end_index)) {
ok = FALSE;
}
}
buf_pool_mutex_exit_all();
btr_search_x_unlock_all();
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
return(ok);
}
/** Validate the search system.
@return true if ok. */
bool
btr_search_validate()
{
for (ulint i = 0; i < btr_ahi_parts; ++i) {
if (!btr_search_hash_table_validate(i)) {
return(false);
}
}
return(true);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
#endif /* BTR_CUR_HASH_ADAPT */
| gpl-2.0 |
TheProjecter/sharedmind | freemind/common/NamedObject.java | 1843 | /*FreeMind - A Program for creating and viewing Mindmaps
*Copyright (C) 2000-2006 Joerg Mueller, Daniel Polansky, Christian Foltin, Dimitri Polivaev and others.
*
*See COPYING for Details
*
*This program is free software; you can redistribute it and/or
*modify it under the terms of the GNU General Public License
*as published by the Free Software Foundation; either version 2
*of the License, or (at your option) any later version.
*
*This program is distributed in the hope that it will be useful,
*but WITHOUT ANY WARRANTY; without even the implied warranty of
*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*GNU General Public License for more details.
*
*You should have received a copy of the GNU General Public License
*along with this program; if not, write to the Free Software
*Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* Created on 08.05.2005
*
*/
package freemind.common;
/**
* Utility Class for displaying local object names in GUI components.
*
* @author Dimitri Polivaev
* 18.01.2007
*/
public class NamedObject{
private String name;
private Object object;
private NamedObject(){
}
public NamedObject(Object object, String name) {
this.object = object;
this.name = name;
}
static public NamedObject literal(String literal){
NamedObject result = new NamedObject();
result.object = literal;
result.name = literal;
return result;
}
public boolean equals(Object o){
if (o instanceof NamedObject){
NamedObject ts = (NamedObject)o;
return object.equals(ts.object);
}
return object.equals(o);
}
public String toString(){
return name;
}
public Object getObject(){
return object;
}
}
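/* Hypothetical usage sketch (not part of the original source; the
 * combo box and values below are illustrative only):
 *
 *   JComboBox box = new JComboBox();
 *   box.addItem(new NamedObject(someValue, "Name shown in the GUI"));
 *   box.addItem(NamedObject.literal("shown and stored as-is"));
 *
 * The component displays toString() (the localized name) while
 * equals() still compares the wrapped object. */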
| gpl-2.0 |
rohankataria/linuxScheduler | include/config/usb/serial/keyspan/usa28xa.h | 42 | #undef CONFIG_USB_SERIAL_KEYSPAN_USA28XA
| gpl-2.0 |
PaymentHighway/woocommerce-gateway-paymenthighway | includes/vendor/respect/validation/tests/unit/Rules/NoneOfTest.php | 1502 | <?php
/*
* This file is part of Respect/Validation.
*
* (c) Alexandre Gomes Gaigalas <[email protected]>
*
* For the full copyright and license information, please view the "LICENSE.md"
* file that was distributed with this source code.
*/
namespace Respect\Validation\Rules;
use Respect\Validation\TestCase;
/**
* @group rule
* @covers Respect\Validation\Rules\NoneOf
* @covers Respect\Validation\Exceptions\NoneOfException
*/
class NoneOfTest extends TestCase
{
public function testValid()
{
$valid1 = new Callback(function () {
return false;
});
$valid2 = new Callback(function () {
return false;
});
$valid3 = new Callback(function () {
return false;
});
$o = new NoneOf($valid1, $valid2, $valid3);
$this->assertTrue($o->validate('any'));
$this->assertTrue($o->assert('any'));
$this->assertTrue($o->check('any'));
}
/**
* @expectedException Respect\Validation\Exceptions\NoneOfException
*/
public function testInvalid()
{
$valid1 = new Callback(function () {
return false;
});
$valid2 = new Callback(function () {
return false;
});
$valid3 = new Callback(function () {
return true;
});
$o = new NoneOf($valid1, $valid2, $valid3);
$this->assertFalse($o->validate('any'));
$this->assertFalse($o->assert('any'));
}
}
| gpl-2.0 |
rloos289/PDXwing | vendor/pantheon-systems/terminus/tests/unit_tests/Models/DomainTest.php | 2462 | <?php
namespace Pantheon\Terminus\UnitTests\Models;
use Pantheon\Terminus\Collections\Workflows;
use Pantheon\Terminus\Models\Environment;
use Pantheon\Terminus\Models\Domain;
use Pantheon\Terminus\Models\Workflow;
/**
* Class DomainTest
* Testing class for Pantheon\Terminus\Models\Domain
* @package Pantheon\Terminus\UnitTests\Models
*/
class DomainTest extends ModelTestCase
{
public function setUp()
{
parent::setUp();
$this->domain = $this->_createDomain(['id' => 'dev.example.com']);
}
protected function _createDomain($attr)
{
$this->workflow = $this->getMockBuilder(Workflow::class)
->disableOriginalConstructor()
->getMock();
$this->workflows = $this->getMockBuilder(Workflows::class)
->disableOriginalConstructor()
->getMock();
$this->environment = $this->getMockBuilder(Environment::class)
->disableOriginalConstructor()
->getMock();
$this->environment->method('getWorkflows')->willReturn($this->workflows);
$this->environment->site = (object)['id' => 'abc'];
$this->environment->id = 'dev';
$domain = new Domain((object)$attr, ['collection' => (object)['environment' => $this->environment]]);
$domain->setRequest($this->request);
return $domain;
}
public function testDelete()
{
$this->request->expects($this->once())
->method('request')
->with('sites/abc/environments/dev/hostnames/dev.example.com', ['method' => 'delete']);
$this->domain->delete();
}
public function testSerialize()
{
$data = [
'dns_zone_name' => 'pantheonsite.io',
'environment' => 'live',
'site_id' => '1111-1111-1111-1111-1111',
'type' => 'platform',
'id' => 'live-mysite.pantheonsite.io',
'key' => 'live-mysite.pantheonsite.io',
'deletable' => false,
];
$domain = $this->_createDomain($data);
$expected = [
'domain' => 'live-mysite.pantheonsite.io',
'dns_zone_name' => 'pantheonsite.io',
'environment' => 'live',
'site_id' => '1111-1111-1111-1111-1111',
'key' => 'live-mysite.pantheonsite.io',
'deletable' => false,
];
$actual = $domain->serialize();
$this->assertEquals($expected, $actual);
}
}
| gpl-2.0 |
bsmitty83/kernel_omap | include/linux/power/bq27541_battery.h | 566 | /*
* Charger driver for TI BQ27541
*
* Copyright (C) Quanta Computer Inc. All rights reserved.
* Eric Nien <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __BQ27541_BATTERY_PLATFORM_DATA_H_
#define __BQ27541_BATTERY_PLATFORM_DATA_H_
struct bq27541_battery_platform_data {
void (*led_callback)(u8 green_value, u8 orange_value);
};
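/*
 * Hypothetical board-file usage sketch (not part of the original
 * header; the function name is illustrative):
 *
 *	static void board_charge_led(u8 green_value, u8 orange_value)
 *	{
 *		...drive the charge-state LEDs here...
 *	}
 *
 *	static struct bq27541_battery_platform_data bq27541_pdata = {
 *		.led_callback	= board_charge_led,
 *	};
 */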
#endif /* __BQ27541_BATTERY_PLATFORM_DATA_H_ */
| gpl-2.0 |
saintkepha/hackenv-emacs | packages/cedet-1.0pre6/semantic/tests/testsubclass.cpp | 3820 | /* Special test file for Semantic Analyzer and complex C++ inheritance.
*/
//#include <iostream>
#include "testsubclass.hh"
void animal::moose::setFeet(int numfeet) //^1^
{
if (numfeet > 4) {
std::cerr << "Why would a moose have more than 4 feet?" << std::endl;
return;
}
fFeet = numfeet;
}
int animal::moose::getFeet() //^2^
{
return fFeet;
}
void animal::moose::doNothing() //^3^
{
animal::moose foo();
fFeet = 3;
}
void deer::moose::setAntlers(bool have_antlers) //^4^
{
fAntlers = have_antlers;
}
bool deer::moose::getAntlers() //^5^
// %1% ( ( "testsubclass.cpp" "testsubclass.hh" ) ( "deer::moose::getAntlers" "deer::moose::doSomething" "moose" ) )
{
return fAntlers;
}
bool i_dont_have_symrefs()
// %2% ( ("testsubclass.cpp" ) ("i_dont_have_symrefs"))
{
}
void deer::moose::doSomething() //^6^
{
// All these functions should be identified by semantic analyzer.
getAntlers();
setAntlers(true);
getFeet();
setFeet(true);
doNothing();
fSomeField = true;
fIsValid = true;
}
void deer::alces::setLatin(bool l) {
fLatin = l;
}
bool deer::alces::getLatin() {
return fLatin;
}
void deer::alces::doLatinStuff(moose moosein) {
// All these functions should be identified by semantic analyzer.
getFeet();
setFeet(true);
getLatin();
setLatin(true);
doNothing();
deer::moose foo();
}
moose deer::alces::createMoose()
{
moose MooseVariableName;
bool tmp;
int itmp;
bool fool;
int fast;
MooseVariableName = createMoose();
doLatinStuff(MooseVariableName);
tmp = this.f// -1-
// #1# ( "fAlcesBool" "fIsValid" "fLatin" )
;
itmp = this.f// -2-
// #2# ( "fAlcesInt" "fGreek" "fIsProtectedInt" )
;
tmp = f// -3-
// #3# ( "fAlcesBool" "fIsValid" "fLatin" "fool" )
;
itmp = f// -4-
// #4# ( "fAlcesInt" "fGreek" "fIsProtectedInt" "fast" )
;
MooseVariableName = m// -5-
// #5# ( "moose" )
return MooseVariableName;
}
/** Test Scope Changes
*
* This function is rigged to make sure the scope changes to account
* for different locations in local variable parsing.
*/
int someFunction(int mPickle)
{
moose mMoose = deer::alces::createMoose();
if (mPickle == 1) {
int mOption1 = 2;
m// -5-
// #5# ( "mMoose" "mOption1" "mPickle" )
;
} else {
int mOption2 = 2;
m// -6-
// #6# ( "mMoose" "mOption2" "mPickle" )
;
}
}
// Thanks Ming-Wei Chang for this next example.
namespace pub_priv {
class A{
private:
void private_a(){}
public:
void public_a();
};
void A::public_a() {
A other_a;
other_a.p// -7-
// #7# ( "private_a" "public_a" )
;
}
int some_regular_function(){
A a;
a.p// -8-
// #8# ( "public_a" )
;
return 0;
}
}
/** Test Scope w/in a function (non-method) with classes using
* different levels of inheritance.
*/
int otherFunction()
{
sneaky::antelope Antelope(1);
sneaky::jackalope Jackalope(1);
sneaky::bugalope Bugalope(1);
Antelope.// -9-
// #9# ( "fAntyPublic" "fQuadPublic" "testAccess")
;
Jackalope.// -10-
// #10# ( "fBunnyPublic" "testAccess")
;
Bugalope.// -11-
// #11# ( "fBugPublic" "testAccess")
;
}
/** Test methods within each class for types of access to the baseclass.
*/
bool sneaky::antelope::testAccess() //^7^
{
this.// -12-
// #12# ( "fAntyPrivate" "fAntyProtected" "fAntyPublic" "fQuadProtected" "fQuadPublic" "testAccess" )
;
}
bool sneaky::jackalope::testAccess() //^8^
{
this.// -13-
// #13# ( "fBunnyPrivate" "fBunnyProtected" "fBunnyPublic" "fQuadProtected" "fQuadPublic" "testAccess" )
;
}
bool sneaky::bugalope::testAccess() //^9^
{
this.// -14-
// #14# ( "fBugPrivate" "fBugProtected" "fBugPublic" "fQuadPublic" "testAccess" )
;
}
| gpl-2.0 |
tossp/lede-k3 | package/lean/mt/drivers/mt7615d/src/mt_wifi/embedded/ap/ap_wpa.c | 7337 | /*
***************************************************************************
* Ralink Tech Inc.
* 4F, No. 2 Technology 5th Rd.
* Science-based Industrial Park
* Hsin-chu, Taiwan, R.O.C.
*
* (c) Copyright 2002, Ralink Technology, Inc.
*
* All rights reserved. Ralink's source code is an unpublished work and the
* use of a copyright notice does not imply otherwise. This source code
* contains confidential trade secret material of Ralink Tech. Any attemp
* or participation in deciphering, decoding, reverse engineering or in any
* way altering the source code is stricitly prohibited, unless the prior
* written consent of Ralink Technology, Inc. is obtained.
***************************************************************************
Module Name:
wpa.c
Abstract:
Revision History:
Who When What
-------- ---------- ----------------------------------------------
Jan Lee 03-07-22 Initial
Rory Chen 04-11-29 Add WPA2PSK
*/
#include "rt_config.h"
extern UCHAR EAPOL[];
/*
==========================================================================
Description:
Port Access Control Inquiry function. Return entry's Privacy and Wpastate.
Return:
pEntry
==========================================================================
*/
MAC_TABLE_ENTRY *PACInquiry(RTMP_ADAPTER *pAd, UCHAR Wcid)
{
MAC_TABLE_ENTRY *pEntry = NULL;
if (VALID_UCAST_ENTRY_WCID(pAd, Wcid))
pEntry = &(pAd->MacTab.Content[Wcid]);
return pEntry;
}
/*
==========================================================================
Description:
Function to handle countermeasures active attack. Init 60-sec timer if necessary.
Return:
==========================================================================
*/
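/*
	Explanatory note (not from the original source): timeline of the
	IEEE 802.11i TKIP countermeasures implemented below.
	- 1st MIC failure: start/refresh a 60-sec window.
	- 2nd MIC failure within that window: deauthenticate all associated
	STAs, ban Class 3 data and new associations for 60 sec, renew GTK.
*/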
VOID HandleCounterMeasure(RTMP_ADAPTER *pAd, MAC_TABLE_ENTRY *pEntry)
{
#ifndef RT_CFG80211_SUPPORT
INT i;
#endif
BOOLEAN Cancelled;
if (!pEntry)
return;
	/* Todo by AlbertY - Not supported currently in ApClient-link */
if (IS_ENTRY_APCLI(pEntry) || IS_ENTRY_REPEATER(pEntry))
return;
#ifndef RT_CFG80211_SUPPORT
/* if entry not set key done, ignore this RX MIC ERROR */
if ((pEntry->SecConfig.Handshake.WpaState < AS_PTKINITDONE) || (pEntry->SecConfig.Handshake.GTKState != REKEY_ESTABLISHED))
return;
#endif
MTWF_LOG(DBG_CAT_AP, DBG_SUBCAT_ALL, DBG_LVL_TRACE, ("HandleCounterMeasure ===>\n"));
/* record which entry causes this MIC error, if this entry sends disauth/disassoc, AP doesn't need to log the CM */
pEntry->CMTimerRunning = TRUE;
pAd->ApCfg.MICFailureCounter++;
/* send wireless event - for MIC error */
RTMPSendWirelessEvent(pAd, IW_MIC_ERROR_EVENT_FLAG, pEntry->Addr, 0, 0);
#ifdef RT_CFG80211_SUPPORT
{
const UCHAR tsc[6] = {0, 0, 0, 0, 0, 0};
PNET_DEV pNetDev = pAd->ApCfg.MBSSID[pEntry->func_tb_idx].wdev.if_dev;
/* NL80211_KEYTYPE_PAIRWISE = 1, tsc = tsc of frame causing mic failure */
MTWF_LOG(DBG_CAT_AP, DBG_SUBCAT_ALL, DBG_LVL_ERROR,
("%s:calling cfg event to HandleCounterMeasure\n", __FUNCTION__));
cfg80211_michael_mic_failure(pNetDev, pEntry->Addr, 1, 0, tsc, GFP_KERNEL);
}
#endif
if (pAd->ApCfg.CMTimerRunning == TRUE) {
MTWF_LOG(DBG_CAT_AP, DBG_SUBCAT_ALL, DBG_LVL_ERROR, ("Receive CM Attack Twice within 60 seconds ====>>>\n"));
/* send wireless event - for counter measures */
RTMPSendWirelessEvent(pAd, IW_COUNTER_MEASURES_EVENT_FLAG, pEntry->Addr, 0, 0);
ApLogEvent(pAd, pEntry->Addr, EVENT_COUNTER_M);
#ifndef RT_CFG80211_SUPPORT
/* renew GTK */
GenRandom(pAd, pAd->ApCfg.MBSSID[pEntry->func_tb_idx].wdev.bssid, pAd->ApCfg.MBSSID[pEntry->func_tb_idx].GNonce);
#endif
/* Cancel CounterMeasure Timer */
RTMPCancelTimer(&pAd->ApCfg.CounterMeasureTimer, &Cancelled);
pAd->ApCfg.CMTimerRunning = FALSE;
#ifndef RT_CFG80211_SUPPORT
for (i = 0; VALID_UCAST_ENTRY_WCID(pAd, i); i++) {
struct wifi_dev *wdev = pAd->MacTab.Content[i].wdev;
/* happened twice within 60 sec, AP SENDS disaccociate all associated STAs. All STA's transition to State 2 */
if ((IS_ENTRY_CLIENT(&pAd->MacTab.Content[i])) && wdev != NULL
&& wdev->channel == pEntry->wdev->channel)
MlmeDeAuthAction(pAd, &pAd->MacTab.Content[i], REASON_MIC_FAILURE, FALSE);
}
#endif
/*
		Further, ban all Class 3 DATA transportation for a period of 60 sec
		and disallow new associations, too
*/
pAd->ApCfg.BANClass3Data = TRUE;
/* check how many entry left... should be zero */
/*pAd->ApCfg.MBSSID[pEntry->func_tb_idx].GKeyDoneStations = pAd->MacTab.Size; */
/*MTWF_LOG(DBG_CAT_AP, DBG_SUBCAT_ALL, DBG_LVL_TRACE, ("GKeyDoneStations=%d\n", pAd->ApCfg.MBSSID[pEntry->func_tb_idx].GKeyDoneStations)); */
}
RTMPSetTimer(&pAd->ApCfg.CounterMeasureTimer, 60 * MLME_TASK_EXEC_INTV * MLME_TASK_EXEC_MULTIPLE);
pAd->ApCfg.CMTimerRunning = TRUE;
pAd->ApCfg.PrevaMICFailTime = pAd->ApCfg.aMICFailTime;
RTMP_GetCurrentSystemTime(&pAd->ApCfg.aMICFailTime);
}
/*
==========================================================================
Description:
countermeasures active attack timer execution
Return:
==========================================================================
*/
VOID CMTimerExec(
IN PVOID SystemSpecific1,
IN PVOID FunctionContext,
IN PVOID SystemSpecific2,
IN PVOID SystemSpecific3)
{
UINT i, j = 0;
PRTMP_ADAPTER pAd = (PRTMP_ADAPTER)FunctionContext;
pAd->ApCfg.BANClass3Data = FALSE;
for (i = 0; VALID_UCAST_ENTRY_WCID(pAd, i); i++) {
if (IS_ENTRY_CLIENT(&pAd->MacTab.Content[i])
&& (pAd->MacTab.Content[i].CMTimerRunning == TRUE)) {
pAd->MacTab.Content[i].CMTimerRunning = FALSE;
j++;
}
}
if (j > 1)
MTWF_LOG(DBG_CAT_AP, DBG_SUBCAT_ALL, DBG_LVL_ERROR, ("Find more than one entry which generated MIC Fail ..\n"));
pAd->ApCfg.CMTimerRunning = FALSE;
MTWF_LOG(DBG_CAT_AP, DBG_SUBCAT_ALL, DBG_LVL_ERROR,
("Counter measure timer expired, resume connection access.\n"));
}
#ifdef HOSTAPD_SUPPORT
/* For sending an event to notify hostapd about a Michael MIC failure. */
VOID ieee80211_notify_michael_failure(
IN PRTMP_ADAPTER pAd,
IN PHEADER_802_11 pHeader,
IN UINT keyix,
IN INT report)
{
static const char *tag = "MLME-MICHAELMICFAILURE.indication";
/* struct net_device *dev = pAd->net_dev; */
/* union iwreq_data wrqu; */
char buf[128]; /* XXX */
/* TODO: needed parameters: count, keyid, key type, src address, TSC */
if (report) { /*station reports a mic error to this ap. */
snprintf(buf, sizeof(buf), "%s(keyid=%d %scast addr=%s)", tag,
keyix, "uni",
ether_sprintf(pHeader->Addr2));
} else { /*ap itself receives a mic error. */
snprintf(buf, sizeof(buf), "%s(keyid=%d %scast addr=%s)", tag,
keyix, IEEE80211_IS_MULTICAST(pHeader->Addr1) ? "broad" : "uni",
ether_sprintf(pHeader->Addr2));
}
RtmpOSWrielessEventSend(pAd->net_dev, RT_WLAN_EVENT_CUSTOM, -1, NULL, NULL, 0);
/* NdisZeroMemory(&wrqu, sizeof(wrqu)); */
/* wrqu.data.length = strlen(buf); */
/* wireless_send_event(dev, RT_WLAN_EVENT_CUSTOM, &wrqu, buf); */
}
const CHAR *ether_sprintf(const UINT8 *mac)
{
static char etherbuf[18];
snprintf(etherbuf, sizeof(etherbuf), "%02x:%02x:%02x:%02x:%02x:%02x", PRINT_MAC(mac));
return etherbuf;
}
#endif /* HOSTAPD_SUPPORT */
#ifdef APCLI_SUPPORT
#endif/*APCLI_SUPPORT*/
| gpl-2.0 |
wschweer/mscoreserver | libmscore/pitchspelling.h | 3308 | //=============================================================================
// MuseScore
// Music Composition & Notation
// $Id: pitchspelling.h 5163 2011-12-30 09:57:08Z wschweer $
//
// Copyright (C) 2002-2011 Werner Schweer
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2
// as published by the Free Software Foundation and appearing in
// the file LICENCE.GPL
//=============================================================================
#ifndef __PITCHSPELLING_H__
#define __PITCHSPELLING_H__
#include "mscore.h"
class MidiNote;
class Note;
class Event;
// a list of tpc's, with legal ranges
enum {
INVALID_TPC = -2,
TPC_F_BB, TPC_C_BB, TPC_G_BB, TPC_D_BB, TPC_A_BB, TPC_E_BB, TPC_B_BB,
TPC_F_B, TPC_C_B, TPC_G_B, TPC_D_B, TPC_A_B, TPC_E_B, TPC_B_B,
TPC_F, TPC_C, TPC_G, TPC_D, TPC_A, TPC_E, TPC_B,
TPC_F_S, TPC_C_S, TPC_G_S, TPC_D_S, TPC_A_S, TPC_E_S, TPC_B_S,
TPC_F_SS, TPC_C_SS, TPC_G_SS, TPC_D_SS, TPC_A_SS, TPC_E_SS, TPC_B_SS,
TPC_MIN = TPC_F_BB,
TPC_MAX = TPC_B_SS
};
const int TPC_DELTA_SEMITONE = 7; // the delta in tpc value to go 1 semitone up or down
const int TPC_DELTA_ENHARMONIC = 12; // the delta in tpc value to reach the next (or prev) enharmonic spelling
const int TPC_FIRST_STEP = 3; // the step of the first valid tpc (= F = step 3)
const int PITCH_DELTA_OCTAVE = 12; // the delta in pitch value to go 1 octave up or down
const int STEP_DELTA_OCTAVE = 7; // the number of steps in an octave
const int STEP_DELTA_TPC = 4; // the number of steps in a tpc step (= a fifth = 4 steps)
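// Worked examples (illustrative, derived from the enum above):
//   TPC_C (14) + TPC_DELTA_SEMITONE = 21 = TPC_C_S (C -> C#)
//   TPC_C_S (21) - TPC_DELTA_ENHARMONIC = 9 = TPC_D_B (C# -> Db)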
//---------------------------------------------------------
// pitch2tpc
// Returns a default tpc for a given midi pitch.
// Midi pitch 60 is middle C.
//---------------------------------------------------------
inline static int pitch2tpc(int pitch)
{
return (((((pitch % 12) * 7) % 12) + 5) % 12) + 9;
}
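// Worked example (illustrative): pitch 60 (middle C) yields
// (((((60 % 12) * 7) % 12) + 5) % 12) + 9 = (5 % 12) + 9 = 14 = TPC_C.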
int pitch2tpc2(int pitch, bool preferSharp);
extern int pitch2tpc(int pitch, int key);
extern void spell(QList<Event>& notes, int);
extern void spell(QList<Note*>& notes);
extern int computeWindow(const QList<Note*>& notes, int start, int end);
extern int tpc(int idx, int pitch, int opt);
extern QString tpc2name(int tpc, bool germanNames);
extern void tpc2name(int tpc, bool germanNames, QChar* name, int* acc);
extern int step2tpc(const QString& stepName, AccidentalVal alter);
extern int step2tpc(int step);
extern int step2tpc(int step, AccidentalVal alter);
extern int step2tpcByKey(int step, int key);
extern int tpc2pitch(int tpc);
extern int tpc2step(int tpc);
extern int tpc2stepByKey(int tpc, int key, int* pAlter);
extern int tpc2alterByKey(int tpc, int key);
extern int pitch2absStepByKey(int pitch, int tpc, int key, int* pAlter);
extern int absStep2pitchByKey(int step, int key);
//---------------------------------------------------------
// tpc2alter
//---------------------------------------------------------
inline static AccidentalVal tpc2alter(int tpc) {
return AccidentalVal(((tpc+1) / 7) - 2);
}
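// Worked examples (illustrative): tpc2alter(TPC_C) = ((14+1)/7) - 2 = 0
// (natural); tpc2alter(TPC_C_S) = ((21+1)/7) - 2 = 1 (one sharp).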
extern QString tpc2stepName(int tpc);
extern bool tpcIsValid(int val);
#endif
| gpl-2.0 |
noorelden/QuickBooking | QuickBooking/migrations/0011_auto_20150704_2001.py | 426 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('QuickBooking', '0010_auto_20150704_1942'),
]
operations = [
migrations.AlterField(
model_name='seat',
name='seat_type',
field=models.CharField(max_length=10, primary_key=True),
),
]
| gpl-2.0 |
raj-bhatia/grooveip-ios-public | submodules/mswebrtc/webrtc/talk/app/webrtc/peerconnectionfactory.cc | 11762 | /*
* libjingle
* Copyright 2004--2011 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "talk/app/webrtc/peerconnectionfactory.h"
#include "talk/app/webrtc/audiotrack.h"
#include "talk/app/webrtc/localaudiosource.h"
#include "talk/app/webrtc/mediastream.h"
#include "talk/app/webrtc/mediastreamproxy.h"
#include "talk/app/webrtc/mediastreamtrackproxy.h"
#include "talk/app/webrtc/peerconnection.h"
#include "talk/app/webrtc/peerconnectionfactoryproxy.h"
#include "talk/app/webrtc/peerconnectionproxy.h"
#include "talk/app/webrtc/portallocatorfactory.h"
#include "talk/app/webrtc/videosource.h"
#include "talk/app/webrtc/videosourceproxy.h"
#include "talk/app/webrtc/videotrack.h"
#include "talk/media/webrtc/webrtcmediaengine.h"
#include "talk/media/webrtc/webrtcvideodecoderfactory.h"
#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
#include "webrtc/base/bind.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
namespace webrtc {
namespace {
// Passes down the calls to |store_|. See usage in CreatePeerConnection.
class DtlsIdentityStoreWrapper : public DtlsIdentityStoreInterface {
public:
DtlsIdentityStoreWrapper(
const rtc::scoped_refptr<RefCountedDtlsIdentityStore>& store)
: store_(store) {
RTC_DCHECK(store_);
}
void RequestIdentity(
rtc::KeyType key_type,
const rtc::scoped_refptr<webrtc::DtlsIdentityRequestObserver>&
observer) override {
store_->RequestIdentity(key_type, observer);
}
private:
rtc::scoped_refptr<RefCountedDtlsIdentityStore> store_;
};
} // anonymous namespace
rtc::scoped_refptr<PeerConnectionFactoryInterface>
CreatePeerConnectionFactory() {
rtc::scoped_refptr<PeerConnectionFactory> pc_factory(
new rtc::RefCountedObject<PeerConnectionFactory>());
  // Call Initialize synchronously but make sure it's executed on
// |signaling_thread|.
MethodCall0<PeerConnectionFactory, bool> call(
pc_factory.get(),
&PeerConnectionFactory::Initialize);
bool result = call.Marshal(pc_factory->signaling_thread());
if (!result) {
return NULL;
}
return PeerConnectionFactoryProxy::Create(pc_factory->signaling_thread(),
pc_factory);
}
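// Hypothetical caller-side sketch (illustrative only, not part of the
// original file):
//   rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> factory =
//       webrtc::CreatePeerConnectionFactory();
//   if (!factory) {
//     // Initialize() failed on the signaling thread.
//   }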
rtc::scoped_refptr<PeerConnectionFactoryInterface>
CreatePeerConnectionFactory(
rtc::Thread* worker_thread,
rtc::Thread* signaling_thread,
AudioDeviceModule* default_adm,
cricket::WebRtcVideoEncoderFactory* encoder_factory,
cricket::WebRtcVideoDecoderFactory* decoder_factory) {
rtc::scoped_refptr<PeerConnectionFactory> pc_factory(
new rtc::RefCountedObject<PeerConnectionFactory>(worker_thread,
signaling_thread,
default_adm,
encoder_factory,
decoder_factory));
  // Call Initialize synchronously but make sure it's executed on
// |signaling_thread|.
MethodCall0<PeerConnectionFactory, bool> call(
pc_factory.get(),
&PeerConnectionFactory::Initialize);
bool result = call.Marshal(signaling_thread);
if (!result) {
return NULL;
}
return PeerConnectionFactoryProxy::Create(signaling_thread, pc_factory);
}
PeerConnectionFactory::PeerConnectionFactory()
: owns_ptrs_(true),
wraps_current_thread_(false),
signaling_thread_(rtc::ThreadManager::Instance()->CurrentThread()),
worker_thread_(new rtc::Thread) {
if (!signaling_thread_) {
signaling_thread_ = rtc::ThreadManager::Instance()->WrapCurrentThread();
wraps_current_thread_ = true;
}
worker_thread_->Start();
}
PeerConnectionFactory::PeerConnectionFactory(
rtc::Thread* worker_thread,
rtc::Thread* signaling_thread,
AudioDeviceModule* default_adm,
cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
cricket::WebRtcVideoDecoderFactory* video_decoder_factory)
: owns_ptrs_(false),
wraps_current_thread_(false),
signaling_thread_(signaling_thread),
worker_thread_(worker_thread),
default_adm_(default_adm),
video_encoder_factory_(video_encoder_factory),
video_decoder_factory_(video_decoder_factory) {
ASSERT(worker_thread != NULL);
ASSERT(signaling_thread != NULL);
  // TODO: Currently there is no way of creating an external adm in the
  // libjingle source tree. So we can't currently assert if this is NULL.
// ASSERT(default_adm != NULL);
}
PeerConnectionFactory::~PeerConnectionFactory() {
RTC_DCHECK(signaling_thread_->IsCurrent());
channel_manager_.reset(nullptr);
default_allocator_factory_ = nullptr;
// Make sure |worker_thread_| and |signaling_thread_| outlive
// |dtls_identity_store_|.
dtls_identity_store_ = nullptr;
if (owns_ptrs_) {
if (wraps_current_thread_)
rtc::ThreadManager::Instance()->UnwrapCurrentThread();
delete worker_thread_;
}
}
bool PeerConnectionFactory::Initialize() {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::InitRandom(rtc::Time());
default_allocator_factory_ = PortAllocatorFactory::Create(worker_thread_);
if (!default_allocator_factory_)
return false;
// TODO: Need to make sure only one VoE is created inside
// WebRtcMediaEngine.
cricket::MediaEngineInterface* media_engine =
worker_thread_->Invoke<cricket::MediaEngineInterface*>(rtc::Bind(
&PeerConnectionFactory::CreateMediaEngine_w, this));
channel_manager_.reset(
new cricket::ChannelManager(media_engine, worker_thread_));
channel_manager_->SetVideoRtxEnabled(true);
if (!channel_manager_->Init()) {
return false;
}
dtls_identity_store_ = new RefCountedDtlsIdentityStore(
signaling_thread_, worker_thread_);
return true;
}
rtc::scoped_refptr<AudioSourceInterface>
PeerConnectionFactory::CreateAudioSource(
const MediaConstraintsInterface* constraints) {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::scoped_refptr<LocalAudioSource> source(
LocalAudioSource::Create(options_, constraints));
return source;
}
rtc::scoped_refptr<VideoSourceInterface>
PeerConnectionFactory::CreateVideoSource(
cricket::VideoCapturer* capturer,
const MediaConstraintsInterface* constraints) {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::scoped_refptr<VideoSource> source(
VideoSource::Create(channel_manager_.get(), capturer, constraints));
return VideoSourceProxy::Create(signaling_thread_, source);
}
bool PeerConnectionFactory::StartAecDump(rtc::PlatformFile file) {
RTC_DCHECK(signaling_thread_->IsCurrent());
return channel_manager_->StartAecDump(file);
}
void PeerConnectionFactory::StopAecDump() {
RTC_DCHECK(signaling_thread_->IsCurrent());
channel_manager_->StopAecDump();
}
bool PeerConnectionFactory::StartRtcEventLog(rtc::PlatformFile file) {
RTC_DCHECK(signaling_thread_->IsCurrent());
return channel_manager_->StartRtcEventLog(file);
}
void PeerConnectionFactory::StopRtcEventLog() {
RTC_DCHECK(signaling_thread_->IsCurrent());
channel_manager_->StopRtcEventLog();
}
rtc::scoped_refptr<PeerConnectionInterface>
PeerConnectionFactory::CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
const MediaConstraintsInterface* constraints,
PortAllocatorFactoryInterface* allocator_factory,
rtc::scoped_ptr<DtlsIdentityStoreInterface> dtls_identity_store,
PeerConnectionObserver* observer) {
RTC_DCHECK(signaling_thread_->IsCurrent());
RTC_DCHECK(allocator_factory || default_allocator_factory_);
if (!dtls_identity_store.get()) {
// Because |pc|->Initialize takes ownership of the store we need a new
// wrapper object that can be deleted without deleting the underlying
// |dtls_identity_store_|, protecting it from being deleted multiple times.
dtls_identity_store.reset(
new DtlsIdentityStoreWrapper(dtls_identity_store_));
}
PortAllocatorFactoryInterface* chosen_allocator_factory =
allocator_factory ? allocator_factory : default_allocator_factory_.get();
chosen_allocator_factory->SetNetworkIgnoreMask(options_.network_ignore_mask);
rtc::scoped_refptr<PeerConnection> pc(
new rtc::RefCountedObject<PeerConnection>(this));
if (!pc->Initialize(
configuration,
constraints,
chosen_allocator_factory,
dtls_identity_store.Pass(),
observer)) {
return NULL;
}
return PeerConnectionProxy::Create(signaling_thread(), pc);
}
rtc::scoped_refptr<MediaStreamInterface>
PeerConnectionFactory::CreateLocalMediaStream(const std::string& label) {
RTC_DCHECK(signaling_thread_->IsCurrent());
return MediaStreamProxy::Create(signaling_thread_,
MediaStream::Create(label));
}
rtc::scoped_refptr<VideoTrackInterface>
PeerConnectionFactory::CreateVideoTrack(
const std::string& id,
VideoSourceInterface* source) {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::scoped_refptr<VideoTrackInterface> track(
VideoTrack::Create(id, source));
return VideoTrackProxy::Create(signaling_thread_, track);
}
rtc::scoped_refptr<AudioTrackInterface>
PeerConnectionFactory::CreateAudioTrack(const std::string& id,
AudioSourceInterface* source) {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::scoped_refptr<AudioTrackInterface> track(
AudioTrack::Create(id, source));
return AudioTrackProxy::Create(signaling_thread_, track);
}
webrtc::MediaControllerInterface* PeerConnectionFactory::CreateMediaController()
const {
RTC_DCHECK(signaling_thread_->IsCurrent());
return MediaControllerInterface::Create(worker_thread_,
channel_manager_.get());
}
rtc::Thread* PeerConnectionFactory::signaling_thread() {
// This method can be called on a different thread when the factory is
// created in CreatePeerConnectionFactory().
return signaling_thread_;
}
rtc::Thread* PeerConnectionFactory::worker_thread() {
RTC_DCHECK(signaling_thread_->IsCurrent());
return worker_thread_;
}
cricket::MediaEngineInterface* PeerConnectionFactory::CreateMediaEngine_w() {
ASSERT(worker_thread_ == rtc::Thread::Current());
return cricket::WebRtcMediaEngineFactory::Create(
default_adm_.get(), video_encoder_factory_.get(),
video_decoder_factory_.get());
}
} // namespace webrtc
| gpl-2.0 |
znjRoLS/RUISHarryPotter | HarryPoter/RUISunity/Assets/RUIS/Scripts/Input/Calibration/RUISOculusFollow.cs | 1178 | using UnityEngine;
using System.Collections;
public class RUISOculusFollow : MonoBehaviour
{
RUISCoordinateSystem coordinateSystem;
void Start()
{
coordinateSystem = MonoBehaviour.FindObjectOfType(typeof(RUISCoordinateSystem)) as RUISCoordinateSystem;
}
void Update ()
{
if(RUISOVRManager.ovrHmd != null)
{
Vector3 tempSample = Vector3.zero;
Ovr.Posef headpose = RUISOVRManager.ovrHmd.GetTrackingState().HeadPose.ThePose;
float px = headpose.Position.x;
float py = headpose.Position.y;
			float pz = -headpose.Position.z; // This needs to be negated. TODO: might change with a future OVR version
tempSample = new Vector3(px, py, pz);
tempSample = coordinateSystem.ConvertRawOculusDK2Location(tempSample);
Vector3 convertedLocation = coordinateSystem.ConvertLocation(tempSample, RUISDevice.Oculus_DK2);
this.transform.localPosition = convertedLocation;
if(OVRManager.capiHmd != null)
{
try
{
this.transform.localRotation = OVRManager.capiHmd.GetTrackingState().HeadPose.ThePose.Orientation.ToQuaternion();
}
catch(System.Exception e)
{
Debug.LogError(e.Message);
}
}
}
}
}
| gpl-2.0 |
knil-sama/YADDW | lib/apache-jena-2.12.1/javadoc-arq/com/hp/hpl/jena/sparql/path/class-use/P_ZeroOrMore1.html | 7516 | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_45) on Thu Oct 02 16:39:54 BST 2014 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Class com.hp.hpl.jena.sparql.path.P_ZeroOrMore1 (Apache Jena ARQ)</title>
<meta name="date" content="2014-10-02">
<link rel="stylesheet" type="text/css" href="../../../../../../../stylesheet.css" title="Style">
</head>
<body>
<div class="header">
<h2 title="Uses of Class com.hp.hpl.jena.sparql.path.P_ZeroOrMore1" class="title">Uses of Class<br>com.hp.hpl.jena.sparql.path.P_ZeroOrMore1</h2>
</div>
<div class="classUseContainer">
<ul class="blockList">
<li class="blockList">
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing packages, and an explanation">
<caption><span>Packages that use <a href="../../../../../../../com/hp/hpl/jena/sparql/path/P_ZeroOrMore1.html" title="class in com.hp.hpl.jena.sparql.path">P_ZeroOrMore1</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Package</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="#com.hp.hpl.jena.sparql.path">com.hp.hpl.jena.sparql.path</a></td>
<td class="colLast"> </td>
</tr>
</tbody>
</table>
</li>
<li class="blockList">
<ul class="blockList">
<li class="blockList"><a name="com.hp.hpl.jena.sparql.path">
<!-- -->
</a>
<h3>Uses of <a href="../../../../../../../com/hp/hpl/jena/sparql/path/P_ZeroOrMore1.html" title="class in com.hp.hpl.jena.sparql.path">P_ZeroOrMore1</a> in <a href="../../../../../../../com/hp/hpl/jena/sparql/path/package-summary.html">com.hp.hpl.jena.sparql.path</a></h3>
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing methods, and an explanation">
<caption><span>Methods in <a href="../../../../../../../com/hp/hpl/jena/sparql/path/package-summary.html">com.hp.hpl.jena.sparql.path</a> with parameters of type <a href="../../../../../../../com/hp/hpl/jena/sparql/path/P_ZeroOrMore1.html" title="class in com.hp.hpl.jena.sparql.path">P_ZeroOrMore1</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><span class="strong">PathVisitorByType.</span><code><strong><a href="../../../../../../../com/hp/hpl/jena/sparql/path/PathVisitorByType.html#visit(com.hp.hpl.jena.sparql.path.P_ZeroOrMore1)">visit</a></strong>(<a href="../../../../../../../com/hp/hpl/jena/sparql/path/P_ZeroOrMore1.html" title="class in com.hp.hpl.jena.sparql.path">P_ZeroOrMore1</a> path)</code> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><span class="strong">PathVisitorBase.</span><code><strong><a href="../../../../../../../com/hp/hpl/jena/sparql/path/PathVisitorBase.html#visit(com.hp.hpl.jena.sparql.path.P_ZeroOrMore1)">visit</a></strong>(<a href="../../../../../../../com/hp/hpl/jena/sparql/path/P_ZeroOrMore1.html" title="class in com.hp.hpl.jena.sparql.path">P_ZeroOrMore1</a> path)</code> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><span class="strong">PathVisitor.</span><code><strong><a href="../../../../../../../com/hp/hpl/jena/sparql/path/PathVisitor.html#visit(com.hp.hpl.jena.sparql.path.P_ZeroOrMore1)">visit</a></strong>(<a href="../../../../../../../com/hp/hpl/jena/sparql/path/P_ZeroOrMore1.html" title="class in com.hp.hpl.jena.sparql.path">P_ZeroOrMore1</a> path)</code> </td>
</tr>
</tbody>
</table>
</li>
</ul>
</li>
</ul>
</div>
<p class="legalCopy"><small>Licenced under the Apache License, Version 2.0</small></p>
</body>
</html>
| gpl-2.0 |
mehhhh/TheLammerGame | game.js | 3254 | var lvl1 = (function () {
var xPartition = 320;
var preload = function () {
// tilemap
this.xPartition = xPartition;
game.load.tilemap('map', 'assets/map.json', null, Phaser.Tilemap.TILED_JSON);
game.load.image('floor', 'assets/floor.png');
game.load.image('tileset', 'assets/tileset.png');
game.load.image('wall', 'assets/wall.png');
// furniture
game.load.spritesheet('door', 'assets/door.png', 48, 80);
game.load.spritesheet('phone', 'assets/phone.png', 32, 48);
game.load.spritesheet('atm', 'assets/atm.png', 48, 80);
// ppj
game.load.spritesheet('cracker', 'assets/cracker.png', 48, 96);
game.load.spritesheet('sysadmin', 'assets/sysadmin.png', 48, 96);
game.load.spritesheet('secre0', 'assets/ingenuous.png', 48, 96);
game.load.spritesheet('secre1', 'assets/ingenuous2.png', 48, 96);
game.load.image('pear', 'assets/pear.png');
// disable smoothing to avoid blurry images
game.stage.smoothed = false;
};
var create = function () {
// Background color.
game.stage.backgroundColor = '#eee';
// Physics.
game.physics.startSystem(Phaser.Physics.ARCADE);
// Sprites creation
this.tilemap = map(this, xPartition);
this.cracker = cracker(this);
this.cursor = cursor();
// this is a horrible patch: do not remove it, unless
// you wanna fix cracker's overlapDoor conflict
this.cracker.cursor = this.cursor;
// creating doors
this.doors = this.tilemap.parseDoors();
// creating phones
this.phones = parsePhones(this, this.tilemap, this.tilemap.phone);
// creating sysadmins
this.sysadmins = parseSysadmins(this, this.tilemap, this.tilemap.sysadmin,
this.phones);
// creating atms
this.atms = parseAtms(this, this.tilemap, this.tilemap.atm);
// scoreboard
this.scoreboard = scoreboard(this.phones);
// creating secres
this.secres = parseSecres(this, this.tilemap, this.tilemap.secre, this.phones, this.scoreboard);
this.spawner = new spawner(this);
// bringing to top things (below this line)
this.cracker.bringToTop();
this.sysadmins.forEach( function (sysadmin) {
sysadmin.bringToTop();
});
this.secres.forEach( function (secre) {
secre.bringToTop();
});
};
var update = function () {
this.spawner.update();
// a sysadmin repairs any ATM it overlaps
game.physics.arcade.overlap(this.atms, this.sysadmins,
function (atm, sysadmin) {
atm.animations.play('ok');
});
this.secres.lookPhone();
};
// check the cracker.js file! the overlapDoor function ;)
return {
create : create,
preload : preload,
update : update
};
})();
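// Note: preload/create/update are the standard Phaser state callbacks;
// exposing only these three keeps the tilemap, sprites and spawner
// private to lvl1's closure.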
var game = new Phaser.Game(800, 640, Phaser.AUTO, 'game');
game.state.add('lvl1', lvl1);
game.state.start('lvl1');
// Global variables
window.firstAtmCracked = false;
window.firstPhishing = false;
| gpl-2.0 |
zhoupeng/spice4xen | linux-2.6.18-xen.hg/drivers/char/ipmi/ipmi_si_intf.c | 67681 | /*
* ipmi_si.c
*
* The interface to the IPMI driver for the system interfaces (KCS, SMIC,
* BT).
*
* Author: MontaVista Software, Inc.
* Corey Minyard <[email protected]>
* [email protected]
*
* Copyright 2002 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* This file holds the "policy" for the interface to the SMI state
* machine. It does the configuration, handles timers and interrupts,
* and drives the real SMI state machine.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi_smi.h>
#include <asm/io.h>
#include "ipmi_si_sm.h"
#include <linux/init.h>
#include <linux/dmi.h>
/* Measure times between events in the driver. */
#undef DEBUG_TIMING
/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC 10000
#define SI_USEC_PER_JIFFY (1000000/HZ)
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
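/* Worked example, assuming HZ = 250: SI_USEC_PER_JIFFY = 1000000/250
   = 4000, so SI_TIMEOUT_JIFFIES = 10000/4000 = 2 jiffies between
   timer-driven polls. */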
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
short timeout */
enum si_intf_state {
SI_NORMAL,
SI_GETTING_FLAGS,
SI_GETTING_EVENTS,
SI_CLEARING_FLAGS,
SI_CLEARING_FLAGS_THEN_SET_IRQ,
SI_GETTING_MESSAGES,
SI_ENABLE_INTERRUPTS1,
SI_ENABLE_INTERRUPTS2
/* FIXME - add watchdog stuff. */
};
/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG 2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
enum si_type {
SI_KCS, SI_SMIC, SI_BT
};
static char *si_to_str[] = { "KCS", "SMIC", "BT" };
#define DEVICE_NAME "ipmi_si"
static struct device_driver ipmi_driver =
{
.name = DEVICE_NAME,
.bus = &platform_bus_type
};
struct smi_info
{
int intf_num;
ipmi_smi_t intf;
struct si_sm_data *si_sm;
struct si_sm_handlers *handlers;
enum si_type si_type;
spinlock_t si_lock;
spinlock_t msg_lock;
struct list_head xmit_msgs;
struct list_head hp_xmit_msgs;
struct ipmi_smi_msg *curr_msg;
enum si_intf_state si_state;
/* Used to handle the various types of I/O that can occur with
IPMI */
struct si_sm_io io;
int (*io_setup)(struct smi_info *info);
void (*io_cleanup)(struct smi_info *info);
int (*irq_setup)(struct smi_info *info);
void (*irq_cleanup)(struct smi_info *info);
unsigned int io_size;
char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
void (*addr_source_cleanup)(struct smi_info *info);
void *addr_source_data;
/* Per-OEM handler, called from handle_flags().
Returns 1 when handle_flags() needs to be re-run
or 0 indicating it set si_state itself.
*/
int (*oem_data_avail_handler)(struct smi_info *smi_info);
/* Flags from the last GET_MSG_FLAGS command, used when an ATTN
is set to hold the flags until we are done handling everything
from the flags. */
#define RECEIVE_MSG_AVAIL 0x01
#define EVENT_MSG_BUFFER_FULL 0x02
#define WDT_PRE_TIMEOUT_INT 0x08
#define OEM0_DATA_AVAIL 0x20
#define OEM1_DATA_AVAIL 0x40
#define OEM2_DATA_AVAIL 0x80
#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
OEM1_DATA_AVAIL | \
OEM2_DATA_AVAIL)
unsigned char msg_flags;
/* If set to true, this will request events the next time the
state machine is idle. */
atomic_t req_events;
/* If true, run the state machine to completion on every send
call. Generally used after a panic to make sure stuff goes
out. */
int run_to_completion;
/* The I/O port of an SI interface. */
int port;
/* The space between start addresses of the two ports. For
instance, if the first port is 0xca2 and the spacing is 4, then
the second port is 0xca6. */
unsigned int spacing;
/* zero if no irq; */
int irq;
/* The timer for this si. */
struct timer_list si_timer;
/* The time (in jiffies) the last timeout occurred at. */
unsigned long last_timeout_jiffies;
/* Used to gracefully stop the timer without race conditions. */
atomic_t stop_operation;
/* The driver will disable interrupts when it gets into a
situation where it cannot handle messages due to lack of
memory. Once that situation clears up, it will re-enable
interrupts. */
int interrupt_disabled;
/* From the get device id response... */
struct ipmi_device_id device_id;
/* Driver model stuff. */
struct device *dev;
struct platform_device *pdev;
/* True if we allocated the device, false if it came from
* someplace else (like PCI). */
int dev_registered;
/* Slave address, could be reported from DMI. */
unsigned char slave_addr;
/* Counters and things for the proc filesystem. */
spinlock_t count_lock;
unsigned long short_timeouts;
unsigned long long_timeouts;
unsigned long timeout_restarts;
unsigned long idles;
unsigned long interrupts;
unsigned long attentions;
unsigned long flag_fetches;
unsigned long hosed_count;
unsigned long complete_transactions;
unsigned long events;
unsigned long watchdog_pretimeouts;
unsigned long incoming_messages;
struct task_struct *thread;
struct list_head link;
};
static int try_smi_init(struct smi_info *smi);
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block * nb)
{
return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}
static void deliver_recv_msg(struct smi_info *smi_info,
struct ipmi_smi_msg *msg)
{
/* Deliver the message to the upper layer with the lock
released. */
spin_unlock(&(smi_info->si_lock));
ipmi_smi_msg_received(smi_info->intf, msg);
spin_lock(&(smi_info->si_lock));
}
static void return_hosed_msg(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg = smi_info->curr_msg;
	/* Make it a response */
msg->rsp[0] = msg->data[0] | 4;
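	/* ORing in 4 bumps the netfn (held in bits 2-7 of the first byte)
	   by one, turning the even request netfn into the odd response
	   netfn per the IPMI encoding. */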
msg->rsp[1] = msg->data[1];
msg->rsp[2] = 0xFF; /* Unknown error. */
msg->rsp_size = 3;
smi_info->curr_msg = NULL;
deliver_recv_msg(smi_info, msg);
}
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
int rv;
struct list_head *entry = NULL;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
	/* No need to save flags, we already have interrupts off and we
	   already hold the SMI lock. */
spin_lock(&(smi_info->msg_lock));
/* Pick the high priority queue first. */
if (!list_empty(&(smi_info->hp_xmit_msgs))) {
entry = smi_info->hp_xmit_msgs.next;
} else if (!list_empty(&(smi_info->xmit_msgs))) {
entry = smi_info->xmit_msgs.next;
}
if (!entry) {
smi_info->curr_msg = NULL;
rv = SI_SM_IDLE;
} else {
int err;
list_del(entry);
smi_info->curr_msg = list_entry(entry,
struct ipmi_smi_msg,
link);
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
err = atomic_notifier_call_chain(&xaction_notifier_list,
0, smi_info);
if (err & NOTIFY_STOP_MASK) {
rv = SI_SM_CALL_WITHOUT_DELAY;
goto out;
}
err = smi_info->handlers->start_transaction(
smi_info->si_sm,
smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
if (err) {
return_hosed_msg(smi_info);
}
rv = SI_SM_CALL_WITHOUT_DELAY;
}
out:
spin_unlock(&(smi_info->msg_lock));
return rv;
}
static void start_enable_irq(struct smi_info *smi_info)
{
unsigned char msg[2];
/* If we are enabling interrupts, we have to tell the
BMC to use them. */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
smi_info->si_state = SI_ENABLE_INTERRUPTS1;
}
static void start_clear_flags(struct smi_info *smi_info)
{
unsigned char msg[3];
/* Make sure the watchdog pre-timeout flag is not set at startup. */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
msg[2] = WDT_PRE_TIMEOUT_INT;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
smi_info->si_state = SI_CLEARING_FLAGS;
}
/* When we have a situation where we run out of memory and cannot
allocate messages, we just leave them in the BMC and run the system
polled until we can allocate some memory. Once we have some
memory, we will re-enable the interrupt. */
static inline void disable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
disable_irq_nosync(smi_info->irq);
smi_info->interrupt_disabled = 1;
}
}
static inline void enable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
enable_irq(smi_info->irq);
smi_info->interrupt_disabled = 0;
}
}
static void handle_flags(struct smi_info *smi_info)
{
retry:
if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
/* Watchdog pre-timeout */
spin_lock(&smi_info->count_lock);
smi_info->watchdog_pretimeouts++;
spin_unlock(&smi_info->count_lock);
start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
spin_unlock(&(smi_info->si_lock));
ipmi_smi_watchdog_pretimeout(smi_info->intf);
spin_lock(&(smi_info->si_lock));
} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
/* Messages available. */
smi_info->curr_msg = ipmi_alloc_smi_msg();
if (!smi_info->curr_msg) {
disable_si_irq(smi_info);
smi_info->si_state = SI_NORMAL;
return;
}
enable_si_irq(smi_info);
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
smi_info->curr_msg->data_size = 2;
smi_info->handlers->start_transaction(
smi_info->si_sm,
smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
smi_info->si_state = SI_GETTING_MESSAGES;
} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
/* Events available. */
smi_info->curr_msg = ipmi_alloc_smi_msg();
if (!smi_info->curr_msg) {
disable_si_irq(smi_info);
smi_info->si_state = SI_NORMAL;
return;
}
enable_si_irq(smi_info);
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
smi_info->curr_msg->data_size = 2;
smi_info->handlers->start_transaction(
smi_info->si_sm,
smi_info->curr_msg->data,
smi_info->curr_msg->data_size);
smi_info->si_state = SI_GETTING_EVENTS;
} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
smi_info->oem_data_avail_handler) {
if (smi_info->oem_data_avail_handler(smi_info))
goto retry;
} else {
smi_info->si_state = SI_NORMAL;
}
}
static void handle_transaction_done(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg;
#ifdef DEBUG_TIMING
struct timeval t;
do_gettimeofday(&t);
printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
switch (smi_info->si_state) {
case SI_NORMAL:
if (!smi_info->curr_msg)
break;
smi_info->curr_msg->rsp_size
= smi_info->handlers->get_result(
smi_info->si_sm,
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
		/* Do this here because deliver_recv_msg() releases the
lock, and a new message can be put in during the
time the lock is released. */
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
deliver_recv_msg(smi_info, msg);
break;
case SI_GETTING_FLAGS:
{
unsigned char msg[4];
unsigned int len;
/* We got the flags from the SMI, now handle them. */
len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
/* Error fetching flags, just give up for
now. */
smi_info->si_state = SI_NORMAL;
} else if (len < 4) {
/* Hmm, no flags. That's technically illegal, but
don't use uninitialized data. */
smi_info->si_state = SI_NORMAL;
} else {
smi_info->msg_flags = msg[3];
handle_flags(smi_info);
}
break;
}
case SI_CLEARING_FLAGS:
case SI_CLEARING_FLAGS_THEN_SET_IRQ:
{
unsigned char msg[3];
/* We cleared the flags. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
printk(KERN_WARNING
"ipmi_si: Error clearing flags: %2.2x\n",
msg[2]);
}
if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
start_enable_irq(smi_info);
else
smi_info->si_state = SI_NORMAL;
break;
}
case SI_GETTING_EVENTS:
{
smi_info->curr_msg->rsp_size
= smi_info->handlers->get_result(
smi_info->si_sm,
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
		/* Do this here because deliver_recv_msg() releases the
lock, and a new message can be put in during the
time the lock is released. */
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
if (msg->rsp[2] != 0) {
/* Error getting event, probably done. */
msg->done(msg);
/* Take off the event flag. */
smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
handle_flags(smi_info);
} else {
spin_lock(&smi_info->count_lock);
smi_info->events++;
spin_unlock(&smi_info->count_lock);
/* Do this before we deliver the message
because delivering the message releases the
lock and something else can mess with the
state. */
handle_flags(smi_info);
deliver_recv_msg(smi_info, msg);
}
break;
}
case SI_GETTING_MESSAGES:
{
smi_info->curr_msg->rsp_size
= smi_info->handlers->get_result(
smi_info->si_sm,
smi_info->curr_msg->rsp,
IPMI_MAX_MSG_LENGTH);
		/* Do this here because deliver_recv_msg() releases the
lock, and a new message can be put in during the
time the lock is released. */
msg = smi_info->curr_msg;
smi_info->curr_msg = NULL;
if (msg->rsp[2] != 0) {
/* Error getting event, probably done. */
msg->done(msg);
/* Take off the msg flag. */
smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
handle_flags(smi_info);
} else {
spin_lock(&smi_info->count_lock);
smi_info->incoming_messages++;
spin_unlock(&smi_info->count_lock);
/* Do this before we deliver the message
because delivering the message releases the
lock and something else can mess with the
state. */
handle_flags(smi_info);
deliver_recv_msg(smi_info, msg);
}
break;
}
case SI_ENABLE_INTERRUPTS1:
{
unsigned char msg[4];
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
printk(KERN_WARNING
"ipmi_si: Could not enable interrupts"
", failed get, using polled mode.\n");
smi_info->si_state = SI_NORMAL;
} else {
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
msg[2] = msg[3] | 1; /* enable msg queue int */
smi_info->handlers->start_transaction(
smi_info->si_sm, msg, 3);
smi_info->si_state = SI_ENABLE_INTERRUPTS2;
}
break;
}
case SI_ENABLE_INTERRUPTS2:
{
unsigned char msg[4];
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
printk(KERN_WARNING
"ipmi_si: Could not enable interrupts"
", failed set, using polled mode.\n");
}
smi_info->si_state = SI_NORMAL;
break;
}
}
}
/* Called on timeouts and events. Timeouts should pass the elapsed
time, interrupts should pass in zero. */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
int time)
{
enum si_sm_result si_sm_result;
restart:
/* There used to be a loop here that waited a little while
(around 25us) before giving up. That turned out to be
pointless, the minimum delays I was seeing were in the 300us
range, which is far too long to wait in an interrupt. So
we just run until the state machine tells us something
happened or it needs a delay. */
si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
time = 0;
while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
{
si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
}
if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
{
spin_lock(&smi_info->count_lock);
smi_info->complete_transactions++;
spin_unlock(&smi_info->count_lock);
handle_transaction_done(smi_info);
si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
}
else if (si_sm_result == SI_SM_HOSED)
{
spin_lock(&smi_info->count_lock);
smi_info->hosed_count++;
spin_unlock(&smi_info->count_lock);
		/* Do this before return_hosed_msg(), because that
		   releases the lock. */
smi_info->si_state = SI_NORMAL;
if (smi_info->curr_msg != NULL) {
/* If we were handling a user message, format
a response to send to the upper layer to
tell it about the error. */
return_hosed_msg(smi_info);
}
si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
}
/* We prefer handling attn over new messages. */
if (si_sm_result == SI_SM_ATTN)
{
unsigned char msg[2];
spin_lock(&smi_info->count_lock);
smi_info->attentions++;
spin_unlock(&smi_info->count_lock);
		/* Got an attn, send down a get message flags to see
what's causing it. It would be better to handle
this in the upper layer, but due to the way
interrupts work with the SMI, that's not really
possible. */
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_MSG_FLAGS_CMD;
smi_info->handlers->start_transaction(
smi_info->si_sm, msg, 2);
smi_info->si_state = SI_GETTING_FLAGS;
goto restart;
}
/* If we are currently idle, try to start the next message. */
if (si_sm_result == SI_SM_IDLE) {
spin_lock(&smi_info->count_lock);
smi_info->idles++;
spin_unlock(&smi_info->count_lock);
si_sm_result = start_next_msg(smi_info);
if (si_sm_result != SI_SM_IDLE)
goto restart;
}
if ((si_sm_result == SI_SM_IDLE)
&& (atomic_read(&smi_info->req_events)))
{
/* We are idle and the upper layer requested that I fetch
events, so do so. */
unsigned char msg[2];
spin_lock(&smi_info->count_lock);
smi_info->flag_fetches++;
spin_unlock(&smi_info->count_lock);
atomic_set(&smi_info->req_events, 0);
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_MSG_FLAGS_CMD;
smi_info->handlers->start_transaction(
smi_info->si_sm, msg, 2);
smi_info->si_state = SI_GETTING_FLAGS;
goto restart;
}
return si_sm_result;
}
static void sender(void *send_info,
struct ipmi_smi_msg *msg,
int priority)
{
struct smi_info *smi_info = send_info;
enum si_sm_result result;
unsigned long flags;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
spin_lock_irqsave(&(smi_info->msg_lock), flags);
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
if (smi_info->run_to_completion) {
/* If we are running to completion, then throw it in
the list and run transactions until everything is
clear. Priority doesn't matter here. */
list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
/* We have to release the msg lock and claim the smi
lock in this case, because of race conditions. */
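		/* (Illustrative reading of the race, as an assumption not
		   stated by the original comment: if msg_lock were still
		   held while smi_event_handler() delivered a response, the
		   upper layer could call sender() again and re-take
		   msg_lock on the same CPU, deadlocking.) */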
spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
spin_lock_irqsave(&(smi_info->si_lock), flags);
result = smi_event_handler(smi_info, 0);
while (result != SI_SM_IDLE) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info,
SI_SHORT_TIMEOUT_USEC);
}
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
return;
} else {
if (priority > 0) {
list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
} else {
list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
}
}
spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
spin_lock_irqsave(&(smi_info->si_lock), flags);
if ((smi_info->si_state == SI_NORMAL)
&& (smi_info->curr_msg == NULL))
{
start_next_msg(smi_info);
}
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
static void set_run_to_completion(void *send_info, int i_run_to_completion)
{
struct smi_info *smi_info = send_info;
enum si_sm_result result;
unsigned long flags;
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_info->run_to_completion = i_run_to_completion;
if (i_run_to_completion) {
result = smi_event_handler(smi_info, 0);
while (result != SI_SM_IDLE) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info,
SI_SHORT_TIMEOUT_USEC);
}
}
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
static int ipmi_thread(void *data)
{
struct smi_info *smi_info = data;
unsigned long flags;
enum si_sm_result smi_result;
set_user_nice(current, 19);
while (!kthread_should_stop()) {
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_result = smi_event_handler(smi_info, 0);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
/* do nothing */
}
else if (smi_result == SI_SM_CALL_WITH_DELAY)
schedule();
else
schedule_timeout_interruptible(1);
}
return 0;
}
static void poll(void *send_info)
{
struct smi_info *smi_info = send_info;
smi_event_handler(smi_info, 0);
}
static void request_events(void *send_info)
{
struct smi_info *smi_info = send_info;
atomic_set(&smi_info->req_events, 1);
}
static int initialized = 0;
static void smi_timeout(unsigned long data)
{
struct smi_info *smi_info = (struct smi_info *) data;
enum si_sm_result smi_result;
unsigned long flags;
unsigned long jiffies_now;
long time_diff;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
if (atomic_read(&smi_info->stop_operation))
return;
spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
* SI_USEC_PER_JIFFY);
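	/* Illustrative numbers, again assuming HZ = 250 (so
	   SI_USEC_PER_JIFFY = 4000): three jiffies since the last timeout
	   gives time_diff = 3 * 4000 = 12000 microseconds for the state
	   machine. */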
smi_result = smi_event_handler(smi_info, time_diff);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
smi_info->last_timeout_jiffies = jiffies_now;
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
spin_lock_irqsave(&smi_info->count_lock, flags);
smi_info->long_timeouts++;
spin_unlock_irqrestore(&smi_info->count_lock, flags);
goto do_add_timer;
}
/* If the state machine asks for a short delay, then shorten
the timer timeout. */
if (smi_result == SI_SM_CALL_WITH_DELAY) {
spin_lock_irqsave(&smi_info->count_lock, flags);
smi_info->short_timeouts++;
spin_unlock_irqrestore(&smi_info->count_lock, flags);
smi_info->si_timer.expires = jiffies + 1;
} else {
spin_lock_irqsave(&smi_info->count_lock, flags);
smi_info->long_timeouts++;
spin_unlock_irqrestore(&smi_info->count_lock, flags);
smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
}
do_add_timer:
add_timer(&(smi_info->si_timer));
}
static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
{
struct smi_info *smi_info = data;
unsigned long flags;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
spin_lock_irqsave(&(smi_info->si_lock), flags);
spin_lock(&smi_info->count_lock);
smi_info->interrupts++;
spin_unlock(&smi_info->count_lock);
if (atomic_read(&smi_info->stop_operation))
goto out;
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
smi_event_handler(smi_info, 0);
out:
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
return IRQ_HANDLED;
}
static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
{
struct smi_info *smi_info = data;
/* We need to clear the IRQ flag for the BT interface. */
smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_CLEAR_IRQ_BIT
| IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
return si_irq_handler(irq, data, regs);
}
static int smi_start_processing(void *send_info,
ipmi_smi_t intf)
{
struct smi_info *new_smi = send_info;
new_smi->intf = intf;
/* Set up the timer that drives the interface. */
setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
new_smi->last_timeout_jiffies = jiffies;
mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
if (new_smi->si_type != SI_BT) {
new_smi->thread = kthread_run(ipmi_thread, new_smi,
"kipmi%d", new_smi->intf_num);
if (IS_ERR(new_smi->thread)) {
printk(KERN_NOTICE "ipmi_si_intf: Could not start"
" kernel thread due to error %ld, only using"
" timers to drive the interface\n",
PTR_ERR(new_smi->thread));
new_smi->thread = NULL;
}
}
return 0;
}
static struct ipmi_smi_handlers handlers =
{
.owner = THIS_MODULE,
.start_processing = smi_start_processing,
.sender = sender,
.request_events = request_events,
.set_run_to_completion = set_run_to_completion,
.poll = poll,
};
/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
   a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_PARMS. */
#define SI_MAX_PARMS 4
static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */
#define DEFAULT_REGSPACING 1
static int si_trydefaults = 1;
static char *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char si_type_str[MAX_SI_TYPE_STR];
static unsigned long addrs[SI_MAX_PARMS];
static int num_addrs;
static unsigned int ports[SI_MAX_PARMS];
static int num_ports;
static int irqs[SI_MAX_PARMS];
static int num_irqs;
static int regspacings[SI_MAX_PARMS];
static int num_regspacings = 0;
static int regsizes[SI_MAX_PARMS];
static int num_regsizes = 0;
static int regshifts[SI_MAX_PARMS];
static int num_regshifts = 0;
static int slave_addrs[SI_MAX_PARMS];
static int num_slave_addrs = 0;
module_param_named(trydefaults, si_trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
" default scan of the KCS and SMIC interface at the standard"
" address");
module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
MODULE_PARM_DESC(type, "Defines the type of each interface, each"
" interface separated by commas. The types are 'kcs',"
" 'smic', and 'bt'. For example si_type=kcs,bt will set"
" the first interface to kcs and the second to bt");
module_param_array(addrs, long, &num_addrs, 0);
MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
" addresses separated by commas. Only use if an interface"
" is in memory. Otherwise, set it to zero or leave"
" it blank.");
module_param_array(ports, int, &num_ports, 0);
MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
" addresses separated by commas. Only use if an interface"
" is a port. Otherwise, set it to zero or leave"
" it blank.");
module_param_array(irqs, int, &num_irqs, 0);
MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
" addresses separated by commas. Only use if an interface"
" has an interrupt. Otherwise, set it to zero or leave"
" it blank.");
module_param_array(regspacings, int, &num_regspacings, 0);
MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
" and each successive register used by the interface. For"
" instance, if the start address is 0xca2 and the spacing"
" is 2, then the second address is at 0xca4. Defaults"
" to 1.");
module_param_array(regsizes, int, &num_regsizes, 0);
MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
		 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
		 " 16-bit, 32-bit, or 64-bit register.  Use this if the"
		 " 8-bit IPMI register has to be read from a larger"
		 " register.");
module_param_array(regshifts, int, &num_regshifts, 0);
MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
		 " IPMI register, in bits.  For instance, if the data"
		 " is read from a 32-bit word and the IPMI data is in"
		 " bits 8-15, then the shift would be 8.");
module_param_array(slave_addrs, int, &num_slave_addrs, 0);
MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
" the controller. Normally this is 0x20, but can be"
" overridden by this parm. This is an array indexed"
" by interface number.");
#define IPMI_IO_ADDR_SPACE 0
#define IPMI_MEM_ADDR_SPACE 1
static char *addr_space_to_str[] = { "I/O", "memory" };
static void std_irq_cleanup(struct smi_info *info)
{
if (info->si_type == SI_BT)
/* Disable the interrupt in the BT interface. */
info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
free_irq(info->irq, info);
}
static int std_irq_setup(struct smi_info *info)
{
int rv;
if (!info->irq)
return 0;
if (info->si_type == SI_BT) {
rv = request_irq(info->irq,
si_bt_irq_handler,
IRQF_DISABLED,
DEVICE_NAME,
info);
if (!rv)
/* Enable the interrupt in the BT interface. */
info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
} else
rv = request_irq(info->irq,
si_irq_handler,
IRQF_DISABLED,
DEVICE_NAME,
info);
if (rv) {
printk(KERN_WARNING
"ipmi_si: %s unable to claim interrupt %d,"
" running polled\n",
DEVICE_NAME, info->irq);
info->irq = 0;
} else {
info->irq_cleanup = std_irq_cleanup;
printk(" Using irq %d\n", info->irq);
}
return rv;
}
static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
{
unsigned int addr = io->addr_data;
return inb(addr + (offset * io->regspacing));
}
static void port_outb(struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
unsigned int addr = io->addr_data;
outb(b, addr + (offset * io->regspacing));
}
static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
{
unsigned int addr = io->addr_data;
return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}
static void port_outw(struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
unsigned int addr = io->addr_data;
outw(b << io->regshift, addr + (offset * io->regspacing));
}
static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
{
unsigned int addr = io->addr_data;
return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
}
static void port_outl(struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
unsigned int addr = io->addr_data;
outl(b << io->regshift, addr+(offset * io->regspacing));
}
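/* Illustrative regshift math for the 16/32-bit accessors above,
   assuming regshift = 8: port_inw() computes (inw(addr) >> 8) & 0xff,
   extracting the IPMI byte from bits 8-15 of the wider register, and
   port_outw() mirrors it by writing b << 8. */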
static void port_cleanup(struct smi_info *info)
{
unsigned int addr = info->io.addr_data;
int idx;
if (addr) {
for (idx = 0; idx < info->io_size; idx++) {
release_region(addr + idx * info->io.regspacing,
info->io.regsize);
}
}
}
static int port_setup(struct smi_info *info)
{
unsigned int addr = info->io.addr_data;
int idx;
if (!addr)
return -ENODEV;
info->io_cleanup = port_cleanup;
/* Figure out the actual inb/inw/inl/etc routine to use based
upon the register size. */
switch (info->io.regsize) {
case 1:
info->io.inputb = port_inb;
info->io.outputb = port_outb;
break;
case 2:
info->io.inputb = port_inw;
info->io.outputb = port_outw;
break;
case 4:
info->io.inputb = port_inl;
info->io.outputb = port_outl;
break;
default:
printk("ipmi_si: Invalid register size: %d\n",
info->io.regsize);
return -EINVAL;
}
/* Some BIOSes reserve disjoint I/O regions in their ACPI
* tables. This causes problems when trying to register the
* entire I/O region. Therefore we must register each I/O
* port separately.
*/
for (idx = 0; idx < info->io_size; idx++) {
if (request_region(addr + idx * info->io.regspacing,
info->io.regsize, DEVICE_NAME) == NULL) {
/* Undo allocations */
while (idx--) {
release_region(addr + idx * info->io.regspacing,
info->io.regsize);
}
return -EIO;
}
}
return 0;
}
static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
{
return readb((io->addr)+(offset * io->regspacing));
}
static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
writeb(b, (io->addr)+(offset * io->regspacing));
}
static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
{
	return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
		& 0xff;
}
static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
{
	return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
		& 0xff;
}
static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#ifdef readq
static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
{
	return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
		& 0xff;
}
static void mem_outq(struct si_sm_io *io, unsigned int offset,
unsigned char b)
{
writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
#endif
static void mem_cleanup(struct smi_info *info)
{
unsigned long addr = info->io.addr_data;
int mapsize;
if (info->io.addr) {
iounmap(info->io.addr);
mapsize = ((info->io_size * info->io.regspacing)
- (info->io.regspacing - info->io.regsize));
release_mem_region(addr, mapsize);
}
}
static int mem_setup(struct smi_info *info)
{
unsigned long addr = info->io.addr_data;
int mapsize;
if (!addr)
return -ENODEV;
info->io_cleanup = mem_cleanup;
/* Figure out the actual readb/readw/readl/etc routine to use based
upon the register size. */
switch (info->io.regsize) {
case 1:
info->io.inputb = intf_mem_inb;
info->io.outputb = intf_mem_outb;
break;
case 2:
info->io.inputb = intf_mem_inw;
info->io.outputb = intf_mem_outw;
break;
case 4:
info->io.inputb = intf_mem_inl;
info->io.outputb = intf_mem_outl;
break;
#ifdef readq
case 8:
info->io.inputb = mem_inq;
info->io.outputb = mem_outq;
break;
#endif
default:
printk("ipmi_si: Invalid register size: %d\n",
info->io.regsize);
return -EINVAL;
}
/* Calculate the total amount of memory to claim. This is an
* unusual looking calculation, but it avoids claiming any
* more memory than it has to. It will claim everything
* between the first address to the end of the last full
* register. */
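	/* Worked example: io_size = 3 registers, regspacing = 4, regsize
	   = 1 gives mapsize = (3 * 4) - (4 - 1) = 9 bytes, covering
	   offsets 0 through 8 and ending exactly at the last register's
	   final byte. */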
mapsize = ((info->io_size * info->io.regspacing)
- (info->io.regspacing - info->io.regsize));
if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
return -EIO;
info->io.addr = ioremap(addr, mapsize);
if (info->io.addr == NULL) {
release_mem_region(addr, mapsize);
return -EIO;
}
return 0;
}
static __devinit void hardcode_find_bmc(void)
{
int i;
struct smi_info *info;
for (i = 0; i < SI_MAX_PARMS; i++) {
if (!ports[i] && !addrs[i])
continue;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return;
info->addr_source = "hardcoded";
if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
info->si_type = SI_KCS;
} else if (strcmp(si_type[i], "smic") == 0) {
info->si_type = SI_SMIC;
} else if (strcmp(si_type[i], "bt") == 0) {
info->si_type = SI_BT;
} else {
printk(KERN_WARNING
"ipmi_si: Interface type specified "
"for interface %d, was invalid: %s\n",
i, si_type[i]);
kfree(info);
continue;
}
if (ports[i]) {
/* An I/O port */
info->io_setup = port_setup;
info->io.addr_data = ports[i];
info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else if (addrs[i]) {
/* A memory port */
info->io_setup = mem_setup;
info->io.addr_data = addrs[i];
info->io.addr_type = IPMI_MEM_ADDR_SPACE;
} else {
printk(KERN_WARNING
"ipmi_si: Interface type specified "
"for interface %d, "
"but port and address were not set or "
"set to zero.\n", i);
kfree(info);
continue;
}
info->io.addr = NULL;
info->io.regspacing = regspacings[i];
if (!info->io.regspacing)
info->io.regspacing = DEFAULT_REGSPACING;
info->io.regsize = regsizes[i];
if (!info->io.regsize)
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = regshifts[i];
info->irq = irqs[i];
if (info->irq)
info->irq_setup = std_irq_setup;
try_smi_init(info);
}
}
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
/* Once we get an ACPI failure, we don't try any more, because we go
through the tables sequentially. Once we don't find a table, there
are no more. */
static int acpi_failure = 0;
/* For GPE-type interrupts. */
static u32 ipmi_acpi_gpe(void *context)
{
struct smi_info *smi_info = context;
unsigned long flags;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
spin_lock_irqsave(&(smi_info->si_lock), flags);
spin_lock(&smi_info->count_lock);
smi_info->interrupts++;
spin_unlock(&smi_info->count_lock);
if (atomic_read(&smi_info->stop_operation))
goto out;
#ifdef DEBUG_TIMING
do_gettimeofday(&t);
printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
smi_event_handler(smi_info, 0);
out:
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
return ACPI_INTERRUPT_HANDLED;
}
static void acpi_gpe_irq_cleanup(struct smi_info *info)
{
if (!info->irq)
return;
acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
}
static int acpi_gpe_irq_setup(struct smi_info *info)
{
acpi_status status;
if (!info->irq)
return 0;
/* FIXME - is level triggered right? */
status = acpi_install_gpe_handler(NULL,
info->irq,
ACPI_GPE_LEVEL_TRIGGERED,
&ipmi_acpi_gpe,
info);
if (status != AE_OK) {
printk(KERN_WARNING
"ipmi_si: %s unable to claim ACPI GPE %d,"
" running polled\n",
DEVICE_NAME, info->irq);
info->irq = 0;
return -EINVAL;
} else {
info->irq_cleanup = acpi_gpe_irq_cleanup;
printk(" Using ACPI GPE %d\n", info->irq);
return 0;
}
}
/*
* Defined at
* http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
*/
struct SPMITable {
s8 Signature[4];
u32 Length;
u8 Revision;
u8 Checksum;
s8 OEMID[6];
s8 OEMTableID[8];
s8 OEMRevision[4];
s8 CreatorID[4];
s8 CreatorRevision[4];
u8 InterfaceType;
u8 IPMIlegacy;
s16 SpecificationRevision;
/*
* Bit 0 - SCI interrupt supported
* Bit 1 - I/O APIC/SAPIC
*/
u8 InterruptType;
/* If bit 0 of InterruptType is set, then this is the SCI
interrupt in the GPEx_STS register. */
u8 GPE;
s16 Reserved;
/* If bit 1 of InterruptType is set, then this is the I/O
APIC/SAPIC interrupt. */
u32 GlobalSystemInterrupt;
/* The actual register address. */
struct acpi_generic_address addr;
u8 UID[4];
s8 spmi_id[1]; /* A '\0' terminated array starts here. */
};
static __devinit int try_init_acpi(struct SPMITable *spmi)
{
struct smi_info *info;
char *io_type;
u8 addr_space;
if (spmi->IPMIlegacy != 1) {
printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
return -ENODEV;
}
if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
addr_space = IPMI_MEM_ADDR_SPACE;
else
addr_space = IPMI_IO_ADDR_SPACE;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
return -ENOMEM;
}
info->addr_source = "ACPI";
/* Figure out the interface type. */
switch (spmi->InterfaceType)
{
case 1: /* KCS */
info->si_type = SI_KCS;
break;
case 2: /* SMIC */
info->si_type = SI_SMIC;
break;
case 3: /* BT */
info->si_type = SI_BT;
break;
default:
printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
spmi->InterfaceType);
kfree(info);
return -EIO;
}
if (spmi->InterruptType & 1) {
/* We've got a GPE interrupt. */
info->irq = spmi->GPE;
info->irq_setup = acpi_gpe_irq_setup;
} else if (spmi->InterruptType & 2) {
/* We've got an APIC/SAPIC interrupt. */
info->irq = spmi->GlobalSystemInterrupt;
info->irq_setup = std_irq_setup;
} else {
/* Use the default interrupt setting. */
info->irq = 0;
info->irq_setup = NULL;
}
if (spmi->addr.register_bit_width) {
/* A (hopefully) properly formed register bit width. */
info->io.regspacing = spmi->addr.register_bit_width / 8;
} else {
info->io.regspacing = DEFAULT_REGSPACING;
}
info->io.regsize = info->io.regspacing;
info->io.regshift = spmi->addr.register_bit_offset;
	if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		io_type = "memory";
		info->io_setup = mem_setup;
		info->io.addr_type = IPMI_MEM_ADDR_SPACE;
	} else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		io_type = "I/O";
		info->io_setup = port_setup;
		info->io.addr_type = IPMI_IO_ADDR_SPACE;
	}
} else {
kfree(info);
printk("ipmi_si: Unknown ACPI I/O Address type\n");
return -EIO;
}
info->io.addr_data = spmi->addr.address;
try_smi_init(info);
return 0;
}
static __devinit void acpi_find_bmc(void)
{
acpi_status status;
struct SPMITable *spmi;
int i;
if (acpi_disabled)
return;
if (acpi_failure)
return;
for (i = 0; ; i++) {
status = acpi_get_firmware_table("SPMI", i+1,
ACPI_LOGICAL_ADDRESSING,
(struct acpi_table_header **)
&spmi);
if (status != AE_OK)
return;
try_init_acpi(spmi);
}
}
#endif
#ifdef CONFIG_DMI
struct dmi_ipmi_data
{
u8 type;
u8 addr_space;
unsigned long base_addr;
u8 irq;
u8 offset;
u8 slave_addr;
};
static int __devinit decode_dmi(struct dmi_header *dm,
struct dmi_ipmi_data *dmi)
{
u8 *data = (u8 *)dm;
unsigned long base_addr;
u8 reg_spacing;
u8 len = dm->length;
dmi->type = data[4];
memcpy(&base_addr, data+8, sizeof(unsigned long));
if (len >= 0x11) {
if (base_addr & 1) {
/* I/O */
base_addr &= 0xFFFE;
dmi->addr_space = IPMI_IO_ADDR_SPACE;
}
else {
/* Memory */
dmi->addr_space = IPMI_MEM_ADDR_SPACE;
}
/* If bit 4 of byte 0x10 is set, then the lsb for the address
is odd. */
dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
dmi->irq = data[0x11];
/* The top two bits of byte 0x10 hold the register spacing. */
reg_spacing = (data[0x10] & 0xC0) >> 6;
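		/* Worked example: data[0x10] = 0x45 gives
		   (0x45 & 0xC0) >> 6 = 0x01, i.e. registers on 32-bit
		   boundaries, so dmi->offset becomes 4. */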
switch(reg_spacing){
case 0x00: /* Byte boundaries */
dmi->offset = 1;
break;
case 0x01: /* 32-bit boundaries */
dmi->offset = 4;
break;
case 0x02: /* 16-byte boundaries */
dmi->offset = 16;
break;
default:
/* Some other interface, just ignore it. */
return -EIO;
}
} else {
/* Old DMI spec. */
/* Note that technically, the lower bit of the base
* address should be 1 if the address is I/O and 0 if
* the address is in memory. So many systems get that
* wrong (and all that I have seen are I/O) so we just
* ignore that bit and assume I/O. Systems that use
* memory should use the newer spec, anyway. */
dmi->base_addr = base_addr & 0xfffe;
dmi->addr_space = IPMI_IO_ADDR_SPACE;
dmi->offset = 1;
}
dmi->slave_addr = data[6];
return 0;
}
static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
{
struct smi_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
printk(KERN_ERR
"ipmi_si: Could not allocate SI data\n");
return;
}
info->addr_source = "SMBIOS";
switch (ipmi_data->type) {
case 0x01: /* KCS */
info->si_type = SI_KCS;
break;
case 0x02: /* SMIC */
info->si_type = SI_SMIC;
break;
case 0x03: /* BT */
info->si_type = SI_BT;
break;
	default:
		kfree(info);
		return;
}
switch (ipmi_data->addr_space) {
case IPMI_MEM_ADDR_SPACE:
info->io_setup = mem_setup;
info->io.addr_type = IPMI_MEM_ADDR_SPACE;
break;
case IPMI_IO_ADDR_SPACE:
info->io_setup = port_setup;
info->io.addr_type = IPMI_IO_ADDR_SPACE;
break;
default:
kfree(info);
printk(KERN_WARNING
"ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
ipmi_data->addr_space);
return;
}
info->io.addr_data = ipmi_data->base_addr;
info->io.regspacing = ipmi_data->offset;
if (!info->io.regspacing)
info->io.regspacing = DEFAULT_REGSPACING;
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
info->slave_addr = ipmi_data->slave_addr;
info->irq = ipmi_data->irq;
if (info->irq)
info->irq_setup = std_irq_setup;
try_smi_init(info);
}
static void __devinit dmi_find_bmc(void)
{
struct dmi_device *dev = NULL;
struct dmi_ipmi_data data;
int rv;
while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
if (!rv)
try_init_dmi(&data);
}
}
#endif /* CONFIG_DMI */
#ifdef CONFIG_PCI
#define PCI_ERMC_CLASSCODE 0x0C0700
#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
#define PCI_HP_VENDOR_ID 0x103C
#define PCI_MMC_DEVICE_ID 0x121A
#define PCI_MMC_ADDR_CW 0x10
static void ipmi_pci_cleanup(struct smi_info *info)
{
struct pci_dev *pdev = info->addr_source_data;
pci_disable_device(pdev);
}
static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rv;
int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
struct smi_info *info;
int first_reg_offset = 0;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
		return -ENOMEM;
info->addr_source = "PCI";
switch (class_type) {
case PCI_ERMC_CLASSCODE_TYPE_SMIC:
info->si_type = SI_SMIC;
break;
case PCI_ERMC_CLASSCODE_TYPE_KCS:
info->si_type = SI_KCS;
break;
case PCI_ERMC_CLASSCODE_TYPE_BT:
info->si_type = SI_BT;
break;
default:
kfree(info);
printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
pci_name(pdev), class_type);
		return -ENOMEM;
}
rv = pci_enable_device(pdev);
if (rv) {
printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
pci_name(pdev));
kfree(info);
return rv;
}
info->addr_source_cleanup = ipmi_pci_cleanup;
info->addr_source_data = pdev;
if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
first_reg_offset = 1;
if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
info->io_setup = port_setup;
info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else {
info->io_setup = mem_setup;
info->io.addr_type = IPMI_MEM_ADDR_SPACE;
}
info->io.addr_data = pci_resource_start(pdev, 0);
info->io.regspacing = DEFAULT_REGSPACING;
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
info->irq = pdev->irq;
if (info->irq)
info->irq_setup = std_irq_setup;
info->dev = &pdev->dev;
return try_smi_init(info);
}
static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
{
}
#ifdef CONFIG_PM
static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
return 0;
}
static int ipmi_pci_resume(struct pci_dev *pdev)
{
return 0;
}
#endif
static struct pci_device_id ipmi_pci_devices[] = {
{ PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
	{ PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
static struct pci_driver ipmi_pci_driver = {
.name = DEVICE_NAME,
.id_table = ipmi_pci_devices,
.probe = ipmi_pci_probe,
.remove = __devexit_p(ipmi_pci_remove),
#ifdef CONFIG_PM
.suspend = ipmi_pci_suspend,
.resume = ipmi_pci_resume,
#endif
};
#endif /* CONFIG_PCI */
static int try_get_dev_id(struct smi_info *smi_info)
{
unsigned char msg[2];
unsigned char *resp;
unsigned long resp_len;
enum si_sm_result smi_result;
int rv = 0;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
/* Do a Get Device ID command, since it comes back with some
useful info. */
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_DEVICE_ID_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
for (;;)
{
if (smi_result == SI_SM_CALL_WITH_DELAY ||
smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
schedule_timeout_uninterruptible(1);
smi_result = smi_info->handlers->event(
smi_info->si_sm, 100);
}
else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
{
smi_result = smi_info->handlers->event(
smi_info->si_sm, 0);
}
else
break;
}
if (smi_result == SI_SM_HOSED) {
/* We couldn't get the state machine to run, so whatever's at
the port is probably not an IPMI SMI interface. */
rv = -ENODEV;
goto out;
}
/* Otherwise, we got some data. */
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 14) {
/* That's odd, it should be longer. */
rv = -EINVAL;
goto out;
}
if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
/* That's odd, it shouldn't be able to fail. */
rv = -EINVAL;
goto out;
}
/* Record info from the get device id, in case we need it. */
ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
out:
kfree(resp);
return rv;
}
static int type_file_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
char *out = (char *) page;
struct smi_info *smi = data;
switch (smi->si_type) {
case SI_KCS:
return sprintf(out, "kcs\n");
case SI_SMIC:
return sprintf(out, "smic\n");
case SI_BT:
return sprintf(out, "bt\n");
default:
return 0;
}
}
static int stat_file_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
char *out = (char *) page;
struct smi_info *smi = data;
out += sprintf(out, "interrupts_enabled: %d\n",
smi->irq && !smi->interrupt_disabled);
out += sprintf(out, "short_timeouts: %ld\n",
smi->short_timeouts);
out += sprintf(out, "long_timeouts: %ld\n",
smi->long_timeouts);
out += sprintf(out, "timeout_restarts: %ld\n",
smi->timeout_restarts);
out += sprintf(out, "idles: %ld\n",
smi->idles);
out += sprintf(out, "interrupts: %ld\n",
smi->interrupts);
out += sprintf(out, "attentions: %ld\n",
smi->attentions);
out += sprintf(out, "flag_fetches: %ld\n",
smi->flag_fetches);
out += sprintf(out, "hosed_count: %ld\n",
smi->hosed_count);
out += sprintf(out, "complete_transactions: %ld\n",
smi->complete_transactions);
out += sprintf(out, "events: %ld\n",
smi->events);
out += sprintf(out, "watchdog_pretimeouts: %ld\n",
smi->watchdog_pretimeouts);
out += sprintf(out, "incoming_messages: %ld\n",
smi->incoming_messages);
return (out - ((char *) page));
}
/*
* oem_data_avail_to_receive_msg_avail
* @info - smi_info structure with msg_flags set
*
* Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
* Returns 1 indicating need to re-run handle_flags().
*/
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
RECEIVE_MSG_AVAIL);
return 1;
}
/*
* setup_dell_poweredge_oem_data_handler
* @info - smi_info.device_id must be populated
*
* Systems that match, but have firmware version < 1.40 may assert
* OEM0_DATA_AVAIL on their own, without being told via Set Flags that
* it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
* upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
* as RECEIVE_MSG_AVAIL instead.
*
 * As Dell has no plans to release IPMI 1.5 firmware that would *ever*
 * assert the OEM[012] bits, and if it did, the driver would have to
* change to handle that properly, we don't actually check for the
* firmware version.
* Device ID = 0x20 BMC on PowerEdge 8G servers
* Device Revision = 0x80
* Firmware Revision1 = 0x01 BMC version 1.40
* Firmware Revision2 = 0x40 BCD encoded
* IPMI Version = 0x51 IPMI 1.5
* Manufacturer ID = A2 02 00 Dell IANA
*
 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
*
*/
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID 0x0002a2
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID) {
if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
smi_info->oem_data_avail_handler =
oem_data_avail_to_receive_msg_avail;
}
else if (ipmi_version_major(id) < 1 ||
(ipmi_version_major(id) == 1 &&
ipmi_version_minor(id) < 5)) {
smi_info->oem_data_avail_handler =
oem_data_avail_to_receive_msg_avail;
}
}
}
#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg = smi_info->curr_msg;
	/* Make it a response */
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
msg->rsp_size = 3;
smi_info->curr_msg = NULL;
deliver_recv_msg(smi_info, msg);
}
/*
* dell_poweredge_bt_xaction_handler
* @info - smi_info.device_id must be populated
*
* Dell PowerEdge servers with the BT interface (x6xx and 1750) will
* not respond to a Get SDR command if the length of the data
* requested is exactly 0x3A, which leads to command timeouts and no
* data returned. This intercepts such commands, and causes userspace
* callers to try again with a different-sized buffer, which succeeds.
*/
#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
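/* For reference, the Get SDR request as laid out in curr_msg->data:
   data[0] = netfn<<2 | LUN, data[1] = command, data[2-3] reservation
   ID, data[4-5] record ID, data[6] offset into record, data[7] bytes
   to read -- hence the data[7] == 0x3A test below. */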
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
unsigned long unused,
void *in)
{
struct smi_info *smi_info = in;
unsigned char *data = smi_info->curr_msg->data;
unsigned int size = smi_info->curr_msg->data_size;
if (size >= 8 &&
(data[0]>>2) == STORAGE_NETFN &&
data[1] == STORAGE_CMD_GET_SDR &&
data[7] == 0x3A) {
return_hosed_msg_badsize(smi_info);
return NOTIFY_STOP;
}
return NOTIFY_DONE;
}
static struct notifier_block dell_poweredge_bt_xaction_notifier = {
.notifier_call = dell_poweredge_bt_xaction_handler,
};
/*
* setup_dell_poweredge_bt_xaction_handler
* @info - smi_info.device_id must be filled in already
*
* Fills in smi_info.device_id.start_transaction_pre_hook
* when we know what function to use there.
*/
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
smi_info->si_type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
/*
* setup_oem_data_handler
* @info - smi_info.device_id must be filled in already
*
* Fills in smi_info.device_id.oem_data_available_handler
* when we know what function to use there.
*/
static void setup_oem_data_handler(struct smi_info *smi_info)
{
setup_dell_poweredge_oem_data_handler(smi_info);
}
static void setup_xaction_handlers(struct smi_info *smi_info)
{
setup_dell_poweredge_bt_xaction_handler(smi_info);
}
static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
{
if (smi_info->intf) {
/* The timer and thread are only running if the
interface has been started up and registered. */
if (smi_info->thread != NULL)
kthread_stop(smi_info->thread);
del_timer_sync(&smi_info->si_timer);
}
}
static __devinitdata struct ipmi_default_vals
{
int type;
int port;
} ipmi_defaults[] =
{
{ .type = SI_KCS, .port = 0xca2 },
{ .type = SI_SMIC, .port = 0xca9 },
{ .type = SI_BT, .port = 0xe4 },
{ .port = 0 }
};
static __devinit void default_find_bmc(void)
{
struct smi_info *info;
int i;
for (i = 0; ; i++) {
if (!ipmi_defaults[i].port)
break;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return;
info->addr_source = NULL;
info->si_type = ipmi_defaults[i].type;
info->io_setup = port_setup;
info->io.addr_data = ipmi_defaults[i].port;
info->io.addr_type = IPMI_IO_ADDR_SPACE;
info->io.addr = NULL;
info->io.regspacing = DEFAULT_REGSPACING;
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
if (try_smi_init(info) == 0) {
/* Found one... */
printk(KERN_INFO "ipmi_si: Found default %s state"
" machine at %s address 0x%lx\n",
si_to_str[info->si_type],
addr_space_to_str[info->io.addr_type],
info->io.addr_data);
return;
}
}
}
static int is_new_interface(struct smi_info *info)
{
struct smi_info *e;
list_for_each_entry(e, &smi_infos, link) {
if (e->io.addr_type != info->io.addr_type)
continue;
if (e->io.addr_data == info->io.addr_data)
return 0;
}
return 1;
}
static int try_smi_init(struct smi_info *new_smi)
{
int rv;
if (new_smi->addr_source) {
printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
" machine at %s address 0x%lx, slave address 0x%x,"
" irq %d\n",
new_smi->addr_source,
si_to_str[new_smi->si_type],
addr_space_to_str[new_smi->io.addr_type],
new_smi->io.addr_data,
new_smi->slave_addr, new_smi->irq);
}
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
printk(KERN_WARNING "ipmi_si: duplicate interface\n");
rv = -EBUSY;
goto out_err;
}
/* So we know not to free it unless we have allocated one. */
new_smi->intf = NULL;
new_smi->si_sm = NULL;
new_smi->handlers = NULL;
switch (new_smi->si_type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
case SI_SMIC:
new_smi->handlers = &smic_smi_handlers;
break;
case SI_BT:
new_smi->handlers = &bt_smi_handlers;
break;
default:
/* No support for anything else yet. */
rv = -EIO;
goto out_err;
}
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
printk(" Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
&new_smi->io);
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io_setup(new_smi);
if (rv) {
printk(" Could not set up I/O space\n");
goto out_err;
}
spin_lock_init(&(new_smi->si_lock));
spin_lock_init(&(new_smi->msg_lock));
spin_lock_init(&(new_smi->count_lock));
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
printk(KERN_INFO "ipmi_si: Interface detection"
" failed\n");
rv = -ENODEV;
goto out_err;
}
/* Attempt a get device id command. If it fails, we probably
don't have a BMC here. */
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->addr_source)
printk(KERN_INFO "ipmi_si: There appears to be no BMC"
" at this location\n");
goto out_err;
}
setup_oem_data_handler(new_smi);
setup_xaction_handlers(new_smi);
/* Try to claim any interrupts. */
if (new_smi->irq_setup)
new_smi->irq_setup(new_smi);
INIT_LIST_HEAD(&(new_smi->xmit_msgs));
INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
new_smi->curr_msg = NULL;
atomic_set(&new_smi->req_events, 0);
new_smi->run_to_completion = 0;
new_smi->interrupt_disabled = 0;
atomic_set(&new_smi->stop_operation, 0);
new_smi->intf_num = smi_num;
smi_num++;
/* Start clearing the flags before we enable interrupts or the
timer to avoid racing with the timer. */
start_clear_flags(new_smi);
/* IRQ is defined to be set when non-zero. */
if (new_smi->irq)
new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
if (!new_smi->dev) {
/* If we don't already have a device from something
* else (like PCI), then register a new one. */
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->intf_num);
if (!new_smi->pdev) {
rv = -ENOMEM;
printk(KERN_ERR
"ipmi_si_intf:"
" Unable to allocate platform device\n");
goto out_err;
}
new_smi->dev = &new_smi->pdev->dev;
new_smi->dev->driver = &ipmi_driver;
rv = platform_device_register(new_smi->pdev);
if (rv) {
printk(KERN_ERR
"ipmi_si_intf:"
" Unable to register system interface device:"
" %d\n",
rv);
goto out_err;
}
new_smi->dev_registered = 1;
}
rv = ipmi_register_smi(&handlers,
new_smi,
&new_smi->device_id,
new_smi->dev,
new_smi->slave_addr);
if (rv) {
printk(KERN_ERR
"ipmi_si: Unable to register device: error %d\n",
rv);
goto out_err_stop_timer;
}
rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
type_file_read_proc, NULL,
new_smi, THIS_MODULE);
if (rv) {
printk(KERN_ERR
"ipmi_si: Unable to create proc entry: %d\n",
rv);
goto out_err_stop_timer;
}
rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
stat_file_read_proc, NULL,
new_smi, THIS_MODULE);
if (rv) {
printk(KERN_ERR
"ipmi_si: Unable to create proc entry: %d\n",
rv);
goto out_err_stop_timer;
}
list_add_tail(&new_smi->link, &smi_infos);
mutex_unlock(&smi_infos_lock);
printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
return 0;
out_err_stop_timer:
atomic_inc(&new_smi->stop_operation);
wait_for_timer_and_thread(new_smi);
out_err:
if (new_smi->intf)
ipmi_unregister_smi(new_smi->intf);
if (new_smi->irq_cleanup)
new_smi->irq_cleanup(new_smi);
/* Wait until we know that we are out of any interrupt
handlers that might have been running before we free the
interrupt. */
synchronize_sched();
if (new_smi->si_sm) {
if (new_smi->handlers)
new_smi->handlers->cleanup(new_smi->si_sm);
kfree(new_smi->si_sm);
}
if (new_smi->addr_source_cleanup)
new_smi->addr_source_cleanup(new_smi);
if (new_smi->io_cleanup)
new_smi->io_cleanup(new_smi);
if (new_smi->dev_registered)
platform_device_unregister(new_smi->pdev);
kfree(new_smi);
mutex_unlock(&smi_infos_lock);
return rv;
}
static __devinit int init_ipmi_si(void)
{
int i;
char *str;
int rv;
if (initialized)
return 0;
initialized = 1;
/* Register the device drivers. */
rv = driver_register(&ipmi_driver);
if (rv) {
printk(KERN_ERR
"init_ipmi_si: Unable to register driver: %d\n",
rv);
return rv;
}
/* Parse out the si_type string into its components. */
str = si_type_str;
if (*str != '\0') {
for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
si_type[i] = str;
str = strchr(str, ',');
if (str) {
*str = '\0';
str++;
} else {
break;
}
}
}
printk(KERN_INFO "IPMI System Interface driver.\n");
hardcode_find_bmc();
#ifdef CONFIG_DMI
dmi_find_bmc();
#endif
#ifdef CONFIG_ACPI
if (si_trydefaults)
acpi_find_bmc();
#endif
#ifdef CONFIG_PCI
pci_module_init(&ipmi_pci_driver);
#endif
if (si_trydefaults) {
mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
/* No BMC was found, try defaults. */
mutex_unlock(&smi_infos_lock);
default_find_bmc();
} else {
mutex_unlock(&smi_infos_lock);
}
}
mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
#endif
driver_unregister(&ipmi_driver);
printk("ipmi_si: Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
return 0;
}
}
module_init(init_ipmi_si);
static void __devexit cleanup_one_si(struct smi_info *to_clean)
{
int rv;
unsigned long flags;
if (!to_clean)
return;
list_del(&to_clean->link);
/* Tell the timer and interrupt handlers that we are shutting
down. */
spin_lock_irqsave(&(to_clean->si_lock), flags);
spin_lock(&(to_clean->msg_lock));
atomic_inc(&to_clean->stop_operation);
if (to_clean->irq_cleanup)
to_clean->irq_cleanup(to_clean);
spin_unlock(&(to_clean->msg_lock));
spin_unlock_irqrestore(&(to_clean->si_lock), flags);
/* Wait until we know that we are out of any interrupt
handlers that might have been running before we free the
interrupt. */
synchronize_sched();
wait_for_timer_and_thread(to_clean);
/* Interrupts and timeouts are stopped, now make sure the
interface is in a clean state. */
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
poll(to_clean);
schedule_timeout_uninterruptible(1);
}
rv = ipmi_unregister_smi(to_clean->intf);
if (rv) {
printk(KERN_ERR
"ipmi_si: Unable to unregister device: errno=%d\n",
rv);
}
to_clean->handlers->cleanup(to_clean->si_sm);
kfree(to_clean->si_sm);
if (to_clean->addr_source_cleanup)
to_clean->addr_source_cleanup(to_clean);
if (to_clean->io_cleanup)
to_clean->io_cleanup(to_clean);
if (to_clean->dev_registered)
platform_device_unregister(to_clean->pdev);
kfree(to_clean);
}
static __exit void cleanup_ipmi_si(void)
{
struct smi_info *e, *tmp_e;
if (!initialized)
return;
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
#endif
mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
cleanup_one_si(e);
mutex_unlock(&smi_infos_lock);
driver_unregister(&ipmi_driver);
}
module_exit(cleanup_ipmi_si);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <[email protected]>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
| gpl-2.0 |
rminnich/k9 | sys/src/ape/lib/ap/plan9/ctime.c | 5644 | /*
* This file is part of the UCB release of Plan 9. It is subject to the license
* terms in the LICENSE file found in the top-level directory of this
* distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
* part of the UCB release of Plan 9, including this file, may be copied,
* modified, propagated, or distributed except according to the terms contained
* in the LICENSE file.
*/
/*
* This routine converts time as follows.
* The epoch is 0000 Jan 1 1970 GMT.
* The argument time is in seconds since then.
* The localtime(t) entry returns a pointer to an array
* containing
*
* seconds (0-59)
* minutes (0-59)
* hours (0-23)
* day of month (1-31)
* month (0-11)
* year-1970
* weekday (0-6, Sun is 0)
* day of the year
* daylight savings flag
*
* The routine gets the daylight savings time from the environment.
*
* asctime(tvec)
* where tvec is produced by localtime
* returns a ptr to a character string
* that has the ascii time in the form
*
*
* Thu Jan 01 00:00:00 1970\n\0
* 01234567890123456789012345
* 0         1         2
*
* ctime(t) just calls localtime, then asctime.
*/
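/*
 * Illustrative use of these entry points (a sketch, not part of the
 * library itself):
 *
 *	time_t now = time(0);
 *	struct tm *tp = localtime(&now);
 *	fputs(asctime(tp), stdout);	// e.g. "Thu Jan 01 00:00:00 1970\n"
 *	fputs(ctime(&now), stdout);	// equivalent one-call form
 */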
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <unistd.h>
#include <string.h>
static char dmsize[12] =
{
31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
/*
* The following table is used for 1974 and 1975 and
* gives the day number of the first day after the Sunday of the
* change.
*/
static int dysize(int);
static void ct_numb(char*, int);
static void readtimezone(void);
static int rd_name(char**, char*);
static int rd_long(char**, long*);
#define TZSIZE 150
static
struct
{
char stname[4];
char dlname[4];
long stdiff;
long dldiff;
long dlpairs[TZSIZE];
} timezone;
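/* readtimezone() below fills this structure from /env/timezone. The
 * expected layout (values illustrative) is the standard and daylight
 * zone names and offsets, followed by pairs of daylight start/end
 * times and a terminating 0, e.g.:
 *
 *	EST -18000 EDT -14400 9943200 25664400 ... 0
 */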
char*
ctime(const time_t *t)
{
return asctime(localtime(t));
}
struct tm*
gmtime_r(const time_t *timp, struct tm *result)
{
int d0, d1;
long hms, day;
time_t tim;
tim = *timp;
/*
* break initial number into days
*/
hms = tim % 86400L;
day = tim / 86400L;
if(hms < 0) {
hms += 86400L;
day -= 1;
}
/*
* generate hours:minutes:seconds
*/
result->tm_sec = hms % 60;
d1 = hms / 60;
result->tm_min = d1 % 60;
d1 /= 60;
result->tm_hour = d1;
/*
* day is the day number.
* generate day of the week.
* The addend is 4 mod 7 (1/1/1970 was Thursday)
*/
result->tm_wday = (day + 7340036L) % 7;
/*
* year number
*/
if(day >= 0)
for(d1 = 70; day >= dysize(d1); d1++)
day -= dysize(d1);
else
for (d1 = 70; day < 0; d1--)
day += dysize(d1-1);
result->tm_year = d1;
result->tm_yday = d0 = day;
/*
* generate month
*/
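/* n.b. patching the shared dmsize[] table below means this
 * function is not truly re-entrant despite its _r name */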
if(dysize(d1) == 366)
dmsize[1] = 29;
for(d1 = 0; d0 >= dmsize[d1]; d1++)
d0 -= dmsize[d1];
dmsize[1] = 28;
result->tm_mday = d0 + 1;
result->tm_mon = d1;
result->tm_isdst = 0;
return result;
}
struct tm*
gmtime(const time_t *timp)
{
static struct tm xtime;
return gmtime_r(timp, &xtime);
}
struct tm*
localtime_r(const time_t *timp, struct tm *result)
{
struct tm *ct;
time_t t, tim;
long *p;
int dlflag;
tim = *timp;
if(timezone.stname[0] == 0)
readtimezone();
t = tim + timezone.stdiff;
dlflag = 0;
for(p = timezone.dlpairs; *p; p += 2)
if(t >= p[0])
if(t < p[1]) {
t = tim + timezone.dldiff;
dlflag++;
break;
}
ct = gmtime_r(&t, result);
ct->tm_isdst = dlflag;
return ct;
}
struct tm*
localtime(const time_t *timp)
{
static struct tm xtime;
return localtime_r(timp, &xtime);
}
char*
asctime_r(const struct tm *t, char *buf)
{
char *ncp;
strcpy(buf, "Thu Jan 01 00:00:00 1970\n");
ncp = &"SunMonTueWedThuFriSat"[t->tm_wday*3];
buf[0] = *ncp++;
buf[1] = *ncp++;
buf[2] = *ncp;
ncp = &"JanFebMarAprMayJunJulAugSepOctNovDec"[t->tm_mon*3];
buf[4] = *ncp++;
buf[5] = *ncp++;
buf[6] = *ncp;
ct_numb(buf+8, t->tm_mday);
ct_numb(buf+11, t->tm_hour+100);
ct_numb(buf+14, t->tm_min+100);
ct_numb(buf+17, t->tm_sec+100);
if(t->tm_year >= 100) {
buf[20] = '2';
buf[21] = '0';
}
ct_numb(buf+22, t->tm_year+100);
return buf;
}
char*
asctime(const struct tm *t)
{
static char cbuf[30];
return asctime_r(t, cbuf);
}
static int
dysize(int y)
{
if((y%4) == 0)
return 366;
return 365;
}
static
void
ct_numb(char *cp, int n)
{
cp[0] = ' ';
if(n >= 10)
cp[0] = (n/10)%10 + '0';
cp[1] = n%10 + '0';
}
static
void
readtimezone(void)
{
char buf[TZSIZE*11+30], *p;
int i;
memset(buf, 0, sizeof(buf));
i = open("/env/timezone", 0);
if(i < 0)
goto error;
if(read(i, buf, sizeof(buf)) >= sizeof(buf)) {
close(i);
goto error;
}
close(i);
p = buf;
if(rd_name(&p, timezone.stname))
goto error;
if(rd_long(&p, &timezone.stdiff))
goto error;
if(rd_name(&p, timezone.dlname))
goto error;
if(rd_long(&p, &timezone.dldiff))
goto error;
for(i=0; i<TZSIZE; i++) {
if(rd_long(&p, &timezone.dlpairs[i]))
goto error;
if(timezone.dlpairs[i] == 0)
return;
}
error:
timezone.stdiff = 0;
strcpy(timezone.stname, "GMT");
timezone.dlpairs[0] = 0;
}
static int
rd_name(char **f, char *p)
{
int c, i;
for(;;) {
c = *(*f)++;
if(c != ' ' && c != '\n')
break;
}
for(i=0; i<3; i++) {
if(c == ' ' || c == '\n')
return 1;
*p++ = c;
c = *(*f)++;
}
if(c != ' ' && c != '\n')
return 1;
*p = 0;
return 0;
}
static int
rd_long(char **f, long *p)
{
int c, s;
long l;
s = 0;
for(;;) {
c = *(*f)++;
if(c == '-') {
s++;
continue;
}
if(c != ' ' && c != '\n')
break;
}
if(c == 0) {
*p = 0;
return 0;
}
l = 0;
for(;;) {
if(c == ' ' || c == '\n')
break;
if(c < '0' || c > '9')
return 1;
l = l*10 + c-'0';
c = *(*f)++;
}
if(s)
l = -l;
*p = l;
return 0;
}
| gpl-2.0 |
sdwuyawen/linux2.6.21_helper2416 | arch/cris/arch-v32/drivers/gpio.c | 19756 | /* $Id: gpio.c,v 1.1.1.1 2007/06/12 07:27:13 eyryu Exp $
*
* ETRAX CRISv32 general port I/O device
*
* Copyright (c) 1999, 2000, 2001, 2002, 2003 Axis Communications AB
*
* Authors: Bjorn Wesen (initial version)
* Ola Knutsson (LED handling)
* Johan Adolfsson (read/set directions, write, port G,
* port to ETRAX FS).
*
* $Log: gpio.c,v $
* Revision 1.1.1.1 2007/06/12 07:27:13 eyryu
* s3c-linux-2.6.21.5
*
* Revision 1.16 2005/06/19 17:06:49 starvik
* Merge of Linux 2.6.12.
*
* Revision 1.15 2005/05/25 08:22:20 starvik
* Changed GPIO port order to fit packages/devices/axis-2.4.
*
* Revision 1.14 2005/04/24 18:35:08 starvik
* Updated with final register headers.
*
* Revision 1.13 2005/03/15 15:43:00 starvik
* dev_id needs to be supplied for shared IRQs.
*
* Revision 1.12 2005/03/10 17:12:00 starvik
* Protect alarm list with spinlock.
*
* Revision 1.11 2005/01/05 06:08:59 starvik
* No need to do local_irq_disable after local_irq_save.
*
* Revision 1.10 2004/11/19 08:38:31 starvik
* Removed old crap.
*
* Revision 1.9 2004/05/14 07:58:02 starvik
* Merge of changes from 2.4
*
* Revision 1.8 2003/09/11 07:29:50 starvik
* Merge of Linux 2.6.0-test5
*
* Revision 1.7 2003/07/10 13:25:46 starvik
* Compiles for 2.5.74
* Lindented ethernet.c
*
* Revision 1.6 2003/07/04 08:27:46 starvik
* Merge of Linux 2.5.74
*
* Revision 1.5 2003/06/10 08:26:37 johana
* Etrax -> ETRAX CRISv32
*
* Revision 1.4 2003/06/05 14:22:48 johana
* Initialise some_alarms.
*
* Revision 1.3 2003/06/05 10:15:46 johana
* New INTR_VECT macros.
* Enable interrupts in global config.
*
* Revision 1.2 2003/06/03 15:52:50 johana
* Initial CRIS v32 version.
*
* Revision 1.1 2003/06/03 08:53:15 johana
* Copy of os/lx25/arch/cris/arch-v10/drivers/gpio.c version 1.7.
*
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/etraxgpio.h>
#include <asm/arch/hwregs/reg_map.h>
#include <asm/arch/hwregs/reg_rdwr.h>
#include <asm/arch/hwregs/gio_defs.h>
#include <asm/arch/hwregs/intr_vect_defs.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/irq.h>
/* The following gio ports on ETRAX FS are available:
* pa 8 bits, supports interrupts off, hi, low, set, posedge, negedge anyedge
* pb 18 bits
* pc 18 bits
* pd 18 bits
* pe 18 bits
* each port has a rw_px_dout, r_px_din and rw_px_oe register.
*/
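/* Sketch of the shadow-register access pattern used throughout this
 * driver (the minor and bit number are examples only):
 *
 *	local_irq_save(flags);
 *	shadow = *dir_oe[minor];		// current output-enable bits
 *	*dir_oe[minor] = shadow | (1 << 3);	// make bit 3 an output
 *	shadow = *data_out[minor];
 *	*data_out[minor] = shadow | (1 << 3);	// drive bit 3 high
 *	local_irq_restore(flags);
 */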
#define GPIO_MAJOR 120 /* experimental MAJOR number */
#define D(x)
#if 0
static int dp_cnt;
#define DP(x) do { dp_cnt++; if (dp_cnt % 1000 == 0) x; }while(0)
#else
#define DP(x)
#endif
static char gpio_name[] = "etrax gpio";
#if 0
static wait_queue_head_t *gpio_wq;
#endif
static int gpio_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
static ssize_t gpio_write(struct file * file, const char * buf, size_t count,
loff_t *off);
static int gpio_open(struct inode *inode, struct file *filp);
static int gpio_release(struct inode *inode, struct file *filp);
static unsigned int gpio_poll(struct file *filp, struct poll_table_struct *wait);
/* private data per open() of this driver */
struct gpio_private {
struct gpio_private *next;
/* The IO_CFG_WRITE_MODE_VALUE only support 8 bits: */
unsigned char clk_mask;
unsigned char data_mask;
unsigned char write_msb;
unsigned char pad1;
/* These fields are generic */
unsigned long highalarm, lowalarm;
wait_queue_head_t alarm_wq;
int minor;
};
/* linked list of alarms to check for */
static struct gpio_private *alarmlist = 0;
static int gpio_some_alarms = 0; /* Set if someone uses alarm */
static unsigned long gpio_pa_high_alarms = 0;
static unsigned long gpio_pa_low_alarms = 0;
static DEFINE_SPINLOCK(alarm_lock);
#define NUM_PORTS (GPIO_MINOR_LAST+1)
#define GIO_REG_RD_ADDR(reg) (volatile unsigned long*) (regi_gio + REG_RD_ADDR_gio_##reg )
#define GIO_REG_WR_ADDR(reg) (volatile unsigned long*) (regi_gio + REG_RD_ADDR_gio_##reg )
unsigned long led_dummy;
static volatile unsigned long *data_out[NUM_PORTS] = {
GIO_REG_WR_ADDR(rw_pa_dout),
GIO_REG_WR_ADDR(rw_pb_dout),
&led_dummy,
GIO_REG_WR_ADDR(rw_pc_dout),
GIO_REG_WR_ADDR(rw_pd_dout),
GIO_REG_WR_ADDR(rw_pe_dout),
};
static volatile unsigned long *data_in[NUM_PORTS] = {
GIO_REG_RD_ADDR(r_pa_din),
GIO_REG_RD_ADDR(r_pb_din),
&led_dummy,
GIO_REG_RD_ADDR(r_pc_din),
GIO_REG_RD_ADDR(r_pd_din),
GIO_REG_RD_ADDR(r_pe_din),
};
static unsigned long changeable_dir[NUM_PORTS] = {
CONFIG_ETRAX_PA_CHANGEABLE_DIR,
CONFIG_ETRAX_PB_CHANGEABLE_DIR,
0,
CONFIG_ETRAX_PC_CHANGEABLE_DIR,
CONFIG_ETRAX_PD_CHANGEABLE_DIR,
CONFIG_ETRAX_PE_CHANGEABLE_DIR,
};
static unsigned long changeable_bits[NUM_PORTS] = {
CONFIG_ETRAX_PA_CHANGEABLE_BITS,
CONFIG_ETRAX_PB_CHANGEABLE_BITS,
0,
CONFIG_ETRAX_PC_CHANGEABLE_BITS,
CONFIG_ETRAX_PD_CHANGEABLE_BITS,
CONFIG_ETRAX_PE_CHANGEABLE_BITS,
};
static volatile unsigned long *dir_oe[NUM_PORTS] = {
GIO_REG_WR_ADDR(rw_pa_oe),
GIO_REG_WR_ADDR(rw_pb_oe),
&led_dummy,
GIO_REG_WR_ADDR(rw_pc_oe),
GIO_REG_WR_ADDR(rw_pd_oe),
GIO_REG_WR_ADDR(rw_pe_oe),
};
static unsigned int
gpio_poll(struct file *file,
poll_table *wait)
{
unsigned int mask = 0;
struct gpio_private *priv = (struct gpio_private *)file->private_data;
unsigned long data;
poll_wait(file, &priv->alarm_wq, wait);
if (priv->minor == GPIO_MINOR_A) {
reg_gio_rw_intr_cfg intr_cfg;
unsigned long tmp;
unsigned long flags;
local_irq_save(flags);
data = REG_TYPE_CONV(unsigned long, reg_gio_r_pa_din, REG_RD(gio, regi_gio, r_pa_din));
/* PA has support for interrupt
* lets activate high for those low and with highalarm set
*/
intr_cfg = REG_RD(gio, regi_gio, rw_intr_cfg);
tmp = ~data & priv->highalarm & 0xFF;
if (tmp & (1 << 0)) {
intr_cfg.pa0 = regk_gio_hi;
}
if (tmp & (1 << 1)) {
intr_cfg.pa1 = regk_gio_hi;
}
if (tmp & (1 << 2)) {
intr_cfg.pa2 = regk_gio_hi;
}
if (tmp & (1 << 3)) {
intr_cfg.pa3 = regk_gio_hi;
}
if (tmp & (1 << 4)) {
intr_cfg.pa4 = regk_gio_hi;
}
if (tmp & (1 << 5)) {
intr_cfg.pa5 = regk_gio_hi;
}
if (tmp & (1 << 6)) {
intr_cfg.pa6 = regk_gio_hi;
}
if (tmp & (1 << 7)) {
intr_cfg.pa7 = regk_gio_hi;
}
/*
* lets activate low for those high and with lowalarm set
*/
tmp = data & priv->lowalarm & 0xFF;
if (tmp & (1 << 0)) {
intr_cfg.pa0 = regk_gio_lo;
}
if (tmp & (1 << 1)) {
intr_cfg.pa1 = regk_gio_lo;
}
if (tmp & (1 << 2)) {
intr_cfg.pa2 = regk_gio_lo;
}
if (tmp & (1 << 3)) {
intr_cfg.pa3 = regk_gio_lo;
}
if (tmp & (1 << 4)) {
intr_cfg.pa4 = regk_gio_lo;
}
if (tmp & (1 << 5)) {
intr_cfg.pa5 = regk_gio_lo;
}
if (tmp & (1 << 6)) {
intr_cfg.pa6 = regk_gio_lo;
}
if (tmp & (1 << 7)) {
intr_cfg.pa7 = regk_gio_lo;
}
REG_WR(gio, regi_gio, rw_intr_cfg, intr_cfg);
local_irq_restore(flags);
} else if (priv->minor <= GPIO_MINOR_E)
data = *data_in[priv->minor];
else
return 0;
if ((data & priv->highalarm) ||
(~data & priv->lowalarm)) {
mask = POLLIN|POLLRDNORM;
}
DP(printk("gpio_poll ready: mask 0x%08X\n", mask));
return mask;
}
int etrax_gpio_wake_up_check(void)
{
struct gpio_private *priv = alarmlist;
unsigned long data = 0;
int ret = 0;
while (priv) {
data = *data_in[priv->minor];
if ((data & priv->highalarm) ||
(~data & priv->lowalarm)) {
DP(printk("etrax_gpio_wake_up_check %i\n",priv->minor));
wake_up_interruptible(&priv->alarm_wq);
ret = 1;
}
priv = priv->next;
}
return ret;
}
static irqreturn_t
gpio_poll_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
if (gpio_some_alarms) {
return IRQ_RETVAL(etrax_gpio_wake_up_check());
}
return IRQ_NONE;
}
static irqreturn_t
gpio_pa_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
reg_gio_rw_intr_mask intr_mask;
reg_gio_r_masked_intr masked_intr;
reg_gio_rw_ack_intr ack_intr;
unsigned long tmp;
unsigned long tmp2;
/* Find what PA interrupts are active */
masked_intr = REG_RD(gio, regi_gio, r_masked_intr);
tmp = REG_TYPE_CONV(unsigned long, reg_gio_r_masked_intr, masked_intr);
/* Find those that we have enabled */
spin_lock(&alarm_lock);
tmp &= (gpio_pa_high_alarms | gpio_pa_low_alarms);
spin_unlock(&alarm_lock);
/* Ack them */
ack_intr = REG_TYPE_CONV(reg_gio_rw_ack_intr, unsigned long, tmp);
REG_WR(gio, regi_gio, rw_ack_intr, ack_intr);
/* Disable those interrupts.. */
intr_mask = REG_RD(gio, regi_gio, rw_intr_mask);
tmp2 = REG_TYPE_CONV(unsigned long, reg_gio_rw_intr_mask, intr_mask);
tmp2 &= ~tmp;
intr_mask = REG_TYPE_CONV(reg_gio_rw_intr_mask, unsigned long, tmp2);
REG_WR(gio, regi_gio, rw_intr_mask, intr_mask);
if (gpio_some_alarms) {
return IRQ_RETVAL(etrax_gpio_wake_up_check());
}
return IRQ_NONE;
}
static ssize_t gpio_write(struct file * file, const char * buf, size_t count,
loff_t *off)
{
struct gpio_private *priv = (struct gpio_private *)file->private_data;
unsigned char data, clk_mask, data_mask, write_msb;
unsigned long flags;
unsigned long shadow;
volatile unsigned long *port;
ssize_t retval = count;
/* Only bits 0-7 may be used for write operations but allow all
devices except leds... */
if (priv->minor == GPIO_MINOR_LEDS) {
return -EFAULT;
}
if (!access_ok(VERIFY_READ, buf, count)) {
return -EFAULT;
}
clk_mask = priv->clk_mask;
data_mask = priv->data_mask;
/* It must have been configured using the IO_CFG_WRITE_MODE */
/* Perhaps a better error code? */
if (clk_mask == 0 || data_mask == 0) {
return -EPERM;
}
write_msb = priv->write_msb;
D(printk("gpio_write: %lu to data 0x%02X clk 0x%02X msb: %i\n",count, data_mask, clk_mask, write_msb));
port = data_out[priv->minor];
while (count--) {
int i;
data = *buf++;
if (priv->write_msb) {
for (i = 7; i >= 0;i--) {
local_irq_save(flags);
shadow = *port;
*port = shadow &= ~clk_mask;
if (data & 1<<i)
*port = shadow |= data_mask;
else
*port = shadow &= ~data_mask;
/* For FPGA: min 5.0ns (DCC) before CCLK high */
*port = shadow |= clk_mask;
local_irq_restore(flags);
}
} else {
for (i = 0; i <= 7;i++) {
local_irq_save(flags);
shadow = *port;
*port = shadow &= ~clk_mask;
if (data & 1<<i)
*port = shadow |= data_mask;
else
*port = shadow &= ~data_mask;
/* For FPGA: min 5.0ns (DCC) before CCLK high */
*port = shadow |= clk_mask;
local_irq_restore(flags);
}
}
}
return retval;
}
static int
gpio_open(struct inode *inode, struct file *filp)
{
struct gpio_private *priv;
int p = iminor(inode);
if (p > GPIO_MINOR_LAST)
return -EINVAL;
priv = kmalloc(sizeof(struct gpio_private),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->minor = p;
/* initialize the io/alarm struct and link it into our alarmlist */
priv->next = alarmlist;
alarmlist = priv;
priv->clk_mask = 0;
priv->data_mask = 0;
priv->highalarm = 0;
priv->lowalarm = 0;
init_waitqueue_head(&priv->alarm_wq);
filp->private_data = (void *)priv;
return 0;
}
static int
gpio_release(struct inode *inode, struct file *filp)
{
struct gpio_private *p = alarmlist;
struct gpio_private *todel = (struct gpio_private *)filp->private_data;
/* local copies while updating them: */
unsigned long a_high, a_low;
unsigned long some_alarms;
/* unlink from alarmlist and free the private structure */
if (p == todel) {
alarmlist = todel->next;
} else {
while (p->next != todel)
p = p->next;
p->next = todel->next;
}
kfree(todel);
/* Check if there are still any alarms set */
p = alarmlist;
some_alarms = 0;
a_high = 0;
a_low = 0;
while (p) {
if (p->minor == GPIO_MINOR_A) {
a_high |= p->highalarm;
a_low |= p->lowalarm;
}
if (p->highalarm | p->lowalarm) {
some_alarms = 1;
}
p = p->next;
}
spin_lock(&alarm_lock);
gpio_some_alarms = some_alarms;
gpio_pa_high_alarms = a_high;
gpio_pa_low_alarms = a_low;
spin_unlock(&alarm_lock);
return 0;
}
/* Main device API. ioctls to read/set/clear bits, as well as to
* set alarms to wait for using a subsequent select().
*/
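/* Sketch of how a user-space client might drive this API (the device
 * node name is an assumption; the ioctl numbers come from
 * <asm/etraxgpio.h>):
 *
 *	int fd = open("/dev/gpiob", O_RDWR);
 *	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_SETBITS), 1 << 2);
 *	ioctl(fd, _IO(ETRAXGPIO_IOCTYPE, IO_HIGHALARM), 1 << 4);
 *	// ...then select()/poll() on fd and wait for POLLIN
 */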
inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg)
{
/* Set direction 0=unchanged 1=input,
* return mask with 1=input
*/
unsigned long flags;
unsigned long dir_shadow;
local_irq_save(flags);
dir_shadow = *dir_oe[priv->minor];
dir_shadow &= ~(arg & changeable_dir[priv->minor]);
*dir_oe[priv->minor] = dir_shadow;
local_irq_restore(flags);
if (priv->minor == GPIO_MINOR_A)
dir_shadow ^= 0xFF; /* Only 8 bits */
else
dir_shadow ^= 0x3FFFF; /* Only 18 bits */
return dir_shadow;
} /* setget_input */
inline unsigned long setget_output(struct gpio_private *priv, unsigned long arg)
{
unsigned long flags;
unsigned long dir_shadow;
local_irq_save(flags);
dir_shadow = *dir_oe[priv->minor];
dir_shadow |= (arg & changeable_dir[priv->minor]);
*dir_oe[priv->minor] = dir_shadow;
local_irq_restore(flags);
return dir_shadow;
} /* setget_output */
static int
gpio_leds_ioctl(unsigned int cmd, unsigned long arg);
static int
gpio_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
unsigned long flags;
unsigned long val;
unsigned long shadow;
struct gpio_private *priv = (struct gpio_private *)file->private_data;
if (_IOC_TYPE(cmd) != ETRAXGPIO_IOCTYPE) {
return -EINVAL;
}
switch (_IOC_NR(cmd)) {
case IO_READBITS: /* Use IO_READ_INBITS and IO_READ_OUTBITS instead */
// read the port
return *data_in[priv->minor];
break;
case IO_SETBITS:
local_irq_save(flags);
if (arg & 0x04)
printk("GPIO SET 2\n");
// set changeable bits with a 1 in arg
shadow = *data_out[priv->minor];
shadow |= (arg & changeable_bits[priv->minor]);
*data_out[priv->minor] = shadow;
local_irq_restore(flags);
break;
case IO_CLRBITS:
local_irq_save(flags);
if (arg & 0x04)
printk("GPIO CLR 2\n");
// clear changeable bits with a 1 in arg
shadow = *data_out[priv->minor];
shadow &= ~(arg & changeable_bits[priv->minor]);
*data_out[priv->minor] = shadow;
local_irq_restore(flags);
break;
case IO_HIGHALARM:
// set alarm when bits with 1 in arg go high
priv->highalarm |= arg;
spin_lock(&alarm_lock);
gpio_some_alarms = 1;
if (priv->minor == GPIO_MINOR_A) {
gpio_pa_high_alarms |= arg;
}
spin_unlock(&alarm_lock);
break;
case IO_LOWALARM:
// set alarm when bits with 1 in arg go low
priv->lowalarm |= arg;
spin_lock(&alarm_lock);
gpio_some_alarms = 1;
if (priv->minor == GPIO_MINOR_A) {
gpio_pa_low_alarms |= arg;
}
spin_unlock(&alarm_lock);
break;
case IO_CLRALARM:
// clear alarm for bits with 1 in arg
priv->highalarm &= ~arg;
priv->lowalarm &= ~arg;
spin_lock(&alarm_lock);
if (priv->minor == GPIO_MINOR_A) {
if (gpio_pa_high_alarms & arg ||
gpio_pa_low_alarms & arg) {
/* Must update the gpio_pa_*alarms masks */
}
}
spin_unlock(&alarm_lock);
break;
case IO_READDIR: /* Use IO_SETGET_INPUT/OUTPUT instead! */
/* Read direction 0=input 1=output */
return *dir_oe[priv->minor];
case IO_SETINPUT: /* Use IO_SETGET_INPUT instead! */
/* Set direction 0=unchanged 1=input,
* return mask with 1=input
*/
return setget_input(priv, arg);
break;
case IO_SETOUTPUT: /* Use IO_SETGET_OUTPUT instead! */
/* Set direction 0=unchanged 1=output,
* return mask with 1=output
*/
return setget_output(priv, arg);
case IO_CFG_WRITE_MODE:
{
unsigned long dir_shadow;
dir_shadow = *dir_oe[priv->minor];
priv->clk_mask = arg & 0xFF;
priv->data_mask = (arg >> 8) & 0xFF;
priv->write_msb = (arg >> 16) & 0x01;
/* Check if we're allowed to change the bits and
* the direction is correct
*/
if (!((priv->clk_mask & changeable_bits[priv->minor]) &&
(priv->data_mask & changeable_bits[priv->minor]) &&
(priv->clk_mask & dir_shadow) &&
(priv->data_mask & dir_shadow)))
{
priv->clk_mask = 0;
priv->data_mask = 0;
return -EPERM;
}
break;
}
case IO_READ_INBITS:
/* *arg is result of reading the input pins */
val = *data_in[priv->minor];
if (copy_to_user((unsigned long*)arg, &val, sizeof(val)))
return -EFAULT;
return 0;
break;
case IO_READ_OUTBITS:
/* *arg is result of reading the output shadow */
val = *data_out[priv->minor];
if (copy_to_user((unsigned long*)arg, &val, sizeof(val)))
return -EFAULT;
break;
case IO_SETGET_INPUT:
/* bits set in *arg is set to input,
* *arg updated with current input pins.
*/
if (copy_from_user(&val, (unsigned long*)arg, sizeof(val)))
return -EFAULT;
val = setget_input(priv, val);
if (copy_to_user((unsigned long*)arg, &val, sizeof(val)))
return -EFAULT;
break;
case IO_SETGET_OUTPUT:
/* bits set in *arg is set to output,
* *arg updated with current output pins.
*/
if (copy_from_user(&val, (unsigned long*)arg, sizeof(val)))
return -EFAULT;
val = setget_output(priv, val);
if (copy_to_user((unsigned long*)arg, &val, sizeof(val)))
return -EFAULT;
break;
default:
if (priv->minor == GPIO_MINOR_LEDS)
return gpio_leds_ioctl(cmd, arg);
else
return -EINVAL;
} /* switch */
return 0;
}
static int
gpio_leds_ioctl(unsigned int cmd, unsigned long arg)
{
unsigned char green;
unsigned char red;
switch (_IOC_NR(cmd)) {
case IO_LEDACTIVE_SET:
green = ((unsigned char) arg) & 1;
red = (((unsigned char) arg) >> 1) & 1;
LED_ACTIVE_SET_G(green);
LED_ACTIVE_SET_R(red);
break;
default:
return -EINVAL;
} /* switch */
return 0;
}
const struct file_operations gpio_fops = {
.owner = THIS_MODULE,
.poll = gpio_poll,
.ioctl = gpio_ioctl,
.write = gpio_write,
.open = gpio_open,
.release = gpio_release,
};
/* main driver initialization routine, called from mem.c */
static __init int
gpio_init(void)
{
int res;
reg_intr_vect_rw_mask intr_mask;
/* do the formalities */
res = register_chrdev(GPIO_MAJOR, gpio_name, &gpio_fops);
if (res < 0) {
printk(KERN_ERR "gpio: couldn't get a major number.\n");
return res;
}
/* Clear all leds */
LED_NETWORK_SET(0);
LED_ACTIVE_SET(0);
LED_DISK_READ(0);
LED_DISK_WRITE(0);
printk("ETRAX FS GPIO driver v2.5, (c) 2003-2005 Axis Communications AB\n");
/* We call etrax_gpio_wake_up_check() from timer interrupt and
* from cpu_idle() in kernel/process.c
* The check in cpu_idle() reduces latency from ~15 ms to ~6 ms
* in some tests.
*/
if (request_irq(TIMER_INTR_VECT, gpio_poll_timer_interrupt,
IRQF_SHARED | IRQF_DISABLED,"gpio poll", &alarmlist)) {
printk("err: timer0 irq for gpio\n");
}
if (request_irq(GEN_IO_INTR_VECT, gpio_pa_interrupt,
IRQF_SHARED | IRQF_DISABLED,"gpio PA", &alarmlist)) {
printk("err: PA irq for gpio\n");
}
/* enable the gio and timer irq in global config */
intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
intr_mask.timer = 1;
intr_mask.gen_io = 1;
REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
return res;
}
/* this makes sure that gpio_init is called during kernel boot */
module_init(gpio_init);
| gpl-2.0 |
Evangileon/asterisk | apps/app_minivm.c | 115669 | /*
* Asterisk -- An open source telephony toolkit.
*
* Copyright (C) 1999 - 2005, Digium, Inc.
* and Edvina AB, Sollentuna, Sweden
*
* Mark Spencer <[email protected]> (Comedian Mail)
* and Olle E. Johansson, Edvina.net <[email protected]> (Mini-Voicemail changes)
*
* See http://www.asterisk.org for more information about
* the Asterisk project. Please do not directly contact
* any of the maintainers of this project for assistance;
* the project provides a web site, mailing lists and IRC
* channels for your use.
*
* This program is free software, distributed under the terms of
* the GNU General Public License Version 2. See the LICENSE file
* at the top of the source tree.
*/
/*! \file
*
* \brief MiniVoiceMail - A Minimal Voicemail System for Asterisk
*
* A voicemail system in small building blocks, working together
* based on the Comedian Mail voicemail system (app_voicemail.c).
*
* \par See also
* \arg \ref Config_minivm
* \arg \ref Config_minivm_examples
* \arg \ref App_minivm
*
* \ingroup applications
*
* \page App_minivm Asterisk Mini-voicemail - A minimal voicemail system
*
* This is a minimal voicemail system, building blocks for something
* else. It is built for multi-language systems.
* The current version is focused on accounts where voicemail is
* forwarded to users by e-mail. It's a work in progress, with loose ends
* hanging around from the old voicemail system and its configuration.
*
* Hopefully, we can expand this to be a full replacement of voicemail() and voicemailmain()
* in the future.
*
* Dialplan applications
* - minivmRecord - record voicemail and send as e-mail ( \ref minivm_record_exec() )
* - minivmGreet - Play user's greeting or default greeting ( \ref minivm_greet_exec() )
* - minivmNotify - Notify user of message ( \ref minivm_notify_exec() )
* - minivmDelete - Delete voicemail message ( \ref minivm_delete_exec() )
* - minivmAccMess - Record personal messages (busy | unavailable | temporary)
*
* Dialplan functions
* - MINIVMACCOUNT() - A dialplan function
* - MINIVMCOUNTER() - Manage voicemail-related counters for accounts or domains
*
* CLI Commands
* - minivm list accounts
* - minivm list zones
* - minivm list templates
* - minivm show stats
* - minivm show settings
*
* Some notes
* - General configuration in minivm.conf
* - Users in realtime or configuration file
* - Or configured on the command line with just the e-mail address
*
* Voicemail accounts are identified by userid and domain
*
* Language codes are like setlocale - langcode_countrycode
* \note Don't use plain two-letter language codes like the rest of Asterisk.
* Use language_country codes like setlocale().
*
* Examples:
* - Swedish, Sweden sv_se
* - Swedish, Finland sv_fi
* - English, USA en_us
* - English, GB en_gb
*
* \par See also
* \arg \ref Config_minivm
* \arg \ref Config_minivm_examples
* \arg \ref Minivm_directories
* \arg \ref app_minivm.c
* \arg Comedian mail: app_voicemail.c
* \arg \ref descrip_minivm_accmess
* \arg \ref descrip_minivm_greet
* \arg \ref descrip_minivm_record
* \arg \ref descrip_minivm_delete
* \arg \ref descrip_minivm_notify
*
* \arg \ref App_minivm_todo
*/
/*! \page Minivm_directories Asterisk Mini-Voicemail Directory structure
*
* The directory structure for storing voicemail
* - AST_SPOOL_DIR - usually /var/spool/asterisk (configurable in asterisk.conf)
* - MVM_SPOOL_DIR - should be configurable, usually AST_SPOOL_DIR/voicemail
* - Domain MVM_SPOOL_DIR/domain
* - Username MVM_SPOOL_DIR/domain/username
* - /greet : Recording of account owner's name
* - /busy : Busy message
* - /unavailable : Unavailable message
* - /temp : Temporary message
*
* For account [email protected] the account directory would by default be
* \b /var/spool/asterisk/voicemail/localdomain.xx/anita
*
* To avoid transcoding, these sound files should be converted into several formats
* They are recorded in the format closest to the incoming streams
*
*
* Back: \ref App_minivm
*/
/*! \page Config_minivm_examples Example dialplan for Mini-Voicemail
* \section Example dialplan scripts for Mini-Voicemail
* \verbinclude extensions_minivm.conf.sample
*
* Back: \ref App_minivm
*/
/*! \page App_minivm_todo Asterisk Mini-Voicemail - todo
* - configure accounts from AMI?
* - test, test, test, test
* - fix "vm-theextensionis.gsm" voiceprompt from Allison in various formats
* "The extension you are calling"
* - For trunk, consider using channel storage for information passing between small applications
* - Set default directory for voicemail
* - New app for creating directory for account if it does not exist
* - Re-insert code for IMAP storage at some point
* - Jabber integration for notifications
* - Figure out how to handle video in voicemail
* - Integration with the HTTP server
* - New app for moving messages between mailboxes, optionally marking them as "new"
*
* For Asterisk 1.4/trunk
* - Use string fields for minivm_account
*
* Back: \ref App_minivm
*/
/*** MODULEINFO
<support_level>extended</support_level>
***/
#include "asterisk.h"
ASTERISK_FILE_VERSION(__FILE__, "$Revision: 379608 $")
#include <ctype.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <time.h>
#include <dirent.h>
#include <locale.h>
#include "asterisk/paths.h" /* use various paths */
#include "asterisk/lock.h"
#include "asterisk/file.h"
#include "asterisk/channel.h"
#include "asterisk/pbx.h"
#include "asterisk/config.h"
#include "asterisk/say.h"
#include "asterisk/module.h"
#include "asterisk/app.h"
#include "asterisk/manager.h"
#include "asterisk/dsp.h"
#include "asterisk/localtime.h"
#include "asterisk/cli.h"
#include "asterisk/utils.h"
#include "asterisk/linkedlists.h"
#include "asterisk/callerid.h"
#include "asterisk/event.h"
/*** DOCUMENTATION
<application name="MinivmRecord" language="en_US">
<synopsis>
Receive Mini-Voicemail and forward via e-mail.
</synopsis>
<syntax>
<parameter name="mailbox" required="true" argsep="@">
<argument name="username" required="true">
<para>Voicemail username</para>
</argument>
<argument name="domain" required="true">
<para>Voicemail domain</para>
</argument>
</parameter>
<parameter name="options" required="false">
<optionlist>
<option name="0">
<para>Jump to the <literal>o</literal> extension in the current dialplan context.</para>
</option>
<option name="*">
<para>Jump to the <literal>a</literal> extension in the current dialplan context.</para>
</option>
<option name="g">
<argument name="gain">
<para>Amount of gain to use</para>
</argument>
<para>Use the specified amount of gain when recording the voicemail message.
The units are whole-number decibels (dB).</para>
</option>
</optionlist>
</parameter>
</syntax>
<description>
<para>This application is part of the Mini-Voicemail system, configured in <filename>minivm.conf</filename></para>
<para>MiniVM records audio file in configured format and forwards message to e-mail and pager.</para>
<para>If there's no user account for that address, a temporary account will be used with default options.</para>
<para>The recorded file name and path will be stored in <variable>MVM_FILENAME</variable> and the duration
of the message will be stored in <variable>MVM_DURATION</variable></para>
<note><para>If the caller hangs up after the recording, the only way to send the message and clean up is to
execute in the <literal>h</literal> extension. The application will exit if any of the following DTMF digits
are received and the requested extension exists in the current context.</para></note>
<variablelist>
<variable name="MVM_RECORD_STATUS">
<para>This is the status of the record operation</para>
<value name="SUCCESS" />
<value name="USEREXIT" />
<value name="FAILED" />
</variable>
</variablelist>
</description>
</application>
<application name="MinivmGreet" language="en_US">
<synopsis>
Play Mini-Voicemail prompts.
</synopsis>
<syntax>
<parameter name="mailbox" required="true" argsep="@">
<argument name="username" required="true">
<para>Voicemail username</para>
</argument>
<argument name="domain" required="true">
<para>Voicemail domain</para>
</argument>
</parameter>
<parameter name="options" required="false">
<optionlist>
<option name="b">
<para>Play the <literal>busy</literal> greeting to the calling party.</para>
</option>
<option name="s">
<para>Skip the playback of instructions for leaving a message to the calling party.</para>
</option>
<option name="u">
<para>Play the <literal>unavailable</literal> greeting.</para>
</option>
</optionlist>
</parameter>
</syntax>
<description>
<para>This application is part of the Mini-Voicemail system, configured in minivm.conf.</para>
<para>MinivmGreet() plays default prompts or user specific prompts for an account.</para>
<para>Busy and unavailable messages can be chosen, but will be overridden if a temporary
message exists for the account.</para>
<variablelist>
<variable name="MVM_GREET_STATUS">
<para>This is the status of the greeting playback.</para>
<value name="SUCCESS" />
<value name="USEREXIT" />
<value name="FAILED" />
</variable>
</variablelist>
</description>
</application>
<application name="MinivmNotify" language="en_US">
<synopsis>
Notify voicemail owner about new messages.
</synopsis>
<syntax>
<parameter name="mailbox" required="true" argsep="@">
<argument name="username" required="true">
<para>Voicemail username</para>
</argument>
<argument name="domain" required="true">
<para>Voicemail domain</para>
</argument>
</parameter>
<parameter name="options" required="false">
<optionlist>
<option name="template">
<para>E-mail template to use for voicemail notification</para>
</option>
</optionlist>
</parameter>
</syntax>
<description>
<para>This application is part of the Mini-Voicemail system, configured in minivm.conf.</para>
<para>MinivmNotify forwards messages about new voicemail to e-mail and pager. If there's no user
account for that address, a temporary account will be used with default options (set in
<filename>minivm.conf</filename>).</para>
<para>If the channel variable <variable>MVM_COUNTER</variable> is set, this will be used in the message
file name and available in the template for the message.</para>
<para>If no template is given, the default email template will be used to send email and default pager
template to send a paging message (if the user account is configured with a paging address).</para>
<variablelist>
<variable name="MVM_NOTIFY_STATUS">
<para>This is the status of the notification attempt</para>
<value name="SUCCESS" />
<value name="FAILED" />
</variable>
</variablelist>
</description>
</application>
<application name="MinivmDelete" language="en_US">
<synopsis>
Delete Mini-Voicemail voicemail messages.
</synopsis>
<syntax>
<parameter name="filename" required="true">
<para>File to delete</para>
</parameter>
</syntax>
<description>
<para>This application is part of the Mini-Voicemail system, configured in <filename>minivm.conf</filename>.</para>
<para>It deletes voicemail file set in MVM_FILENAME or given filename.</para>
<variablelist>
<variable name="MVM_DELETE_STATUS">
<para>This is the status of the delete operation.</para>
<value name="SUCCESS" />
<value name="FAILED" />
</variable>
</variablelist>
</description>
</application>
<application name="MinivmAccMess" language="en_US">
<synopsis>
Record account specific messages.
</synopsis>
<syntax>
<parameter name="mailbox" required="true" argsep="@">
<argument name="username" required="true">
<para>Voicemail username</para>
</argument>
<argument name="domain" required="true">
<para>Voicemail domain</para>
</argument>
</parameter>
<parameter name="options" required="false">
<optionlist>
<option name="u">
<para>Record the <literal>unavailable</literal> greeting.</para>
</option>
<option name="b">
<para>Record the <literal>busy</literal> greeting.</para>
</option>
<option name="t">
<para>Record the temporary greeting.</para>
</option>
<option name="n">
<para>Account name.</para>
</option>
</optionlist>
</parameter>
</syntax>
<description>
<para>This application is part of the Mini-Voicemail system, configured in <filename>minivm.conf</filename>.</para>
<para>Use this application to record account specific audio/video messages for busy, unavailable
and temporary messages.</para>
<para>Account specific directories will be created if they do not exist.</para>
<variablelist>
<variable name="MVM_ACCMESS_STATUS">
<para>This is the result of the attempt to record the specified greeting.</para>
<para><literal>FAILED</literal> is set if the file can't be created.</para>
<value name="SUCCESS" />
<value name="FAILED" />
</variable>
</variablelist>
</description>
</application>
<application name="MinivmMWI" language="en_US">
<synopsis>
Send Message Waiting Notification to subscriber(s) of mailbox.
</synopsis>
<syntax>
<parameter name="mailbox" required="true" argsep="@">
<argument name="username" required="true">
<para>Voicemail username</para>
</argument>
<argument name="domain" required="true">
<para>Voicemail domain</para>
</argument>
</parameter>
<parameter name="urgent" required="true">
<para>Number of urgent messages in mailbox.</para>
</parameter>
<parameter name="new" required="true">
<para>Number of new messages in mailbox.</para>
</parameter>
<parameter name="old" required="true">
<para>Number of old messages in mailbox.</para>
</parameter>
</syntax>
<description>
<para>This application is part of the Mini-Voicemail system, configured in <filename>minivm.conf</filename>.</para>
<para>MinivmMWI is used to send message waiting indication to any devices whose channels have
subscribed to the mailbox passed in the first parameter.</para>
</description>
</application>
<function name="MINIVMCOUNTER" language="en_US">
<synopsis>
Reads or sets counters for MiniVoicemail message.
</synopsis>
<syntax argsep=":">
<parameter name="account" required="true">
<para>If account is given and it exists, the counter is specific for the account.</para>
<para>If account is a domain and the domain directory exists, counters are specific for a domain.</para>
</parameter>
<parameter name="name" required="true">
<para>The name of the counter is a string, up to 10 characters.</para>
</parameter>
<parameter name="operand">
<para>The counters never go below zero. Valid operands for changing the value of a counter when assigning a value are:</para>
<enumlist>
<enum name="i"><para>Increment by value.</para></enum>
<enum name="d"><para>Decrement by value.</para></enum>
<enum name="s"><para>Set to value.</para></enum>
</enumlist>
</parameter>
</syntax>
<description>
<para>The operation is atomic and the counter is locked while changing the value. The counters are stored as text files in the minivm account directories. It might be better to use realtime functions if you are using a database to operate your Asterisk.</para>
</description>
<see-also>
<ref type="application">MinivmRecord</ref>
<ref type="application">MinivmGreet</ref>
<ref type="application">MinivmNotify</ref>
<ref type="application">MinivmDelete</ref>
<ref type="application">MinivmAccMess</ref>
<ref type="application">MinivmMWI</ref>
<ref type="function">MINIVMACCOUNT</ref>
</see-also>
</function>
<function name="MINIVMACCOUNT" language="en_US">
<synopsis>
Gets MiniVoicemail account information.
</synopsis>
<syntax argsep=":">
<parameter name="account" required="true" />
<parameter name="item" required="true">
<para>Valid items are:</para>
<enumlist>
<enum name="path">
<para>Path to account mailbox (if account exists, otherwise temporary mailbox).</para>
</enum>
<enum name="hasaccount">
<para>1 if a static Minivm account exists, 0 otherwise.</para>
</enum>
<enum name="fullname">
<para>Full name of account owner.</para>
</enum>
<enum name="email">
<para>Email address used for account.</para>
</enum>
<enum name="etemplate">
<para>Email template for account (default template if none is configured).</para>
</enum>
<enum name="ptemplate">
<para>Pager template for account (default template if none is configured).</para>
</enum>
<enum name="accountcode">
<para>Account code for the voicemail account.</para>
</enum>
<enum name="pincode">
<para>Pin code for voicemail account.</para>
</enum>
<enum name="timezone">
<para>Time zone for voicemail account.</para>
</enum>
<enum name="language">
<para>Language for voicemail account.</para>
</enum>
<enum name="<channel variable name>">
<para>Channel variable value (set in configuration for account).</para>
</enum>
</enumlist>
</parameter>
</syntax>
<description>
<para />
</description>
<see-also>
<ref type="application">MinivmRecord</ref>
<ref type="application">MinivmGreet</ref>
<ref type="application">MinivmNotify</ref>
<ref type="application">MinivmDelete</ref>
<ref type="application">MinivmAccMess</ref>
<ref type="application">MinivmMWI</ref>
<ref type="function">MINIVMCOUNTER</ref>
</see-also>
</function>
***/
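/*
 * Illustrative dialplan wiring for these applications (a sketch only;
 * the extension names and domain are examples, see
 * extensions_minivm.conf.sample for the shipped example):
 *
 *	exten => 1234,1,MinivmGreet(${EXTEN}@example.com,u)
 *	exten => 1234,n,MinivmRecord(${EXTEN}@example.com)
 *	exten => h,1,MinivmNotify(${EXTEN}@example.com)
 */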
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
#define MVM_REVIEW (1 << 0) /*!< Review message */
#define MVM_OPERATOR (1 << 1) /*!< Operator exit during voicemail recording */
#define MVM_REALTIME (1 << 2) /*!< This user is a realtime account */
#define MVM_SVMAIL (1 << 3)
#define MVM_ENVELOPE (1 << 4)
#define MVM_PBXSKIP (1 << 9)
#define MVM_ALLOCED (1 << 13)
/*! \brief Default mail command to mail voicemail. Change it with the
mailcmd= command in voicemail.conf */
#define SENDMAIL "/usr/sbin/sendmail -t"
#define SOUND_INTRO "vm-intro"
#define B64_BASEMAXINLINE 256 /*!< Buffer size for Base 64 attachment encoding */
#define B64_BASELINELEN 72 /*!< Line length for Base 64 endoded messages */
#define EOL "\r\n"
#define MAX_DATETIME_FORMAT 512
#define MAX_NUM_CID_CONTEXTS 10
#define ERROR_LOCK_PATH -100
#define VOICEMAIL_DIR_MODE 0700
#define VOICEMAIL_CONFIG "minivm.conf"
#define ASTERISK_USERNAME "asterisk" /*!< Default username for sending mail is asterisk\@localhost */
/*! \brief Message types for notification */
enum mvm_messagetype {
MVM_MESSAGE_EMAIL,
MVM_MESSAGE_PAGE
/* For trunk: MVM_MESSAGE_JABBER, */
};
static char MVM_SPOOL_DIR[PATH_MAX];
/* Module declarations */
static char *app_minivm_record = "MinivmRecord"; /* Leave a message */
static char *app_minivm_greet = "MinivmGreet"; /* Play voicemail prompts */
static char *app_minivm_notify = "MinivmNotify"; /* Notify about voicemail by using one of several methods */
static char *app_minivm_delete = "MinivmDelete"; /* Notify about voicemail by using one of several methods */
static char *app_minivm_accmess = "MinivmAccMess"; /* Record personal voicemail messages */
static char *app_minivm_mwi = "MinivmMWI";
enum minivm_option_flags {
OPT_SILENT = (1 << 0),
OPT_BUSY_GREETING = (1 << 1),
OPT_UNAVAIL_GREETING = (1 << 2),
OPT_TEMP_GREETING = (1 << 3),
OPT_NAME_GREETING = (1 << 4),
OPT_RECORDGAIN = (1 << 5),
};
enum minivm_option_args {
OPT_ARG_RECORDGAIN = 0,
OPT_ARG_ARRAY_SIZE = 1,
};
AST_APP_OPTIONS(minivm_app_options, {
AST_APP_OPTION('s', OPT_SILENT),
AST_APP_OPTION('b', OPT_BUSY_GREETING),
AST_APP_OPTION('u', OPT_UNAVAIL_GREETING),
AST_APP_OPTION_ARG('g', OPT_RECORDGAIN, OPT_ARG_RECORDGAIN),
});
AST_APP_OPTIONS(minivm_accmess_options, {
AST_APP_OPTION('b', OPT_BUSY_GREETING),
AST_APP_OPTION('u', OPT_UNAVAIL_GREETING),
AST_APP_OPTION('t', OPT_TEMP_GREETING),
AST_APP_OPTION('n', OPT_NAME_GREETING),
});
/*!\internal
* \brief Structure for linked list of Mini-Voicemail users: \ref minivm_accounts */
struct minivm_account {
char username[AST_MAX_CONTEXT]; /*!< Mailbox username */
char domain[AST_MAX_CONTEXT]; /*!< Voicemail domain */
char pincode[10]; /*!< Secret pin code, numbers only */
char fullname[120]; /*!< Full name, for directory app */
char email[80]; /*!< E-mail address - override */
char pager[80]; /*!< E-mail address to pager (no attachment) */
char accountcode[AST_MAX_ACCOUNT_CODE]; /*!< Voicemail account account code */
char serveremail[80]; /*!< From: Mail address */
char externnotify[160]; /*!< Configurable notification command */
char language[MAX_LANGUAGE]; /*!< Config: Language setting */
char zonetag[80]; /*!< Time zone */
char uniqueid[20]; /*!< Unique integer identifier */
char exit[80]; /*!< Options for exiting from voicemail() */
char attachfmt[80]; /*!< Format for voicemail audio file attachment */
char etemplate[80]; /*!< Pager template */
char ptemplate[80]; /*!< Voicemail format */
unsigned int flags; /*!< MVM_ flags */
struct ast_variable *chanvars; /*!< Variables for e-mail template */
double volgain; /*!< Volume gain for voicemails sent via e-mail */
AST_LIST_ENTRY(minivm_account) list;
};
/*!\internal
* \brief The list of e-mail accounts */
static AST_LIST_HEAD_STATIC(minivm_accounts, minivm_account);
/*!\internal
* \brief Linked list of e-mail templates in various languages
* These are used as templates for e-mails, pager messages and jabber messages
* \ref message_templates
*/
struct minivm_template {
char name[80]; /*!< Template name */
char *body; /*!< Body of this template */
char fromaddress[100]; /*!< Who's sending the e-mail? */
char serveremail[80]; /*!< From: Mail address */
char subject[100]; /*!< Subject line */
char charset[32]; /*!< Default character set for this template */
char locale[20]; /*!< Locale for setlocale() */
char dateformat[80]; /*!< Date format to use in this attachment */
int attachment; /*!< Attachment of media yes/no - no for pager messages */
AST_LIST_ENTRY(minivm_template) list; /*!< List mechanics */
};
/*! \brief The list of e-mail templates */
static AST_LIST_HEAD_STATIC(message_templates, minivm_template);
/*! \brief Options for leaving voicemail with the voicemail() application */
struct leave_vm_options {
unsigned int flags;
signed char record_gain;
};
/*! \brief Structure for base64 encoding */
struct b64_baseio {
int iocp;
int iolen;
int linelength;
int ateof;
unsigned char iobuf[B64_BASEMAXINLINE];
};
/*! \brief Voicemail time zones */
struct minivm_zone {
char name[80]; /*!< Name of this time zone */
char timezone[80]; /*!< Timezone definition */
char msg_format[BUFSIZ]; /*!< Not used in minivm ...yet */
AST_LIST_ENTRY(minivm_zone) list; /*!< List mechanics */
};
/*! \brief The list of e-mail time zones */
static AST_LIST_HEAD_STATIC(minivm_zones, minivm_zone);
/*! \brief Structure for gathering statistics */
struct minivm_stats {
int voicemailaccounts; /*!< Number of static accounts */
int timezones; /*!< Number of time zones */
int templates; /*!< Number of templates */
struct timeval reset; /*!< Time for last reset */
int receivedmessages; /*!< Number of received messages since reset */
struct timeval lastreceived; /*!< Time for last voicemail sent */
};
/*! \brief Statistics for voicemail */
static struct minivm_stats global_stats;
AST_MUTEX_DEFINE_STATIC(minivmlock); /*!< Lock to protect voicemail system */
AST_MUTEX_DEFINE_STATIC(minivmloglock); /*!< Lock to protect voicemail system log file */
static FILE *minivmlogfile; /*!< The minivm log file */
static int global_vmminmessage; /*!< Minimum duration of messages */
static int global_vmmaxmessage; /*!< Maximum duration of message */
static int global_maxsilence; /*!< Maximum silence during recording */
static int global_maxgreet; /*!< Maximum length of prompts */
static int global_silencethreshold = 128;
static char global_mailcmd[160]; /*!< Configurable mail cmd */
static char global_externnotify[160]; /*!< External notification application */
static char global_logfile[PATH_MAX]; /*!< Global log file for messages */
static char default_vmformat[80];
static struct ast_flags globalflags = {0}; /*!< Global voicemail flags */
static int global_saydurationminfo;
static double global_volgain; /*!< Volume gain for voicmemail via e-mail */
/*!\internal
* \brief Default dateformat, can be overridden in configuration file */
#define DEFAULT_DATEFORMAT "%A, %B %d, %Y at %r"
#define DEFAULT_CHARSET "ISO-8859-1"
/* Forward declarations */
static char *message_template_parse_filebody(const char *filename);
static char *message_template_parse_emailbody(const char *body);
static int create_vmaccount(char *name, struct ast_variable *var, int realtime);
static struct minivm_account *find_user_realtime(const char *domain, const char *username);
static char *handle_minivm_reload(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a);
/*!\internal
* \brief Create message template */
static struct minivm_template *message_template_create(const char *name)
{
struct minivm_template *template;
template = ast_calloc(1, sizeof(*template));
if (!template)
return NULL;
/* Set some defaults for templates */
ast_copy_string(template->name, name, sizeof(template->name));
ast_copy_string(template->dateformat, DEFAULT_DATEFORMAT, sizeof(template->dateformat));
ast_copy_string(template->charset, DEFAULT_CHARSET, sizeof(template->charset));
ast_copy_string(template->subject, "New message in mailbox ${MVM_USERNAME}@${MVM_DOMAIN}", sizeof(template->subject));
template->attachment = TRUE;
return template;
}
/*!\internal
* \brief Release memory allocated by message template */
static void message_template_free(struct minivm_template *template)
{
if (template->body)
ast_free(template->body);
ast_free (template);
}
/*!\internal
* \brief Build message template from configuration */
static int message_template_build(const char *name, struct ast_variable *var)
{
struct minivm_template *template;
int error = 0;
template = message_template_create(name);
if (!template) {
ast_log(LOG_ERROR, "Out of memory, can't allocate message template object %s.\n", name);
return -1;
}
while (var) {
ast_debug(3, "Configuring template option %s = \"%s\" for template %s\n", var->name, var->value, name);
if (!strcasecmp(var->name, "fromaddress")) {
ast_copy_string(template->fromaddress, var->value, sizeof(template->fromaddress));
} else if (!strcasecmp(var->name, "fromemail")) {
ast_copy_string(template->serveremail, var->value, sizeof(template->serveremail));
} else if (!strcasecmp(var->name, "subject")) {
ast_copy_string(template->subject, var->value, sizeof(template->subject));
} else if (!strcasecmp(var->name, "locale")) {
ast_copy_string(template->locale, var->value, sizeof(template->locale));
} else if (!strcasecmp(var->name, "attachmedia")) {
template->attachment = ast_true(var->value);
} else if (!strcasecmp(var->name, "dateformat")) {
ast_copy_string(template->dateformat, var->value, sizeof(template->dateformat));
} else if (!strcasecmp(var->name, "charset")) {
ast_copy_string(template->charset, var->value, sizeof(template->charset));
} else if (!strcasecmp(var->name, "templatefile")) {
if (template->body)
ast_free(template->body);
template->body = message_template_parse_filebody(var->value);
if (!template->body) {
ast_log(LOG_ERROR, "Error reading message body definition file %s\n", var->value);
error++;
}
} else if (!strcasecmp(var->name, "messagebody")) {
if (template->body)
ast_free(template->body);
template->body = message_template_parse_emailbody(var->value);
if (!template->body) {
ast_log(LOG_ERROR, "Error parsing message body definition:\n %s\n", var->value);
error++;
}
} else {
ast_log(LOG_ERROR, "Unknown message template configuration option \"%s=%s\"\n", var->name, var->value);
error++;
}
var = var->next;
}
if (error)
ast_log(LOG_ERROR, "-- %d errors found parsing message template definition %s\n", error, name);
AST_LIST_LOCK(&message_templates);
AST_LIST_INSERT_TAIL(&message_templates, template, list);
AST_LIST_UNLOCK(&message_templates);
global_stats.templates++;
return error;
}
/*!\internal
* \brief Find named template */
static struct minivm_template *message_template_find(const char *name)
{
struct minivm_template *this, *res = NULL;
if (ast_strlen_zero(name))
return NULL;
AST_LIST_LOCK(&message_templates);
AST_LIST_TRAVERSE(&message_templates, this, list) {
if (!strcasecmp(this->name, name)) {
res = this;
break;
}
}
AST_LIST_UNLOCK(&message_templates);
return res;
}
/*!\internal
* \brief Clear list of templates */
static void message_destroy_list(void)
{
struct minivm_template *this;
AST_LIST_LOCK(&message_templates);
while ((this = AST_LIST_REMOVE_HEAD(&message_templates, list))) {
message_template_free(this);
}
AST_LIST_UNLOCK(&message_templates);
}
/*!\internal
* \brief read buffer from file (base64 conversion) */
static int b64_inbuf(struct b64_baseio *bio, FILE *fi)
{
int l;
if (bio->ateof)
return 0;
	if ((l = fread(bio->iobuf, 1, B64_BASEMAXINLINE, fi)) <= 0) {
		if (ferror(fi))
			return -1;
		bio->ateof = 1;
		return 0;
	}
	bio->iolen = l;
	bio->iocp = 0;
return 1;
}
/*!\internal
* \brief read character from file to buffer (base64 conversion) */
static int b64_inchar(struct b64_baseio *bio, FILE *fi)
{
if (bio->iocp >= bio->iolen) {
if (!b64_inbuf(bio, fi))
return EOF;
}
return bio->iobuf[bio->iocp++];
}
/*!\internal
* \brief write buffer to file (base64 conversion) */
static int b64_ochar(struct b64_baseio *bio, int c, FILE *so)
{
	if (bio->linelength >= B64_BASELINELEN) {
		if (fputs(EOL, so) == EOF)
			return -1;
		bio->linelength = 0;
}
if (putc(((unsigned char) c), so) == EOF)
return -1;
bio->linelength++;
return 1;
}
/*!\internal
* \brief Encode file to base64 encoding for email attachment (base64 conversion) */
static int base_encode(char *filename, FILE *so)
{
unsigned char dtable[B64_BASEMAXINLINE];
	int i, hiteof = 0;
FILE *fi;
struct b64_baseio bio;
memset(&bio, 0, sizeof(bio));
bio.iocp = B64_BASEMAXINLINE;
if (!(fi = fopen(filename, "rb"))) {
ast_log(LOG_WARNING, "Failed to open file: %s: %s\n", filename, strerror(errno));
return -1;
}
	for (i = 0; i < 9; i++) {
		dtable[i] = 'A' + i;
		dtable[i + 9] = 'J' + i;
		dtable[26 + i] = 'a' + i;
		dtable[26 + i + 9] = 'j' + i;
	}
	for (i = 0; i < 8; i++) {
		dtable[i + 18] = 'S' + i;
		dtable[26 + i + 18] = 's' + i;
	}
	for (i = 0; i < 10; i++) {
		dtable[52 + i] = '0' + i;
	}
	dtable[62] = '+';
	dtable[63] = '/';
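	/*
	 * Standard base64 from here on: every three input bytes become four
	 * 6-bit indexes into dtable. Worked example: the bytes "Mem"
	 * (0x4D 0x65 0x6D) regroup into 010011|010110|010101|101101, i.e.
	 * indexes 19, 22, 21, 45, which encode as "TWVt". A short final
	 * group is padded with '='.
	 */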
	while (!hiteof) {
		unsigned char igroup[3], ogroup[4];
		int c, n;
		igroup[0] = igroup[1] = igroup[2] = 0;
		for (n = 0; n < 3; n++) {
			if ((c = b64_inchar(&bio, fi)) == EOF) {
				hiteof = 1;
				break;
			}
			igroup[n] = (unsigned char) c;
		}
		if (n > 0) {
			ogroup[0] = dtable[igroup[0] >> 2];
			ogroup[1] = dtable[((igroup[0] & 3) << 4) | (igroup[1] >> 4)];
			ogroup[2] = dtable[((igroup[1] & 0xF) << 2) | (igroup[2] >> 6)];
			ogroup[3] = dtable[igroup[2] & 0x3F];
			if (n < 3) {
				ogroup[3] = '=';
				if (n < 2)
					ogroup[2] = '=';
			}
			for (i = 0; i < 4; i++)
				b64_ochar(&bio, ogroup[i], so);
		}
	}
	/* Put end of line - line feed */
	if (fputs(EOL, so) == EOF) {
		fclose(fi);
		return 0;
	}
	fclose(fi);
return 1;
}
static int get_date(char *s, int len)
{
struct ast_tm tm;
struct timeval now = ast_tvnow();
ast_localtime(&now, &tm, NULL);
return ast_strftime(s, len, "%a %b %e %r %Z %Y", &tm);
}
/*!\internal
* \brief Free user structure - if it's allocated */
static void free_user(struct minivm_account *vmu)
{
if (vmu->chanvars)
ast_variables_destroy(vmu->chanvars);
ast_free(vmu);
}
/*!\internal
* \brief Prepare for voicemail template by adding channel variables
* to the channel
*/
static void prep_email_sub_vars(struct ast_channel *channel, const struct minivm_account *vmu, const char *cidnum, const char *cidname, const char *dur, const char *date, const char *counter)
{
char callerid[256];
struct ast_variable *var;
if (!channel) {
ast_log(LOG_ERROR, "No allocated channel, giving up...\n");
return;
}
for (var = vmu->chanvars ; var ; var = var->next) {
pbx_builtin_setvar_helper(channel, var->name, var->value);
}
	/* Prepare variables for substitution in email body and subject */
pbx_builtin_setvar_helper(channel, "MVM_NAME", vmu->fullname);
pbx_builtin_setvar_helper(channel, "MVM_DUR", dur);
pbx_builtin_setvar_helper(channel, "MVM_DOMAIN", vmu->domain);
pbx_builtin_setvar_helper(channel, "MVM_USERNAME", vmu->username);
pbx_builtin_setvar_helper(channel, "MVM_CALLERID", ast_callerid_merge(callerid, sizeof(callerid), cidname, cidnum, "Unknown Caller"));
pbx_builtin_setvar_helper(channel, "MVM_CIDNAME", (cidname ? cidname : "an unknown caller"));
pbx_builtin_setvar_helper(channel, "MVM_CIDNUM", (cidnum ? cidnum : "an unknown caller"));
pbx_builtin_setvar_helper(channel, "MVM_DATE", date);
if (!ast_strlen_zero(counter))
pbx_builtin_setvar_helper(channel, "MVM_COUNTER", counter);
}
/*!\internal
* \brief Set default values for Mini-Voicemail users */
static void populate_defaults(struct minivm_account *vmu)
{
ast_copy_flags(vmu, (&globalflags), AST_FLAGS_ALL);
ast_copy_string(vmu->attachfmt, default_vmformat, sizeof(vmu->attachfmt));
vmu->volgain = global_volgain;
}
/*!\internal
* \brief Allocate new vm user and set default values */
static struct minivm_account *mvm_user_alloc(void)
{
struct minivm_account *new;
new = ast_calloc(1, sizeof(*new));
if (!new)
return NULL;
populate_defaults(new);
return new;
}
/*!\internal
* \brief Clear list of users */
static void vmaccounts_destroy_list(void)
{
struct minivm_account *this;
AST_LIST_LOCK(&minivm_accounts);
while ((this = AST_LIST_REMOVE_HEAD(&minivm_accounts, list)))
ast_free(this);
AST_LIST_UNLOCK(&minivm_accounts);
}
/*!\internal
* \brief Find user from static memory object list */
static struct minivm_account *find_account(const char *domain, const char *username, int createtemp)
{
struct minivm_account *vmu = NULL, *cur;
if (ast_strlen_zero(domain) || ast_strlen_zero(username)) {
ast_log(LOG_NOTICE, "No username or domain? \n");
return NULL;
}
ast_debug(3, "Looking for voicemail user %s in domain %s\n", username, domain);
AST_LIST_LOCK(&minivm_accounts);
AST_LIST_TRAVERSE(&minivm_accounts, cur, list) {
/* Is this the voicemail account we're looking for? */
if (!strcasecmp(domain, cur->domain) && !strcasecmp(username, cur->username))
break;
}
AST_LIST_UNLOCK(&minivm_accounts);
if (cur) {
ast_debug(3, "Found account for %s@%s\n", username, domain);
vmu = cur;
} else
vmu = find_user_realtime(domain, username);
if (createtemp && !vmu) {
/* Create a temporary user, send e-mail and be gone */
		vmu = mvm_user_alloc();
		if (vmu) {
			/* Only mark the account as allocated once the allocation has succeeded */
			ast_set2_flag(vmu, TRUE, MVM_ALLOCED);
			ast_copy_string(vmu->username, username, sizeof(vmu->username));
			ast_copy_string(vmu->domain, domain, sizeof(vmu->domain));
			ast_debug(1, "Created temporary account\n");
		}
}
return vmu;
}
/*!\internal
* \brief Find user in realtime storage
* \return pointer to minivm_account structure
*/
static struct minivm_account *find_user_realtime(const char *domain, const char *username)
{
struct ast_variable *var;
struct minivm_account *retval;
char name[MAXHOSTNAMELEN];
retval = mvm_user_alloc();
if (!retval)
return NULL;
if (username)
ast_copy_string(retval->username, username, sizeof(retval->username));
populate_defaults(retval);
var = ast_load_realtime("minivm", "username", username, "domain", domain, SENTINEL);
if (!var) {
ast_free(retval);
return NULL;
}
snprintf(name, sizeof(name), "%s@%s", username, domain);
create_vmaccount(name, var, TRUE);
ast_variables_destroy(var);
return retval;
}
/*!\internal
* \brief Check if the string would need encoding within the MIME standard, to
* avoid confusing certain mail software that expects messages to be 7-bit
* clean.
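 *
 * Illustrative examples: check_mime("Voicemail") returns 0, while
 * check_mime("Björn") returns 1 (the 'ö' byte falls outside 32..126),
 * as does any string containing one of the listed specials, e.g. '@'.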
*/
static int check_mime(const char *str)
{
for (; *str; str++) {
if (*str > 126 || *str < 32 || strchr("()<>@,:;/\"[]?.=", *str)) {
return 1;
}
}
return 0;
}
/*!\internal
* \brief Encode a string according to the MIME rules for encoding strings
* that are not 7-bit clean or contain control characters.
*
* Additionally, if the encoded string would exceed the MIME limit of 76
* characters per line, then the encoding will be broken up into multiple
* sections, separated by a space character, in order to facilitate
* breaking up the associated header across multiple lines.
*
* \param end An expandable buffer for holding the result
* \param maxlen \see ast_str
* \param charset Character set in which the result should be encoded
* \param start A string to be encoded
* \param preamble The length of the first line already used for this string,
* to ensure that each line maintains a maximum length of 76 chars.
* \param postamble the length of any additional characters appended to the
* line, used to ensure proper field wrapping.
* \return The encoded string.
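 *
 * \note Illustrative example: with charset "ISO-8859-1", the name
 * "Björn" comes out as "=?ISO-8859-1?Q?Bj=F6rn?=" ('=F6' replaces the
 * non-7-bit byte; '_' would replace any space).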
*/
static const char *ast_str_encode_mime(struct ast_str **end, ssize_t maxlen, const char *charset, const char *start, size_t preamble, size_t postamble)
{
struct ast_str *tmp = ast_str_alloca(80);
int first_section = 1;
ast_str_reset(*end);
ast_str_set(&tmp, -1, "=?%s?Q?", charset);
for (; *start; start++) {
int need_encoding = 0;
if (*start < 33 || *start > 126 || strchr("()<>@,:;/\"[]?.=_", *start)) {
need_encoding = 1;
}
if ((first_section && need_encoding && preamble + ast_str_strlen(tmp) > 70) ||
(first_section && !need_encoding && preamble + ast_str_strlen(tmp) > 72) ||
(!first_section && need_encoding && ast_str_strlen(tmp) > 70) ||
(!first_section && !need_encoding && ast_str_strlen(tmp) > 72)) {
/* Start new line */
ast_str_append(end, maxlen, "%s%s?=", first_section ? "" : " ", ast_str_buffer(tmp));
ast_str_set(&tmp, -1, "=?%s?Q?", charset);
first_section = 0;
}
if (need_encoding && *start == ' ') {
ast_str_append(&tmp, -1, "_");
		} else if (need_encoding) {
			/* Zero-padded two-digit hex escape, so bytes below 0x10 encode correctly */
			ast_str_append(&tmp, -1, "=%02hhX", *start);
} else {
ast_str_append(&tmp, -1, "%c", *start);
}
}
ast_str_append(end, maxlen, "%s%s?=%s", first_section ? "" : " ", ast_str_buffer(tmp), ast_str_strlen(tmp) + postamble > 74 ? " " : "");
return ast_str_buffer(*end);
}
/*!\internal
 * \brief Wraps a character sequence in double quotes, escaping occurrences of quotes within the string.
* \param from The string to work with.
* \param buf The destination buffer to write the modified quoted string.
* \param maxlen Always zero. \see ast_str
*
* \return The destination string with quotes wrapped on it (the to field).
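 *
 * Illustrative example: the name John "Jay" Doe comes back as
 * "John \"Jay\" Doe" (surrounding quotes added, embedded quotes and
 * backslashes escaped).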
*/
static const char *ast_str_quote(struct ast_str **buf, ssize_t maxlen, const char *from)
{
const char *ptr;
/* We're only ever passing 0 to maxlen, so short output isn't possible */
ast_str_set(buf, maxlen, "\"");
for (ptr = from; *ptr; ptr++) {
if (*ptr == '"' || *ptr == '\\') {
ast_str_append(buf, maxlen, "\\%c", *ptr);
} else {
ast_str_append(buf, maxlen, "%c", *ptr);
}
}
ast_str_append(buf, maxlen, "\"");
return ast_str_buffer(*buf);
}
/*!\internal
* \brief Send voicemail with audio file as an attachment */
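/*
 * The message written to the temporary file has roughly this shape
 * (a sketch; the fprintf() calls below are authoritative):
 *
 *	Date: <RFC 2822 date>
 *	From: <template fromaddress, or "Asterisk PBX">
 *	To: <account fullname> <user@domain>
 *	Subject: <template subject after variable substitution>
 *	MIME-Version: 1.0
 *	Content-Type: multipart/mixed; boundary="voicemail_..."
 *
 *	--voicemail_...
 *	Content-Type: text/plain; charset=<template charset>
 *	<template body after variable substitution>
 *	--voicemail_...
 *	Content-Type: audio/x-<format> (base64-encoded attachment, if enabled)
 *	--voicemail_...--
 */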
static int sendmail(struct minivm_template *template, struct minivm_account *vmu, char *cidnum, char *cidname, const char *filename, char *format, int duration, int attach_user_voicemail, enum mvm_messagetype type, const char *counter)
{
FILE *p = NULL;
int pfd;
char email[256] = "";
char who[256] = "";
char date[256];
char bound[256];
char fname[PATH_MAX];
char dur[PATH_MAX];
char tmp[80] = "/tmp/astmail-XXXXXX";
char tmp2[PATH_MAX];
char newtmp[PATH_MAX]; /* Only used with volgain */
struct timeval now;
struct ast_tm tm;
struct minivm_zone *the_zone = NULL;
struct ast_channel *ast;
char *finalfilename = "";
struct ast_str *str1 = ast_str_create(16), *str2 = ast_str_create(16);
char *fromaddress;
char *fromemail;
if (!str1 || !str2) {
ast_free(str1);
ast_free(str2);
return -1;
}
if (type == MVM_MESSAGE_EMAIL) {
if (vmu && !ast_strlen_zero(vmu->email)) {
ast_copy_string(email, vmu->email, sizeof(email));
} else if (!ast_strlen_zero(vmu->username) && !ast_strlen_zero(vmu->domain))
snprintf(email, sizeof(email), "%s@%s", vmu->username, vmu->domain);
} else if (type == MVM_MESSAGE_PAGE) {
ast_copy_string(email, vmu->pager, sizeof(email));
}
if (ast_strlen_zero(email)) {
ast_log(LOG_WARNING, "No address to send message to.\n");
ast_free(str1);
ast_free(str2);
return -1;
}
ast_debug(3, "Sending mail to %s@%s - Using template %s\n", vmu->username, vmu->domain, template->name);
if (!strcmp(format, "wav49"))
format = "WAV";
/* If we have a gain option, process it now with sox */
if (type == MVM_MESSAGE_EMAIL && (vmu->volgain < -.001 || vmu->volgain > .001) ) {
char tmpcmd[PATH_MAX];
int tmpfd;
ast_copy_string(newtmp, "/tmp/XXXXXX", sizeof(newtmp));
ast_debug(3, "newtmp: %s\n", newtmp);
tmpfd = mkstemp(newtmp);
if (tmpfd < 0) {
ast_log(LOG_WARNING, "Failed to create temporary file for volgain: %d\n", errno);
ast_free(str1);
ast_free(str2);
return -1;
}
snprintf(tmpcmd, sizeof(tmpcmd), "sox -v %.4f %s.%s %s.%s", vmu->volgain, filename, format, newtmp, format);
ast_safe_system(tmpcmd);
close(tmpfd);
finalfilename = newtmp;
ast_debug(3, "VOLGAIN: Stored at: %s.%s - Level: %.4f - Mailbox: %s\n", filename, format, vmu->volgain, vmu->username);
} else {
finalfilename = ast_strdupa(filename);
}
/* Create file name */
snprintf(fname, sizeof(fname), "%s.%s", finalfilename, format);
if (template->attachment)
ast_debug(1, "Attaching file '%s', format '%s', uservm is '%d'\n", finalfilename, format, attach_user_voicemail);
/* Make a temporary file instead of piping directly to sendmail, in case the mail
command hangs */
pfd = mkstemp(tmp);
if (pfd > -1) {
p = fdopen(pfd, "w");
if (!p) {
close(pfd);
pfd = -1;
}
ast_debug(1, "Opening temp file for e-mail: %s\n", tmp);
}
if (!p) {
ast_log(LOG_WARNING, "Unable to open temporary file '%s'\n", tmp);
ast_free(str1);
ast_free(str2);
return -1;
}
/* Allocate channel used for chanvar substitution */
ast = ast_dummy_channel_alloc();
	if (!ast) {
		fclose(p);
		unlink(tmp);
		ast_free(str1);
		ast_free(str2);
		return -1;
	}
snprintf(dur, sizeof(dur), "%d:%02d", duration / 60, duration % 60);
/* Does this user have a timezone specified? */
if (!ast_strlen_zero(vmu->zonetag)) {
/* Find the zone in the list */
struct minivm_zone *z;
AST_LIST_LOCK(&minivm_zones);
AST_LIST_TRAVERSE(&minivm_zones, z, list) {
if (strcmp(z->name, vmu->zonetag))
continue;
the_zone = z;
}
AST_LIST_UNLOCK(&minivm_zones);
}
now = ast_tvnow();
ast_localtime(&now, &tm, the_zone ? the_zone->timezone : NULL);
ast_strftime(date, sizeof(date), "%a, %d %b %Y %H:%M:%S %z", &tm);
/* Start printing the email to the temporary file */
fprintf(p, "Date: %s\n", date);
/* Set date format for voicemail mail */
ast_strftime(date, sizeof(date), template->dateformat, &tm);
/* Populate channel with channel variables for substitution */
prep_email_sub_vars(ast, vmu, cidnum, cidname, dur, date, counter);
/* Find email address to use */
	/* If there's a server e-mail address in the account, use that, otherwise use the template's */
fromemail = ast_strlen_zero(vmu->serveremail) ? template->serveremail : vmu->serveremail;
	/* Find name to use for server e-mail */
fromaddress = ast_strlen_zero(template->fromaddress) ? "" : template->fromaddress;
/* If needed, add hostname as domain */
if (ast_strlen_zero(fromemail))
fromemail = "asterisk";
if (strchr(fromemail, '@'))
ast_copy_string(who, fromemail, sizeof(who));
else {
char host[MAXHOSTNAMELEN];
gethostname(host, sizeof(host)-1);
snprintf(who, sizeof(who), "%s@%s", fromemail, host);
}
if (ast_strlen_zero(fromaddress)) {
fprintf(p, "From: Asterisk PBX <%s>\n", who);
} else {
ast_debug(4, "Fromaddress template: %s\n", fromaddress);
ast_str_substitute_variables(&str1, 0, ast, fromaddress);
if (check_mime(ast_str_buffer(str1))) {
int first_line = 1;
char *ptr;
ast_str_encode_mime(&str2, 0, template->charset, ast_str_buffer(str1), strlen("From: "), strlen(who) + 3);
while ((ptr = strchr(ast_str_buffer(str2), ' '))) {
*ptr = '\0';
fprintf(p, "%s %s\n", first_line ? "From:" : "", ast_str_buffer(str2));
first_line = 0;
/* Substring is smaller, so this will never grow */
ast_str_set(&str2, 0, "%s", ptr + 1);
}
fprintf(p, "%s %s <%s>\n", first_line ? "From:" : "", ast_str_buffer(str2), who);
} else {
fprintf(p, "From: %s <%s>\n", ast_str_quote(&str2, 0, ast_str_buffer(str1)), who);
}
}
fprintf(p, "Message-ID: <Asterisk-%d-%s-%d-%s>\n", (unsigned int)ast_random(), vmu->username, (int)getpid(), who);
if (ast_strlen_zero(vmu->email)) {
snprintf(email, sizeof(email), "%s@%s", vmu->username, vmu->domain);
} else {
ast_copy_string(email, vmu->email, sizeof(email));
}
if (check_mime(vmu->fullname)) {
int first_line = 1;
char *ptr;
ast_str_encode_mime(&str2, 0, template->charset, vmu->fullname, strlen("To: "), strlen(email) + 3);
while ((ptr = strchr(ast_str_buffer(str2), ' '))) {
*ptr = '\0';
fprintf(p, "%s %s\n", first_line ? "To:" : "", ast_str_buffer(str2));
first_line = 0;
/* Substring is smaller, so this will never grow */
ast_str_set(&str2, 0, "%s", ptr + 1);
}
fprintf(p, "%s %s <%s>\n", first_line ? "To:" : "", ast_str_buffer(str2), email);
} else {
fprintf(p, "To: %s <%s>\n", ast_str_quote(&str2, 0, vmu->fullname), email);
}
if (!ast_strlen_zero(template->subject)) {
ast_str_substitute_variables(&str1, 0, ast, template->subject);
if (check_mime(ast_str_buffer(str1))) {
int first_line = 1;
char *ptr;
ast_str_encode_mime(&str2, 0, template->charset, ast_str_buffer(str1), strlen("Subject: "), 0);
while ((ptr = strchr(ast_str_buffer(str2), ' '))) {
*ptr = '\0';
fprintf(p, "%s %s\n", first_line ? "Subject:" : "", ast_str_buffer(str2));
first_line = 0;
/* Substring is smaller, so this will never grow */
ast_str_set(&str2, 0, "%s", ptr + 1);
}
fprintf(p, "%s %s\n", first_line ? "Subject:" : "", ast_str_buffer(str2));
} else {
fprintf(p, "Subject: %s\n", ast_str_buffer(str1));
}
} else {
fprintf(p, "Subject: New message in mailbox %s@%s\n", vmu->username, vmu->domain);
ast_debug(1, "Using default subject for this email \n");
}
if (option_debug > 2)
fprintf(p, "X-Asterisk-debug: template %s user account %s@%s\n", template->name, vmu->username, vmu->domain);
fprintf(p, "MIME-Version: 1.0\n");
/* Something unique. */
	snprintf(bound, sizeof(bound), "voicemail_%s%d%u", vmu->username, (int)getpid(), (unsigned int)ast_random());
fprintf(p, "Content-Type: multipart/mixed; boundary=\"%s\"\n\n\n", bound);
fprintf(p, "--%s\n", bound);
fprintf(p, "Content-Type: text/plain; charset=%s\nContent-Transfer-Encoding: 8bit\n\n", template->charset);
if (!ast_strlen_zero(template->body)) {
ast_str_substitute_variables(&str1, 0, ast, template->body);
ast_debug(3, "Message now: %s\n-----\n", ast_str_buffer(str1));
fprintf(p, "%s\n", ast_str_buffer(str1));
} else {
fprintf(p, "Dear %s:\n\n\tJust wanted to let you know you were just left a %s long message \n"
"in mailbox %s from %s, on %s so you might\n"
"want to check it when you get a chance. Thanks!\n\n\t\t\t\t--Asterisk\n\n", vmu->fullname,
dur, vmu->username, (cidname ? cidname : (cidnum ? cidnum : "an unknown caller")), date);
ast_debug(3, "Using default message body (no template)\n-----\n");
}
/* Eww. We want formats to tell us their own MIME type */
if (template->attachment) {
char *ctype = "audio/x-";
ast_debug(3, "Attaching file to message: %s\n", fname);
if (!strcasecmp(format, "ogg"))
ctype = "application/";
fprintf(p, "--%s\n", bound);
fprintf(p, "Content-Type: %s%s; name=\"voicemailmsg.%s\"\n", ctype, format, format);
fprintf(p, "Content-Transfer-Encoding: base64\n");
fprintf(p, "Content-Description: Voicemail sound attachment.\n");
fprintf(p, "Content-Disposition: attachment; filename=\"voicemail%s.%s\"\n\n", counter ? counter : "", format);
base_encode(fname, p);
fprintf(p, "\n\n--%s--\n.\n", bound);
}
fclose(p);
snprintf(tmp2, sizeof(tmp2), "( %s < %s ; rm -f %s ) &", global_mailcmd, tmp, tmp);
ast_safe_system(tmp2);
ast_debug(1, "Sent message to %s with command '%s' - %s\n", vmu->email, global_mailcmd, template->attachment ? "(media attachment)" : "");
ast_debug(3, "Actual command used: %s\n", tmp2);
ast = ast_channel_unref(ast);
ast_free(str1);
ast_free(str2);
return 0;
}
/*!\internal
* \brief Create directory based on components */
static int make_dir(char *dest, int len, const char *domain, const char *username, const char *folder)
{
return snprintf(dest, len, "%s%s/%s%s%s", MVM_SPOOL_DIR, domain, username, ast_strlen_zero(folder) ? "" : "/", folder ? folder : "");
}
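/*
 * Illustrative result: with domain "example.com", username "1234" and
 * folder "tmp", make_dir() produces "<MVM_SPOOL_DIR>example.com/1234/tmp",
 * where MVM_SPOOL_DIR is defined elsewhere in this file.
 */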
/*!\internal
* \brief Checks if directory exists. Does not create directory, but builds string in dest
* \param dest String. base directory.
 * \param len Int. Length of the base directory string.
 * \param domain String. Ignored if null or empty.
 * \param username String. Ignored if null or empty.
 * \param folder String. Ignored if null or empty.
* \return 0 on failure, 1 on success.
*/
static int check_dirpath(char *dest, int len, char *domain, char *username, char *folder)
{
struct stat filestat;
make_dir(dest, len, domain, username, folder ? folder : "");
if (stat(dest, &filestat)== -1)
return FALSE;
else
return TRUE;
}
/*!\internal
* \brief basically mkdir -p $dest/$domain/$username/$folder
* \param dest String. base directory.
 * \param len Length of the directory string.
 * \param domain String. Ignored if null or empty.
 * \param username String. Ignored if null or empty.
 * \param folder String. Ignored if null or empty.
* \return -1 on failure, 0 on success.
*/
static int create_dirpath(char *dest, int len, char *domain, char *username, char *folder)
{
int res;
make_dir(dest, len, domain, username, folder);
if ((res = ast_mkdir(dest, 0777))) {
ast_log(LOG_WARNING, "ast_mkdir '%s' failed: %s\n", dest, strerror(res));
return -1;
}
ast_debug(2, "Creating directory for %s@%s folder %s : %s\n", username, domain, folder, dest);
return 0;
}
/*!\internal
* \brief Play intro message before recording voicemail
*/
static int invent_message(struct ast_channel *chan, char *domain, char *username, int busy, char *ecodes)
{
int res;
char fn[PATH_MAX];
ast_debug(2, "Still preparing to play message ...\n");
snprintf(fn, sizeof(fn), "%s%s/%s/greet", MVM_SPOOL_DIR, domain, username);
if (ast_fileexists(fn, NULL, NULL) > 0) {
res = ast_streamfile(chan, fn, chan->language);
if (res)
return -1;
res = ast_waitstream(chan, ecodes);
if (res)
return res;
} else {
int numericusername = 1;
char *i = username;
ast_debug(2, "No personal prompts. Using default prompt set for language\n");
while (*i) {
ast_debug(2, "Numeric? Checking %c\n", *i);
			if (!isdigit((unsigned char) *i)) {
numericusername = FALSE;
break;
}
i++;
}
if (numericusername) {
if (ast_streamfile(chan, "vm-theperson", chan->language))
return -1;
if ((res = ast_waitstream(chan, ecodes)))
return res;
res = ast_say_digit_str(chan, username, ecodes, chan->language);
if (res)
return res;
} else {
if (ast_streamfile(chan, "vm-theextensionis", chan->language))
return -1;
if ((res = ast_waitstream(chan, ecodes)))
return res;
}
}
res = ast_streamfile(chan, busy ? "vm-isonphone" : "vm-isunavail", chan->language);
if (res)
return -1;
res = ast_waitstream(chan, ecodes);
return res;
}
/*!\internal
* \brief Delete media files and attribute file */
static int vm_delete(char *file)
{
int res;
ast_debug(1, "Deleting voicemail file %s\n", file);
res = unlink(file); /* Remove the meta data file */
res |= ast_filedelete(file, NULL); /* remove the media file */
return res;
}
/*!\internal
* \brief Record voicemail message & let caller review or re-record it, or set options if applicable */
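/*
 * Summary of the DTMF menu handled by the switch below:
 *	1 - accept and save the recording
 *	2 - play back (review) the recording
 *	3 - record, or re-record, the message
 *	0 - reach the operator, if enabled for the account
 *	other digits - play "vm-sorry" and prompt again
 */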
static int play_record_review(struct ast_channel *chan, char *playfile, char *recordfile, int maxtime, char *fmt,
int outsidecaller, struct minivm_account *vmu, int *duration, int *sound_duration, const char *unlockdir,
signed char record_gain)
{
int cmd = 0;
int max_attempts = 3;
int attempts = 0;
int recorded = 0;
int message_exists = 0;
signed char zero_gain = 0;
char *acceptdtmf = "#";
char *canceldtmf = "";
/* Note that urgent and private are for flagging messages as such in the future */
/* barf if no pointer passed to store duration in */
if (duration == NULL) {
ast_log(LOG_WARNING, "Error play_record_review called without duration pointer\n");
return -1;
}
cmd = '3'; /* Want to start by recording */
while ((cmd >= 0) && (cmd != 't')) {
switch (cmd) {
case '1':
ast_verb(3, "Saving message as is\n");
ast_stream_and_wait(chan, "vm-msgsaved", "");
cmd = 't';
break;
case '2':
/* Review */
ast_verb(3, "Reviewing the message\n");
ast_streamfile(chan, recordfile, chan->language);
cmd = ast_waitstream(chan, AST_DIGIT_ANY);
break;
case '3':
message_exists = 0;
/* Record */
if (recorded == 1)
ast_verb(3, "Re-recording the message\n");
else
ast_verb(3, "Recording the message\n");
if (recorded && outsidecaller)
cmd = ast_play_and_wait(chan, "beep");
recorded = 1;
/* After an attempt has been made to record message, we have to take care of INTRO and beep for incoming messages, but not for greetings */
if (record_gain)
ast_channel_setoption(chan, AST_OPTION_RXGAIN, &record_gain, sizeof(record_gain), 0);
if (ast_test_flag(vmu, MVM_OPERATOR))
canceldtmf = "0";
cmd = ast_play_and_record_full(chan, playfile, recordfile, maxtime, fmt, duration, sound_duration, global_silencethreshold, global_maxsilence, unlockdir, acceptdtmf, canceldtmf);
if (record_gain)
ast_channel_setoption(chan, AST_OPTION_RXGAIN, &zero_gain, sizeof(zero_gain), 0);
if (cmd == -1) /* User has hung up, no options to give */
return cmd;
if (cmd == '0')
break;
else if (cmd == '*')
break;
else {
/* If all is well, a message exists */
message_exists = 1;
cmd = 0;
}
break;
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
case '*':
case '#':
cmd = ast_play_and_wait(chan, "vm-sorry");
break;
case '0':
if(!ast_test_flag(vmu, MVM_OPERATOR)) {
cmd = ast_play_and_wait(chan, "vm-sorry");
break;
}
if (message_exists || recorded) {
cmd = ast_play_and_wait(chan, "vm-saveoper");
if (!cmd)
cmd = ast_waitfordigit(chan, 3000);
if (cmd == '1') {
ast_play_and_wait(chan, "vm-msgsaved");
cmd = '0';
} else {
ast_play_and_wait(chan, "vm-deleted");
vm_delete(recordfile);
cmd = '0';
}
}
return cmd;
default:
			/* Outside callers only get the review menu when the review
			   option is enabled; the owner of the box always gets to
			   review their OGMs */
if (outsidecaller && !ast_test_flag(vmu, MVM_REVIEW))
return cmd;
if (message_exists) {
cmd = ast_play_and_wait(chan, "vm-review");
} else {
cmd = ast_play_and_wait(chan, "vm-torerecord");
if (!cmd)
cmd = ast_waitfordigit(chan, 600);
}
if (!cmd && outsidecaller && ast_test_flag(vmu, MVM_OPERATOR)) {
cmd = ast_play_and_wait(chan, "vm-reachoper");
if (!cmd)
cmd = ast_waitfordigit(chan, 600);
}
if (!cmd)
cmd = ast_waitfordigit(chan, 6000);
if (!cmd) {
attempts++;
}
if (attempts > max_attempts) {
cmd = 't';
}
}
}
if (outsidecaller)
ast_play_and_wait(chan, "vm-goodbye");
if (cmd == 't')
cmd = 0;
return cmd;
}
/*! \brief Run external notification for voicemail message */
static void run_externnotify(struct ast_channel *chan, struct minivm_account *vmu)
{
char arguments[BUFSIZ];
if (ast_strlen_zero(vmu->externnotify) && ast_strlen_zero(global_externnotify))
return;
snprintf(arguments, sizeof(arguments), "%s %s@%s %s %s&",
ast_strlen_zero(vmu->externnotify) ? global_externnotify : vmu->externnotify,
vmu->username, vmu->domain,
(chan->caller.id.name.valid && chan->caller.id.name.str)
? chan->caller.id.name.str : "",
(chan->caller.id.number.valid && chan->caller.id.number.str)
? chan->caller.id.number.str : "");
ast_debug(1, "Executing: %s\n", arguments);
ast_safe_system(arguments);
}
/*!\internal
* \brief Send message to voicemail account owner */
static int notify_new_message(struct ast_channel *chan, const char *templatename, struct minivm_account *vmu, const char *filename, long duration, const char *format, char *cidnum, char *cidname)
{
char *stringp;
struct minivm_template *etemplate;
char *messageformat;
int res = 0;
char oldlocale[100];
const char *counter;
if (!ast_strlen_zero(vmu->attachfmt)) {
if (strstr(format, vmu->attachfmt)) {
format = vmu->attachfmt;
} else {
ast_log(LOG_WARNING, "Attachment format '%s' is not one of the recorded formats '%s'. Falling back to default format for '%s@%s'.\n", vmu->attachfmt, format, vmu->username, vmu->domain);
}
}
etemplate = message_template_find(vmu->etemplate);
if (!etemplate)
etemplate = message_template_find(templatename);
if (!etemplate)
etemplate = message_template_find("email-default");
/* Attach only the first format */
stringp = messageformat = ast_strdupa(format);
strsep(&stringp, "|");
if (!ast_strlen_zero(etemplate->locale)) {
char *new_locale;
ast_copy_string(oldlocale, setlocale(LC_TIME, NULL), sizeof(oldlocale));
ast_debug(2, "Changing locale from %s to %s\n", oldlocale, etemplate->locale);
new_locale = setlocale(LC_TIME, etemplate->locale);
if (new_locale == NULL) {
ast_log(LOG_WARNING, "-_-_- Changing to new locale did not work. Locale: %s\n", etemplate->locale);
}
}
/* Read counter if available */
ast_channel_lock(chan);
if ((counter = pbx_builtin_getvar_helper(chan, "MVM_COUNTER"))) {
counter = ast_strdupa(counter);
}
ast_channel_unlock(chan);
if (ast_strlen_zero(counter)) {
ast_debug(2, "MVM_COUNTER not found\n");
} else {
ast_debug(2, "MVM_COUNTER found - will use it with value %s\n", counter);
}
res = sendmail(etemplate, vmu, cidnum, cidname, filename, messageformat, duration, etemplate->attachment, MVM_MESSAGE_EMAIL, counter);
if (res == 0 && !ast_strlen_zero(vmu->pager)) {
/* Find template for paging */
etemplate = message_template_find(vmu->ptemplate);
if (!etemplate)
etemplate = message_template_find("pager-default");
		if (!ast_strlen_zero(etemplate->locale)) {
			ast_copy_string(oldlocale, setlocale(LC_TIME, NULL), sizeof(oldlocale));
			setlocale(LC_TIME, etemplate->locale);
}
res = sendmail(etemplate, vmu, cidnum, cidname, filename, messageformat, duration, etemplate->attachment, MVM_MESSAGE_PAGE, counter);
}
	ast_manager_event(chan, EVENT_FLAG_CALL, "MiniVoiceMail", "Action: SentNotification\r\nMailbox: %s@%s\r\nCounter: %s\r\n", vmu->username, vmu->domain, counter ? counter : "");
run_externnotify(chan, vmu); /* Run external notification */
	if (!ast_strlen_zero(etemplate->locale)) {
		setlocale(LC_TIME, oldlocale);	/* Reset to the old locale */
}
return res;
}
/*!\internal
* \brief Record voicemail message, store into file prepared for sending e-mail */
static int leave_voicemail(struct ast_channel *chan, char *username, struct leave_vm_options *options)
{
char tmptxtfile[PATH_MAX];
char callerid[256];
FILE *txt;
int res = 0, txtdes;
int duration = 0;
int sound_duration = 0;
char date[256];
char tmpdir[PATH_MAX];
char ext_context[256] = "";
char fmt[80];
char *domain;
char tmp[256] = "";
struct minivm_account *vmu;
int userdir;
ast_copy_string(tmp, username, sizeof(tmp));
username = tmp;
domain = strchr(tmp, '@');
if (domain) {
*domain = '\0';
domain++;
}
if (!(vmu = find_account(domain, username, TRUE))) {
/* We could not find user, let's exit */
ast_log(LOG_ERROR, "Can't allocate temporary account for '%s@%s'\n", username, domain);
pbx_builtin_setvar_helper(chan, "MVM_RECORD_STATUS", "FAILED");
return 0;
}
/* Setup pre-file if appropriate */
if (strcmp(vmu->domain, "localhost"))
snprintf(ext_context, sizeof(ext_context), "%s@%s", username, vmu->domain);
else
ast_copy_string(ext_context, vmu->domain, sizeof(ext_context));
/* The meat of recording the message... All the announcements and beeps have been played*/
if (ast_strlen_zero(vmu->attachfmt))
ast_copy_string(fmt, default_vmformat, sizeof(fmt));
else
ast_copy_string(fmt, vmu->attachfmt, sizeof(fmt));
if (ast_strlen_zero(fmt)) {
ast_log(LOG_WARNING, "No format for saving voicemail? Default %s\n", default_vmformat);
pbx_builtin_setvar_helper(chan, "MVM_RECORD_STATUS", "FAILED");
return res;
}
userdir = check_dirpath(tmpdir, sizeof(tmpdir), vmu->domain, username, "tmp");
/* If we have no user directory, use generic temporary directory */
if (!userdir) {
create_dirpath(tmpdir, sizeof(tmpdir), "0000_minivm_temp", "mediafiles", "");
ast_debug(3, "Creating temporary directory %s\n", tmpdir);
}
snprintf(tmptxtfile, sizeof(tmptxtfile), "%s/XXXXXX", tmpdir);
/* XXX This file needs to be in temp directory */
txtdes = mkstemp(tmptxtfile);
if (txtdes < 0) {
ast_log(LOG_ERROR, "Unable to create message file %s: %s\n", tmptxtfile, strerror(errno));
res = ast_streamfile(chan, "vm-mailboxfull", chan->language);
if (!res)
res = ast_waitstream(chan, "");
pbx_builtin_setvar_helper(chan, "MVM_RECORD_STATUS", "FAILED");
return res;
}
if (res >= 0) {
/* Unless we're *really* silent, try to send the beep */
res = ast_streamfile(chan, "beep", chan->language);
if (!res)
res = ast_waitstream(chan, "");
}
/* OEJ XXX Maybe this can be turned into a log file? Hmm. */
/* Store information */
ast_debug(2, "Open file for metadata: %s\n", tmptxtfile);
res = play_record_review(chan, NULL, tmptxtfile, global_vmmaxmessage, fmt, 1, vmu, &duration, &sound_duration, NULL, options->record_gain);
txt = fdopen(txtdes, "w+");
if (!txt) {
ast_log(LOG_WARNING, "Error opening text file for output\n");
} else {
struct ast_tm tm;
struct timeval now = ast_tvnow();
char timebuf[30];
char logbuf[BUFSIZ];
get_date(date, sizeof(date));
ast_localtime(&now, &tm, NULL);
ast_strftime(timebuf, sizeof(timebuf), "%H:%M:%S", &tm);
ast_callerid_merge(callerid, sizeof(callerid),
S_COR(chan->caller.id.name.valid, chan->caller.id.name.str, NULL),
S_COR(chan->caller.id.number.valid, chan->caller.id.number.str, NULL),
"Unknown");
snprintf(logbuf, sizeof(logbuf),
/* "Mailbox:domain:macrocontext:exten:priority:callerchan:callerid:origdate:origtime:duration:durationstatus:accountcode" */
"%s:%s:%s:%s:%d:%s:%s:%s:%s:%d:%s:%s\n",
username,
chan->context,
chan->macrocontext,
chan->exten,
chan->priority,
chan->name,
callerid,
date,
timebuf,
duration,
duration < global_vmminmessage ? "IGNORED" : "OK",
vmu->accountcode
);
fprintf(txt, "%s", logbuf);
if (minivmlogfile) {
ast_mutex_lock(&minivmloglock);
fprintf(minivmlogfile, "%s", logbuf);
ast_mutex_unlock(&minivmloglock);
}
if (sound_duration < global_vmminmessage) {
ast_verb(3, "Recording was %d seconds long but needs to be at least %d - abandoning\n", sound_duration, global_vmminmessage);
fclose(txt);
ast_filedelete(tmptxtfile, NULL);
unlink(tmptxtfile);
pbx_builtin_setvar_helper(chan, "MVM_RECORD_STATUS", "FAILED");
return 0;
}
fclose(txt); /* Close log file */
if (ast_fileexists(tmptxtfile, NULL, NULL) <= 0) {
ast_debug(1, "The recorded media file is gone, so we should remove the .txt file too!\n");
unlink(tmptxtfile);
pbx_builtin_setvar_helper(chan, "MVM_RECORD_STATUS", "FAILED");
if(ast_test_flag(vmu, MVM_ALLOCED))
free_user(vmu);
return 0;
}
/* Set channel variables for the notify application */
pbx_builtin_setvar_helper(chan, "MVM_FILENAME", tmptxtfile);
snprintf(timebuf, sizeof(timebuf), "%d", duration);
pbx_builtin_setvar_helper(chan, "MVM_DURATION", timebuf);
pbx_builtin_setvar_helper(chan, "MVM_FORMAT", fmt);
}
global_stats.lastreceived = ast_tvnow();
global_stats.receivedmessages++;
#if 0
/* Go ahead and delete audio files from system, they're not needed any more */
if (ast_fileexists(tmptxtfile, NULL, NULL) <= 0) {
ast_filedelete(tmptxtfile, NULL);
		/* Note: if this block is ever re-enabled, keep ast_debug rather than ast_log */
		ast_debug(2, "-_-_- Deleted audio file after notification :: %s\n", tmptxtfile);
}
#endif
if (res > 0)
res = 0;
if(ast_test_flag(vmu, MVM_ALLOCED))
free_user(vmu);
pbx_builtin_setvar_helper(chan, "MVM_RECORD_STATUS", "SUCCESS");
return res;
}
/*!\internal
* \brief Queue a message waiting event */
static void queue_mwi_event(const char *mbx, const char *ctx, int urgent, int new, int old)
{
struct ast_event *event;
char *mailbox, *context;
mailbox = ast_strdupa(mbx);
context = ast_strdupa(ctx);
if (ast_strlen_zero(context)) {
context = "default";
}
if (!(event = ast_event_new(AST_EVENT_MWI,
AST_EVENT_IE_MAILBOX, AST_EVENT_IE_PLTYPE_STR, mailbox,
AST_EVENT_IE_CONTEXT, AST_EVENT_IE_PLTYPE_STR, context,
AST_EVENT_IE_NEWMSGS, AST_EVENT_IE_PLTYPE_UINT, (new+urgent),
AST_EVENT_IE_OLDMSGS, AST_EVENT_IE_PLTYPE_UINT, old,
AST_EVENT_IE_END))) {
return;
}
ast_event_queue_and_cache(event);
}
/*!\internal
 * \brief Send MWI using internal Asterisk event subsystem */
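/*
 * Dialplan usage sketch (argument order taken from the parsing below;
 * the application is assumed to be registered as MinivmMWI elsewhere
 * in this file):
 *
 *	exten => s,1,MinivmMWI(1234@example.com,1,2,0)
 *
 * where the three trailing arguments are the urgent, new and old
 * message counts.
 */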
static int minivm_mwi_exec(struct ast_channel *chan, const char *data)
{
int argc;
char *argv[4];
int res = 0;
char *tmpptr;
char tmp[PATH_MAX];
char *mailbox;
char *domain;
if (ast_strlen_zero(data)) {
ast_log(LOG_ERROR, "Minivm needs at least an account argument \n");
return -1;
}
tmpptr = ast_strdupa((char *)data);
argc = ast_app_separate_args(tmpptr, ',', argv, ARRAY_LEN(argv));
if (argc < 4) {
ast_log(LOG_ERROR, "%d arguments passed to MiniVM_MWI, need 4.\n", argc);
return -1;
}
ast_copy_string(tmp, argv[0], sizeof(tmp));
mailbox = tmp;
domain = strchr(tmp, '@');
if (domain) {
*domain = '\0';
domain++;
}
if (ast_strlen_zero(domain) || ast_strlen_zero(mailbox)) {
ast_log(LOG_ERROR, "Need mailbox@context as argument. Sorry. Argument 0 %s\n", argv[0]);
return -1;
}
queue_mwi_event(mailbox, domain, atoi(argv[1]), atoi(argv[2]), atoi(argv[3]));
return res;
}
/*!\internal
* \brief Notify voicemail account owners - either generic template or user specific */
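/*
 * Dialplan usage sketch (assumes the application is registered as
 * MinivmNotify elsewhere in this file):
 *
 *	exten => s,1,MinivmNotify(1234@example.com,template-example)
 *
 * The template name is optional; the recording to announce is read
 * from the MVM_FILENAME channel variable set by the record application.
 */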
static int minivm_notify_exec(struct ast_channel *chan, const char *data)
{
int argc;
char *argv[2];
int res = 0;
char tmp[PATH_MAX];
char *domain;
char *tmpptr;
struct minivm_account *vmu;
char *username;
const char *template = "";
const char *filename;
const char *format;
const char *duration_string;
if (ast_strlen_zero(data)) {
ast_log(LOG_ERROR, "Minivm needs at least an account argument \n");
return -1;
}
tmpptr = ast_strdupa((char *)data);
argc = ast_app_separate_args(tmpptr, ',', argv, ARRAY_LEN(argv));
if (argc == 2 && !ast_strlen_zero(argv[1]))
template = argv[1];
ast_copy_string(tmp, argv[0], sizeof(tmp));
username = tmp;
domain = strchr(tmp, '@');
if (domain) {
*domain = '\0';
domain++;
}
if (ast_strlen_zero(domain) || ast_strlen_zero(username)) {
ast_log(LOG_ERROR, "Need username@domain as argument. Sorry. Argument 0 %s\n", argv[0]);
return -1;
}
if(!(vmu = find_account(domain, username, TRUE))) {
/* We could not find user, let's exit */
ast_log(LOG_WARNING, "Could not allocate temporary memory for '%s@%s'\n", username, domain);
pbx_builtin_setvar_helper(chan, "MVM_NOTIFY_STATUS", "FAILED");
return -1;
}
ast_channel_lock(chan);
if ((filename = pbx_builtin_getvar_helper(chan, "MVM_FILENAME"))) {
filename = ast_strdupa(filename);
}
ast_channel_unlock(chan);
/* Notify of new message to e-mail and pager */
if (!ast_strlen_zero(filename)) {
ast_channel_lock(chan);
if ((format = pbx_builtin_getvar_helper(chan, "MVM_FORMAT"))) {
format = ast_strdupa(format);
}
if ((duration_string = pbx_builtin_getvar_helper(chan, "MVM_DURATION"))) {
duration_string = ast_strdupa(duration_string);
}
ast_channel_unlock(chan);
		res = notify_new_message(chan, template, vmu, filename, duration_string ? atoi(duration_string) : 0,
			S_OR(format, ""),
S_COR(chan->caller.id.number.valid, chan->caller.id.number.str, NULL),
S_COR(chan->caller.id.name.valid, chan->caller.id.name.str, NULL));
}
pbx_builtin_setvar_helper(chan, "MVM_NOTIFY_STATUS", res == 0 ? "SUCCESS" : "FAILED");
if(ast_test_flag(vmu, MVM_ALLOCED))
free_user(vmu);
/* Ok, we're ready to rock and roll. Return to dialplan */
return res;
}
/*!\internal
* \brief Dialplan function to record voicemail */
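/*
 * Dialplan usage sketch (assumes the application is registered as
 * MinivmRecord elsewhere in this file):
 *
 *	exten => s,1,MinivmRecord(1234@example.com)
 *
 * An optional second argument carries the option flags parsed via
 * minivm_app_options below (silent operation, busy/unavailable
 * greeting selection, record gain).
 */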
static int minivm_record_exec(struct ast_channel *chan, const char *data)
{
int res = 0;
char *tmp;
struct leave_vm_options leave_options;
int argc;
char *argv[2];
struct ast_flags flags = { 0 };
char *opts[OPT_ARG_ARRAY_SIZE];
memset(&leave_options, 0, sizeof(leave_options));
/* Answer channel if it's not already answered */
if (chan->_state != AST_STATE_UP)
ast_answer(chan);
if (ast_strlen_zero(data)) {
ast_log(LOG_ERROR, "Minivm needs at least an account argument \n");
return -1;
}
tmp = ast_strdupa((char *)data);
argc = ast_app_separate_args(tmp, ',', argv, ARRAY_LEN(argv));
if (argc == 2) {
if (ast_app_parse_options(minivm_app_options, &flags, opts, argv[1])) {
return -1;
}
ast_copy_flags(&leave_options, &flags, OPT_SILENT | OPT_BUSY_GREETING | OPT_UNAVAIL_GREETING );
if (ast_test_flag(&flags, OPT_RECORDGAIN)) {
int gain;
if (sscanf(opts[OPT_ARG_RECORDGAIN], "%30d", &gain) != 1) {
ast_log(LOG_WARNING, "Invalid value '%s' provided for record gain option\n", opts[OPT_ARG_RECORDGAIN]);
return -1;
} else
leave_options.record_gain = (signed char) gain;
}
}
	/* Now run the application and good luck to you! */
res = leave_voicemail(chan, argv[0], &leave_options);
	if (res == ERROR_LOCK_PATH) {
		ast_log(LOG_ERROR, "Could not leave voicemail. The path is already locked.\n");
		pbx_builtin_setvar_helper(chan, "MVM_RECORD_STATUS", "FAILED");
		res = 0;
	} else {
		pbx_builtin_setvar_helper(chan, "MVM_RECORD_STATUS", "SUCCESS");
	}
return res;
}
/*!\internal
* \brief Play voicemail prompts - either generic or user specific */
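/*
 * Dialplan usage sketch (assumes the application is registered as
 * MinivmGreet elsewhere in this file):
 *
 *	exten => s,1,MinivmGreet(1234@example.com)
 *
 * Optional flags in a second argument select the busy or unavailable
 * greeting; a recorded temporary greeting overrides both.
 */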
static int minivm_greet_exec(struct ast_channel *chan, const char *data)
{
struct leave_vm_options leave_options = { 0, '\0'};
int argc;
char *argv[2];
struct ast_flags flags = { 0 };
char *opts[OPT_ARG_ARRAY_SIZE];
int res = 0;
int ausemacro = 0;
int ousemacro = 0;
int ouseexten = 0;
char tmp[PATH_MAX];
char dest[PATH_MAX];
char prefile[PATH_MAX] = "";
char tempfile[PATH_MAX] = "";
char ext_context[256] = "";
char *domain;
char ecodes[16] = "#";
char *tmpptr;
struct minivm_account *vmu;
	char *username;
if (ast_strlen_zero(data)) {
ast_log(LOG_ERROR, "Minivm needs at least an account argument \n");
return -1;
}
tmpptr = ast_strdupa((char *)data);
argc = ast_app_separate_args(tmpptr, ',', argv, ARRAY_LEN(argv));
if (argc == 2) {
if (ast_app_parse_options(minivm_app_options, &flags, opts, argv[1]))
return -1;
ast_copy_flags(&leave_options, &flags, OPT_SILENT | OPT_BUSY_GREETING | OPT_UNAVAIL_GREETING );
}
ast_copy_string(tmp, argv[0], sizeof(tmp));
username = tmp;
domain = strchr(tmp, '@');
if (domain) {
*domain = '\0';
domain++;
}
if (ast_strlen_zero(domain) || ast_strlen_zero(username)) {
ast_log(LOG_ERROR, "Need username@domain as argument. Sorry. Argument: %s\n", argv[0]);
return -1;
}
ast_debug(1, "Trying to find configuration for user %s in domain %s\n", username, domain);
if (!(vmu = find_account(domain, username, TRUE))) {
ast_log(LOG_ERROR, "Could not allocate memory. \n");
return -1;
}
/* Answer channel if it's not already answered */
if (chan->_state != AST_STATE_UP)
ast_answer(chan);
/* Setup pre-file if appropriate */
if (strcmp(vmu->domain, "localhost"))
snprintf(ext_context, sizeof(ext_context), "%s@%s", username, vmu->domain);
else
ast_copy_string(ext_context, vmu->domain, sizeof(ext_context));
if (ast_test_flag(&leave_options, OPT_BUSY_GREETING)) {
res = check_dirpath(dest, sizeof(dest), vmu->domain, username, "busy");
if (res)
snprintf(prefile, sizeof(prefile), "%s%s/%s/busy", MVM_SPOOL_DIR, vmu->domain, username);
} else if (ast_test_flag(&leave_options, OPT_UNAVAIL_GREETING)) {
res = check_dirpath(dest, sizeof(dest), vmu->domain, username, "unavail");
if (res)
snprintf(prefile, sizeof(prefile), "%s%s/%s/unavail", MVM_SPOOL_DIR, vmu->domain, username);
}
/* Check for temporary greeting - it overrides busy and unavail */
snprintf(tempfile, sizeof(tempfile), "%s%s/%s/temp", MVM_SPOOL_DIR, vmu->domain, username);
if (!(res = check_dirpath(dest, sizeof(dest), vmu->domain, username, "temp"))) {
ast_debug(2, "Temporary message directory does not exist, using default (%s)\n", tempfile);
ast_copy_string(prefile, tempfile, sizeof(prefile));
}
ast_debug(2, "Preparing to play message ...\n");
/* Check current or macro-calling context for special extensions */
if (ast_test_flag(vmu, MVM_OPERATOR)) {
if (!ast_strlen_zero(vmu->exit)) {
if (ast_exists_extension(chan, vmu->exit, "o", 1,
S_COR(chan->caller.id.number.valid, chan->caller.id.number.str, NULL))) {
strncat(ecodes, "0", sizeof(ecodes) - strlen(ecodes) - 1);
ouseexten = 1;
}
} else if (ast_exists_extension(chan, chan->context, "o", 1,
S_COR(chan->caller.id.number.valid, chan->caller.id.number.str, NULL))) {
strncat(ecodes, "0", sizeof(ecodes) - strlen(ecodes) - 1);
ouseexten = 1;
}
else if (!ast_strlen_zero(chan->macrocontext)
&& ast_exists_extension(chan, chan->macrocontext, "o", 1,
S_COR(chan->caller.id.number.valid, chan->caller.id.number.str, NULL))) {
strncat(ecodes, "0", sizeof(ecodes) - strlen(ecodes) - 1);
ousemacro = 1;
}
}
if (!ast_strlen_zero(vmu->exit)) {
if (ast_exists_extension(chan, vmu->exit, "a", 1,
S_COR(chan->caller.id.number.valid, chan->caller.id.number.str, NULL))) {
strncat(ecodes, "*", sizeof(ecodes) - strlen(ecodes) - 1);
}
} else if (ast_exists_extension(chan, chan->context, "a", 1,
S_COR(chan->caller.id.number.valid, chan->caller.id.number.str, NULL))) {
strncat(ecodes, "*", sizeof(ecodes) - strlen(ecodes) - 1);
} else if (!ast_strlen_zero(chan->macrocontext)
&& ast_exists_extension(chan, chan->macrocontext, "a", 1,
S_COR(chan->caller.id.number.valid, chan->caller.id.number.str, NULL))) {
strncat(ecodes, "*", sizeof(ecodes) - strlen(ecodes) - 1);
ausemacro = 1;
}
res = 0; /* Reset */
/* Play the beginning intro if desired */
if (!ast_strlen_zero(prefile)) {
if (ast_streamfile(chan, prefile, chan->language) > -1)
res = ast_waitstream(chan, ecodes);
} else {
ast_debug(2, "%s doesn't exist, doing what we can\n", prefile);
res = invent_message(chan, vmu->domain, username, ast_test_flag(&leave_options, OPT_BUSY_GREETING), ecodes);
}
if (res < 0) {
ast_debug(2, "Hang up during prefile playback\n");
pbx_builtin_setvar_helper(chan, "MVM_GREET_STATUS", "FAILED");
if(ast_test_flag(vmu, MVM_ALLOCED))
free_user(vmu);
return -1;
}
if (res == '#') {
/* On a '#' we skip the instructions */
ast_set_flag(&leave_options, OPT_SILENT);
res = 0;
}
if (!res && !ast_test_flag(&leave_options, OPT_SILENT)) {
res = ast_streamfile(chan, SOUND_INTRO, chan->language);
if (!res)
res = ast_waitstream(chan, ecodes);
if (res == '#') {
ast_set_flag(&leave_options, OPT_SILENT);
res = 0;
}
}
if (res > 0)
ast_stopstream(chan);
/* Check for a '*' here in case the caller wants to escape from voicemail to something
other than the operator -- an automated attendant or mailbox login for example */
if (res == '*') {
chan->exten[0] = 'a';
chan->exten[1] = '\0';
if (!ast_strlen_zero(vmu->exit)) {
ast_copy_string(chan->context, vmu->exit, sizeof(chan->context));
} else if (ausemacro && !ast_strlen_zero(chan->macrocontext)) {
ast_copy_string(chan->context, chan->macrocontext, sizeof(chan->context));
}
chan->priority = 0;
pbx_builtin_setvar_helper(chan, "MVM_GREET_STATUS", "USEREXIT");
res = 0;
} else if (res == '0') { /* Check for a '0' here */
if(ouseexten || ousemacro) {
chan->exten[0] = 'o';
chan->exten[1] = '\0';
if (!ast_strlen_zero(vmu->exit)) {
ast_copy_string(chan->context, vmu->exit, sizeof(chan->context));
} else if (ousemacro && !ast_strlen_zero(chan->macrocontext)) {
ast_copy_string(chan->context, chan->macrocontext, sizeof(chan->context));
}
ast_play_and_wait(chan, "transfer");
chan->priority = 0;
pbx_builtin_setvar_helper(chan, "MVM_GREET_STATUS", "USEREXIT");
}
res = 0;
} else if (res < 0) {
pbx_builtin_setvar_helper(chan, "MVM_GREET_STATUS", "FAILED");
res = -1;
} else
pbx_builtin_setvar_helper(chan, "MVM_GREET_STATUS", "SUCCESS");
if(ast_test_flag(vmu, MVM_ALLOCED))
free_user(vmu);
/* Ok, we're ready to rock and roll. Return to dialplan */
return res;
}
/*!\internal
* \brief Dialplan application to delete voicemail */
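/*
 * Dialplan usage sketch (assumes the application is registered as
 * MinivmDelete elsewhere in this file):
 *
 *	exten => s,1,MinivmDelete()
 *
 * With no argument, the filename is taken from the MVM_FILENAME
 * channel variable set by the record application.
 */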
static int minivm_delete_exec(struct ast_channel *chan, const char *data)
{
int res = 0;
char filename[BUFSIZ];
if (!ast_strlen_zero(data)) {
ast_copy_string(filename, (char *) data, sizeof(filename));
	} else {
		const char *fname;

		ast_channel_lock(chan);
		fname = pbx_builtin_getvar_helper(chan, "MVM_FILENAME");
		ast_copy_string(filename, S_OR(fname, ""), sizeof(filename));
		ast_channel_unlock(chan);
}
if (ast_strlen_zero(filename)) {
ast_log(LOG_ERROR, "No filename given in application arguments or channel variable MVM_FILENAME\n");
return res;
}
/* Go ahead and delete audio files from system, they're not needed any more */
/* We should look for both audio and text files here */
if (ast_fileexists(filename, NULL, NULL) > 0) {
res = vm_delete(filename);
if (res) {
ast_debug(2, "Can't delete file: %s\n", filename);
pbx_builtin_setvar_helper(chan, "MVM_DELETE_STATUS", "FAILED");
} else {
ast_debug(2, "Deleted voicemail file :: %s \n", filename);
pbx_builtin_setvar_helper(chan, "MVM_DELETE_STATUS", "SUCCESS");
}
} else {
ast_debug(2, "Filename does not exist: %s\n", filename);
pbx_builtin_setvar_helper(chan, "MVM_DELETE_STATUS", "FAILED");
}
return res;
}
/*! \brief Record specific messages for voicemail account */
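/*
 * Dialplan usage sketch (assumes the application is registered as
 * MinivmAccMess elsewhere in this file):
 *
 *	exten => s,1,MinivmAccMess(1234@example.com,<option>)
 *
 * Exactly one option flag is required, selecting which prompt to
 * record: the busy, unavailable, temporary or name greeting.
 */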
static int minivm_accmess_exec(struct ast_channel *chan, const char *data)
{
int argc = 0;
char *argv[2];
char filename[PATH_MAX];
char tmp[PATH_MAX];
char *domain;
char *tmpptr = NULL;
struct minivm_account *vmu;
char *username;
struct ast_flags flags = { 0 };
char *opts[OPT_ARG_ARRAY_SIZE];
int error = FALSE;
char *message = NULL;
char *prompt = NULL;
	int duration = 0;
if (ast_strlen_zero(data)) {
ast_log(LOG_ERROR, "MinivmAccmess needs at least two arguments: account and option\n");
error = TRUE;
} else {
tmpptr = ast_strdupa((char *)data);
argc = ast_app_separate_args(tmpptr, ',', argv, ARRAY_LEN(argv));
}
	if (argc <= 1) {
ast_log(LOG_ERROR, "MinivmAccmess needs at least two arguments: account and option\n");
error = TRUE;
}
if (!error && strlen(argv[1]) > 1) {
ast_log(LOG_ERROR, "MinivmAccmess can only handle one option at a time. Bad option string: %s\n", argv[1]);
error = TRUE;
}
if (!error && ast_app_parse_options(minivm_accmess_options, &flags, opts, argv[1])) {
ast_log(LOG_ERROR, "Can't parse option %s\n", argv[1]);
error = TRUE;
}
if (error) {
pbx_builtin_setvar_helper(chan, "MVM_ACCMESS_STATUS", "FAILED");
return -1;
}
ast_copy_string(tmp, argv[0], sizeof(tmp));
username = tmp;
domain = strchr(tmp, '@');
if (domain) {
*domain = '\0';
domain++;
}
if (ast_strlen_zero(domain) || ast_strlen_zero(username)) {
ast_log(LOG_ERROR, "Need username@domain as argument. Sorry. Argument 0 %s\n", argv[0]);
pbx_builtin_setvar_helper(chan, "MVM_ACCMESS_STATUS", "FAILED");
return -1;
}
if(!(vmu = find_account(domain, username, TRUE))) {
/* We could not find user, let's exit */
ast_log(LOG_WARNING, "Could not allocate temporary memory for '%s@%s'\n", username, domain);
pbx_builtin_setvar_helper(chan, "MVM_ACCMESS_STATUS", "FAILED");
return -1;
}
/* Answer channel if it's not already answered */
if (chan->_state != AST_STATE_UP)
ast_answer(chan);
/* Here's where the action is */
if (ast_test_flag(&flags, OPT_BUSY_GREETING)) {
message = "busy";
prompt = "vm-rec-busy";
} else if (ast_test_flag(&flags, OPT_UNAVAIL_GREETING)) {
message = "unavailable";
prompt = "vm-rec-unv";
} else if (ast_test_flag(&flags, OPT_TEMP_GREETING)) {
message = "temp";
prompt = "vm-rec-temp";
} else if (ast_test_flag(&flags, OPT_NAME_GREETING)) {
message = "greet";
prompt = "vm-rec-name";
}
	snprintf(filename, sizeof(filename), "%s%s/%s/%s", MVM_SPOOL_DIR, vmu->domain, vmu->username, message);
/* Maybe we should check the result of play_record_review ? */
play_record_review(chan, prompt, filename, global_maxgreet, default_vmformat, 0, vmu, &duration, NULL, NULL, FALSE);
ast_debug(1, "Recorded new %s message in %s (duration %d)\n", message, filename, duration);
if(ast_test_flag(vmu, MVM_ALLOCED))
free_user(vmu);
pbx_builtin_setvar_helper(chan, "MVM_ACCMESS_STATUS", "SUCCESS");
/* Ok, we're ready to rock and roll. Return to dialplan */
return 0;
}
/*! \brief Append new mailbox to mailbox list from configuration file */
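/*
 * A static account definition looks roughly like this (a sketch; the
 * option names match the parser below, while the section naming and
 * values are illustrative):
 *
 *	[1234@example.com]
 *	fullname = Example User
 *	email = user@example.com
 *	etemplate = template-example
 *	timezone = central
 *	setvar = FAVORITE_COLOR=blue
 */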
static int create_vmaccount(char *name, struct ast_variable *var, int realtime)
{
struct minivm_account *vmu;
char *domain;
char *username;
char accbuf[BUFSIZ];
ast_debug(3, "Creating %s account for [%s]\n", realtime ? "realtime" : "static", name);
ast_copy_string(accbuf, name, sizeof(accbuf));
username = accbuf;
domain = strchr(accbuf, '@');
if (domain) {
*domain = '\0';
domain++;
}
if (ast_strlen_zero(domain)) {
ast_log(LOG_ERROR, "No domain given for mini-voicemail account %s. Not configured.\n", name);
return 0;
}
ast_debug(3, "Creating static account for user %s domain %s\n", username, domain);
/* Allocate user account */
vmu = ast_calloc(1, sizeof(*vmu));
if (!vmu)
return 0;
ast_copy_string(vmu->domain, domain, sizeof(vmu->domain));
ast_copy_string(vmu->username, username, sizeof(vmu->username));
populate_defaults(vmu);
ast_debug(3, "...Configuring account %s\n", name);
while (var) {
ast_debug(3, "Configuring %s = \"%s\" for account %s\n", var->name, var->value, name);
if (!strcasecmp(var->name, "serveremail")) {
ast_copy_string(vmu->serveremail, var->value, sizeof(vmu->serveremail));
} else if (!strcasecmp(var->name, "email")) {
ast_copy_string(vmu->email, var->value, sizeof(vmu->email));
} else if (!strcasecmp(var->name, "accountcode")) {
ast_copy_string(vmu->accountcode, var->value, sizeof(vmu->accountcode));
} else if (!strcasecmp(var->name, "pincode")) {
ast_copy_string(vmu->pincode, var->value, sizeof(vmu->pincode));
} else if (!strcasecmp(var->name, "domain")) {
ast_copy_string(vmu->domain, var->value, sizeof(vmu->domain));
} else if (!strcasecmp(var->name, "language")) {
ast_copy_string(vmu->language, var->value, sizeof(vmu->language));
} else if (!strcasecmp(var->name, "timezone")) {
ast_copy_string(vmu->zonetag, var->value, sizeof(vmu->zonetag));
} else if (!strcasecmp(var->name, "externnotify")) {
ast_copy_string(vmu->externnotify, var->value, sizeof(vmu->externnotify));
} else if (!strcasecmp(var->name, "etemplate")) {
ast_copy_string(vmu->etemplate, var->value, sizeof(vmu->etemplate));
} else if (!strcasecmp(var->name, "ptemplate")) {
ast_copy_string(vmu->ptemplate, var->value, sizeof(vmu->ptemplate));
} else if (!strcasecmp(var->name, "fullname")) {
ast_copy_string(vmu->fullname, var->value, sizeof(vmu->fullname));
} else if (!strcasecmp(var->name, "setvar")) {
char *varval;
char *varname = ast_strdupa(var->value);
struct ast_variable *tmpvar;
if ((varval = strchr(varname, '='))) {
*varval = '\0';
varval++;
if ((tmpvar = ast_variable_new(varname, varval, ""))) {
tmpvar->next = vmu->chanvars;
vmu->chanvars = tmpvar;
}
}
} else if (!strcasecmp(var->name, "pager")) {
ast_copy_string(vmu->pager, var->value, sizeof(vmu->pager));
} else if (!strcasecmp(var->name, "volgain")) {
sscanf(var->value, "%30lf", &vmu->volgain);
} else {
ast_log(LOG_ERROR, "Unknown configuration option for minivm account %s : %s\n", name, var->name);
}
var = var->next;
}
ast_debug(3, "...Linking account %s\n", name);
AST_LIST_LOCK(&minivm_accounts);
AST_LIST_INSERT_TAIL(&minivm_accounts, vmu, list);
AST_LIST_UNLOCK(&minivm_accounts);
global_stats.voicemailaccounts++;
ast_debug(2, "MVM :: Created account %s@%s - tz %s etemplate %s %s\n", username, domain, ast_strlen_zero(vmu->zonetag) ? "" : vmu->zonetag, ast_strlen_zero(vmu->etemplate) ? "" : vmu->etemplate, realtime ? "(realtime)" : "");
return 0;
}
/*! \brief Free Mini Voicemail timezone */
static void free_zone(struct minivm_zone *z)
{
ast_free(z);
}
/*! \brief Clear list of timezones */
static void timezone_destroy_list(void)
{
struct minivm_zone *this;
AST_LIST_LOCK(&minivm_zones);
while ((this = AST_LIST_REMOVE_HEAD(&minivm_zones, list)))
free_zone(this);
AST_LIST_UNLOCK(&minivm_zones);
}
/*! \brief Add time zone to memory list */
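/*
 * A timezone definition has the form "name = timezone|message format",
 * e.g. (illustrative): central = America/Chicago|'vm-received' q 'digits/at' IMp
 */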
static int timezone_add(const char *zonename, const char *config)
{
struct minivm_zone *newzone;
char *msg_format, *timezone_str;
newzone = ast_calloc(1, sizeof(*newzone));
if (newzone == NULL)
return 0;
msg_format = ast_strdupa(config);
timezone_str = strsep(&msg_format, "|");
if (!msg_format) {
ast_log(LOG_WARNING, "Invalid timezone definition : %s\n", zonename);
ast_free(newzone);
return 0;
}
ast_copy_string(newzone->name, zonename, sizeof(newzone->name));
ast_copy_string(newzone->timezone, timezone_str, sizeof(newzone->timezone));
ast_copy_string(newzone->msg_format, msg_format, sizeof(newzone->msg_format));
AST_LIST_LOCK(&minivm_zones);
AST_LIST_INSERT_TAIL(&minivm_zones, newzone, list);
AST_LIST_UNLOCK(&minivm_zones);
global_stats.timezones++;
return 0;
}
/*! \brief Read message template from file */
static char *message_template_parse_filebody(const char *filename)
{
char buf[BUFSIZ * 6];
char readbuf[BUFSIZ];
char filenamebuf[BUFSIZ];
char *writepos;
char *messagebody;
FILE *fi;
int lines = 0;
if (ast_strlen_zero(filename))
return NULL;
if (*filename == '/')
ast_copy_string(filenamebuf, filename, sizeof(filenamebuf));
else
snprintf(filenamebuf, sizeof(filenamebuf), "%s/%s", ast_config_AST_CONFIG_DIR, filename);
if (!(fi = fopen(filenamebuf, "r"))) {
ast_log(LOG_ERROR, "Can't read message template from file: %s\n", filenamebuf);
return NULL;
}
writepos = buf;
while (fgets(readbuf, sizeof(readbuf), fi)) {
lines ++;
if (writepos != buf) {
*writepos = '\n'; /* Replace EOL with new line */
writepos++;
}
ast_copy_string(writepos, readbuf, sizeof(buf) - (writepos - buf));
writepos += strlen(readbuf) - 1;
}
fclose(fi);
messagebody = ast_calloc(1, strlen(buf) + 1);
if (!messagebody)
return NULL;
ast_copy_string(messagebody, buf, strlen(buf) + 1);
ast_debug(4, "---> Size of allocation %d\n", (int) strlen(buf) + 1);
ast_debug(4, "---> Done reading message template : \n%s\n---- END message template--- \n", messagebody);
return messagebody;
}
/*! \brief Parse emailbody template from configuration file */
static char *message_template_parse_emailbody(const char *configuration)
{
char *tmpread, *tmpwrite;
char *emailbody = ast_strdup(configuration);
/* substitute the literal strings \t and \n with the appropriate characters */
tmpread = tmpwrite = emailbody;
while ((tmpwrite = strchr(tmpread,'\\'))) {
int len = strlen("\n");
switch (tmpwrite[1]) {
case 'n':
memmove(tmpwrite + len, tmpwrite + 2, strlen(tmpwrite + 2) + 1);
strncpy(tmpwrite, "\n", len);
break;
case 't':
memmove(tmpwrite + len, tmpwrite + 2, strlen(tmpwrite + 2) + 1);
strncpy(tmpwrite, "\t", len);
break;
default:
ast_log(LOG_NOTICE, "Substitution routine does not support this character: %c\n", tmpwrite[1]);
}
tmpread = tmpwrite + len;
}
return emailbody;
}
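/* Illustrative configuration value (a sketch; the variable name is just an
example): emailbody=Hi ${MVM_USERNAME},\nYou have a new voicemail.\n
The literal two-character sequences \n and \t are converted into real
newline/tab characters by the substitution loop above. */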
/*! \brief Apply general configuration options */
static int apply_general_options(struct ast_variable *var)
{
int error = 0;
while (var) {
/* Mail command */
if (!strcmp(var->name, "mailcmd")) {
ast_copy_string(global_mailcmd, var->value, sizeof(global_mailcmd)); /* User setting */
} else if (!strcmp(var->name, "maxgreet")) {
global_maxgreet = atoi(var->value);
} else if (!strcmp(var->name, "maxsilence")) {
global_maxsilence = atoi(var->value);
if (global_maxsilence > 0)
global_maxsilence *= 1000;
} else if (!strcmp(var->name, "logfile")) {
if (!ast_strlen_zero(var->value) ) {
if(*(var->value) == '/')
ast_copy_string(global_logfile, var->value, sizeof(global_logfile));
else
snprintf(global_logfile, sizeof(global_logfile), "%s/%s", ast_config_AST_LOG_DIR, var->value);
}
} else if (!strcmp(var->name, "externnotify")) {
/* External voicemail notify application */
ast_copy_string(global_externnotify, var->value, sizeof(global_externnotify));
} else if (!strcmp(var->name, "silencetreshold")) {
/* Silence threshold (the option name keeps its historical spelling) */
global_silencethreshold = atoi(var->value);
} else if (!strcmp(var->name, "maxmessage")) {
int x;
if (sscanf(var->value, "%30d", &x) == 1) {
global_vmmaxmessage = x;
} else {
error ++;
ast_log(LOG_WARNING, "Invalid max message time length\n");
}
} else if (!strcmp(var->name, "minmessage")) {
int x;
if (sscanf(var->value, "%30d", &x) == 1) {
global_vmminmessage = x;
if (global_maxsilence <= global_vmminmessage)
ast_log(LOG_WARNING, "maxsilence should be less than minmessage or you may get empty messages\n");
} else {
error ++;
ast_log(LOG_WARNING, "Invalid min message time length\n");
}
} else if (!strcmp(var->name, "format")) {
ast_copy_string(default_vmformat, var->value, sizeof(default_vmformat));
} else if (!strcmp(var->name, "review")) {
ast_set2_flag((&globalflags), ast_true(var->value), MVM_REVIEW);
} else if (!strcmp(var->name, "operator")) {
ast_set2_flag((&globalflags), ast_true(var->value), MVM_OPERATOR);
}
var = var->next;
}
return error;
}
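/* Illustrative [general] section handled by this parser (a sketch; the
values are examples only, the option names match the strcmp() calls above):
[general]
mailcmd=/usr/sbin/sendmail -t
maxmessage=180
minmessage=2
format=wav
review=yes
*/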
/*! \brief Load minivoicemail configuration */
static int load_config(int reload)
{
struct ast_config *cfg;
struct ast_variable *var;
char *cat;
const char *chanvar;
int error = 0;
struct minivm_template *template;
struct ast_flags config_flags = { reload ? CONFIG_FLAG_FILEUNCHANGED : 0 };
cfg = ast_config_load(VOICEMAIL_CONFIG, config_flags);
if (cfg == CONFIG_STATUS_FILEUNCHANGED) {
return 0;
} else if (cfg == CONFIG_STATUS_FILEINVALID) {
ast_log(LOG_ERROR, "Config file " VOICEMAIL_CONFIG " is in an invalid format. Aborting.\n");
return 0;
}
ast_mutex_lock(&minivmlock);
/* Destroy lists to reconfigure */
message_destroy_list(); /* Destroy list of voicemail message templates */
timezone_destroy_list(); /* Destroy list of timezones */
vmaccounts_destroy_list(); /* Destroy list of voicemail accounts */
ast_debug(2, "Destroyed memory objects...\n");
/* First, set some default settings */
global_externnotify[0] = '\0';
global_logfile[0] = '\0';
global_vmmaxmessage = 2000;
global_maxgreet = 2000;
global_vmminmessage = 0;
strcpy(global_mailcmd, SENDMAIL);
global_maxsilence = 0;
global_saydurationminfo = 2;
ast_copy_string(default_vmformat, "wav", sizeof(default_vmformat));
ast_set2_flag((&globalflags), FALSE, MVM_REVIEW);
ast_set2_flag((&globalflags), FALSE, MVM_OPERATOR);
/* Reset statistics */
memset(&global_stats, 0, sizeof(global_stats));
global_stats.reset = ast_tvnow();
global_silencethreshold = ast_dsp_get_threshold_from_settings(THRESHOLD_SILENCE);
/* Make sure we could load configuration file */
if (!cfg) {
ast_log(LOG_WARNING, "Failed to load configuration file. Module activated with default settings.\n");
ast_mutex_unlock(&minivmlock);
return 0;
}
ast_debug(2, "Loaded configuration file, now parsing\n");
/* General settings */
cat = ast_category_browse(cfg, NULL);
while (cat) {
ast_debug(3, "Found configuration section [%s]\n", cat);
if (!strcasecmp(cat, "general")) {
/* Parse the general options */
error += apply_general_options(ast_variable_browse(cfg, cat));
} else if (!strncasecmp(cat, "template-", 9)) {
/* Template */
char *name = cat + 9;
/* Now build and link template to list */
error += message_template_build(name, ast_variable_browse(cfg, cat));
} else {
var = ast_variable_browse(cfg, cat);
if (!strcasecmp(cat, "zonemessages")) {
/* Timezones in this context */
while (var) {
timezone_add(var->name, var->value);
var = var->next;
}
} else {
/* Create mailbox from this */
error += create_vmaccount(cat, var, FALSE);
}
}
/* Find next section in configuration file */
cat = ast_category_browse(cfg, cat);
}
/* Configure the default email template */
message_template_build("email-default", NULL);
template = message_template_find("email-default");
/* Load date format config for voicemail mail */
if ((chanvar = ast_variable_retrieve(cfg, "general", "emaildateformat")))
ast_copy_string(template->dateformat, chanvar, sizeof(template->dateformat));
if ((chanvar = ast_variable_retrieve(cfg, "general", "emailfromstring")))
ast_copy_string(template->fromaddress, chanvar, sizeof(template->fromaddress));
if ((chanvar = ast_variable_retrieve(cfg, "general", "emailaaddress")))
ast_copy_string(template->serveremail, chanvar, sizeof(template->serveremail));
if ((chanvar = ast_variable_retrieve(cfg, "general", "emailcharset")))
ast_copy_string(template->charset, chanvar, sizeof(template->charset));
if ((chanvar = ast_variable_retrieve(cfg, "general", "emailsubject")))
ast_copy_string(template->subject, chanvar, sizeof(template->subject));
if ((chanvar = ast_variable_retrieve(cfg, "general", "emailbody")))
template->body = message_template_parse_emailbody(chanvar);
template->attachment = TRUE;
message_template_build("pager-default", NULL);
template = message_template_find("pager-default");
if ((chanvar = ast_variable_retrieve(cfg, "general", "pagerfromstring")))
ast_copy_string(template->fromaddress, chanvar, sizeof(template->fromaddress));
if ((chanvar = ast_variable_retrieve(cfg, "general", "pageraddress")))
ast_copy_string(template->serveremail, chanvar, sizeof(template->serveremail));
if ((chanvar = ast_variable_retrieve(cfg, "general", "pagercharset")))
ast_copy_string(template->charset, chanvar, sizeof(template->charset));
if ((chanvar = ast_variable_retrieve(cfg, "general", "pagersubject")))
ast_copy_string(template->subject, chanvar,sizeof(template->subject));
if ((chanvar = ast_variable_retrieve(cfg, "general", "pagerbody")))
template->body = message_template_parse_emailbody(chanvar);
template->attachment = FALSE;
if (error)
ast_log(LOG_ERROR, "--- A total of %d errors found in mini-voicemail configuration\n", error);
ast_mutex_unlock(&minivmlock);
ast_config_destroy(cfg);
/* Close the log file if it's open; we reopen it below if logging is still enabled */
if (minivmlogfile) {
fclose(minivmlogfile);
minivmlogfile = NULL;
}
/* Open log file if it's enabled */
if (!ast_strlen_zero(global_logfile)) {
minivmlogfile = fopen(global_logfile, "a");
if (!minivmlogfile)
ast_log(LOG_ERROR, "Failed to open minivm log file %s : %s\n", global_logfile, strerror(errno));
else
ast_debug(3, "Opened log file %s\n", global_logfile);
}
return 0;
}
/*! \brief CLI routine for listing templates */
static char *handle_minivm_list_templates(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
{
struct minivm_template *this;
#define HVLT_OUTPUT_FORMAT "%-15s %-10s %-10s %-15.15s %-50s\n"
int count = 0;
switch (cmd) {
case CLI_INIT:
e->command = "minivm list templates";
e->usage =
"Usage: minivm list templates\n"
" Lists message templates for e-mail, paging and IM\n";
return NULL;
case CLI_GENERATE:
return NULL;
}
if (a->argc > 3)
return CLI_SHOWUSAGE;
AST_LIST_LOCK(&message_templates);
if (AST_LIST_EMPTY(&message_templates)) {
ast_cli(a->fd, "There are no message templates defined\n");
AST_LIST_UNLOCK(&message_templates);
return CLI_FAILURE;
}
ast_cli(a->fd, HVLT_OUTPUT_FORMAT, "Template name", "Charset", "Locale", "Attach media", "Subject");
ast_cli(a->fd, HVLT_OUTPUT_FORMAT, "-------------", "-------", "------", "------------", "-------");
AST_LIST_TRAVERSE(&message_templates, this, list) {
ast_cli(a->fd, HVLT_OUTPUT_FORMAT, this->name,
this->charset ? this->charset : "-",
this->locale ? this->locale : "-",
this->attachment ? "Yes" : "No",
this->subject ? this->subject : "-");
count++;
}
AST_LIST_UNLOCK(&message_templates);
ast_cli(a->fd, "\n * Total: %d minivoicemail message templates\n", count);
return CLI_SUCCESS;
}
static char *complete_minivm_show_users(const char *line, const char *word, int pos, int state)
{
int which = 0;
int wordlen;
struct minivm_account *vmu;
const char *domain = "";
/* 0 - minivm; 1 - list; 2 - accounts; 3 - for; 4 - <domain> */
if (pos > 4)
return NULL;
if (pos == 3)
return (state == 0) ? ast_strdup("for") : NULL;
wordlen = strlen(word);
AST_LIST_TRAVERSE(&minivm_accounts, vmu, list) {
if (!strncasecmp(word, vmu->domain, wordlen)) {
if (domain && strcmp(domain, vmu->domain) && ++which > state)
return ast_strdup(vmu->domain);
/* ignore repeated domains ? */
domain = vmu->domain;
}
}
return NULL;
}
/*! \brief CLI command to list voicemail accounts */
static char *handle_minivm_show_users(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
{
struct minivm_account *vmu;
#define HMSU_OUTPUT_FORMAT "%-23s %-15s %-15s %-10s %-10s %-50s\n"
int count = 0;
switch (cmd) {
case CLI_INIT:
e->command = "minivm list accounts";
e->usage =
"Usage: minivm list accounts\n"
" Lists all mailboxes currently set up\n";
return NULL;
case CLI_GENERATE:
return complete_minivm_show_users(a->line, a->word, a->pos, a->n);
}
if ((a->argc < 3) || (a->argc > 5) || (a->argc == 4))
return CLI_SHOWUSAGE;
if ((a->argc == 5) && strcmp(a->argv[3],"for"))
return CLI_SHOWUSAGE;
AST_LIST_LOCK(&minivm_accounts);
if (AST_LIST_EMPTY(&minivm_accounts)) {
ast_cli(a->fd, "There are no voicemail users currently defined\n");
AST_LIST_UNLOCK(&minivm_accounts);
return CLI_FAILURE;
}
ast_cli(a->fd, HMSU_OUTPUT_FORMAT, "User", "E-Template", "P-template", "Zone", "Format", "Full name");
ast_cli(a->fd, HMSU_OUTPUT_FORMAT, "----", "----------", "----------", "----", "------", "---------");
AST_LIST_TRAVERSE(&minivm_accounts, vmu, list) {
char tmp[256] = "";
if ((a->argc == 3) || ((a->argc == 5) && !strcmp(a->argv[4], vmu->domain))) {
count++;
snprintf(tmp, sizeof(tmp), "%s@%s", vmu->username, vmu->domain);
ast_cli(a->fd, HMSU_OUTPUT_FORMAT, tmp, vmu->etemplate ? vmu->etemplate : "-",
vmu->ptemplate ? vmu->ptemplate : "-",
vmu->zonetag ? vmu->zonetag : "-",
vmu->attachfmt ? vmu->attachfmt : "-",
vmu->fullname);
}
}
AST_LIST_UNLOCK(&minivm_accounts);
ast_cli(a->fd, "\n * Total: %d minivoicemail accounts\n", count);
return CLI_SUCCESS;
}
/*! \brief Show a list of voicemail zones in the CLI */
static char *handle_minivm_show_zones(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
{
struct minivm_zone *zone;
#define HMSZ_OUTPUT_FORMAT "%-15s %-20s %-45s\n"
char *res = CLI_SUCCESS;
switch (cmd) {
case CLI_INIT:
e->command = "minivm list zones";
e->usage =
"Usage: minivm list zones\n"
" Lists zone message formats\n";
return NULL;
case CLI_GENERATE:
return NULL;
}
if (a->argc != e->args)
return CLI_SHOWUSAGE;
AST_LIST_LOCK(&minivm_zones);
if (!AST_LIST_EMPTY(&minivm_zones)) {
ast_cli(a->fd, HMSZ_OUTPUT_FORMAT, "Zone", "Timezone", "Message Format");
ast_cli(a->fd, HMSZ_OUTPUT_FORMAT, "----", "--------", "--------------");
AST_LIST_TRAVERSE(&minivm_zones, zone, list) {
ast_cli(a->fd, HMSZ_OUTPUT_FORMAT, zone->name, zone->timezone, zone->msg_format);
}
} else {
ast_cli(a->fd, "There are no voicemail zones currently defined\n");
res = CLI_FAILURE;
}
AST_LIST_UNLOCK(&minivm_zones);
return res;
}
/*! \brief CLI Show settings */
static char *handle_minivm_show_settings(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
{
switch (cmd) {
case CLI_INIT:
e->command = "minivm show settings";
e->usage =
"Usage: minivm show settings\n"
" Display Mini-Voicemail general settings\n";
return NULL;
case CLI_GENERATE:
return NULL;
}
ast_cli(a->fd, "* Mini-Voicemail general settings\n");
ast_cli(a->fd, " -------------------------------\n");
ast_cli(a->fd, "\n");
ast_cli(a->fd, " Mail command (shell): %s\n", global_mailcmd);
ast_cli(a->fd, " Max silence: %d\n", global_maxsilence);
ast_cli(a->fd, " Silence threshold: %d\n", global_silencethreshold);
ast_cli(a->fd, " Max message length (secs): %d\n", global_vmmaxmessage);
ast_cli(a->fd, " Min message length (secs): %d\n", global_vmminmessage);
ast_cli(a->fd, " Default format: %s\n", default_vmformat);
ast_cli(a->fd, " Extern notify (shell): %s\n", global_externnotify);
ast_cli(a->fd, " Logfile: %s\n", global_logfile[0] ? global_logfile : "<disabled>");
ast_cli(a->fd, " Operator exit: %s\n", ast_test_flag(&globalflags, MVM_OPERATOR) ? "Yes" : "No");
ast_cli(a->fd, " Message review: %s\n", ast_test_flag(&globalflags, MVM_REVIEW) ? "Yes" : "No");
ast_cli(a->fd, "\n");
return CLI_SUCCESS;
}
/*! \brief Show stats */
static char *handle_minivm_show_stats(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
{
struct ast_tm timebuf;
char buf[BUFSIZ];
switch (cmd) {
case CLI_INIT:
e->command = "minivm show stats";
e->usage =
"Usage: minivm show stats\n"
" Display Mini-Voicemail counters\n";
return NULL;
case CLI_GENERATE:
return NULL;
}
ast_cli(a->fd, "* Mini-Voicemail statistics\n");
ast_cli(a->fd, " -------------------------\n");
ast_cli(a->fd, "\n");
ast_cli(a->fd, " Voicemail accounts: %5d\n", global_stats.voicemailaccounts);
ast_cli(a->fd, " Templates: %5d\n", global_stats.templates);
ast_cli(a->fd, " Timezones: %5d\n", global_stats.timezones);
if (global_stats.receivedmessages == 0) {
ast_cli(a->fd, " Received messages since last reset: <none>\n");
} else {
ast_cli(a->fd, " Received messages since last reset: %d\n", global_stats.receivedmessages);
ast_localtime(&global_stats.lastreceived, &timebuf, NULL);
ast_strftime(buf, sizeof(buf), "%a %b %e %r %Z %Y", &timebuf);
ast_cli(a->fd, " Last received voicemail: %s\n", buf);
}
ast_localtime(&global_stats.reset, &timebuf, NULL);
ast_strftime(buf, sizeof(buf), "%a %b %e %r %Z %Y", &timebuf);
ast_cli(a->fd, " Last reset: %s\n", buf);
ast_cli(a->fd, "\n");
return CLI_SUCCESS;
}
/*! \brief ${MINIVMACCOUNT()} Dialplan function - reads account data */
static int minivm_account_func_read(struct ast_channel *chan, const char *cmd, char *data, char *buf, size_t len)
{
struct minivm_account *vmu;
char *username, *domain, *colname;
username = ast_strdupa(data);
if ((colname = strchr(username, ':'))) {
*colname = '\0';
colname++;
} else {
colname = "path";
}
if ((domain = strchr(username, '@'))) {
*domain = '\0';
domain++;
}
if (ast_strlen_zero(username) || ast_strlen_zero(domain)) {
ast_log(LOG_ERROR, "This function needs a username and a domain: username@domain\n");
return 0;
}
if (!(vmu = find_account(domain, username, TRUE)))
return 0;
if (!strcasecmp(colname, "hasaccount")) {
ast_copy_string(buf, (ast_test_flag(vmu, MVM_ALLOCED) ? "0" : "1"), len);
} else if (!strcasecmp(colname, "fullname")) {
ast_copy_string(buf, vmu->fullname, len);
} else if (!strcasecmp(colname, "email")) {
if (!ast_strlen_zero(vmu->email))
ast_copy_string(buf, vmu->email, len);
else
snprintf(buf, len, "%s@%s", vmu->username, vmu->domain);
} else if (!strcasecmp(colname, "pager")) {
ast_copy_string(buf, vmu->pager, len);
} else if (!strcasecmp(colname, "etemplate")) {
if (!ast_strlen_zero(vmu->etemplate))
ast_copy_string(buf, vmu->etemplate, len);
else
ast_copy_string(buf, "email-default", len);
} else if (!strcasecmp(colname, "language")) {
ast_copy_string(buf, vmu->language, len);
} else if (!strcasecmp(colname, "timezone")) {
ast_copy_string(buf, vmu->zonetag, len);
} else if (!strcasecmp(colname, "ptemplate")) {
if (!ast_strlen_zero(vmu->ptemplate))
ast_copy_string(buf, vmu->ptemplate, len);
else
ast_copy_string(buf, "email-default", len);
} else if (!strcasecmp(colname, "accountcode")) {
ast_copy_string(buf, vmu->accountcode, len);
} else if (!strcasecmp(colname, "pincode")) {
ast_copy_string(buf, vmu->pincode, len);
} else if (!strcasecmp(colname, "path")) {
check_dirpath(buf, len, vmu->domain, vmu->username, NULL);
} else { /* Look in channel variables */
struct ast_variable *var;
for (var = vmu->chanvars ; var ; var = var->next)
if (!strcmp(var->name, colname)) {
ast_copy_string(buf, var->value, len);
break;
}
}
if(ast_test_flag(vmu, MVM_ALLOCED))
free_user(vmu);
return 0;
}
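/* Illustrative dialplan usage (a sketch - the mailbox address is an example;
the argument format username@domain[:item] and the item names follow the
parser above, with "path" as the default item):
exten => s,1,Set(VMPATH=${MINIVMACCOUNT(olle@example.com)})
exten => s,n,Set(EMAIL=${MINIVMACCOUNT(olle@example.com:email)})
*/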
/*! \brief Lock a directory
Only return failure if ast_lock_path returns 'timeout';
not if the path does not exist, or for any other reason
*/
static int vm_lock_path(const char *path)
{
switch (ast_lock_path(path)) {
case AST_LOCK_TIMEOUT:
return -1;
default:
return 0;
}
}
/*! \brief Access a counter file: lock the directory, read the counter and possibly write it back changed
\param directory Directory to create the file in
\param countername Counter name (the file is named "<countername>.counter")
\param value New value (operand 1) or amount of change (operand 2); ignored when reading
\param operand 0 to read, 1 to set a new value, 2 to change
\return -1 on error, otherwise the counter value
*/
static int access_counter_file(char *directory, char *countername, int value, int operand)
{
char filename[BUFSIZ];
char readbuf[BUFSIZ];
FILE *counterfile;
int old = 0, counter = 0;
/* Lock directory */
if (vm_lock_path(directory)) {
return -1; /* Could not lock directory */
}
snprintf(filename, sizeof(filename), "%s/%s.counter", directory, countername);
if (operand != 1) {
counterfile = fopen(filename, "r");
if (counterfile) {
if(fgets(readbuf, sizeof(readbuf), counterfile)) {
ast_debug(3, "Read this string from counter file: %s\n", readbuf);
old = counter = atoi(readbuf);
}
fclose(counterfile);
}
}
switch (operand) {
case 0: /* Read only */
ast_unlock_path(directory);
ast_debug(2, "MINIVM Counter %s/%s: Value %d\n", directory, countername, counter);
return counter;
case 1: /* Set new value */
counter = value;
break;
case 2: /* Change value */
counter += value;
if (counter < 0) /* Don't allow counters to fall below zero */
counter = 0;
break;
}
/* Now, write the new value to the file */
counterfile = fopen(filename, "w");
if (!counterfile) {
ast_log(LOG_ERROR, "Could not open counter file for writing : %s - %s\n", filename, strerror(errno));
ast_unlock_path(directory);
return -1; /* Could not open file for writing */
}
fprintf(counterfile, "%d\n\n", counter);
fclose(counterfile);
ast_unlock_path(directory);
ast_debug(2, "MINIVM Counter %s/%s: Old value %d New value %d\n", directory, countername, old, counter);
return counter;
}
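/* The counter is stored as a plain text file named "<countername>.counter"
in the given directory, holding a single decimal value as written above. */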
/*! \brief ${MINIVMCOUNTER()} Dialplan function - read counters */
static int minivm_counter_func_read(struct ast_channel *chan, const char *cmd, char *data, char *buf, size_t len)
{
char *username, *domain, *countername;
struct minivm_account *vmu = NULL;
char userpath[BUFSIZ];
int res;
*buf = '\0';
username = ast_strdupa(data);
if ((countername = strchr(username, ':'))) {
*countername = '\0';
countername++;
}
if ((domain = strchr(username, '@'))) {
*domain = '\0';
domain++;
}
/* If we have neither username nor domain now, let's give up */
if (ast_strlen_zero(username) && ast_strlen_zero(domain)) {
ast_log(LOG_ERROR, "No account given\n");
return -1;
}
if (ast_strlen_zero(countername)) {
ast_log(LOG_ERROR, "This function needs two arguments: Account:countername\n");
return -1;
}
/* We only have a domain, no username */
if (!ast_strlen_zero(username) && ast_strlen_zero(domain)) {
domain = username;
username = NULL;
}
/* If we can't find account or if the account is temporary, return. */
if (!ast_strlen_zero(username) && !(vmu = find_account(domain, username, FALSE))) {
ast_log(LOG_ERROR, "Minivm account does not exist: %s@%s\n", username, domain);
return 0;
}
create_dirpath(userpath, sizeof(userpath), domain, username, NULL);
/* We have the path, now read the counter file */
res = access_counter_file(userpath, countername, 0, 0);
if (res >= 0)
snprintf(buf, len, "%d", res);
return 0;
}
/*! \brief ${MINIVMCOUNTER()} Dialplan function - changes counter data */
static int minivm_counter_func_write(struct ast_channel *chan, const char *cmd, char *data, const char *value)
{
char *username, *domain, *countername = NULL, *operand = NULL;
char userpath[BUFSIZ];
struct minivm_account *vmu;
int change = 0;
int operation = 0;
if(!value)
return -1;
change = atoi(value);
username = ast_strdupa(data);
if ((countername = strchr(username, ':'))) {
*countername = '\0';
countername++;
}
if (countername && (operand = strchr(countername, ':'))) {
*operand = '\0';
operand++;
}
if ((domain = strchr(username, '@'))) {
*domain = '\0';
domain++;
}
/* If we have neither username nor domain now, let's give up */
if (ast_strlen_zero(username) && ast_strlen_zero(domain)) {
ast_log(LOG_ERROR, "No account given\n");
return -1;
}
/* We only have a domain, no username */
if (!ast_strlen_zero(username) && ast_strlen_zero(domain)) {
domain = username;
username = NULL;
}
if (ast_strlen_zero(operand) || ast_strlen_zero(countername)) {
ast_log(LOG_ERROR, "Writing to this function requires three arguments: Account:countername:operand\n");
return -1;
}
/* If we can't find account or if the account is temporary, return. */
if (!ast_strlen_zero(username) && !(vmu = find_account(domain, username, FALSE))) {
ast_log(LOG_ERROR, "Minivm account does not exist: %s@%s\n", username, domain);
return 0;
}
create_dirpath(userpath, sizeof(userpath), domain, username, NULL);
/* Now, find out our operator */
if (*operand == 'i') /* Increment */
operation = 2;
else if (*operand == 'd') {
change = change * -1;
operation = 2;
} else if (*operand == 's')
operation = 1;
else {
ast_log(LOG_ERROR, "Unknown operator: %s\n", operand);
return -1;
}
/* We have the path, now read the counter file */
access_counter_file(userpath, countername, change, operation);
return 0;
}
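/* Illustrative dialplan usage (a sketch - account and counter names are
examples; the read format account:countername and the write format
account:countername:operand, with operands i(ncrement), d(ecrement) and
s(et), follow the parsing above):
exten => s,1,Set(RECEIVED=${MINIVMCOUNTER(olle@example.com:received)})
exten => s,n,Set(MINIVMCOUNTER(olle@example.com:received:i)=1)
*/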
/*! \brief CLI commands for Mini-voicemail */
static struct ast_cli_entry cli_minivm[] = {
AST_CLI_DEFINE(handle_minivm_show_users, "List defined mini-voicemail boxes"),
AST_CLI_DEFINE(handle_minivm_show_zones, "List zone message formats"),
AST_CLI_DEFINE(handle_minivm_list_templates, "List message templates"),
AST_CLI_DEFINE(handle_minivm_reload, "Reload Mini-voicemail configuration"),
AST_CLI_DEFINE(handle_minivm_show_stats, "Show some mini-voicemail statistics"),
AST_CLI_DEFINE(handle_minivm_show_settings, "Show mini-voicemail general settings"),
};
static struct ast_custom_function minivm_counter_function = {
.name = "MINIVMCOUNTER",
.read = minivm_counter_func_read,
.write = minivm_counter_func_write,
};
static struct ast_custom_function minivm_account_function = {
.name = "MINIVMACCOUNT",
.read = minivm_account_func_read,
};
/*! \brief Load mini voicemail module */
static int load_module(void)
{
int res;
res = ast_register_application_xml(app_minivm_record, minivm_record_exec);
res |= ast_register_application_xml(app_minivm_greet, minivm_greet_exec);
res |= ast_register_application_xml(app_minivm_notify, minivm_notify_exec);
res |= ast_register_application_xml(app_minivm_delete, minivm_delete_exec);
res |= ast_register_application_xml(app_minivm_accmess, minivm_accmess_exec);
res |= ast_register_application_xml(app_minivm_mwi, minivm_mwi_exec);
ast_custom_function_register(&minivm_account_function);
ast_custom_function_register(&minivm_counter_function);
if (res)
return(res);
if ((res = load_config(0)))
return(res);
ast_cli_register_multiple(cli_minivm, ARRAY_LEN(cli_minivm));
/* compute the location of the voicemail spool directory */
snprintf(MVM_SPOOL_DIR, sizeof(MVM_SPOOL_DIR), "%s/voicemail/", ast_config_AST_SPOOL_DIR);
return res;
}
/*! \brief Reload mini voicemail module */
static int reload(void)
{
return(load_config(1));
}
/*! \brief Reload configuration */
static char *handle_minivm_reload(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
{
switch (cmd) {
case CLI_INIT:
e->command = "minivm reload";
e->usage =
"Usage: minivm reload\n"
" Reload mini-voicemail configuration and reset statistics\n";
return NULL;
case CLI_GENERATE:
return NULL;
}
reload();
ast_cli(a->fd, "\n-- Mini voicemail re-configured \n");
return CLI_SUCCESS;
}
/*! \brief Unload mini voicemail module */
static int unload_module(void)
{
int res;
res = ast_unregister_application(app_minivm_record);
res |= ast_unregister_application(app_minivm_greet);
res |= ast_unregister_application(app_minivm_notify);
res |= ast_unregister_application(app_minivm_delete);
res |= ast_unregister_application(app_minivm_accmess);
res |= ast_unregister_application(app_minivm_mwi);
ast_cli_unregister_multiple(cli_minivm, ARRAY_LEN(cli_minivm));
ast_custom_function_unregister(&minivm_account_function);
ast_custom_function_unregister(&minivm_counter_function);
message_destroy_list(); /* Destroy list of voicemail message templates */
timezone_destroy_list(); /* Destroy list of timezones */
vmaccounts_destroy_list(); /* Destroy list of voicemail accounts */
return res;
}
AST_MODULE_INFO(ASTERISK_GPL_KEY, AST_MODFLAG_DEFAULT, "Mini VoiceMail (A minimal Voicemail e-mail System)",
.load = load_module,
.unload = unload_module,
.reload = reload,
);
| gpl-2.0 |
aliozel/swak4Foam | Libraries/simpleSearchableSurfaces/include/swakSurfaceTypes.H | 2158 | /*---------------------------------------------------------------------------*\
## #### ###### |
## ## ## | Copyright: ICE Stroemungsfoschungs GmbH
## ## #### |
## ## ## | http://www.ice-sf.at
## #### ###### |
-------------------------------------------------------------------------------
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 1991-2008 OpenCFD Ltd.
\\/ M anipulation |
-------------------------------------------------------------------------------
License
This file is based on OpenFOAM.
OpenFOAM is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM; if not, write to the Free Software Foundation,
Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Class
Description
Define types for searchableSurfaces that changed between OpenFOAM versions
SourceFiles
Contributors/Copyright:
2014 Bernhard F.W. Gschaider <[email protected]>
SWAK Revision: $Id$
\*---------------------------------------------------------------------------*/
#ifndef SwakSurfaceTypesMacroHeader_H
#define SwakSurfaceTypesMacroHeader_H
#include "swak.H"
#ifdef FOAM_VOLUMETYPE_IS_TYPE
#define INSIDE volumeType::INSIDE
#define OUTSIDE volumeType::OUTSIDE
#define UNKNOWN volumeType::UNKNOWN
#define MIXED volumeType::MIXED
#endif
#if FOAM_VERSION4SWAK > VERSION_NR2(2,2) && !defined(FOAM_DEV)
#define FOAM_SEARCHABLE_SURF_NEEDS_BOUNDING_SPHERES
#endif
#endif
// ************************************************************************* //
| gpl-2.0 |
systers/vms | vms/volunteer/templates/volunteer/500.html | 538 | {% extends "base.html" %}
{% block title %} - Server Error{% endblock %}
{% block content %}
<div class="jumbotron jumbotron-custom">
<h1>500</h1>
<h1>Server Error/Internal Server Error</h1>
<p>An unexpected error seems to have occurred. Try refreshing the page or returning to the previous page.</p>
<p>
<a class="btn btn-lg btn-success" href="#" role="button" style="margin-top: 80px" onclick="history.go(-1); return false;">Back to previous page</a>
</p>
</div>
<div class="mt40"></div>
{% endblock %}
| gpl-2.0 |
sin3fu3/inkjet-conveni | data/class/pages/regist/LC_Page_Regist_Complete.php | 1863 | <?php
/*
* This file is part of EC-CUBE
*
* Copyright(c) 2000-2011 LOCKON CO.,LTD. All Rights Reserved.
*
* http://www.lockon.co.jp/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
// {{{ requires
require_once CLASS_EX_REALDIR . 'page_extends/LC_Page_Ex.php';
/**
* Page class for the member registration completion page.
*
* @package Page
* @author LOCKON CO.,LTD.
* @version $Id:LC_Page_Regist_Complete.php 15532 2007-08-31 14:39:46Z nanasess $
*/
class LC_Page_Regist_Complete extends LC_Page_Ex {
// }}}
// {{{ functions
/**
* Initialize the page.
*
* @return void
*/
function init() {
parent::init();
$this->tpl_title = '会員登録(完了ページ)';
$this->tpl_conv_page = AFF_ENTRY_COMPLETE;
}
/**
* Process the page.
*
* @return void
*/
function process() {
parent::process();
$this->action();
$this->sendResponse();
}
/**
* Action for the page.
*
* @return void
*/
function action() {
}
/**
* Destructor.
*
* @return void
*/
function destroy() {
parent::destroy();
}
}
?>
| gpl-2.0 |
erik132/MolekelCUDA | deps/lib/vtk-5.2_devenv/CMake/vtkWrapJava.cmake | 6287 | #
# a cmake implementation of the Wrap Java command
#
MACRO(VTK_WRAP_JAVA2 TARGET SOURCE_LIST_NAME)
# convert to the WRAP3 signature
VTK_WRAP_JAVA3(${TARGET} ${SOURCE_LIST_NAME} "${ARGN}")
ENDMACRO(VTK_WRAP_JAVA2)
MACRO(VTK_WRAP_JAVA3 TARGET SRC_LIST_NAME SOURCES)
IF(NOT VTK_PARSE_JAVA_EXE)
MESSAGE(SEND_ERROR "VTK_PARSE_JAVA_EXE not specified when calling VTK_WRAP_JAVA3")
ENDIF(NOT VTK_PARSE_JAVA_EXE)
IF(NOT VTK_WRAP_JAVA_EXE)
MESSAGE(SEND_ERROR "VTK_WRAP_JAVA_EXE not specified when calling VTK_WRAP_JAVA3")
ENDIF(NOT VTK_WRAP_JAVA_EXE)
# Initialize the custom target counter.
IF(VTK_WRAP_JAVA_NEED_CUSTOM_TARGETS)
SET(VTK_WRAP_JAVA_CUSTOM_COUNT "")
SET(VTK_WRAP_JAVA_CUSTOM_NAME ${TARGET})
SET(VTK_WRAP_JAVA_CUSTOM_LIST)
ENDIF(VTK_WRAP_JAVA_NEED_CUSTOM_TARGETS)
SET(VTK_JAVA_DEPENDENCIES)
SET(VTK_JAVA_DEPENDENCIES_FILE)
# For each class
FOREACH(FILE ${SOURCES})
# should we wrap the file?
GET_SOURCE_FILE_PROPERTY(TMP_WRAP_EXCLUDE ${FILE} WRAP_EXCLUDE)
# if we should wrap it
IF (NOT TMP_WRAP_EXCLUDE)
# what is the filename without the extension
GET_FILENAME_COMPONENT(TMP_FILENAME ${FILE} NAME_WE)
# the input file might be full path so handle that
GET_FILENAME_COMPONENT(TMP_FILEPATH ${FILE} PATH)
# compute the input filename
IF (TMP_FILEPATH)
SET(TMP_INPUT ${TMP_FILEPATH}/${TMP_FILENAME}.h)
ELSE (TMP_FILEPATH)
SET(TMP_INPUT ${CMAKE_CURRENT_SOURCE_DIR}/${TMP_FILENAME}.h)
ENDIF (TMP_FILEPATH)
# is it abstract?
GET_SOURCE_FILE_PROPERTY(TMP_ABSTRACT ${FILE} ABSTRACT)
IF (TMP_ABSTRACT)
SET(TMP_CONCRETE 0)
ELSE (TMP_ABSTRACT)
SET(TMP_CONCRETE 1)
ENDIF (TMP_ABSTRACT)
# new source file is nameJava.cxx, add to resulting list
SET(${SRC_LIST_NAME} ${${SRC_LIST_NAME}}
${TMP_FILENAME}Java.cxx)
# add custom command to output
ADD_CUSTOM_COMMAND(
OUTPUT ${VTK_JAVA_HOME}/${TMP_FILENAME}.java
DEPENDS ${VTK_PARSE_JAVA_EXE} ${VTK_WRAP_HINTS} ${TMP_INPUT}
COMMAND ${VTK_PARSE_JAVA_EXE}
ARGS ${TMP_INPUT} ${VTK_WRAP_HINTS} ${TMP_CONCRETE}
${VTK_JAVA_HOME}/${TMP_FILENAME}.java
COMMENT "Java Wrappings - generating ${TMP_FILENAME}.java"
)
# add custom command to output
ADD_CUSTOM_COMMAND(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${TMP_FILENAME}Java.cxx
DEPENDS ${VTK_WRAP_JAVA_EXE} ${VTK_WRAP_HINTS} ${TMP_INPUT}
COMMAND ${VTK_WRAP_JAVA_EXE}
ARGS ${TMP_INPUT} ${VTK_WRAP_HINTS} ${TMP_CONCRETE}
${CMAKE_CURRENT_BINARY_DIR}/${TMP_FILENAME}Java.cxx
COMMENT "Java Wrappings - generating ${TMP_FILENAME}Java.cxx"
)
SET(VTK_JAVA_DEPENDENCIES ${VTK_JAVA_DEPENDENCIES} "${VTK_JAVA_HOME}/${TMP_FILENAME}.java")
SET(VTK_JAVA_DEPENDENCIES_FILE
"${VTK_JAVA_DEPENDENCIES_FILE}\n \"${VTK_JAVA_HOME}/${TMP_FILENAME}.java\"")
# Add this output to a custom target if needed.
IF(VTK_WRAP_JAVA_NEED_CUSTOM_TARGETS)
SET(VTK_WRAP_JAVA_CUSTOM_LIST ${VTK_WRAP_JAVA_CUSTOM_LIST}
${CMAKE_CURRENT_BINARY_DIR}/${TMP_FILENAME}Java.cxx
)
SET(VTK_WRAP_JAVA_CUSTOM_COUNT ${VTK_WRAP_JAVA_CUSTOM_COUNT}x)
IF(VTK_WRAP_JAVA_CUSTOM_COUNT MATCHES "^${VTK_WRAP_JAVA_CUSTOM_LIMIT}$")
SET(VTK_WRAP_JAVA_CUSTOM_NAME ${VTK_WRAP_JAVA_CUSTOM_NAME}Hack)
ADD_CUSTOM_TARGET(${VTK_WRAP_JAVA_CUSTOM_NAME} DEPENDS ${VTK_WRAP_JAVA_CUSTOM_LIST})
SET(KIT_JAVA_DEPS ${VTK_WRAP_JAVA_CUSTOM_NAME})
SET(VTK_WRAP_JAVA_CUSTOM_LIST)
SET(VTK_WRAP_JAVA_CUSTOM_COUNT)
ENDIF(VTK_WRAP_JAVA_CUSTOM_COUNT MATCHES "^${VTK_WRAP_JAVA_CUSTOM_LIMIT}$")
ENDIF(VTK_WRAP_JAVA_NEED_CUSTOM_TARGETS)
ENDIF (NOT TMP_WRAP_EXCLUDE)
ENDFOREACH(FILE)
ADD_CUSTOM_TARGET("${TARGET}JavaClasses" ALL DEPENDS ${VTK_JAVA_DEPENDENCIES})
SET(dir ${CMAKE_CURRENT_SOURCE_DIR})
IF(VTK_WRAP_JAVA3_INIT_DIR)
SET(dir ${VTK_WRAP_JAVA3_INIT_DIR})
ENDIF(VTK_WRAP_JAVA3_INIT_DIR)
CONFIGURE_FILE("${dir}/JavaDependencies.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/JavaDependencies.cmake" IMMEDIATE @ONLY)
ENDMACRO(VTK_WRAP_JAVA3)
# VS 6 does not like needing to run a huge number of custom commands
# when building a single target. Generate some extra custom targets
# that run the custom commands before the main target is built. This
# is a hack to work-around the limitation. The test to enable it is
# done here since it does not need to be done for every macro
# invocation.
IF(CMAKE_GENERATOR MATCHES "^Visual Studio 6$")
SET(VTK_WRAP_JAVA_NEED_CUSTOM_TARGETS 1)
SET(VTK_WRAP_JAVA_CUSTOM_LIMIT x)
# Limit the number of custom commands in each target
# to 2^7.
FOREACH(t 1 2 3 4 5 6 7)
SET(VTK_WRAP_JAVA_CUSTOM_LIMIT
${VTK_WRAP_JAVA_CUSTOM_LIMIT}${VTK_WRAP_JAVA_CUSTOM_LIMIT})
ENDFOREACH(t)
ENDIF(CMAKE_GENERATOR MATCHES "^Visual Studio 6$")
MACRO(VTK_GENERATE_JAVA_DEPENDENCIES TARGET)
SET (javaPath "${VTK_BINARY_DIR}/java")
IF (USER_JAVA_CLASSPATH)
SET (javaPath "${USER_JAVA_PATH};${VTK_BINARY_DIR}/java")
ENDIF (USER_JAVA_CLASSPATH)
SET (OUT_TEXT)
SET (classes)
# get the classes for this lib
FOREACH(srcName ${ARGN})
# what is the filename without the extension
GET_FILENAME_COMPONENT(srcNameWe ${srcName} NAME_WE)
# the input file might be full path so handle that
GET_FILENAME_COMPONENT(srcPath ${srcName} PATH)
SET(className "${srcPath}/${srcNameWe}.class")
SET (OUT_TEXT ${OUT_TEXT} "\n dummy = new ${srcNameWe}()")
# On Unix we can just call javac ... *.java
ADD_CUSTOM_COMMAND(
OUTPUT "${className}"
COMMAND "${JAVA_COMPILE}" ARGS -classpath "${javaPath}" "${srcPath}/vtk${TARGET}Driver.java"
DEPENDS "${srcName}"
)
SET (classes ${classes} ${className})
ENDFOREACH(srcName)
ADD_CUSTOM_COMMAND(TARGET ${TARGET} SOURCE ${TARGET} DEPENDS ${classes})
SET (TARGET_NAME ${TARGET})
CONFIGURE_FILE(
${VTK_CMAKE_DIR}/vtkJavaDriver.java.in
"${VTK_BINARY_DIR}/java/vtk/vtk${TARGET}Driver.java"
COPYONLY
IMMEDIATE
)
ENDMACRO(VTK_GENERATE_JAVA_DEPENDENCIES)
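# Illustrative invocation of the wrapping macro (a sketch; the target and
# variable names are hypothetical, only the VTK_WRAP_JAVA3 signature is real):
#   VTK_WRAP_JAVA3(vtkCommonJava KitJava_SRCS "${Kit_SRCS}")
# This appends the generated *Java.cxx wrapper sources to KitJava_SRCS and
# schedules generation of the matching .java files under ${VTK_JAVA_HOME}.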
| gpl-2.0 |
mhndev/shopping | app/models/Auth.php | 70 | <?php
class Auth extends Eloquent{
protected $table = 'auth';
}
| gpl-2.0 |
rfdrake/opennms | integrations/opennms-link-provisioning-adapter/src/main/java/org/opennms/netmgt/provision/adapters/link/DefaultLinkMatchResolverImpl.java | 2417 | /*******************************************************************************
* This file is part of OpenNMS(R).
*
* Copyright (C) 2009-2012 The OpenNMS Group, Inc.
* OpenNMS(R) is Copyright (C) 1999-2012 The OpenNMS Group, Inc.
*
* OpenNMS(R) is a registered trademark of The OpenNMS Group, Inc.
*
* OpenNMS(R) is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation, either version 3 of the License,
* or (at your option) any later version.
*
* OpenNMS(R) is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with OpenNMS(R). If not, see:
* http://www.gnu.org/licenses/
*
* For more information contact:
* OpenNMS(R) Licensing <[email protected]>
* http://www.opennms.org/
* http://www.opennms.com/
*******************************************************************************/
package org.opennms.netmgt.provision.adapters.link;
import org.opennms.core.utils.BeanUtils;
import org.opennms.netmgt.provision.adapters.link.config.dao.DefaultLinkAdapterConfigurationDao;
import org.opennms.netmgt.provision.adapters.link.config.linkadapter.LinkPattern;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
/**
* <p>DefaultLinkMatchResolverImpl class.</p>
*
* @author ranger
* @version $Id: $
*/
public class DefaultLinkMatchResolverImpl implements LinkMatchResolver, InitializingBean {
@Autowired
private DefaultLinkAdapterConfigurationDao m_configDao;
@Override
public void afterPropertiesSet() throws Exception {
BeanUtils.assertAutowiring(this);
}
/** {@inheritDoc} */
@Override
public String getAssociatedEndPoint(String endPoint) {
if (m_configDao != null) {
for (LinkPattern p : m_configDao.getPatterns()) {
String endPointResolvedTemplate = p.resolveTemplate(endPoint);
if (endPointResolvedTemplate != null) {
return endPointResolvedTemplate;
}
}
}
return null;
}
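/* Illustrative use (a sketch; the endpoint name is hypothetical):
* getAssociatedEndPoint("endpoint-a") walks the configured LinkPattern
* list and returns the first non-null template expansion, or null when
* no pattern matches. */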
}
| gpl-2.0 |
proudcity/proud-drops | profiles/proud/modules/contrib/socialfield_widgets/README.md | 181 | Social Field Widgets
====================
Adds additional field formatters to [Social Field](https://www.drupal.org/project/socialfield) displays:
* Facebook Likebox
* Twitter Feed | gpl-2.0 |
rydnr/queryj-rt | queryj-templates-deprecated/src/main/java/org/acmsl/queryj/templates/dao/DataAccessManagerTemplateGenerator.java | 2309 | /*
QueryJ
Copyright (C) 2002-today Jose San Leandro Armendariz
[email protected]
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 2 of the License, or any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Thanks to ACM S.L. for distributing this library under the GPL license.
Contact info: [email protected]
******************************************************************************
*
* Filename: DataAccessManagerTemplateGenerator.java
*
* Author: Jose San Leandro Armendariz
*
* Description: Is able to generate DataAccessManager implementations
* according to database metadata.
*
*/
package org.acmsl.queryj.templates.dao;
/*
* Importing some project-specific classes.
*/
import org.acmsl.queryj.api.AbstractTemplateGenerator;
import org.acmsl.queryj.api.PerRepositoryTemplateGenerator;
/*
* Importing checkthread.org annotations.
*/
import org.checkthread.annotations.ThreadSafe;
/**
* Is able to generate DataAccessManager implementations according
* to database metadata.
* @author <a href="mailto:[email protected]">Jose San Leandro Armendariz</a>
*/
@ThreadSafe
public class DataAccessManagerTemplateGenerator
extends AbstractTemplateGenerator<DataAccessManagerTemplate>
implements PerRepositoryTemplateGenerator<DataAccessManagerTemplate>
{
/**
* Creates a new {@link DataAccessManagerTemplateGenerator} with given settings.
* @param caching whether to enable caching.
* @param threadCount the number of threads to use.
*/
public DataAccessManagerTemplateGenerator(final boolean caching, final int threadCount)
{
super(caching, threadCount);
}
}
| gpl-2.0 |
bsmitty83/kernel_omap | drivers/power/bq2419x_charger.c | 62219 | /*
* drivers/power/bq2419x_charger_.c
* BQ24195 battery charging driver
* Copyright (C) 2012 Texas Instruments, Inc.
* Author: Texas Instruments, Inc.
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#define DEBUG
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c/twl.h>
#include <linux/usb/otg.h>
#include <mach/gpio.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/i2c/bq2419x.h>
#include <plat/dmtimer.h>
#include <linux/clk.h>
#define BQ2419x_WDT_TIMEOUT WatchDog_160s
#define BQ2419x_BOOSTBACK_THRESHOLD_LO 3200
#define BQ2419x_BOOSTBACK_THRESHOLD_HI 3500
#define BQ2419x_LOW_BATTEMP_ICHR_LIMIT ((8*64)+500) /* 1012mA */
/* NOTE: When REGN is on (such as: a normal charging mode), the WDT timer has +/-15% clock variation.
* When REGN is off (such as: a HIZ mode), the WDT timer has +/-30% clock variation.
*/
#define BQ2419x_OMAP_DMTIMER_INTERVAL 110
static enum power_supply_property bq2419x_usb_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_CURRENT_AVG,
};
static enum power_supply_property bq2419x_wall_props[] = {
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_CURRENT_AVG,
};
static u8 global_charge_state;
static struct omap_dm_timer *wdt_timer_ptr;
static bool threshold_voltage_checked = true;
static unsigned long time_previous;
static bool stopdmtimershutdown = false;
static bool first_active = true;
extern bool bq27x00_is_battery_present(void);
extern int bq27x00_get_battery_temperature(void);
bool bq2419x_is_charging_done(void);
static int bqEnDPDM(struct bq2419x_device_info *di, int enable);
/*
* Return the required battery property or an error.
*/
static int bq2419x_usb_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct bq2419x_device_info *di = container_of(psy,
struct bq2419x_device_info,
usb);
int ret = -EINVAL;
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
val->intval = (USB_EVENT_VBUS == di->power_type);
ret = 0;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
val->intval = USB_CURRENT_LIMIT_HIGH;
ret = 0;
break;
default:
return -EINVAL;
}
return ret;
}
/*
* Return the required battery property or an error.
*/
static int bq2419x_wall_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct bq2419x_device_info *di = container_of(psy,
struct bq2419x_device_info,
wall);
int ret = -EINVAL;
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
val->intval = (USB_EVENT_CHARGER == di->power_type);
ret = 0;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
val->intval = AC_CURRENT_LIMIT;
ret = 0;
break;
default:
return -EINVAL;
}
return ret;
}
/* NOTE: the USB event can come before we register with the notifiers,
* so we would not know that an SDP/DCP is connected on boot; therefore
* we query the status manually on probe. */
int twl6030_usbotg_get_status(void);
/* i2c read/write util functions */
static int bq2419x_write_block(struct bq2419x_device_info *di, u8 *value,
u8 reg, unsigned num_bytes)
{
struct i2c_msg msg[1];
int ret;
*value = reg;
msg[0].addr = di->client->addr;
msg[0].flags = 0;
msg[0].buf = value;
msg[0].len = num_bytes + 1;
ret = i2c_transfer(di->client->adapter, msg, 1);
/* i2c_transfer returns number of messages transferred */
if (ret != 1) {
dev_err(di->dev,
"i2c_write failed to transfer all messages\n");
if (ret < 0)
return ret;
else
return -EIO;
} else {
return 0;
}
}
static int bq2419x_read_block(struct bq2419x_device_info *di, u8 *value,
u8 reg, unsigned num_bytes)
{
struct i2c_msg msg[2];
u8 buf;
int ret;
buf = reg;
msg[0].addr = di->client->addr;
msg[0].flags = 0;
msg[0].buf = &buf;
msg[0].len = 1;
msg[1].addr = di->client->addr;
msg[1].flags = I2C_M_RD;
msg[1].buf = value;
msg[1].len = num_bytes;
ret = i2c_transfer(di->client->adapter, msg, 2);
/* i2c_transfer returns number of messages transferred */
if (ret != 2) {
dev_err(di->dev,
"i2c_read failed to transfer all messages\n");
if (ret < 0)
return ret;
else
return -EIO;
} else {
return 0;
}
}
static int bq2419x_write_byte(struct bq2419x_device_info *di, u8 value, u8 reg)
{
/* 2-byte buffer: offset 1 holds the data, offset 0 is used by bq2419x_write_block for the register address */
u8 temp_buffer[2] = { 0 };
/* offset 1 contains the data */
temp_buffer[1] = value;
return bq2419x_write_block(di, temp_buffer, reg, 1);
}
static int bq2419x_read_byte(struct bq2419x_device_info *di, u8 *value, u8 reg)
{
return bq2419x_read_block(di, value, reg, 1);
}
/* end i2c read/write util functions */
bool bq2419x_is_charging_done(void)
{
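/* Per the bq2419x datasheet, a charge state of 3 (CHRG_STAT bits 11) means
charge termination done */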
return (3 == global_charge_state);
}
EXPORT_SYMBOL(bq2419x_is_charging_done);
/* Start of Functions Related to 19x Functionality */
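/* The register helpers below share a read-modify-write pattern: read the
* register, clear the field with its _MASK define, OR in the new code
* shifted by the field's _LSHIFT define, and write the byte back. A minimal
* sketch (FIELD is a generic placeholder, not a define from bq2419x.h):
*
*   bq2419x_read_byte(di, &val, RegAddress);
*   val = (val & FIELD_MASK) | (code << FIELD_LSHIFT);
*   bq2419x_write_byte(di, val, RegAddress);
*/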
//////////////////////////////////////////
////------------------------------------------//
//// REG 0 //
////------------------------------------------//
////////////////////////////////////////////////
/*************************************************************
* bqEnHIZ: Disable or enable the high impedance mode
* Accepted Inputs: ENABLE, DISABLE
*
* Returns:
* -EINVAL(22): Invalid Input
* 0: I2C Write Success
* <0: I2C Write Fail
*
* Can be modified to send Ack bit as the success code
*
**************************************************************/
static int bqEnHIZ(struct bq2419x_device_info *di, int enable)
{
int success;
int regbits;
unsigned int Reg00Val;
u8 data;
int rc = -EINVAL;
if ((enable != ENABLE) && (enable != DISABLE)) {
rc = -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg00Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
rc = success;
} else {
Reg00Val = data;
regbits = enable << ENHIZ_LSHIFT;
Reg00Val &= ENHIZ_MASK;
Reg00Val |= regbits;
data = Reg00Val;
success = bq2419x_write_byte(di, data, Reg00Address);
rc = success;
}
}
/* When write any command to REG00, Bit 0 and Bit1 will be reset to 0.
Only valid input current limit settings through I2C are 100mA and 1.2A.
Write REG07 bit 7 DPDM_EN=1 after REG00 write in order to recover to default
input current limit setting (such as 500mA or 3A). */
if (BQ24196_REV_1_3 == di->bqchip_version)
bqEnDPDM(di, ENABLE);
return rc;
}
#if 0
static int bqSetVINDPM(struct bq2419x_device_info *di, int vdpm)
{
///*************************************************************
//* bqSetVINDPM:
//*
//* Accepted Inputs: VINDPM_MIN >= vdpm <= VINDPM_MAX
//*
//* Returns:
//* -EINVAL: Invalid Setting
//* 0: I2C Write Success
//* <0: I2C Write Fail
//*
//* Can be modified to send Ack bit as the success code
//* NOTE: Accepted values are determine by VINDPM_MIN,
//* VINDPM_MAX variables defined in .h file. If invalid voltage*
//* is detected regulation voltage will be kept as it is.
//**************************************************************
int code = 0;
int vregbits = 0;
int success;
unsigned int Reg00Val;
u8 data;
if ((vdpm < VINDPM_MIN) || (vdpm > VINDPM_MAX)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg00Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg00Val = data;
code = ((vdpm - VINDPM_MIN)/VINDPM_STEP);
vregbits = code << VINDPM_LSHIFT;
Reg00Val &= VINDPM_MASK;
Reg00Val |= vregbits;
data = Reg00Val;
success = bq2419x_write_byte(di, data, Reg00Address);
return success;
}
}
#endif
static int bqSetIINDPM(struct bq2419x_device_info *di, int code)
{
///*************************************************************
//* bqSetIINDPM: Changes input current limit, actual current
//* is the lesser of the I2C and ILIM settings
//*
//* Accepted Inputs: IINLIM_100MA, IINLIM_150MA, IINLIM_500MA
//* IINLIM_900MA, IINLIM_1200MA, IINLIM_1500MA, IINLIM_2000MA,
//* IINLIM_3000MA
//*
//* Returns:
//* -EINVAL: Invalid Input
//* 0: I2C Write Success
//* <0: I2C Write Fail
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg00Val;
u8 data;
if ((code != IINLIM_100MA) && (code != IINLIM_150MA) &&
(code != IINLIM_500MA) && (code != IINLIM_900MA) &&
(code != IINLIM_1200MA) && (code != IINLIM_1500MA) &&
(code != IINLIM_2000MA) && (code != IINLIM_3000MA)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg00Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg00Val = data;
regbits = code << IINDPM_LSHIFT;
Reg00Val &= IINDPM_MASK;
Reg00Val |= regbits;
data = Reg00Val;
success = bq2419x_write_byte(di, data, Reg00Address);
return success;
}
}
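/* Example (a sketch): cap the input current at 500 mA for a standard USB
host port with bqSetIINDPM(di, IINLIM_500MA); the charger then draws the
lesser of this I2C setting and the ILIM pin setting. */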
////////////////////////////////////////////////
////------------------------------------------//
//// REG 1 //
////------------------------------------------//
////////////////////////////////////////////////
//
static int bqRstREG(struct bq2419x_device_info *di)
{
///*************************************************************
//* bqRstREG: Resets Register Settings
//* Accepted Inputs: None
//*
//* Returns:
//* 0: I2C Write Success
//* <0: I2C Write Fail
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg01Val;
u8 data = 0x00;
success = bq2419x_read_byte(di, &data, Reg01Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg01Val = data;
regbits = RESET << RESETREG_LSHIFT ;
Reg01Val &= RESETREG_MASK;
Reg01Val |= regbits;
data = Reg01Val;
success = bq2419x_write_byte(di, data, Reg01Address);
return success;
}
static int bqRstWatchDog(struct bq2419x_device_info *di)
{
///*************************************************************
//* bqRstWatchDog: Resets WatchDog Timer
//* Accepted Inputs: None
//*
//* Returns:
//* 0: I2C Write Success
//* <0: I2C Write Fail
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg01Val;
u8 data;
success = bq2419x_read_byte(di, &data, Reg01Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg01Val = data;
regbits = RESET << RESETWATCHDOG_LSHIFT ;
Reg01Val &= RESETWATCHDOG_MASK;
Reg01Val |= regbits;
data = Reg01Val;
success = bq2419x_write_byte(di, data, Reg01Address);
return success;
}
static int bqSetCHGCONFIG(struct bq2419x_device_info *di, int code)
{
///*************************************************************
//* bqSetCHGCONFIG: Charger Configuration: Disable, Charge
//* Battery, or OTG
//*
//* Accepted Inputs: DISABLE, CHARGE_BATTERY, OTG
//*
//* Returns:
//* -EINVAL: Invalid Input
//* 0: I2C Write Success
//* <0: I2C Write Fail
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg01Val;
u8 data;
if ((code != DISABLE) && (code != CHARGE_BATTERY) && (code != OTG)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg01Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg01Val = data;
regbits = code << CHGCONFIG_LSHIFT;
Reg01Val &= CHGCONFIG_MASK;
Reg01Val |= regbits;
data = Reg01Val;
success = bq2419x_write_byte(di, data, Reg01Address);
return success;
}
}
static int bqSetSYSMIN(struct bq2419x_device_info *di, int vlimit)
{
///*************************************************************
//* bqSetSYSMIN:
//*
//* Accepted Inputs: SYSMIN_MIN >= vlimit <= SYSMIN_MAX
//*
//* Returns:
//* -EINVAL: Invalid Setting
//* 0: I2C Write Success
//* <0: I2C Write Fail
//*
//* Can be modified to send Ack bit as the success code
//* NOTE: Accepted values are determine by SYSMIN_MIN,
//* SYSMIN_MAX variables defined in .h file. If invalid voltage*
//* is detected regulation voltage will be kept as it is.
//**************************************************************/
int code = 0;
int regbits = 0;
int success;
unsigned int Reg01Val;
u8 data;
if ((vlimit < SYSMIN_MIN) || (vlimit > SYSMIN_MAX)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg01Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg01Val = data;
code = ((vlimit - SYSMIN_MIN)/SYSMIN_STEP);
regbits = code << SYSMIN_LSHIFT;
Reg01Val &= SYSMIN_MASK;
Reg01Val |= regbits;
data = Reg01Val;
success = bq2419x_write_byte(di, data, Reg01Address);
dev_dbg(di->dev, "bqSetSYSMIN: %d\n", vlimit);
return success;
}
}
#if 0
static int bqSetOTGILIM(struct bq2419x_device_info *di, int code)
{
int success;
int regbits;
unsigned int Reg01Val;
u8 data;
if ((code != BOOSTLIM_500mA) && (code != BOOSTLIM_1300mA)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg01Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg01Val = data;
regbits = code << BOOSTLIM_LSHIFT;
Reg01Val &= BOOSTLIM_MASK;
Reg01Val |= regbits;
data = Reg01Val;
success = bq2419x_write_byte(di, data, Reg01Address);
return success;
}
}
#endif
////////////////////////////////////////////////
////------------------------------------------//
//// REG 2 //
////------------------------------------------//
////////////////////////////////////////////////
static int bqSetFASTCHRG(struct bq2419x_device_info *di, int ichg)
{
///*************************************************************
//* bqSetFASTCHRG:
//*
//* Accepted Inputs: ICHG_MIN >= ichg <= ICHG_MAX
//*
//* Returns:
//* -EINVAL: Invalid Setting
//* 0: I2C Write Success
//* <0: I2C Write Fail
//*
//* Can be modified to send Ack bit as the success code
//* NOTE: Accepted values are determine by ICHG_MIN,
//* ICHG_MAX variables defined in .h file. If invalid voltage
//* is detected regulation voltage will be kept as it is.
//**************************************************************/
int code = 0;
int regbits = 0;
int success;
unsigned int Reg02Val;
u8 data;
if ((ichg < ICHG_MIN) || (ichg > ICHG_MAX)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg02Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg02Val = data;
code = ((ichg - ICHG_MIN)/ICHG_STEP);
regbits = code << ICHG_LSHIFT;
Reg02Val &= ICHG_MASK;
Reg02Val |= regbits;
data = Reg02Val;
success = bq2419x_write_byte(di, data, Reg02Address);
return success;
}
}
static int bqGetFASTCHRG(struct bq2419x_device_info *di)
{
int success;
unsigned int Reg02Val;
u8 data;
success = bq2419x_read_byte(di, &data, Reg02Address);
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg02Val = data & ~ICHG_MASK;
Reg02Val >>= ICHG_LSHIFT;
Reg02Val = ((Reg02Val * ICHG_STEP) + ICHG_MIN);
return Reg02Val;
}
#if 0
////////////////////////////////////////////////
////------------------------------------------//
//// REG 3 //
////------------------------------------------//
////////////////////////////////////////////////
static int bqSetPRECHRG(struct bq2419x_device_info *di, int iprechg)
{
///*************************************************************
//* bqSetPRECHRG:
//*
//* Accepted Inputs: PRECHG_MIN <= iprechg <= PRECHG_MAX
//*
//* Returns:
//* -EINVAL: Invalid Setting
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//* NOTE: Accepted values are determined by PRECHG_MIN,
//* PRECHG_MAX variables defined in the .h file. If an invalid
//* current is requested the precharge current is kept as it is.
//**************************************************************/
int code = 0;
int regbits = 0;
int success;
unsigned int Reg03Val;
u8 data;
if ((iprechg < PRECHG_MIN) || (iprechg > PRECHG_MAX)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg03Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg03Val = data;
code = ((iprechg - PRECHG_MIN)/PRECHG_STEP);
regbits = code << PRECHG_LSHIFT;
Reg03Val &= PRECHG_MASK;
Reg03Val |= regbits;
data = Reg03Val;
success = bq2419x_write_byte(di, data, Reg03Address);
return success;
}
}
#endif
#if 0
static int bqSetTERMCHRG(struct bq2419x_device_info *di, int iterm)
{
///*************************************************************
//* bqSetTERMCHRG:
//*
//* Accepted Inputs: ITERM_MIN <= iterm <= ITERM_MAX
//*
//* Returns:
//* -EINVAL: Invalid Setting
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//* NOTE: Accepted values are determined by ITERM_MIN,
//* ITERM_MAX variables defined in the .h file. If an invalid
//* current is requested the termination current is kept as it is.
//**************************************************************/
int code = 0;
int regbits = 0;
int success;
unsigned int Reg03Val;
u8 data;
if ((iterm < ITERM_MIN) || (iterm > ITERM_MAX)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg03Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg03Val = data;
code = ((iterm - ITERM_MIN)/ITERM_STEP);
regbits = code << ITERM_LSHIFT;
Reg03Val &= ITERM_MASK;
Reg03Val |= regbits;
data = Reg03Val;
success = bq2419x_write_byte(di, data, Reg03Address);
return success;
}
}
#endif
////////////////////////////////////////////////
////------------------------------------------//
//// REG 4 //
////------------------------------------------//
////////////////////////////////////////////////
#if 0
static int bqSetChgVoltage(struct bq2419x_device_info *di, int vreg)
{
///*************************************************************
//* bqSetChgVoltage: Send battery regulation voltage in mV and
//* the function will calculate the closest value without
//* going above the desired value. The function calculates
//* the I2C code and stores it in regbits. Reg04Val keeps track
//* of the overall register value as there are other features
//* that can be programmed on this register.
//* Accepted Inputs: VREG_MIN <= vreg <= VREG_MAX
//*
//* Returns:
//* -EINVAL: Invalid Regulation Voltage
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//* NOTE: Accepted values are determined by VREG_MAX, VREG_MIN
//* variables defined in the .h file. If an invalid voltage is
//* requested the regulation voltage is kept as it is.
//**************************************************************/
int code = 0;
int regbits = 0;
int success;
unsigned int Reg04Val;
u8 data;
if ((vreg < VREG_MIN) || (vreg > VREG_MAX)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg04Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg04Val = data;
code = ((vreg - VREG_MIN)/VREG_STEP);
regbits = code << VREG_LSHIFT;
Reg04Val &= VREG_MASK;
Reg04Val |= regbits;
data = Reg04Val;
success = bq2419x_write_byte(di, data, Reg04Address);
return success;
}
}
#endif
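/*
 * All bqSetXXX helpers in this file share the same read-modify-write
 * shape; a minimal sketch of the pattern (the FIELD_* names are
 * placeholders, not real register definitions):
 *
 * u8 data;
 * if (bq2419x_read_byte(di, &data, RegAddress))
 * return -EIO;
 * data &= FIELD_MASK; // clear the field, keep the other bits
 * data |= code << FIELD_LSHIFT; // insert the new field value
 * return bq2419x_write_byte(di, data, RegAddress);
 *
 * Note the masks are defined with the field bits cleared -- see
 * bqGetFASTCHRG, which extracts the field with data & ~ICHG_MASK.
 */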
#if 0
static int bqSetBATLOWV(struct bq2419x_device_info *di, int setting)
{
///*************************************************************
//* bqSetBATLOWV: BATLOWV setting 2.8V or 3.0V
//* Accepted Inputs: BATLOWV_2800mV, BATLOWV_3000mV
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg04Val;
u8 data;
if ((setting != BATLOWV_2800mV) && (setting != BATLOWV_3000mV)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg04Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg04Val = data;
regbits = setting << BATLOWV_LSHIFT;
Reg04Val &= BATLOWV_MASK;
Reg04Val |= regbits;
data = Reg04Val;
success = bq2419x_write_byte(di, data, Reg04Address);
return success;
}
}
#endif
static int bqSetRECHRG(struct bq2419x_device_info *di, int setting)
{
///*************************************************************
//* bqSetRECHRG: Battery Recharge Threshold setting 100mV or
//* 300mV
//* Accepted Inputs: VRECHG_100mV, VRECHG_300mV
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg04Val;
u8 data;
if ((setting != VRECHG_100mV) && (setting != VRECHG_300mV)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg04Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg04Val = data;
regbits = setting << VRECHG_LSHIFT;
Reg04Val &= VRECHG_MASK;
Reg04Val |= regbits;
data = Reg04Val;
success = bq2419x_write_byte(di, data, Reg04Address);
return success;
}
}
////////////////////////////////////////////////
////------------------------------------------//
//// REG 5 //
////------------------------------------------//
////////////////////////////////////////////////
#if 0
static int bqEnTERM(struct bq2419x_device_info *di, int enable)
{
///*************************************************************
//* bqEnTERM:Disable or enable Charge Termination
//* Accepted Inputs: ENABLE, DISABLE
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg05Val;
u8 data;
if ((enable != ENABLE) && (enable != DISABLE)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg05Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg05Val = data;
regbits = enable << ENTERM_LSHIFT;
Reg05Val &= ENTERM_MASK;
Reg05Val |= regbits;
data = Reg05Val;
success = bq2419x_write_byte(di, data, Reg05Address);
return success;
}
}
#endif
#if 0
static int bqTERMSTAT(struct bq2419x_device_info *di, int enable)
{
///*************************************************************
//* bqTERMSTAT: Matches ITERM or Indicates before actual
//* termination on STAT
//* Accepted Inputs: TERMSTAT_ITERM, TERMSTAT_EARLY
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg05Val;
u8 data;
if ((enable != TERMSTAT_ITERM) && (enable != TERMSTAT_EARLY)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg05Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg05Val = data;
regbits = enable << TERMSTAT_LSHIFT;
Reg05Val &= TERMSTAT_MASK;
Reg05Val |= regbits;
data = Reg05Val;
success = bq2419x_write_byte(di, data, Reg05Address);
return success;
}
}
#endif
static int bqSetWatchDog(struct bq2419x_device_info *di, int code)
{
///*************************************************************
//* bqSetWatchDog:
//*
//* Accepted Inputs: DISABLE, WatchDog_40s, WatchDog_80s,
//* WatchDog_160s
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg05Val;
u8 data;
if ((code != DISABLE) && (code != WatchDog_40s) &&
(code != WatchDog_80s) && (code != WatchDog_160s)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg05Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg05Val = data;
regbits = code << WatchDog_LSHIFT;
Reg05Val &= WatchDog_MASK;
Reg05Val |= regbits;
data = Reg05Val;
success = bq2419x_write_byte(di, data, Reg05Address);
return success;
}
}
#if 0
static int bqEnTIMER(struct bq2419x_device_info *di, int enable)
{
///*************************************************************
//* bqEnTIMER:Disable or enable Safety Timer Setting
//* Accepted Inputs: ENABLE, DISABLE
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg05Val;
u8 data;
if ((enable != ENABLE) && (enable != DISABLE)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg05Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg05Val = data;
regbits = enable << ENTIMER_LSHIFT;
Reg05Val &= ENTIMER_MASK;
Reg05Val |= regbits;
data = Reg05Val;
success = bq2419x_write_byte(di, data, Reg05Address);
return success;
}
}
#endif
static int bqSetFastChgTimer(struct bq2419x_device_info *di, int code)
{
///*************************************************************
//* bqSetFastChgTimer:
//*
//* Accepted Inputs: CHGTIMER_5h, CHGTIMER_8h, CHGTIMER_12h,
//* CHGTIMER_20h
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg05Val;
u8 data;
if ((code != CHGTIMER_5h) && (code != CHGTIMER_8h) &&
(code != CHGTIMER_12h) && (code != CHGTIMER_20h)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg05Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg05Val = data;
regbits = code << CHGTIMER_LSHIFT;
Reg05Val &= CHGTIMER_MASK;
Reg05Val |= regbits;
data = Reg05Val;
success = bq2419x_write_byte(di, data, Reg05Address);
return success;
}
}
////////////////////////////////////////////////
////------------------------------------------//
//// REG 6 //
////------------------------------------------//
////////////////////////////////////////////////
#if 0
static int bqSetBATCOMP(struct bq2419x_device_info *di, int resistor)
{
///*************************************************************
//* bqSetBATCOMP:
//*
//* Accepted Inputs: BATCOMP_MIN <= resistor <= BATCOMP_MAX
//*
//* Returns:
//* -EINVAL: Invalid Setting
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//* NOTE: Accepted values are determined by BATCOMP_MIN,
//* BATCOMP_MAX variables defined in the .h file. If an invalid
//* resistance is requested the IR compensation setting is kept
//* as it is.
//**************************************************************/
//
int code = 0;
int regbits = 0;
int success;
unsigned int Reg06Val;
u8 data;
if ((resistor < BATCOMP_MIN) || (resistor > BATCOMP_MAX)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg06Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg06Val = data;
code = ((resistor - BATCOMP_MIN)/BATCOMP_STEP);
regbits = code << BATCOMP_LSHIFT;
Reg06Val &= BATCOMP_MASK;
Reg06Val |= regbits;
data = Reg06Val;
success = bq2419x_write_byte(di, data, Reg06Address);
return success;
}
}
static int bqSetVCLAMP(struct bq2419x_device_info *di, int vclamp)
{
///*************************************************************
//* bqSetVCLAMP:
//*
//* Accepted Inputs: VCLAMP_MIN <= vclamp <= VCLAMP_MAX
//*
//* Returns:
//* -EINVAL: Invalid Setting
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//* NOTE: Accepted values are determined by VCLAMP_MIN,
//* VCLAMP_MAX variables defined in the .h file. If an invalid
//* voltage is requested the clamp voltage is kept as it is.
//**************************************************************/
int code = 0;
int regbits = 0;
int success;
unsigned int Reg06Val;
u8 data;
if ((vclamp < VCLAMP_MIN) || (vclamp > VCLAMP_MAX)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg06Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg06Val = data;
code = ((vclamp - VCLAMP_MIN)/VCLAMP_STEP);
regbits = code << VCLAMP_LSHIFT;
Reg06Val &= VCLAMP_MASK;
Reg06Val |= regbits;
data = Reg06Val;
success = bq2419x_write_byte(di, data, Reg06Address);
return success;
}
}
static int bqSetTREG(struct bq2419x_device_info *di, int code)
{
///*************************************************************
//* bqSetTREG:
//*
//* Accepted Inputs: TREG_60C, TREG_80C, TREG_100C, TREG_120C
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg06Val;
u8 data;
if ((code != TREG_60C) && (code != TREG_80C) &&
(code != TREG_100C) && (code != TREG_120C)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg06Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg06Val = data;
regbits = code << TREG_LSHIFT;
Reg06Val &= TREG_MASK;
Reg06Val |= regbits;
data = Reg06Val;
success = bq2419x_write_byte(di, data, Reg06Address);
return success;
}
}
#endif
//////////////////////////////////////////////
//------------------------------------------//
// REG 7 //
//------------------------------------------//
//////////////////////////////////////////////
static int bqEnDPDM(struct bq2419x_device_info *di, int enable)
{
///*************************************************************
//* bqEnDPDM:Disable or enable D+/D- Detection
//* Accepted Inputs: ENABLE, DISABLE
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg07Val;
u8 data;
if ((enable != ENABLE) && (enable != DISABLE)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg07Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg07Val = data;
regbits = enable << ENDPDM_LSHIFT;
Reg07Val &= ENDPDM_MASK;
Reg07Val |= regbits;
data = Reg07Val;
success = bq2419x_write_byte(di, data, Reg07Address);
return success;
}
}
#if 0
static int bqEnTMR2X(struct bq2419x_device_info *di, int enable)
{
///*************************************************************
//* bqEnTMR2X: Disable or enable the 2x Extended Safety Timer
//* Accepted Inputs: ENABLE, DISABLE
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg07Val;
u8 data;
if ((enable != ENABLE) && (enable != DISABLE)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg07Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg07Val = data;
regbits = enable << EN2XTIMER_LSHIFT;
Reg07Val &= EN2XTIMER_MASK;
Reg07Val |= regbits;
data = Reg07Val;
success = bq2419x_write_byte(di, data, Reg07Address);
return success;
}
}
static int bqOffBATFET(struct bq2419x_device_info *di, int enable)
{
///*************************************************************
//* bqOffBATFET: Set or clear the BATFET-off (battery disconnect) bit
//* Accepted Inputs: ENABLE, DISABLE
//*
//* Returns:
//* -EINVAL: Invalid Input
//* otherwise: status of the I2C read/write (0 on success)
//*
//* Can be modified to send Ack bit as the success code
//*
//**************************************************************/
int success;
int regbits;
unsigned int Reg07Val;
u8 data;
if ((enable != ENABLE) && (enable != DISABLE)) {
return -EINVAL;
} else {
success = bq2419x_read_byte(di, &data, Reg07Address );
if (success) {
dev_err(di->dev, "Unable to read byte from charger\n");
return success;
}
Reg07Val = data;
regbits = enable << OFFBATFET_LSHIFT;
Reg07Val &= OFFBATFET_MASK;
Reg07Val |= regbits;
data = Reg07Val;
success = bq2419x_write_byte(di, data, Reg07Address);
return success;
}
}
#endif
/* End of Functions Related to 19x Functionality */
extern int bq27x00_read_voltage_mv(void);
static bool bq2419x_is_vbat_in_range(void);
static void check_and_set_Vsysmin(struct bq2419x_device_info *di)
{
/* Always set WDT before modifying Vsysmin */
bqSetWatchDog(di, BQ2419x_WDT_TIMEOUT);
bqRstWatchDog(di);
if (di->boostback_fix_required) {
if(bq2419x_is_vbat_in_range())
bqSetSYSMIN(di, BQ2419x_BOOSTBACK_THRESHOLD_LO);
else
bqSetSYSMIN(di, BQ2419x_BOOSTBACK_THRESHOLD_HI);
}
}
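/*
 * Sequencing note: the watchdog is re-armed and kicked before Vsysmin
 * is touched, presumably so a watchdog expiry cannot reset REG01 to
 * its defaults in the middle of the boost-back workaround. The
 * resulting flow is:
 * bqSetWatchDog(di, BQ2419x_WDT_TIMEOUT); -> bqRstWatchDog(di); ->
 * bqSetSYSMIN(di, LO or HI threshold, per bq2419x_is_vbat_in_range()).
 */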
static void adjust_fast_charging_current_limit(struct bq2419x_device_info *di)
{
const int current_temp = bq27x00_get_battery_temperature();
const int current_ichr = bqGetFASTCHRG(di);
int target_ichr_limit = current_ichr;
if ((current_ichr >= ICHG_DEFAULT) && (current_temp <= 160))
target_ichr_limit = BQ2419x_LOW_BATTEMP_ICHR_LIMIT;
else if ((current_ichr < ICHG_DEFAULT) && (current_temp >= 180))
target_ichr_limit = ICHG_DEFAULT;
if (target_ichr_limit != current_ichr)
bqSetFASTCHRG(di, target_ichr_limit);
dev_dbg(di->dev, "adj_fast_ichg_lim: %d, %d, %d\n",
current_ichr, current_temp, target_ichr_limit);
}
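/*
 * The two temperature thresholds form a hysteresis band (values are in
 * tenths of a degree C, assuming bq27x00_get_battery_temperature()
 * reports 0.1 C units): the limit drops to
 * BQ2419x_LOW_BATTEMP_ICHR_LIMIT once the pack is at or below 16.0 C,
 * and is only restored to ICHG_DEFAULT once it warms back up to
 * 18.0 C, so the charge current cannot oscillate around a single
 * threshold.
 */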
static void bq2419x_start_dmtimer(struct bq2419x_device_info *di, bool active)
{
if (stopdmtimershutdown == false ) {
dev_dbg(di->dev, "bq2419x_start_dmtimer: %d\n", active);
if (active) {
if (!first_active)
omap_dm_timer_stop(wdt_timer_ptr);
else
first_active = false;
omap_dm_timer_set_prescaler(wdt_timer_ptr, 0);
omap_dm_timer_set_int_enable(wdt_timer_ptr,
OMAP_TIMER_INT_MATCH | OMAP_TIMER_INT_OVERFLOW);
omap_dm_timer_set_load_start(wdt_timer_ptr, 1, di->tick_rate);
check_and_set_Vsysmin(di);
} else {
omap_dm_timer_stop(wdt_timer_ptr);
omap_dm_timer_disable(wdt_timer_ptr);
bqSetSYSMIN(di, BQ2419x_BOOSTBACK_THRESHOLD_HI);
}
}
}
static void bq2419x_charger_show_status(struct bq2419x_device_info *di)
{
const u8 read_reg = di->reg08;
const u8 vbus_state = (read_reg >> 6) & 0x03;
const u8 charge_state = (read_reg >> 4) & 0x03;
const u8 dpm_state = read_reg & (1<<3);
const u8 pg_state = read_reg & (1<<2);
static u8 pg_state_old = 0;
const u8 therm_state = read_reg & (1<<1);
const u8 vsys_state = read_reg & (1<<0);
if (pg_state) {
dev_dbg(di->dev,"S: Power Good\n");
if (!pg_state_old)
bqEnDPDM(di, ENABLE);
} else {
dev_dbg(di->dev,"S: Not Power Good\n");
}
global_charge_state = charge_state;
pg_state_old = pg_state;
switch (charge_state)
{
case 3:
dev_dbg(di->dev,"S: CHARGE STATE = %d (CHARGE DONE)\n", charge_state);
/* If we get multiple Charge Done interrupts within a 3 second
 * interval, the battery is likely aged, so set a lower recharge
 * threshold voltage.
 */
if (threshold_voltage_checked) {
const unsigned long time_now = jiffies;
printk("diff=%lu\n", msecs_to_jiffies(time_now - time_previous));
if(time_before_eq(time_now, (time_previous + msecs_to_jiffies(3000)))
&& (time_previous != 0)) {
bqSetRECHRG(di, VRECHG_300mV);
threshold_voltage_checked = false;
time_previous = 0;
dev_dbg(di->dev, "setting VRECHG to 300mV\n");
}
else
time_previous = time_now;
}
break;
case 2:
dev_dbg(di->dev,"S: CHARGE STATE = %d (FAST CHARGING)\n",charge_state);
break;
case 1:
dev_dbg(di->dev,"S: CHARGE STATE = %d (PRECHARGE)\n",charge_state);
break;
case 0:
dev_dbg(di->dev,"S: CHARGE STATE = %d (NOT CHARGING)\n",charge_state);
break;
default:
dev_dbg(di->dev,"S: CHARGE STATE = %d\n",charge_state);
break;
}
switch (vbus_state)
{
case 3:
dev_dbg(di->dev,"S: VBUS STATE = %d (OTG)\n",vbus_state);
break;
case 2:
dev_dbg(di->dev,"S: VBUS STATE = %d (Adapter port)\n",vbus_state);
break;
case 1:
dev_dbg(di->dev,"S: VBUS STATE = %d (USB host)\n",vbus_state);
break;
case 0:
dev_dbg(di->dev,"S: VBUS STATE = %d (Unknown)\n",vbus_state);
break;
default:
dev_dbg(di->dev,"S: VBUS STATE = %d\n",vbus_state);
break;
}
if (dpm_state)
dev_dbg(di->dev,"S: Dynamic Power Management enabled\n");
if (therm_state)
dev_dbg(di->dev,"S: Thermal regulation\n");
if (vsys_state)
dev_dbg(di->dev,"S: Battery too low\n");
}
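/*
 * REG08 (system status) layout as decoded above:
 * bits 7:6 VBUS_STAT, bits 5:4 CHRG_STAT, bit 3 DPM_STAT,
 * bit 2 PG_STAT, bit 1 THERM_STAT, bit 0 VSYS_STAT.
 */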
static void bq2419x_charger_handle_error(struct bq2419x_device_info *di)
{
const u8 read_reg = di->reg09;
const u8 watchdog_fault = read_reg & 0x80;
const u8 otg_fault = read_reg & 0x40;
const u8 chrg_fault = (read_reg >> 4) & 0x03;
const u8 bat_fault = read_reg & 0x08;
const u8 ntc_fault = read_reg & 0x07;
switch (chrg_fault)
{
case 3:
dev_dbg(di->dev,"E: Charger fault: Charge (Safe) timer expiration\n");
break;
case 2:
dev_dbg(di->dev,"E: Charger fault: Thermal shutdown\n");
break;
case 1:
dev_dbg(di->dev,"E: Charger fault: Input fault (OVP or bad source)\n");
break;
case 0:
break;
default:
dev_dbg(di->dev,"E: Charger fault: %d\n",chrg_fault);
break;
}
switch (ntc_fault)
{
case 6:
dev_dbg(di->dev,"E: Ntc fault: Hot\n");
break;
case 5:
dev_dbg(di->dev,"E: Ntc fault: Cold\n");
break;
case 0:
break;
default:
dev_dbg(di->dev,"E: Ntc fault: %d\n",ntc_fault);
break;
}
if (watchdog_fault)
dev_dbg(di->dev,"E: Watchdog timer expired\n");
if (otg_fault)
dev_dbg(di->dev,"E: OTG fault: Cannot start boost function\n");
if (bat_fault)
dev_dbg(di->dev,"E: Battery fault: System OVP\n");
}
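/*
 * REG09 (fault) layout as decoded above:
 * bit 7 WATCHDOG_FAULT, bit 6 OTG_FAULT, bits 5:4 CHRG_FAULT,
 * bit 3 BAT_FAULT, bits 2:0 NTC_FAULT.
 */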
static void bq2419x_interrupt_work_func(struct work_struct *work)
{
struct bq2419x_device_info *di = container_of(work,
struct bq2419x_device_info,
iwork);
u8 reg08, reg09;
int ret;
ret = bq2419x_read_byte(di, &reg08, Reg08Address);
if (ret) {
dev_err(di->dev, "Unable to read reg08 from charger\n");
return;
}
ret = bq2419x_read_byte(di, &reg09, Reg09Address);
if (ret) {
dev_err(di->dev, "Unable to read reg09-1 from charger\n");
return;
}
if (di->bqchip_version == BQ24196_REV_1_3) {
/* This second read is due to errata in PG1.3 */
ret = bq2419x_read_byte(di, &reg09, Reg09Address);
if (ret) {
dev_err(di->dev, "Unable to read reg09-2 from charger\n");
return;
}
}
di->reg08 = reg08;
di->reg09 = reg09;
dev_dbg(di->dev, "INTERRUPT 08=%x, 09=%x\n", di->reg08, di->reg09);
bq2419x_charger_show_status(di);
bq2419x_charger_handle_error(di);
}
static irqreturn_t bq24196_interrupt(int irq, void *dev)
{
struct bq2419x_device_info * const di = dev_get_drvdata(dev);
schedule_work(&di->iwork);
return IRQ_HANDLED;
}
static void bq2419x_event_work_func(struct work_struct *work)
{
struct bq2419x_device_info * const di = container_of(work,
struct bq2419x_device_info,
ework);
mutex_lock(&di->lock);
switch (di->event) {
case USB_EVENT_VBUS:
dev_dbg(di->dev, "USB_EVENT_VBUS\n");
di->power_type = di->event;
bq2419x_start_dmtimer(di, true);
adjust_fast_charging_current_limit(di);
gpio_direction_output(di->gpio_psel, 1);
gpio_direction_output(di->gpio_ce, 0);
if (di->bqchip_version == BQ24196_REV_1_3) {
bqEnDPDM(di, ENABLE);
} else {
bqSetIINDPM(di, IINLIM_500MA);
}
bqSetFastChgTimer(di, di->stimer_sdp);
bqSetCHGCONFIG(di, CHARGE_BATTERY);
power_supply_changed(&di->usb);
power_supply_changed(&di->wall);
break;
case USB_EVENT_CHARGER:
dev_dbg(di->dev, "USB_EVENT_CHARGER\n");
di->power_type = di->event;
bq2419x_start_dmtimer(di, true);
adjust_fast_charging_current_limit(di);
gpio_direction_output(di->gpio_psel, 0);
gpio_direction_output(di->gpio_ce, 0);
if (di->bqchip_version == BQ24196_REV_1_3) {
bqEnDPDM(di, ENABLE);
} else {
bqSetIINDPM(di, IINLIM_2000MA);
}
bqSetFastChgTimer(di, di->stimer_dcp);
bqSetCHGCONFIG(di, CHARGE_BATTERY);
power_supply_changed(&di->usb);
power_supply_changed(&di->wall);
break;
case USB_EVENT_NONE:
dev_dbg(di->dev, "USB_EVENT_NONE\n");
di->power_type = di->event;
bqSetCHGCONFIG(di, DISABLE);
gpio_direction_output(di->gpio_ce, 1);
gpio_direction_output(di->gpio_psel, 1);
if (!threshold_voltage_checked) {
bqSetRECHRG(di, VRECHG_100mV);
threshold_voltage_checked = true;
dev_dbg(di->dev, "setting VRECHG to 100mV\n");
}
bq2419x_start_dmtimer(di, false);
bqRstREG(di);
power_supply_changed(&di->usb);
power_supply_changed(&di->wall);
break;
case USB_EVENT_ID:
dev_dbg(di->dev, "USB_EVENT_ID");
bqSetCHGCONFIG(di, OTG);
di->power_type = di->event;
bq2419x_start_dmtimer(di, true);
power_supply_changed(&di->usb);
power_supply_changed(&di->wall);
break;
case USB_EVENT_ENUMERATED:
/* Nothing to do for this one */
dev_dbg(di->dev, "USB_EVENT_ENUMERATED\n");
if (di->bqchip_version == BQ24196_REV_1_3)
bqEnDPDM(di, ENABLE);
break;
case USB_EVENT_NO_CONTACT:
dev_dbg(di->dev, "USB_EVENT_NO_CONTACT\n");
/* This is not a charging state; this event is followed
by a USB_EVENT_NONE once VBUS is removed */
if (di->boostback_fix_required)
bq2419x_start_dmtimer(di, true);
break;
default:
dev_dbg(di->dev, "(%s): unknown cmd\n", __func__);
break;
}
bq2419x_charger_handle_error(di);
mutex_unlock(&di->lock);
}
static int bq2419x_charger_event(struct notifier_block *nb, unsigned long event,
void *_data)
{
struct bq2419x_device_info * const di =
container_of(nb, struct bq2419x_device_info, nb);
di->cfg_params = 1;
di->event = event;
schedule_work(&di->ework);
return 0;
}
static bool bq2419x_is_vbat_in_range(void)
{
const int v = bq27x00_read_voltage_mv();
pr_debug("bq2419x_is_vbat_in_range: vbat = %d mV\n", v);
/* 3.4 V threshold, in millivolts to match the _mv accessor */
return v > 3400;
}
#ifdef DEBUG_BQ2419X_REG
static long dgbtest_OTG = 0xdeadbeef;
static ssize_t bq2419x_set_dgbtest_OTG(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
long val;
int retval;
retval = strict_strtol(buf, 10, &val);
if (retval < 0)
return -EINVAL;
dgbtest_OTG = val;
return count;
}
static ssize_t bq2419x_show_dgbtest_OTG(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n", dgbtest_OTG);
}
static ssize_t bq2419x_set_regNN(int reg,struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int retval = 0;
unsigned long val = 0;
u8 data = 0;
struct bq2419x_device_info *di = dev_get_drvdata(dev);
retval = strict_strtol(buf, 16, &val);
if (retval < 0)
return -EINVAL;
data = (u8)val;
retval = bq2419x_write_byte(di, data, reg);
dev_dbg(di->dev, "(set) ret=%d, %d=%s==%x\n",retval, reg, buf, data);
return count;
}
static ssize_t bq2419x_show_regNN(int reg, struct device *dev,
struct device_attribute *attr,
char *buf)
{
int retval = 0;
u8 read_reg = 0;
struct bq2419x_device_info *di = dev_get_drvdata(dev);
retval = bq2419x_read_byte(di, &read_reg, reg);
dev_dbg(di->dev, "(get) ret=%d, %d=%x\n", retval, reg, read_reg);
return sprintf(buf, "%x\n", read_reg);
}
static ssize_t bq2419x_set_reg00(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(0,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg00(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(0,dev,attr,buf);
}
static ssize_t bq2419x_set_reg01(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(1,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg01(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(1,dev,attr,buf);
}
static ssize_t bq2419x_set_reg02(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(2,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg02(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(2,dev,attr,buf);
}
static ssize_t bq2419x_set_reg03(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(3,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg03(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(3,dev,attr,buf);
}
static ssize_t bq2419x_set_reg04(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(4,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg04(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(4,dev,attr,buf);
}
static ssize_t bq2419x_set_reg05(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(5,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg05(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(5,dev,attr,buf);
}
static ssize_t bq2419x_set_reg06(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(6,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg06(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(6,dev,attr,buf);
}
static ssize_t bq2419x_set_reg07(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(7,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg07(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(7,dev,attr,buf);
}
static ssize_t bq2419x_set_reg08(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(8,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg08(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(8,dev,attr,buf);
}
static ssize_t bq2419x_set_reg09(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(9,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg09(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(9,dev,attr,buf);
}
static ssize_t bq2419x_set_reg10(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
return bq2419x_set_regNN(10,dev,attr,buf,count);
}
static ssize_t bq2419x_show_reg10(struct device *dev, struct device_attribute *attr, char *buf)
{
return bq2419x_show_regNN(10,dev,attr,buf);
}
static DEVICE_ATTR(dgbtest_OTG, S_IWUSR | S_IRUGO,
bq2419x_show_dgbtest_OTG,
bq2419x_set_dgbtest_OTG);
static DEVICE_ATTR(reg00, S_IWUSR | S_IRUGO,
bq2419x_show_reg00,
bq2419x_set_reg00);
static DEVICE_ATTR(reg01, S_IWUSR | S_IRUGO,
bq2419x_show_reg01,
bq2419x_set_reg01);
static DEVICE_ATTR(reg02, S_IWUSR | S_IRUGO,
bq2419x_show_reg02,
bq2419x_set_reg02);
static DEVICE_ATTR(reg03, S_IWUSR | S_IRUGO,
bq2419x_show_reg03,
bq2419x_set_reg03);
static DEVICE_ATTR(reg04, S_IWUSR | S_IRUGO,
bq2419x_show_reg04,
bq2419x_set_reg04);
static DEVICE_ATTR(reg05, S_IWUSR | S_IRUGO,
bq2419x_show_reg05,
bq2419x_set_reg05);
static DEVICE_ATTR(reg06, S_IWUSR | S_IRUGO,
bq2419x_show_reg06,
bq2419x_set_reg06);
static DEVICE_ATTR(reg07, S_IWUSR | S_IRUGO,
bq2419x_show_reg07,
bq2419x_set_reg07);
static DEVICE_ATTR(reg08, S_IWUSR | S_IRUGO,
bq2419x_show_reg08,
bq2419x_set_reg08);
static DEVICE_ATTR(reg09, S_IWUSR | S_IRUGO,
bq2419x_show_reg09,
bq2419x_set_reg09);
static DEVICE_ATTR(reg10, S_IWUSR | S_IRUGO,
bq2419x_show_reg10,
bq2419x_set_reg10);
static struct attribute *bq2419x_attributes[] = {
&dev_attr_dgbtest_OTG.attr,
&dev_attr_reg00.attr,
&dev_attr_reg01.attr,
&dev_attr_reg02.attr,
&dev_attr_reg03.attr,
&dev_attr_reg04.attr,
&dev_attr_reg05.attr,
&dev_attr_reg06.attr,
&dev_attr_reg07.attr,
&dev_attr_reg08.attr,
&dev_attr_reg09.attr,
&dev_attr_reg10.attr,
NULL,
};
static const struct attribute_group bq2419x_attr_group = {
.attrs = bq2419x_attributes,
};
#endif /* DEBUG_BQ2419X_REG */
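/*
 * With DEBUG_BQ2419X_REG defined, the reg00..reg10 attributes give raw
 * register access from userspace. Example shell session (the sysfs
 * path depends on the i2c bus/address and is illustrative only):
 *
 * cat /sys/bus/i2c/devices/4-006b/reg05 # dump REG05 as hex
 * echo 8d > /sys/bus/i2c/devices/4-006b/reg05 # write 0x8d to REG05
 *
 * Writes are parsed base-16 by bq2419x_set_regNN().
 */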
static ssize_t bq2419x_set_charge_status(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct bq2419x_device_info * const di = dev_get_drvdata(dev);
if (count > 1) {
if ('1' == buf[0]) {
bqSetCHGCONFIG(di, CHARGE_BATTERY);
dev_dbg(di->dev, "charger enabled\n");
} else if ('2' == buf[0]) {
bqSetCHGCONFIG(di, OTG);
bq2419x_start_dmtimer(di, true);
dev_dbg(di->dev, "OTG boost mode enabled\n");
} else {
bqSetCHGCONFIG(di, DISABLE);
dev_dbg(di->dev, "charger disabled\n");
}
}
return count;
}
static ssize_t bq2419x_show_charge_status(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bq2419x_device_info * const di = dev_get_drvdata(dev);
int retval = 0;
u8 read_reg = 0;
retval = bq2419x_read_byte(di, &read_reg, Reg01Address);
if (retval)
return retval;
dev_dbg(di->dev, "read reg01: 0x%x\n", read_reg);
read_reg &= ~CHGCONFIG_MASK;
read_reg >>= CHGCONFIG_LSHIFT;
return sprintf(buf, "%x\n", read_reg);
}
static ssize_t bq2419x_set_hiz_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct bq2419x_device_info * const di = dev_get_drvdata(dev);
if (count > 1) {
if ('1' == buf[0]) {
bqEnHIZ(di, 1);
dev_dbg(di->dev, "HiZ enabled\n");
} else {
bqEnHIZ(di, 0);
dev_dbg(di->dev, "HiZ disabled\n");
}
}
return count;
}
static ssize_t bq2419x_show_hiz_enable(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bq2419x_device_info * const di = dev_get_drvdata(dev);
int retval = 0;
u8 read_reg = 0;
retval = bq2419x_read_byte(di, &read_reg, Reg00Address);
if (retval)
return retval;
dev_dbg(di->dev, "read reg00: 0x%x\n", read_reg);
read_reg &= ~ENHIZ_MASK;
read_reg >>= ENHIZ_LSHIFT;
return sprintf(buf, "%x\n", read_reg);
}
static DEVICE_ATTR(charge_status, S_IWUSR | S_IRUSR,
bq2419x_show_charge_status,
bq2419x_set_charge_status);
static DEVICE_ATTR(hiz_enable, S_IWUSR | S_IRUSR,
bq2419x_show_hiz_enable,
bq2419x_set_hiz_enable);
static struct attribute *bq2419x_mfg_attributes[] = {
&dev_attr_charge_status.attr,
&dev_attr_hiz_enable.attr,
NULL,
};
static const struct attribute_group bq2419x_mfg_attr_group = {
.attrs = bq2419x_mfg_attributes,
};
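/*
 * The manufacturing attributes map single characters to charger modes
 * (see bq2419x_set_charge_status above): '1' -> charge battery,
 * '2' -> OTG boost, anything else -> disable. Illustrative usage
 * (sysfs path abbreviated):
 *
 * echo 2 > /sys/.../charge_status # enable OTG boost mode
 * echo 1 > /sys/.../hiz_enable # put the input in HiZ
 */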
static void bq2419x_wdt_work_func(struct work_struct *work)
{
struct bq2419x_device_info * const di = container_of(work,
struct bq2419x_device_info,
wdt_work);
mutex_lock(&di->lock);
check_and_set_Vsysmin(di);
adjust_fast_charging_current_limit(di);
/* A second WDT reset is needed here to ensure WDT RST CMD went through */
bqRstWatchDog(di);
wake_unlock(&di->timer_wakelock);
mutex_unlock(&di->lock);
}
static irqreturn_t bq2419x_wdt_irq_handler(int irq, void *dev)
{
struct bq2419x_device_info * const di = dev_get_drvdata(dev);
omap_dm_timer_write_status(wdt_timer_ptr, OMAP_TIMER_INT_OVERFLOW);
omap_dm_timer_read_status(wdt_timer_ptr);
wake_lock_timeout(&di->timer_wakelock, msecs_to_jiffies(10000));
schedule_work(&di->wdt_work);
return IRQ_HANDLED;
}
static unsigned int omap_get_clk_load(u32 seconds, u32 milliseconds)
{
u32 tick_rate, cycles;
if (!seconds && !milliseconds) {
pr_debug("omap_get_clk_load err\n");
return 0;
}
tick_rate = clk_get_rate(omap_dm_timer_get_fclk(wdt_timer_ptr));
cycles = (tick_rate * seconds) + (tick_rate * milliseconds / 1000);
return 0xffffffff - cycles;
}
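/*
 * The dmtimer counts up and fires on overflow, so the load value is
 * 0xffffffff minus the requested number of timer ticks. Worked example
 * with an illustrative 32768 Hz fclk: a 10 s period needs
 * 32768 * 10 = 327680 cycles, giving 0xffffffff - 327680 = 0xfffaffff.
 */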
static int __devinit bq2419x_charger_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct bq2419x_device_info *di;
struct bq2419x_platform_data *pdata = NULL;
int ret;
u8 read_reg = 0;
pr_debug("bq2419x probe\n");
pdata = client->dev.platform_data;
if (!pdata) {
dev_err(&client->dev, "no platform data supplied\n");
return -EINVAL;
}
di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
di->dev = &client->dev;
di->client = client;
di->gpio_int = pdata->gpio_int;
di->gpio_ce = pdata->gpio_ce;
di->gpio_psel = pdata->gpio_psel;
di->stimer_sdp = pdata->stimer_sdp;
di->stimer_dcp = pdata->stimer_dcp;
di->boostback_fix_required = true;
i2c_set_clientdata(client, di);
/* identify the chip */
ret = bq2419x_read_byte(di, &read_reg, Reg10Address);
if (ret < 0) {
dev_err(di->dev, "chip not present at address 0x%x\n",
client->addr);
ret = -EINVAL;
goto err_kfree;
}
di->bqchip_version = (read_reg & BQ2419x_PN_REV_BIT_MASK);
dev_dbg(di->dev, "read_reg=0x%x\n", read_reg);
switch (di->bqchip_version) {
case BQ24196_REV_1_3:
dev_dbg(di->dev, "identified chip BQ24196_REV_1_3\n");
di->boostback_fix_required = true;
break;
case BQ24196_REV_1_4:
dev_dbg(di->dev, "identified chip BQ24196_REV_1_4\n");
di->boostback_fix_required = false;
break;
case BQ24196_REV_1_2:
dev_dbg(di->dev, "identified chip BQ24196_REV_1_2\n");
di->boostback_fix_required = true;
break;
default:
dev_dbg(di->dev, "unsupported bq chip\n");
dev_dbg(di->dev, "Chip address 0x%x", client->addr);
dev_dbg(di->dev, "bq chip version reg value 0x%x", read_reg);
ret = -EINVAL;
goto err_kfree;
break;
}
if (DevID == client->addr) {
dev_dbg(di->dev, "found chip at 0x6b\n");
} else {
dev_err(di->dev, "not the chip 0x%x\n", client->addr);
ret = -EINVAL;
goto err_kfree;
}
/* Register interrupt and its work */
mutex_init(&di->lock);
wake_lock_init(&di->timer_wakelock, WAKE_LOCK_SUSPEND, "bq2419x_timer_lock");
INIT_WORK(&di->iwork, bq2419x_interrupt_work_func);
INIT_WORK(&di->wdt_work, bq2419x_wdt_work_func);
wdt_timer_ptr = omap_dm_timer_request();
if (!wdt_timer_ptr) {
dev_err(di->dev, "no more gp timers available\n");
ret = -EINVAL;
goto err_kfree;
}
ret = omap_dm_timer_set_source(wdt_timer_ptr, OMAP_TIMER_SRC_SYS_CLK);
if (ret)
dev_err(di->dev, "omap_dm_timer_set_source failed: %d\n", ret);
ret = omap_dm_timer_set_prescaler(wdt_timer_ptr, 0);
if (ret)
dev_err(di->dev, "omap_dm_timer_set_prescaler failed: %d\n", ret);
ret = request_irq(omap_dm_timer_get_irq(wdt_timer_ptr), bq2419x_wdt_irq_handler,
IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, "bq2419x timer", di->dev);
if (ret) {
dev_err(di->dev, "request_irq timer_irq failed\n");
ret = -ENODEV;
goto err_kfree;
}
di->tick_rate = omap_get_clk_load((BQ2419x_OMAP_DMTIMER_INTERVAL/2), 0);
dev_dbg(di->dev, "omap_get_clk_load: 0x%x\n", di->tick_rate);
ret = omap_dm_timer_set_int_enable(wdt_timer_ptr,
OMAP_TIMER_INT_MATCH | OMAP_TIMER_INT_OVERFLOW);
if (ret)
dev_err(di->dev, "omap_dm_timer_set_int_enable failed: %d\n", ret);
omap_dm_timer_disable(wdt_timer_ptr);
if (gpio_is_valid(di->gpio_psel)) {
if (gpio_request(di->gpio_psel, "gpio_psel")) {
dev_err(di->dev, "gpio_psel pin not available\n");
ret = di->gpio_psel = -ENODEV;
goto err_kfree;
} else
gpio_direction_output(di->gpio_psel, 1);
}
if (gpio_is_valid(di->gpio_int)) {
if (gpio_request(di->gpio_int, "gpio_int")) {
dev_err(di->dev, "gpio_int pin not available\n");
ret = di->gpio_int = -ENODEV;
goto err_kfree;
} else
gpio_direction_input(di->gpio_int);
}
if (gpio_is_valid(di->gpio_ce)) {
if (gpio_request(di->gpio_ce, "gpio_ce")) {
dev_err(di->dev, "gpio_ce pin not available\n");
ret = di->gpio_ce = -ENODEV;
goto err_kfree;
} else
gpio_direction_output(di->gpio_ce, 1);
}
ret = request_irq(OMAP_GPIO_IRQ(di->gpio_int),
bq24196_interrupt,
IRQF_TRIGGER_FALLING,
"chg_nint",
di->dev);
if (ret < 0) {
dev_err(di->dev, "%s: cannot regitser IRQ %d, ret=%d\n", __func__,
di->gpio_int, ret);
goto err_kfree;
}
di->nb.notifier_call = bq2419x_charger_event;
di->active = 0;
di->params.enable = 1;
di->cfg_params = 1;
di->enable_iterm = 1;
di->usb.name = "bq2419x-usb";
di->usb.type = POWER_SUPPLY_TYPE_USB;
di->usb.properties = bq2419x_usb_props;
di->usb.num_properties = ARRAY_SIZE(bq2419x_usb_props);
di->usb.get_property = bq2419x_usb_get_property;
di->usb.external_power_changed = NULL;
di->wall.name = "bq2419x-wall";
di->wall.type = POWER_SUPPLY_TYPE_MAINS;
di->wall.properties = bq2419x_wall_props;
di->wall.num_properties = ARRAY_SIZE(bq2419x_wall_props);
di->wall.get_property = bq2419x_wall_get_property;
di->wall.external_power_changed = NULL;
ret = power_supply_register(di->dev, &di->usb);
if (ret) {
dev_err(di->dev, "failed to register usb: %d\n", ret);
goto err_pwrsys;
}
ret = power_supply_register(di->dev, &di->wall);
if (ret) {
dev_err(di->dev, "failed to register wall: %d\n", ret);
goto err_pwrsys;
}
#ifdef DEBUG_BQ2419X_REG
ret = sysfs_create_group(&di->dev->kobj, &bq2419x_attr_group);
if (ret) {
dev_dbg(di->dev, "could not create sysfs files\n");
goto err_sysfs;
}
#endif
ret = sysfs_create_group(&di->dev->kobj, &bq2419x_mfg_attr_group);
if (ret) {
dev_dbg(di->dev, "could not create mfg sysfs files\n");
goto err_sysfs;
}
INIT_WORK(&di->ework, bq2419x_event_work_func);
/* only reset charger if battery is present to allow boot without battery */
if (bq27x00_is_battery_present()) {
dev_dbg(di->dev, "resting registers\n");
bqRstREG(di);
}
di->otg = usb_get_phy(USB_PHY_TYPE_USB2);
if (di->otg) {
ret = usb_register_notifier(di->otg, &di->nb);
if (ret) {
dev_err(di->dev, "otg register notifier"
" failed %d\n", ret);
goto err_otg;
}
di->event = twl6030_usbotg_get_status();
if (di->event)
schedule_work(&di->ework);
} else {
ret = -ENODEV;
dev_err(di->dev, "usb_get_phy failed %d\n", ret);
goto err_otg;
}
return 0;
err_otg:
#ifdef DEBUG_BQ2419X_REG
sysfs_remove_group(&di->dev->kobj, &bq2419x_attr_group);
#endif /* DEBUG_BQ2419X_REG */
sysfs_remove_group(&di->dev->kobj, &bq2419x_mfg_attr_group);
cancel_work_sync(&di->ework);
err_sysfs:
free_irq(OMAP_GPIO_IRQ(di->gpio_int), di->dev);
cancel_work_sync(&di->iwork);
err_pwrsys:
err_kfree:
kfree(di);
return ret;
}
static int __devexit bq2419x_charger_remove(struct i2c_client *client)
{
struct bq2419x_device_info *di = i2c_get_clientdata(client);
bqRstREG(di);
free_irq(OMAP_GPIO_IRQ(di->gpio_int),di->dev);
cancel_work_sync(&di->ework);
cancel_work_sync(&di->iwork);
#ifdef DEBUG_BQ2419X_REG
sysfs_remove_group(&client->dev.kobj, &bq2419x_attr_group);
#endif /* DEBUG_BQ2419X_REG */
sysfs_remove_group(&client->dev.kobj, &bq2419x_mfg_attr_group);
flush_scheduled_work();
usb_unregister_notifier(di->otg, &di->nb);
kfree(di);
return 0;
}
static void bq2419x_charger_shutdown(struct i2c_client *client)
{
struct bq2419x_device_info * const di = i2c_get_clientdata(client);
free_irq(OMAP_GPIO_IRQ(di->gpio_int),di->dev);
usb_unregister_notifier(di->otg, &di->nb);
stopdmtimershutdown = true;
omap_dm_timer_stop(wdt_timer_ptr);
omap_dm_timer_disable(wdt_timer_ptr);
omap_dm_timer_free(wdt_timer_ptr);
cancel_work_sync(&di->ework);
cancel_work_sync(&di->iwork);
cancel_work_sync(&di->wdt_work);
bqRstREG(di);
dev_dbg(di->dev, "%s \n", __func__);
}
static int bq2419x_charger_suspend(struct device *dev)
{
return 0;
}
static int bq2419x_charger_resume(struct device *dev)
{
struct platform_device * const pdev = to_platform_device(dev);
struct bq2419x_device_info * const di = platform_get_drvdata(pdev);
cancel_work_sync(&di->iwork);
schedule_work(&di->iwork);
return 0;
}
static const struct i2c_device_id bq2419x_id[] = {
{ "bq24196", 0 },
{},
};
static const struct dev_pm_ops pm_ops = {
.suspend = bq2419x_charger_suspend,
.resume = bq2419x_charger_resume,
};
static struct i2c_driver bq2419x_charger_driver = {
.probe = bq2419x_charger_probe,
.remove = __devexit_p(bq2419x_charger_remove),
.shutdown = bq2419x_charger_shutdown,
.id_table = bq2419x_id,
.driver = {
.name = "bq24196",
.pm = &pm_ops,
},
};
static int __init bq2419x_charger_init(void)
{
pr_debug("bq2419x driver init\n");
return i2c_add_driver(&bq2419x_charger_driver);
}
module_init(bq2419x_charger_init);
static void __exit bq2419x_charger_exit(void)
{
pr_debug("bq2419x driver exit\n");
i2c_del_driver(&bq2419x_charger_driver);
}
module_exit(bq2419x_charger_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments Inc");
| gpl-2.0 |
barche/k3d | tests/sdk/xml_xpath.cpp | 3438 | // K-3D
// Copyright (c) 1995-2009, Timothy M. Shead
//
// Contact: [email protected]
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include <k3dsdk/log_control.h>
#include <k3dsdk/xml.h>
#include <k3dsdk/xpath.h>
using namespace k3d::xml;
#include <iostream>
#include <stdexcept>
#include <sstream>
#define test_expression(expression) \
{ \
if(!(expression)) \
{ \
std::ostringstream buffer; \
buffer << "Expression failed at line " << __LINE__ << ": " << #expression; \
throw std::runtime_error(buffer.str()); \
} \
}
int main(int argc, char* argv[])
{
k3d::log_color_level(true);
k3d::log_show_level(true);
k3d::log_minimum_level(k3d::K3D_LOG_LEVEL_DEBUG);
try
{
element document("k3d",
element("nodes",
element("node",
attribute("class", "foo")
),
element("node",
attribute("factory", "bar"),
element("properties",
element("property",
attribute("user_property", ""),
attribute("type", "double")
)
)
)
),
element("dependencies"
)
);
xpath::result_set results;
results = xpath::match(document, "");
test_expression(results.size() == 0);
results = xpath::match(document, "/");
test_expression(results.size() == 0);
results = xpath::match(document, "/foo");
test_expression(results.size() == 0);
results = xpath::match(document, "/k3d");
test_expression(results.size() == 1);
test_expression(results[0]->name == "k3d");
results = xpath::match(document, "/k3d/*");
test_expression(results.size() == 2);
test_expression(results[0]->name == "nodes");
test_expression(results[1]->name == "dependencies");
results = xpath::match(document, "/k3d/nodes");
test_expression(results.size() == 1);
test_expression(results[0]->name == "nodes");
results = xpath::match(document, "/k3d/nodes/node");
test_expression(results.size() == 2);
test_expression(results[0]->name == "node");
test_expression(results[1]->name == "node");
results = xpath::match(document, "/k3d/nodes/node[@class]");
test_expression(results.size() == 1);
test_expression(find_attribute(*results[0], "class"));
test_expression(!find_attribute(*results[0], "factory"));
results = xpath::match(document, "/k3d/nodes/node/properties/property[@user_property][@type='double']");
test_expression(results.size() == 1);
results = xpath::match(document, "nodes");
test_expression(results.size() == 1);
test_expression(results[0]->name == "nodes");
results = xpath::match(document, "nodes/node");
test_expression(results.size() == 2);
test_expression(results[0]->name == "node");
test_expression(results[1]->name == "node");
}
catch(std::exception& e)
{
std::cerr << e.what() << std::endl;
return 1;
}
return 0;
}
| gpl-2.0 |
ntj/ComplexRapidMiner | src/com/rapidminer/operator/performance/PredictionAverage.java | 3662 | /*
* RapidMiner
*
* Copyright (C) 2001-2008 by Rapid-I and the contributors
*
* Complete list of developers available at our web site:
*
* http://rapid-i.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/.
*/
package com.rapidminer.operator.performance;
import com.rapidminer.example.Attribute;
import com.rapidminer.example.Example;
import com.rapidminer.example.ExampleSet;
import com.rapidminer.operator.OperatorException;
import com.rapidminer.tools.math.Averagable;
/**
* Returns the average value of the prediction. This criterion can be used to
* detect whether a learning scheme predicts nonsense, e.g. always make the same
* error. This criterion is not suitable for evaluating the performance and
* should never be used as main criterion. The {@link #getFitness()} method
* always returns 0.
*
* @author Ingo Mierswa
* @version $Id: PredictionAverage.java,v 2.18 2006/03/21 15:35:51 ingomierswa
* Exp $
*/
public class PredictionAverage extends MeasuredPerformance {
private static final long serialVersionUID = -5316112625406102611L;
private double sum;
private double squaredSum;
private double count;
private Attribute labelAttribute;
private Attribute weightAttribute;
public PredictionAverage() {
}
public PredictionAverage(PredictionAverage pa) {
super(pa);
this.sum = pa.sum;
this.squaredSum = pa.squaredSum;
this.count = pa.count;
this.labelAttribute = (Attribute)pa.labelAttribute.clone();
if (pa.weightAttribute != null)
this.weightAttribute = (Attribute)pa.weightAttribute.clone();
}
public double getExampleCount() {
return count;
}
public void countExample(Example example) {
double weight = 1.0d;
if (weightAttribute != null)
weight = example.getValue(weightAttribute);
count += weight;
double v = example.getLabel();
if (!Double.isNaN(v)) {
sum += v * weight;
// weighted second moment: the weight must enter linearly, not squared
squaredSum += v * v * weight;
}
}
public double getMikroAverage() {
return sum / count;
}
public double getMikroVariance() {
double avg = getMikroAverage();
return (squaredSum / count) - avg * avg;
}
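/*
 * The variance above uses the shortcut E[x^2] - E[x]^2 over the
 * weighted sums. Example with labels {1, 2, 3} and unit weights:
 * sum = 6, squaredSum = 14, count = 3, so the mean is 2 and the
 * variance is 14/3 - 2*2 = 2/3.
 */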
public void startCounting(ExampleSet set, boolean useExampleWeights) throws OperatorException {
super.startCounting(set, useExampleWeights);
count = 0;
sum = 0.0;
this.labelAttribute = set.getAttributes().getLabel();
if (useExampleWeights)
this.weightAttribute = set.getAttributes().getWeight();
}
public String getName() {
return "prediction_average";
}
/** Returns 0. */
public double getFitness() {
return 0.0;
}
public void buildSingleAverage(Averagable performance) {
PredictionAverage other = (PredictionAverage) performance;
this.sum += other.sum;
this.squaredSum += other.squaredSum;
this.count += other.count;
}
public String getDescription() {
return "This is not a real performance measure, but merely the average of the predicted labels.";
}
}
| gpl-2.0 |
TanjinHe/Mesmer4.0 | qd/include/qd/c_dd.h | 3057 | /*
* include/c_dd.h
*
* This work was supported by the Director, Office of Science, Division
* of Mathematical, Information, and Computational Sciences of the
* U.S. Department of Energy under contract number DE-AC03-76SF00098.
*
* Copyright (c) 2000-2001
*
* Contains C wrapper function prototypes for double-double precision
* arithmetic. This can also be used from fortran code.
*/
#ifndef _QD_C_DD_H
#define _QD_C_DD_H
#include <qd/qd_config.h>
#ifdef __cplusplus
extern "C" {
#endif
/* add */
void c_dd_add(const double *a, const double *b, double *c);
void c_dd_add_d_dd(double a, const double *b, double *c);
void c_dd_add_dd_d(const double *a, double b, double *c);
/* sub */
void c_dd_sub(const double *a, const double *b, double *c);
void c_dd_sub_d_dd(double a, const double *b, double *c);
void c_dd_sub_dd_d(const double *a, double b, double *c);
/* mul */
void c_dd_mul(const double *a, const double *b, double *c);
void c_dd_mul_d_dd(double a, const double *b, double *c);
void c_dd_mul_dd_d(const double *a, double b, double *c);
/* div */
void c_dd_div(const double *a, const double *b, double *c);
void c_dd_div_d_dd(double a, const double *b, double *c);
void c_dd_div_dd_d(const double *a, double b, double *c);
/* copy */
void c_dd_copy(const double *a, double *b);
void c_dd_copy_d(double a, double *b);
void c_dd_sqrt(const double *a, double *b);
void c_dd_sqr(const double *a, double *b);
void c_dd_abs(const double *a, double *b);
void c_dd_npwr(const double *a, int b, double *c);
void c_dd_nroot(const double *a, int b, double *c);
void c_dd_nint(const double *a, double *b);
void c_dd_aint(const double *a, double *b);
void c_dd_floor(const double *a, double *b);
void c_dd_ceil(const double *a, double *b);
void c_dd_exp(const double *a, double *b);
void c_dd_log(const double *a, double *b);
void c_dd_log10(const double *a, double *b);
void c_dd_sin(const double *a, double *b);
void c_dd_cos(const double *a, double *b);
void c_dd_tan(const double *a, double *b);
void c_dd_asin(const double *a, double *b);
void c_dd_acos(const double *a, double *b);
void c_dd_atan(const double *a, double *b);
void c_dd_atan2(const double *a, const double *b, double *c);
void c_dd_sinh(const double *a, double *b);
void c_dd_cosh(const double *a, double *b);
void c_dd_tanh(const double *a, double *b);
void c_dd_asinh(const double *a, double *b);
void c_dd_acosh(const double *a, double *b);
void c_dd_atanh(const double *a, double *b);
void c_dd_sincos(const double *a, double *s, double *c);
void c_dd_sincosh(const double *a, double *s, double *c);
void c_dd_read(const char *s, double *a);
void c_dd_swrite(const double *a, char *s);
void c_dd_write(const double *a);
void c_dd_neg(const double *a, double *b);
void c_dd_rand(double *a);
void c_dd_comp(const double *a, const double *b, int *result);
void c_dd_comp_dd_d(const double *a, double b, int *result);
void c_dd_comp_d_dd(double a, const double *b, int *result);
void c_dd_pi(double *a);
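/*
 * Usage sketch -- each double-double value is passed as a double[2]
 * holding the high and low words (illustrative only):
 *
 *   double a[2], b[2], c[2];
 *   char s[64];
 *   c_dd_copy_d(1.0, a);   -- a = 1.0
 *   c_dd_pi(b);            -- b = pi
 *   c_dd_add(a, b, c);     -- c = a + b
 *   c_dd_swrite(c, s);     -- format c into s
 */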
#ifdef __cplusplus
}
#endif
#endif /* _QD_C_DD_H */
| gpl-2.0 |
site4com/j-gov-3-it | layouts/joomla/content/blog_style_default_item_title.php | 1670 | <?php
/**
* @package Joomla.Site
* @subpackage Layout
*
* @copyright Copyright (C) 2005 - 2014 Open Source Matters, Inc. All rights reserved.
* @license GNU General Public License version 2 or later; see LICENSE.txt
*/
defined('_JEXEC') or die;
// Create a shortcut for params.
$params = $displayData->params;
$canEdit = $displayData->params->get('access-edit');
JHtml::addIncludePath(JPATH_COMPONENT.'/helpers/html');
?>
<?php if ($params->get('show_title') || $displayData->state == 0 || ($params->get('show_author') && !empty($displayData->author ))) : ?>
<div class="page-header">
<?php if ($params->get('show_title')) : ?>
<h2>
<?php if ($params->get('link_titles') && $params->get('access-view')) : ?>
<a href="<?php echo JRoute::_(ContentHelperRoute::getArticleRoute($displayData->slug, $displayData->catid)); ?>" >
<?php echo $this->escape($displayData->title); ?></a>
<?php else : ?>
<?php echo $this->escape($displayData->title); ?>
<?php endif; ?>
</h2>
<?php endif; ?>
<?php if ($displayData->state == 0) : ?>
<span class="label label-warning"><?php echo JText::_('JUNPUBLISHED'); ?></span>
<?php endif; ?>
<?php if (strtotime($displayData->publish_up) > strtotime(JFactory::getDate())) : ?>
<span class="label label-warning"><?php echo JText::_('JNOTPUBLISHEDYET'); ?></span>
<?php endif; ?>
<?php if ((strtotime($displayData->publish_down) < strtotime(JFactory::getDate())) && $displayData->publish_down != '0000-00-00 00:00:00') : ?>
<span class="label label-warning"><?php echo JText::_('JEXPIRED'); ?></span>
<?php endif; ?>
</div>
<?php endif; ?>
| gpl-2.0 |
elbruno/Blog | 20170807 Holo MRDesignLab Move Resize Holograms/Assets/HUX/Editor/Utility/HUXEditorUtils.cs | 29118 | //
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
//
using HUX.Buttons;
using System;
using System.Collections.Generic;
using System.Reflection;
using UnityEditor;
using UnityEngine;
namespace HUX
{
public class HUXEditorUtils
{
public readonly static Color DefaultColor = new Color(1f, 1f, 1f);
public readonly static Color DisabledColor = new Color(0.6f, 0.6f, 0.6f);
public readonly static Color BorderedColor = new Color(0.8f, 0.8f, 0.8f);
public readonly static Color WarningColor = new Color(1f, 0.85f, 0.6f);
public readonly static Color ErrorColor = new Color(1f, 0.55f, 0.5f);
public readonly static Color SuccessColor = new Color(0.8f, 1f, 0.75f);
public readonly static Color ObjectColor = new Color(0.85f, 0.9f, 1f);
public readonly static Color HelpBoxColor = new Color(0.22f, 0.23f, 0.24f, 0.45f);
public readonly static Color SectionColor = new Color(0.42f, 0.43f, 0.47f, 0.25f);
public readonly static Color DarkColor = new Color(0.1f, 0.1f, 0.1f);
public readonly static Color ObjectColorEmpty = new Color(0.75f, 0.8f, 0.9f);
/// <summary>
/// Draws a field for scriptable object profiles
/// If base class T is abstract, includes a button for creating a profile of each type that inherits from base class T
/// Otherwise just includes button for creating a profile of type T
/// </summary>
/// <typeparam name="T"></typeparam>
/// <param name="profile"></param>
/// <returns></returns>
public static T DrawProfileField<T>(T profile) where T : ButtonProfile
{
Color prevColor = GUI.color;
GUI.color = Color.Lerp(Color.white, Color.gray, 0.5f);
EditorGUILayout.BeginVertical(EditorStyles.helpBox);
GUI.color = Color.Lerp(Color.white, Color.gray, 0.25f);
EditorGUILayout.LabelField("Select a " + typeof(T).Name + " or create a new profile", EditorStyles.miniBoldLabel);
T newProfile = profile;
EditorGUILayout.BeginHorizontal();
newProfile = (T)EditorGUILayout.ObjectField(profile, typeof(T), false);
// is this an abstract class?
if (typeof(T).IsAbstract)
{
EditorGUILayout.BeginVertical();
List<Type> types = GetDerivedTypes(typeof(T), Assembly.GetAssembly(typeof(T)));
foreach (Type profileType in types)
{
if (GUILayout.Button("Create " + profileType.Name))
{
profile = CreateProfile<T>(profileType);
}
}
EditorGUILayout.EndVertical();
}
else
{
if (GUILayout.Button("Create Profile"))
{
profile = CreateProfile<T>();
}
}
EditorGUILayout.EndHorizontal();
EditorGUILayout.EndVertical();
if (profile == null)
{
ErrorMessage("You must choose a button profile.", null);
}
GUI.color = prevColor;
return newProfile;
}
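/* A minimal usage sketch (the "InteractionButton" component and its "Profile"
field are hypothetical, not part of this utility), showing the intended call
pattern from a custom inspector:

public override void OnInspectorGUI()
{
InteractionButton button = (InteractionButton)target;
button.Profile = HUXEditorUtils.DrawProfileField<ButtonProfile>(button.Profile);
HUXEditorUtils.SaveChanges(button);
}
*/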
public static T CreateProfile<T>(Type profileType) where T : ButtonProfile
{
T asset = (T)ScriptableObject.CreateInstance(profileType);
if (asset != null)
{
AssetDatabase.CreateAsset(asset, "Assets/New" + profileType.Name + ".asset");
AssetDatabase.SaveAssets();
}
else
{
Debug.LogError("Couldn't create profile of type " + profileType.Name);
}
return asset;
}
public static T CreateProfile<T>() where T : ButtonProfile
{
T asset = ScriptableObject.CreateInstance<T>();
AssetDatabase.CreateAsset(asset, "Assets/New" + typeof(T).Name + ".asset");
AssetDatabase.SaveAssets();
return asset;
}
public static void DrawFilterTagField(SerializedObject serializedObject, string propertyName)
{
SerializedProperty p = serializedObject.FindProperty(propertyName);
EditorGUI.BeginChangeCheck();
EditorGUILayout.PropertyField(p);
if (EditorGUI.EndChangeCheck())
serializedObject.ApplyModifiedProperties();
}
public static void DrawProfileInspector(ButtonProfile profile, Component targetComponent)
{
ProfileInspector profileEditor = (ProfileInspector)Editor.CreateEditor(profile);
profileEditor.targetComponent = targetComponent;
profileEditor.OnInspectorGUI();
}
public static T DropDownComponentField<T>(string label, T obj, Transform transform, bool showComponentName = false) where T : UnityEngine.Component
{
T[] optionObjects = transform.GetComponentsInChildren<T>(true);
int selectedIndex = 0;
string[] options = new string[optionObjects.Length + 1];
options[0] = "(None)";
for (int i = 0; i < optionObjects.Length; i++)
{
if (showComponentName)
{
options[i + 1] = optionObjects[i].GetType().Name + " (" + optionObjects[i].name + ")";
}
else
{
options[i + 1] = optionObjects[i].name;
}
if (obj == optionObjects[i])
{
selectedIndex = i + 1;
}
}
EditorGUILayout.BeginHorizontal();
int newIndex = EditorGUILayout.Popup(label, selectedIndex, options);
if (newIndex == 0)
{
// Zero means '(None)'
obj = null;
}
else
{
obj = optionObjects[newIndex - 1];
}
//draw the object field so people can click it
obj = (T)EditorGUILayout.ObjectField(obj, typeof(T), true);
EditorGUILayout.EndHorizontal();
return obj;
}
/// <summary>
/// Draws enum values as a set of toggle fields
/// </summary>
/// <typeparam name="T"></typeparam>
/// <param name="label"></param>
/// <param name="enumObj"></param>
/// <returns></returns>
public static int EnumCheckboxField<T>(string label, T enumObj) where T : struct, IConvertible
{
if (!typeof(T).IsEnum)
{
throw new ArgumentException("T must be an enum.");
}
return EnumCheckboxField<T>(label, enumObj, string.Empty, (T)Activator.CreateInstance(typeof(T)));
}
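/* Minimal round-trip sketch (the "Axis" flags enum is hypothetical): the
method returns an int flag set, which the caller must cast back to the enum.

[Flags] enum Axis { None = 0, X = 1, Y = 2, Z = 4, All = X | Y | Z }
axis = (Axis)EnumCheckboxField<Axis>("Constrain axes", axis);
*/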
public static T SceneObjectField<T>(string label, T sceneObject) where T : Component {
EditorGUILayout.BeginHorizontal();
if (string.IsNullOrEmpty(label)) {
sceneObject = (T)EditorGUILayout.ObjectField(sceneObject, typeof(T), true);
} else {
sceneObject = (T)EditorGUILayout.ObjectField(label, sceneObject, typeof(T), true);
}
if (sceneObject != null && sceneObject.gameObject.scene.name == null) {
// Don't allow objects that aren't in the scene!
sceneObject = null;
}
T[] objectsInScene = GameObject.FindObjectsOfType<T>();
int selectedIndex = 0;
string[] displayedOptions = new string[objectsInScene.Length + 1];
displayedOptions[0] = "(None)";
for (int i = 0; i < objectsInScene.Length; i++) {
displayedOptions[i + 1] = objectsInScene[i].name;
if (objectsInScene[i] == sceneObject) {
selectedIndex = i + 1;
}
}
selectedIndex = EditorGUILayout.Popup(selectedIndex, displayedOptions);
if (selectedIndex == 0) {
sceneObject = null;
} else {
sceneObject = objectsInScene[selectedIndex - 1];
}
EditorGUILayout.EndHorizontal();
return sceneObject;
}
/// <summary>
/// Draws enum values as a set of toggle fields
/// Also draws a button the user can click to set to a 'default' value
/// </summary>
/// <typeparam name="T"></typeparam>
/// <param name="label"></param>
/// <param name="enumObj"></param>
/// <param name="defaultName"></param>
/// <param name="defaultVal"></param>
/// <returns></returns>
public static int EnumCheckboxField<T>(string label, T enumObj, string defaultName, T defaultVal, bool ignoreNone = true, bool ignoreAll = true) where T : struct, IConvertible
{
if (!typeof(T).IsEnum)
{
throw new ArgumentException("T must be an enum.");
}
// Convert enum value to an int so we can treat it as a flag set
int enumFlags = Convert.ToInt32(enumObj);
EditorGUILayout.BeginVertical(EditorStyles.helpBox);
if (!string.IsNullOrEmpty(label)) {
EditorGUILayout.LabelField(label, EditorStyles.miniLabel);
DrawDivider();
}
System.Array enumVals = Enum.GetValues(typeof(T));
int lastvalue = Convert.ToInt32((T)enumVals.GetValue(enumVals.GetLength(0) - 1));
foreach (T enumVal in enumVals)
{
int flagVal = Convert.ToInt32(enumVal);
if (ignoreNone && flagVal == 0 && enumVal.ToString().ToLower() == "none")
{
continue;
}
if (ignoreAll && flagVal == lastvalue && enumVal.ToString().ToLower() == "all")
{
continue;
}
bool selected = (flagVal & enumFlags) != 0;
selected = EditorGUILayout.Toggle(enumVal.ToString(), selected);
// If it's selected add it to the enumObj, otherwise remove it
if (selected)
{
enumFlags |= flagVal;
}
else
{
enumFlags &= ~flagVal;
}
}
if (!string.IsNullOrEmpty(defaultName))
{
if (GUILayout.Button(defaultName, EditorStyles.miniButton))
{
enumFlags = Convert.ToInt32(defaultVal);
}
}
EditorGUILayout.EndVertical();
return enumFlags;
}
public static int EnumCheckboxField<T>(string label, T enumObj, string defaultName, T defaultVal, T valOnZero, bool ignoreNone = true, bool ignoreAll = true) where T : struct, IConvertible
{
if (!typeof(T).IsEnum)
{
throw new ArgumentException("T must be an enum.");
}
// Convert enum value to an int so we can treat it as a flag set
int enumFlags = Convert.ToInt32(enumObj);
EditorGUILayout.BeginVertical(EditorStyles.helpBox);
EditorGUILayout.LabelField(label, EditorStyles.miniLabel);
DrawDivider();
System.Array enumVals = Enum.GetValues(typeof(T));
int lastvalue = Convert.ToInt32((T)enumVals.GetValue(enumVals.GetLength(0) - 1));
foreach (T enumVal in enumVals)
{
int flagVal = Convert.ToInt32(enumVal);
if (ignoreNone && flagVal == 0 && enumVal.ToString().ToLower() == "none")
{
continue;
}
if (ignoreAll && flagVal == lastvalue && enumVal.ToString().ToLower() == "all")
{
continue;
}
bool selected = (flagVal & enumFlags) != 0;
selected = EditorGUILayout.Toggle(enumVal.ToString(), selected);
// If it's selected add it to the enumObj, otherwise remove it
if (selected)
{
enumFlags |= flagVal;
}
else
{
enumFlags &= ~flagVal;
}
}
if (!string.IsNullOrEmpty(defaultName))
{
if (GUILayout.Button(defaultName, EditorStyles.miniButton))
{
enumFlags = Convert.ToInt32(defaultVal);
}
}
EditorGUILayout.EndVertical();
if (enumFlags == 0)
{
enumFlags = Convert.ToInt32(valOnZero);
}
return enumFlags;
}
public static string MaterialPropertyName(string property, Material mat, ShaderUtil.ShaderPropertyType type, bool allowNone = true, string defaultProperty = "_Color", string labelName = null)
{
Color tColor = GUI.color;
// Create a list of available color and value properties
List<string> props = new List<string>();
int selectedPropIndex = 0;
if (allowNone) {
props.Add("(None)");
}
if (mat != null)
{
int propertyCount = ShaderUtil.GetPropertyCount(mat.shader);
string propName = string.Empty;
for (int i = 0; i < propertyCount; i++)
{
if (ShaderUtil.GetPropertyType(mat.shader, i) == type)
{
propName = ShaderUtil.GetPropertyName(mat.shader, i);
if (propName == property)
{
// We've found our current property
selectedPropIndex = props.Count;
}
props.Add(propName);
}
}
GUI.color = string.IsNullOrEmpty(property) ? HUXEditorUtils.DisabledColor : HUXEditorUtils.DefaultColor;
if (string.IsNullOrEmpty (labelName))
{
labelName = type.ToString();
}
int newPropIndex = EditorGUILayout.Popup(labelName, selectedPropIndex, props.ToArray());
if (allowNone) {
property = (newPropIndex > 0 ? props[newPropIndex] : string.Empty);
} else {
if (props.Count > 0) {
property = props[newPropIndex];
} else {
property = defaultProperty;
}
}
GUI.color = HUXEditorUtils.DefaultColor;
return property;
}
else
{
WarningMessage("Can't get material " + type.ToString() + " properties because material is null.");
GUI.color = HUXEditorUtils.DefaultColor;
return string.Empty;
}
}
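/* Example call, a sketch only ("targetRenderer" is hypothetical, and the valid
property names depend entirely on the material's shader; "_Color" is just the
conventional default):

colorProp = HUXEditorUtils.MaterialPropertyName(
colorProp, targetRenderer.sharedMaterial, ShaderUtil.ShaderPropertyType.Color);
*/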
public static void Header (string header) {
GUIStyle headerStyle = new GUIStyle(EditorStyles.boldLabel);
headerStyle.fontSize = 18;
EditorGUILayout.LabelField(header, headerStyle, GUILayout.MinHeight(24));
}
public static void WarningMessage(string warning, string buttonMessage = null, Action buttonAction = null)
{
Color tColor = GUI.color;
HUXEditorUtils.BeginSectionBox("Warning", HUXEditorUtils.WarningColor);
EditorGUILayout.LabelField(warning, EditorStyles.wordWrappedLabel);
if (!string.IsNullOrEmpty(buttonMessage) && buttonAction != null)
{
if (GUILayout.Button(buttonMessage))
{
buttonAction.Invoke();
}
}
HUXEditorUtils.EndSectionBox();
GUI.color = tColor;
}
public static void ErrorMessage(string error, Action action = null, string fixMessage = null)
{
Color tColor = GUI.color;
HUXEditorUtils.BeginSectionBox("Error", HUXEditorUtils.ErrorColor);
EditorGUILayout.LabelField(error, EditorStyles.wordWrappedLabel);
if (action != null && GUILayout.Button((fixMessage != null) ? fixMessage : "Fix now"))
{
action.Invoke();
}
HUXEditorUtils.EndSectionBox();
GUI.color = tColor;
}
public static void BeginProfileBox()
{
GUI.color = HUXEditorUtils.WarningColor;
EditorGUILayout.BeginVertical(EditorStyles.helpBox);
DrawSubtleMiniLabel("Profile" + ":");
DrawSubtleMiniLabel("(Warning: this section edits the button profile. These changes will affect all buttons that use this profile.)");
}
public static void EndProfileBox()
{
EndSectionBox();
}
public static void BeginSectionBox(string label)
{
GUI.color = DefaultColor;
/*GUIStyle boxStyle = new GUIStyle(EditorStyles.helpBox);
boxStyle.normal.background = SectionBackground;*/
EditorGUILayout.BeginVertical(EditorStyles.helpBox);
DrawSubtleMiniLabel(label + ":");
}
public static void HelpBox(bool show, string text) {
if (show) {
GUI.color = ObjectColor;
GUIStyle helpBoxStyle = new GUIStyle(EditorStyles.helpBox);
helpBoxStyle.wordWrap = true;
helpBoxStyle.fontSize = 9;
helpBoxStyle.normal.background = HelpBoxBackground;
EditorGUILayout.LabelField(text, helpBoxStyle);
}
GUI.color = DefaultColor;
}
public static bool BeginSectionBox(string label, ref bool foldout) {
GUI.color = DefaultColor;
/*GUIStyle boxStyle = new GUIStyle(EditorStyles.helpBox);
boxStyle.normal.background = SectionBackground;*/
EditorGUILayout.BeginVertical(EditorStyles.helpBox);
GUI.color = Color.Lerp(DefaultColor, Color.grey, 0.5f);
//GUI.contentColor = DarkColor;
GUIStyle foldoutStyle = new GUIStyle(EditorStyles.foldout);
foldoutStyle.fontStyle = FontStyle.Normal;
foldoutStyle.fontSize = 9;
foldout = EditorGUILayout.Foldout(foldout, label + (foldout ? ":" : ""), true, foldoutStyle);
GUI.color = DefaultColor;
//GUI.contentColor = Color.white;
return foldout;
}
public static void BeginSectionBox(string label, Color color)
{
GUI.color = color;
/*GUIStyle boxStyle = new GUIStyle(EditorStyles.helpBox);
boxStyle.normal.background = SectionBackground;*/
EditorGUILayout.BeginVertical(EditorStyles.helpBox);
/*GUIStyle foldoutStyle = new GUIStyle(EditorStyles.wordWrappedLabel);
foldoutStyle.fontStyle = FontStyle.Normal;
foldoutStyle.fontSize = 12;
foldoutStyle.fontStyle = FontStyle.Bold;
EditorGUILayout.LabelField(label + ":", foldoutStyle);*/
DrawSubtleMiniLabel(label + ":");
}
public static void EndSectionBox()
{
EditorGUILayout.EndVertical();
}
public static void BeginSubSectionBox(string label, Color sectionColor)
{
GUI.color = sectionColor;
GUIStyle boxStyle = new GUIStyle(EditorStyles.helpBox);
boxStyle.normal.background = SectionBackground;
EditorGUILayout.BeginVertical(boxStyle);
EditorGUILayout.LabelField(label + ":", EditorStyles.boldLabel);
}
public static void BeginSubSectionBox(string label)
{
GUI.color = DefaultColor;
GUIStyle boxStyle = new GUIStyle(EditorStyles.helpBox);
boxStyle.normal.background = SectionBackground;
EditorGUILayout.BeginVertical(boxStyle);
EditorGUILayout.LabelField(label + ":", EditorStyles.boldLabel);
}
public static void EndSubSectionBox()
{
EditorGUILayout.EndVertical();
}
public static void DrawSubtleMiniLabel(string label)
{
Color tColor = GUI.color;
GUI.color = Color.Lerp(tColor, Color.grey, 0.5f);
EditorGUILayout.LabelField(label, EditorStyles.wordWrappedMiniLabel);
GUI.color = tColor;
}
public static void DrawDivider()
{
GUIStyle styleHR = new GUIStyle(GUI.skin.box);
styleHR.stretchWidth = true;
styleHR.fixedHeight = 2;
GUILayout.Box("", styleHR);
}
public static void SaveChanges(UnityEngine.Object target)
{
if (Application.isPlaying)
return;
if (GUI.changed)
{
EditorUtility.SetDirty(target);
UnityEditor.SceneManagement.EditorSceneManager.MarkSceneDirty(UnityEngine.SceneManagement.SceneManager.GetActiveScene());
}
}
public static void SaveChanges(UnityEngine.Object target1, UnityEngine.Object target2)
{
if (Application.isPlaying)
return;
if (GUI.changed)
{
EditorUtility.SetDirty(target1);
EditorUtility.SetDirty(target2);
UnityEditor.SceneManagement.EditorSceneManager.MarkSceneDirty(UnityEngine.SceneManagement.SceneManager.GetActiveScene());
}
}
public static string[] getMethodOptions(GameObject comp, List<System.Type> ignoreTypes = null)
{
List<string> methods = new List<string>();
if (comp != null)
{
Component[] allComponents = comp.GetComponents<Component>();
List<System.Type> doneTypes = new List<System.Type>();
for (int index = 0; index < allComponents.Length; index++)
{
System.Type compType = allComponents[index].GetType();
if (!doneTypes.Contains(compType) && (ignoreTypes == null || !ignoreTypes.Contains(compType)))
{
MethodInfo[] allMembers = compType.GetMethods();
for (int memberIndex = 0; memberIndex < allMembers.Length; memberIndex++)
{
if (allMembers[memberIndex].IsPublic
&& allMembers[memberIndex].GetParameters().Length == 0
&& !methods.Contains(allMembers[memberIndex].Name)
&& allMembers[memberIndex].ReturnType == typeof(void))
{
methods.Add(allMembers[memberIndex].Name);
}
}
doneTypes.Add(compType);
}
}
}
return methods.ToArray();
}
/// <summary>
/// Adds a prefab to the scene.
/// </summary>
/// <param name="prefabPath"></param>
/// <param name="ignoreAlreadyInScene">If false the prefab will not be added if it exists in the hierarchy.</param>
/// <returns>A reference to the newly created prefab instance or one that exists in the scene if ignoreAlreadyInScene is false.</returns>
public static GameObject AddToScene(string prefabPath, bool ignoreAlreadyInScene = true)
{
GameObject prefab = AssetDatabase.LoadAssetAtPath(prefabPath, typeof(GameObject)) as GameObject;
GameObject instance = null;
if (prefab != null)
{
instance = FindFirstPrefabInstance(prefab);
if (instance == null || ignoreAlreadyInScene)
{
instance = PrefabUtility.InstantiatePrefab(prefab) as GameObject;
}
else
{
Debug.LogWarning("Instance already exits in the scene: " + prefabPath);
}
}
else
{
Debug.LogError("Could not load prefab: " + prefabPath);
}
return instance;
}
/// <summary>
/// Finds the first instance of a prefab in the Hierarchy.
/// </summary>
/// <param name="prefab"></param>
/// <returns>First instance of the prefab or null if one is not found.</returns>
public static GameObject FindFirstPrefabInstance(GameObject prefab)
{
GameObject result = null;
GameObject[] allObjects = (GameObject[])GameObject.FindObjectsOfType(typeof(GameObject));
foreach (GameObject obj in allObjects)
{
PrefabType type = PrefabUtility.GetPrefabType(obj);
if (type == PrefabType.PrefabInstance)
{
UnityEngine.Object GO_prefab = PrefabUtility.GetPrefabParent(obj);
if (prefab == GO_prefab)
{
result = obj;
break;
}
}
}
return result;
}
public static void CorrectAmbientLightingInScene()
{
RenderSettings.ambientMode = UnityEngine.Rendering.AmbientMode.Flat;
RenderSettings.ambientIntensity = 1.0f;
// Normalize and set ambient light to default.
Vector4 c = new Vector4(51.0f, 51.0f, 51.0f, 255.0f);
c.Normalize();
RenderSettings.ambientLight = new Color(c.x, c.y, c.z, c.w);
RenderSettings.reflectionBounces = 1;
RenderSettings.reflectionIntensity = 1.0f;
RenderSettings.skybox = null;
RenderSettings.fog = false;
}
private static List<Type> GetDerivedTypes(Type baseType, Assembly assembly)
{
Type[] types = assembly.GetTypes();
List<Type> derivedTypes = new List<Type>();
for (int i = 0, count = types.Length; i < count; i++)
{
Type type = types[i];
if (IsSubclassOf(type, baseType))
{
derivedTypes.Add(type);
}
}
return derivedTypes;
}
private static bool IsSubclassOf(Type type, Type baseType)
{
if (type == null || baseType == null || type == baseType)
return false;
if (baseType.IsGenericType == false)
{
if (type.IsGenericType == false)
return type.IsSubclassOf(baseType);
}
else
{
baseType = baseType.GetGenericTypeDefinition();
}
type = type.BaseType;
Type objectType = typeof(object);
while (type != objectType && type != null)
{
Type currentType = type.IsGenericType ?
type.GetGenericTypeDefinition() : type;
if (currentType == baseType)
return true;
type = type.BaseType;
}
return false;
}
private static Texture2D SectionBackground {
get {
if (sectionBackground == null) {
sectionBackground = new Texture2D(2, 2);
var pix = new Color[2 * 2];
for (int i = 0; i < pix.Length; i++) {
pix[i] = SectionColor;
}
sectionBackground.SetPixels(pix);
sectionBackground.Apply();
}
return sectionBackground;
}
}
private static Texture2D HelpBoxBackground {
get {
if (helpBoxBackground == null) {
helpBoxBackground = new Texture2D(2, 2);
var pix = new Color[2 * 2];
for (int i = 0; i < pix.Length; i++) {
pix[i] = HelpBoxColor;
}
helpBoxBackground.SetPixels(pix);
helpBoxBackground.Apply();
}
return helpBoxBackground;
}
}
private static Texture2D helpBoxBackground = null;
private static Texture2D sectionBackground = null;
}
} | gpl-2.0 |
loveyoupeng/rt | modules/web/src/main/native/Source/JavaScriptCore/bytecode/DFGExitProfile.cpp | 3542 | /*
* Copyright (C) 2011 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "DFGExitProfile.h"
#if ENABLE(DFG_JIT)
#include <wtf/PassOwnPtr.h>
namespace JSC { namespace DFG {
ExitProfile::ExitProfile() { }
ExitProfile::~ExitProfile() { }
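// Note: the otherwise-unused ConcurrentJITLocker& parameters below serve, by
// convention, as compile-time evidence that the caller already holds the
// appropriate lock; no locking is performed inside these methods themselves.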
bool ExitProfile::add(const ConcurrentJITLocker&, const FrequentExitSite& site)
{
ASSERT(site.jitType() != ExitFromAnything);
// If we've never seen any frequent exits then create the list and put this site
// into it.
if (!m_frequentExitSites) {
m_frequentExitSites = adoptPtr(new Vector<FrequentExitSite>());
m_frequentExitSites->append(site);
return true;
}
// Don't add it if it's already there. This is O(n), but that's OK, because we
// know that the total number of places where code exits tends to not be large,
// and this code is only used when recompilation is triggered.
for (unsigned i = 0; i < m_frequentExitSites->size(); ++i) {
if (m_frequentExitSites->at(i) == site)
return false;
}
m_frequentExitSites->append(site);
return true;
}
Vector<FrequentExitSite> ExitProfile::exitSitesFor(unsigned bytecodeIndex)
{
Vector<FrequentExitSite> result;
if (!m_frequentExitSites)
return result;
for (unsigned i = 0; i < m_frequentExitSites->size(); ++i) {
if (m_frequentExitSites->at(i).bytecodeOffset() == bytecodeIndex)
result.append(m_frequentExitSites->at(i));
}
return result;
}
bool ExitProfile::hasExitSite(const ConcurrentJITLocker&, const FrequentExitSite& site) const
{
if (!m_frequentExitSites)
return false;
for (unsigned i = m_frequentExitSites->size(); i--;) {
if (site.subsumes(m_frequentExitSites->at(i)))
return true;
}
return false;
}
QueryableExitProfile::QueryableExitProfile() { }
QueryableExitProfile::~QueryableExitProfile() { }
void QueryableExitProfile::initialize(const ConcurrentJITLocker&, const ExitProfile& profile)
{
if (!profile.m_frequentExitSites)
return;
for (unsigned i = 0; i < profile.m_frequentExitSites->size(); ++i)
m_frequentExitSites.add(profile.m_frequentExitSites->at(i));
}
} } // namespace JSC::DFG
#endif
| gpl-2.0 |
vvanherk/oscar_emr | src/main/java/oscar/eform/actions/DisplayImageAction.java | 9846 | /**
* Copyright (c) 2001-2002. Department of Family Medicine, McMaster University. All Rights Reserved.
* This software is published under the GPL GNU General Public License.
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* This software was written for the
* Department of Family Medicine
* McMaster University
* Hamilton
* Ontario, Canada
*/
package oscar.eform.actions;
import java.io.File;
import java.util.ArrayList;
import javax.activation.MimetypesFileTypeMap;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.struts.action.ActionForm;
import org.apache.struts.action.ActionMapping;
import org.apache.struts.actions.DownloadAction;
import org.oscarehr.util.MiscUtils;
import oscar.OscarProperties;
/**
* eform_image
* @author jay
*and Paul
*/
public class DisplayImageAction extends DownloadAction{
/** Creates a new instance of DisplayImageAction */
public DisplayImageAction() {
}
protected StreamInfo getStreamInfo(ActionMapping mapping,
ActionForm form,
HttpServletRequest request,
HttpServletResponse response)
throws Exception {
String fileName = request.getParameter("imagefile");
//if (fileName.indexOf('/') != -1) return null; //prevents navigating away from the page.
String home_dir = OscarProperties.getInstance().getProperty("eform_image");
response.setHeader("Content-disposition","inline; filename=" + fileName);
File file = null;
try{
File directory = new File(home_dir);
if(!directory.exists()){
throw new Exception("Directory: "+home_dir+ " does not exist");
}
file = new File(directory,fileName);
//String canonicalPath = file.getParentFile().getCanonicalPath(); //absolute path of the retrieved file
if (!directory.equals(file.getParentFile())) {
MiscUtils.getLogger().debug("SECURITY WARNING: Illegal file path detected, client attempted to navigate away from the file directory");
throw new Exception("Could not open file " + fileName + ". Check the file path");
}
}catch(Exception e){
MiscUtils.getLogger().error("Error", e);
throw new Exception("Could not open file "+home_dir+fileName +" does "+home_dir+ " exist ?",e);
}
//gets content type from image extension
String contentType = new MimetypesFileTypeMap().getContentType(file);
/**
* Handles file types whose encodings are not included in the mimetypes file.
* Check the mimetypes file to see whether the file type you are using is already covered.
*/
try{
if(extension(file.getName()).equalsIgnoreCase("png")){ // for PNG
contentType = "image/png";
}else if(extension(file.getName()).equalsIgnoreCase("jpeg")||
extension(file.getName()).equalsIgnoreCase("jpe")||
extension(file.getName()).equalsIgnoreCase("jpg")){ //for JPEG,JPG,JPE
contentType = "image/jpeg";
}else if(extension(file.getName()).equalsIgnoreCase("bmp")){ // for BMP
contentType = "image/bmp";
}else if(extension(file.getName()).equalsIgnoreCase("cod")){ // for COD
contentType = "image/cis-cod";
}else if(extension(file.getName()).equalsIgnoreCase("ief")){ // for IEF
contentType = "image/ief";
}else if(extension(file.getName()).equalsIgnoreCase("jfif")){ // for JFIF
contentType = "image/pipeg";
}else if(extension(file.getName()).equalsIgnoreCase("svg")){ // for SVG
contentType = "image/svg+xml";
}else if(extension(file.getName()).equalsIgnoreCase("tiff")||
extension(file.getName()).equalsIgnoreCase("tif")){ // for TIFF or TIF
contentType = "image/tiff";
}else if(extension(file.getName()).equalsIgnoreCase("pbm")){ // for PBM
contentType = "image/x-portable-bitmap";
}else if(extension(file.getName()).equalsIgnoreCase("pnm")){ // for PNM
contentType = "image/x-portable-anymap";
}else if(extension(file.getName()).equalsIgnoreCase("pgm")){ // for PGM
contentType = "image/x-portable-greymap";
}else if(extension(file.getName()).equalsIgnoreCase("ppm")){ // for PPM
contentType = "image/x-portable-pixmap";
}else if(extension(file.getName()).equalsIgnoreCase("xbm")){ // for XBM
contentType = "image/x-xbitmap";
}else if(extension(file.getName()).equalsIgnoreCase("xpm")){ // for XPM
contentType = "image/x-xpixmap";
}else if(extension(file.getName()).equalsIgnoreCase("xwd")){ // for XWD
contentType = "image/x-xwindowdump";
}else if(extension(file.getName()).equalsIgnoreCase("rgb")){ // for RGB
contentType = "image/x-rgb";
}else if(extension(file.getName()).equalsIgnoreCase("ico")){ // for ICO
contentType = "image/x-icon";
}else if(extension(file.getName()).equalsIgnoreCase("cmx")){ // for CMX
contentType = "image/x-cmx";
}else if(extension(file.getName()).equalsIgnoreCase("ras")){ // for RAS
contentType = "image/x-cmu-raster";
}else if(extension(file.getName()).equalsIgnoreCase("gif")){ // for GIF
contentType = "image/gif";
}else if(extension(file.getName()).equalsIgnoreCase("js")){ // for GIF
contentType = "text/javascript";
}else if(extension(file.getName()).equalsIgnoreCase("css")){ // for GIF
contentType = "text/css";
}else if(extension(file.getName()).equalsIgnoreCase("rtl") || extension(file.getName()).equalsIgnoreCase("html") || extension(file.getName()).equalsIgnoreCase("htm")){ // for HTML
contentType = "text/html";
}else{
throw new Exception("please check the file type or update mimetypes.default file to include the "+"."+extension(file.getName()));
}
}catch(Exception e){MiscUtils.getLogger().error("Error", e);
throw new Exception("Could not open file "+file.getName()+" wrong file extension, ",e);
}
return new FileStreamInfo(contentType, file);
}
/**
* Gets the file extension from a given filename.
*
* @param f filename, e.g. "example.jpeg"
* @return the file extension, e.g. "jpeg"
*/
public String extension(String f) {
int dot = f.lastIndexOf(".");
return f.substring(dot + 1);
}
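// e.g. extension("scan.tiff") returns "tiff". Note that a name without a dot
// returns the whole string, since lastIndexOf(".") yields -1 and -1 + 1 == 0.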
public static File getImageFile(String imageFileName) throws Exception {
String home_dir = OscarProperties.getInstance().getProperty("eform_image");
File file = null;
try{
File directory = new File(home_dir);
if(!directory.exists()){
throw new Exception("Directory: "+home_dir+ " does not exist");
}
file = new File(directory,imageFileName);
//String canonicalPath = file.getParentFile().getCanonicalPath(); //absolute path of the retrieved file
if (!directory.equals(file.getParentFile())) {
MiscUtils.getLogger().debug("SECURITY WARNING: Illegal file path detected, client attempted to navigate away from the file directory");
throw new Exception("Could not open file " + imageFileName + ". Check the file path");
}
return file;
}catch(Exception e){
MiscUtils.getLogger().error("Error", e);
throw new Exception("Could not open file "+home_dir+imageFileName +" does "+home_dir+ " exist ?",e);
}
}
/**
* Processes only the files directly under dir; recursive calls on
* subdirectories discard their results. This method is used to list
* images for the eForm generator.
*/
public String[] visitAllFiles(File dir) {
String[] children=null;
if (dir.isDirectory()) {
children = dir.list();
for (int i=0; i<children.length; i++) {
visitAllFiles(new File(dir, children[i]));
}
}
return children;
}
public static String[] getRichTextLetterTemplates(File dir) {
ArrayList<String> results = getFiles(dir, ".*(rtl)$", null);
return results.toArray(new String[0]);
}
public static ArrayList<String> getFiles(File dir, String ext, ArrayList<String> files) {
if (files == null) { files = new ArrayList<String>(); }
if (dir.isDirectory()) {
for (String fileName : dir.list()) {
if (fileName.toLowerCase().matches(ext)) {
files.add(fileName);
}
}
}
return files;
}
}
| gpl-2.0 |
ashchat404/wordpress | wp-content/themes/tsf/inc/custom-header.php | 6290 | <?php
/**
* Sample implementation of the Custom Header feature
* http://codex.wordpress.org/Custom_Headers
*
* You can add an optional custom header image to header.php like so ...
<?php $header_image = get_header_image();
if ( ! empty( $header_image ) ) { ?>
<a href="<?php echo esc_url( home_url( '/' ) ); ?>" title="<?php echo esc_attr( get_bloginfo( 'name', 'display' ) ); ?>" rel="home">
<img src="<?php header_image(); ?>" width="<?php echo get_custom_header()->width; ?>" height="<?php echo get_custom_header()->height; ?>" alt="" />
</a>
<?php } // if ( ! empty( $header_image ) ) ?>
*
* @package wpjobboard_theme
*/
/**
* Setup the WordPress core custom header feature.
*
* Use add_theme_support to register support for WordPress 3.4+
* as well as provide backward compatibility for previous versions.
* Use feature detection of wp_get_theme() which was introduced
* in WordPress 3.4.
*
* @todo Rework this function to remove WordPress 3.4 support when WordPress 3.6 is released.
*
* @uses wpjobboard_theme_header_style()
* @uses wpjobboard_theme_admin_header_style()
* @uses wpjobboard_theme_admin_header_image()
*
* @package wpjobboard_theme
*/
function wpjobboard_theme_custom_header_setup() {
$args = array(
'default-image' => '',
'default-text-color' => '000',
'width' => 1000,
'height' => 250,
'flex-height' => true,
'wp-head-callback' => 'wpjobboard_theme_header_style',
'admin-head-callback' => 'wpjobboard_theme_admin_header_style',
'admin-preview-callback' => 'wpjobboard_theme_admin_header_image',
);
$args = apply_filters('wpjobboard_theme_custom_header_args', $args);
if (function_exists('wp_get_theme')) {
add_theme_support('custom-header', $args);
} else {
// Compat: Versions of WordPress prior to 3.4.
define('HEADER_TEXTCOLOR', $args['default-text-color']);
define('HEADER_IMAGE', $args['default-image']);
define('HEADER_IMAGE_WIDTH', $args['width']);
define('HEADER_IMAGE_HEIGHT', $args['height']);
add_custom_image_header($args['wp-head-callback'], $args['admin-head-callback'], $args['admin-preview-callback']);
}
}
add_action('after_setup_theme', 'wpjobboard_theme_custom_header_setup');
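/*
 * Illustrative only (a hypothetical child theme or plugin): the filter applied
 * above can be used to adjust the header dimensions without editing this file.
 *
 * add_filter( 'wpjobboard_theme_custom_header_args', function ( $args ) {
 *     $args['width']  = 1200;
 *     $args['height'] = 300;
 *     return $args;
 * } );
 */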
/**
* Shiv for get_custom_header().
*
* get_custom_header() was introduced to WordPress
* in version 3.4. To provide backward compatibility
* with previous versions, we will define our own version
* of this function.
*
* @todo Remove this function when WordPress 3.6 is released.
* @return stdClass All properties represent attributes of the curent header image.
*
* @package wpjobboard_theme
*/
if (!function_exists('get_custom_header')) {
function get_custom_header() {
return (object) array(
'url' => get_header_image(),
'thumbnail_url' => get_header_image(),
'width' => HEADER_IMAGE_WIDTH,
'height' => HEADER_IMAGE_HEIGHT,
);
}
}
if (!function_exists('wpjobboard_theme_header_style')) :
/**
* Styles the header image and text displayed on the blog
*
* @see wpjobboard_theme_custom_header_setup().
*/
function wpjobboard_theme_header_style() {
// If no custom options for text are set, let's bail
// get_header_textcolor() options: HEADER_TEXTCOLOR is default, hide text (returns 'blank') or any hex value
if (HEADER_TEXTCOLOR == get_header_textcolor())
return;
// If we get this far, we have custom styles. Let's do this.
?>
<style type="text/css">
<?php
// Has the text been hidden?
if ('blank' == get_header_textcolor()) :
?>
.site-title,
.site-description {
position: absolute !important;
clip: rect(1px 1px 1px 1px); /* IE6, IE7 */
clip: rect(1px, 1px, 1px, 1px);
}
<?php
// If the user has set a custom color for the text use that
else :
?>
.site-title a,
.site-description {
color: #<?php echo get_header_textcolor(); ?>;
}
<?php endif; ?>
</style>
<?php
}
endif; // wpjobboard_theme_header_style
if (!function_exists('wpjobboard_theme_admin_header_style')) :
/**
* Styles the header image displayed on the Appearance > Header admin panel.
*
* @see wpjobboard_theme_custom_header_setup().
*/
function wpjobboard_theme_admin_header_style() {
?>
<style type="text/css">
.appearance_page_custom-header #headimg {
border: none;
}
#headimg h1,
#desc {
}
#headimg h1 {
}
#headimg h1 a {
}
#desc {
}
#headimg img {
}
</style>
<?php
}
endif; // wpjobboard_theme_admin_header_style
if (!function_exists('wpjobboard_theme_admin_header_image')) :
/**
* Custom header image markup displayed on the Appearance > Header admin panel.
*
* @see wpjobboard_theme_custom_header_setup().
*/
function wpjobboard_theme_admin_header_image() {
?>
<div id="headimg">
<?php
if ('blank' == get_header_textcolor() || '' == get_header_textcolor())
$style = ' style="display:none;"';
else
$style = ' style="color:#' . get_header_textcolor() . ';"';
?>
<h1 class="displaying-header-text"><a id="name"<?php echo $style; ?> onclick="return false;" href="<?php echo esc_url(home_url('/')); ?>"><?php bloginfo('name'); ?></a></h1>
<div class="displaying-header-text" id="desc"<?php echo $style; ?>><?php bloginfo('description'); ?></div>
<?php
$header_image = get_header_image();
if (!empty($header_image)) :
?>
<img src="<?php echo esc_url($header_image); ?>" alt="" />
<?php endif; ?>
</div>
<?php
}
endif; // wpjobboard_theme_admin_header_image
| gpl-2.0 |
siduction-upstream/screengrab | src/modules/uploader/uploader.h | 3354 | /***************************************************************************
* Copyright (C) 2009 - 2013 by Artem 'DOOMer' Galichkin *
* [email protected] *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
***************************************************************************/
#ifndef UPLOADER_H
#define UPLOADER_H
#include <QObject>
#include <QByteArray>
#include <QMap>
#include <QPair>
#include <QNetworkAccessManager>
#include <QNetworkReply>
#include <QHttpMultiPart>
#include <QUrl>
typedef QPair<QByteArray, QString> ResultString_t;
const QByteArray UL_DIRECT_LINK = "direct_link";
const QByteArray UL_HTML_CODE = "html_code";
const QByteArray UL_BB_CODE = "bb_code";
const QByteArray UL_HTML_CODE_THUMB = "html_code_thumb";
const QByteArray UL_BB_CODE_THUMB = "bb_code_thumb";
const QByteArray UL_DELETE_URL = "delete_url";
class Uploader : public QObject
{
Q_OBJECT
public:
explicit Uploader(QObject *parent = 0);
virtual ~Uploader();
// overriding methods
void getUserSettings(const QVariantMap& settings);
virtual void startUploading();
QMap<QByteArray, ResultString_t> parsedLinks();
QList<ResultString_t> parsedLinksToGui();
Q_SIGNALS:
void uploadStart();
void uploadFail(const QByteArray &error);
void uploadDoneStr(const QString &directLink);
void uploadDone();
void uploadProgress(qint64 bytesSent, qint64 bytesTotal);
public Q_SLOTS:
protected Q_SLOTS:
virtual void replyFinished(QNetworkReply* reply) { Q_UNUSED(reply) }
void replyProgress(qint64 bytesSent, qint64 bytesTotal);
protected:
// methods
QByteArray boundary(bool cleared = false);
QMap<QByteArray, QByteArray> parseResultStrings(const QVector<QByteArray>& keytags, const QByteArray& result);
virtual QUrl apiUrl();
virtual void createData(bool inBase64 = false);
virtual void createRequest(const QByteArray& requestData, const QUrl url);
// vars
QByteArray imageData;
QHttpMultiPart *_multipartData;
QString _uploadFilename;
QString _formatString;
QByteArray _strBoundary;
QMap<QByteArray, ResultString_t> _uploadedStrings;
QVariantMap _userSettings;
QNetworkAccessManager *_net;
QNetworkRequest _request;
QNetworkReply *_serverReply;
private:
void initUploadedStrList();
};
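/* Minimal subclass sketch (the class name and endpoint are hypothetical, shown
only to illustrate the intended extension points):

class ExampleHostUploader : public Uploader
{
Q_OBJECT
protected:
QUrl apiUrl() { return QUrl("https://example.com/api/upload"); }
void replyFinished(QNetworkReply *reply)
{
// parse reply->readAll() into _uploadedStrings, then emit uploadDone()
}
};
*/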
#endif // UPLOADER_H
| gpl-2.0 |
pclouds/busybox-w32 | loginutils/login.c | 17513 | /* vi: set sw=4 ts=4: */
/*
* Licensed under GPLv2 or later, see file LICENSE in this source tree.
*/
//config:config LOGIN
//config: bool "login"
//config: default y
//config: select FEATURE_SYSLOG
//config: help
//config: login is used when signing onto a system.
//config:
//config: Note that Busybox binary must be setuid root for this applet to
//config: work properly.
//config:
//config:config LOGIN_SESSION_AS_CHILD
//config: bool "Run logged in session in a child process"
//config: default y if PAM
//config: depends on LOGIN
//config: help
//config: Run the logged in session in a child process. This allows
//config: login to clean up things such as utmp entries or PAM sessions
//config: when the login session is complete. If you use PAM, you
//config: almost always would want this to be set to Y, else PAM session
//config: will not be cleaned up.
//config:
//config:config LOGIN_SCRIPTS
//config: bool "Support for login scripts"
//config: depends on LOGIN
//config: default y
//config: help
//config: Enable this if you want login to execute $LOGIN_PRE_SUID_SCRIPT
//config: just prior to switching from root to logged-in user.
//config:
//config:config FEATURE_NOLOGIN
//config: bool "Support for /etc/nologin"
//config: default y
//config: depends on LOGIN
//config: help
//config: The file /etc/nologin is used by (some versions of) login(1).
//config: If it exists, non-root logins are prohibited.
//config:
//config:config FEATURE_SECURETTY
//config: bool "Support for /etc/securetty"
//config: default y
//config: depends on LOGIN
//config: help
//config: The file /etc/securetty is used by (some versions of) login(1).
//config: The file contains the device names of tty lines (one per line,
//config: without leading /dev/) on which root is allowed to login.
//applet:/* Needs to be run by root or be suid root - needs to change uid and gid: */
//applet:IF_LOGIN(APPLET(login, BB_DIR_BIN, BB_SUID_REQUIRE))
//kbuild:lib-$(CONFIG_LOGIN) += login.o
//usage:#define login_trivial_usage
//usage: "[-p] [-h HOST] [[-f] USER]"
//usage:#define login_full_usage "\n\n"
//usage: "Begin a new session on the system\n"
//usage: "\n -f Don't authenticate (user already authenticated)"
//usage: "\n -h HOST Host user came from (for network logins)"
//usage: "\n -p Preserve environment"
#include "libbb.h"
#include <syslog.h>
#include <sys/resource.h>
#if ENABLE_SELINUX
# include <selinux/selinux.h> /* for is_selinux_enabled() */
# include <selinux/get_context_list.h> /* for get_default_context() */
# include <selinux/flask.h> /* for security class definitions */
#endif
#if ENABLE_PAM
/* PAM may include <locale.h>. We may need to undefine bbox's stub define: */
# undef setlocale
/* For some obscure reason, PAM is not in pam/xxx, but in security/xxx.
* Apparently they like to confuse people. */
# include <security/pam_appl.h>
# include <security/pam_misc.h>
# if 0
/* This supposedly can be used to avoid double password prompt,
* if used instead of standard misc_conv():
*
* "When we want to authenticate first with local method and then with tacacs for example,
* the password is asked for local method and if not good is asked a second time for tacacs.
 * So if we want to authenticate a user with tacacs, and the user exists locally, the password is
* asked two times before authentication is accepted."
*
* However, code looks shaky. For example, why misc_conv() return value is ignored?
* Are msg[i] and resp[i] indexes handled correctly?
*/
static char *passwd = NULL;
static int my_conv(int num_msg, const struct pam_message **msg,
struct pam_response **resp, void *data)
{
int i;
for (i = 0; i < num_msg; i++) {
switch (msg[i]->msg_style) {
case PAM_PROMPT_ECHO_OFF:
if (passwd == NULL) {
misc_conv(num_msg, msg, resp, data);
passwd = xstrdup(resp[i]->resp);
return PAM_SUCCESS;
}
resp[0] = xzalloc(sizeof(struct pam_response));
resp[0]->resp = passwd;
passwd = NULL;
resp[0]->resp_retcode = PAM_SUCCESS;
resp[1] = NULL;
return PAM_SUCCESS;
default:
break;
}
}
return PAM_SUCCESS;
}
# endif
static const struct pam_conv conv = {
misc_conv,
NULL
};
#endif
enum {
TIMEOUT = 60,
EMPTY_USERNAME_COUNT = 10,
/* Some users found 32 chars limit to be too low: */
USERNAME_SIZE = 64,
TTYNAME_SIZE = 32,
};
struct globals {
struct termios tty_attrs;
} FIX_ALIASING;
#define G (*(struct globals*)&bb_common_bufsiz1)
#define INIT_G() do { } while (0)
#if ENABLE_FEATURE_NOLOGIN
static void die_if_nologin(void)
{
FILE *fp;
int c;
int empty = 1;
fp = fopen_for_read("/etc/nologin");
if (!fp) /* assuming it does not exist */
return;
while ((c = getc(fp)) != EOF) {
if (c == '\n')
bb_putchar('\r');
bb_putchar(c);
empty = 0;
}
if (empty)
puts("\r\nSystem closed for routine maintenance\r");
fclose(fp);
fflush_all();
/* Users say that they do need this prior to exit: */
tcdrain(STDOUT_FILENO);
exit(EXIT_FAILURE);
}
#else
# define die_if_nologin() ((void)0)
#endif
#if ENABLE_FEATURE_SECURETTY && !ENABLE_PAM
static int check_securetty(const char *short_tty)
{
char *buf = (char*)"/etc/securetty"; /* any non-NULL is ok */
parser_t *parser = config_open2("/etc/securetty", fopen_for_read);
while (config_read(parser, &buf, 1, 1, "# \t", PARSE_NORMAL)) {
if (strcmp(buf, short_tty) == 0)
break;
buf = NULL;
}
config_close(parser);
/* buf != NULL here if config file was not found, empty
* or line was found which equals short_tty */
return buf != NULL;
}
#else
static ALWAYS_INLINE int check_securetty(const char *short_tty UNUSED_PARAM) { return 1; }
#endif
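/* Example /etc/securetty (one tty name per line, without the "/dev/" prefix):
 *   console
 *   tty1
 *   ttyS0
 * With such a file present, root logins are refused on any tty not listed.
 */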
#if ENABLE_SELINUX
static void initselinux(char *username, char *full_tty,
security_context_t *user_sid)
{
security_context_t old_tty_sid, new_tty_sid;
if (!is_selinux_enabled())
return;
if (get_default_context(username, NULL, user_sid)) {
bb_error_msg_and_die("can't get SID for %s", username);
}
if (getfilecon(full_tty, &old_tty_sid) < 0) {
bb_perror_msg_and_die("getfilecon(%s) failed", full_tty);
}
if (security_compute_relabel(*user_sid, old_tty_sid,
SECCLASS_CHR_FILE, &new_tty_sid) != 0) {
bb_perror_msg_and_die("security_change_sid(%s) failed", full_tty);
}
if (setfilecon(full_tty, new_tty_sid) != 0) {
bb_perror_msg_and_die("chsid(%s, %s) failed", full_tty, new_tty_sid);
}
}
#endif
#if ENABLE_LOGIN_SCRIPTS
static void run_login_script(struct passwd *pw, char *full_tty)
{
char *t_argv[2];
t_argv[0] = getenv("LOGIN_PRE_SUID_SCRIPT");
if (t_argv[0]) {
t_argv[1] = NULL;
xsetenv("LOGIN_TTY", full_tty);
xsetenv("LOGIN_USER", pw->pw_name);
xsetenv("LOGIN_UID", utoa(pw->pw_uid));
xsetenv("LOGIN_GID", utoa(pw->pw_gid));
xsetenv("LOGIN_SHELL", pw->pw_shell);
spawn_and_wait(t_argv); /* NOMMU-friendly */
unsetenv("LOGIN_TTY");
unsetenv("LOGIN_USER");
unsetenv("LOGIN_UID");
unsetenv("LOGIN_GID");
unsetenv("LOGIN_SHELL");
}
}
#else
void run_login_script(struct passwd *pw, char *full_tty);
#endif
#if ENABLE_LOGIN_SESSION_AS_CHILD && ENABLE_PAM
static void login_pam_end(pam_handle_t *pamh)
{
int pamret;
pamret = pam_setcred(pamh, PAM_DELETE_CRED);
if (pamret != PAM_SUCCESS) {
bb_error_msg("pam_%s failed: %s (%d)", "setcred",
pam_strerror(pamh, pamret), pamret);
}
pamret = pam_close_session(pamh, 0);
if (pamret != PAM_SUCCESS) {
bb_error_msg("pam_%s failed: %s (%d)", "close_session",
pam_strerror(pamh, pamret), pamret);
}
pamret = pam_end(pamh, pamret);
if (pamret != PAM_SUCCESS) {
bb_error_msg("pam_%s failed: %s (%d)", "end",
pam_strerror(pamh, pamret), pamret);
}
}
#endif /* ENABLE_PAM */
static void get_username_or_die(char *buf, int size_buf)
{
int c, cntdown;
cntdown = EMPTY_USERNAME_COUNT;
prompt:
print_login_prompt();
/* skip whitespace */
do {
c = getchar();
if (c == EOF)
exit(EXIT_FAILURE);
if (c == '\n') {
if (!--cntdown)
exit(EXIT_FAILURE);
goto prompt;
}
} while (isspace(c)); /* maybe isblank? */
*buf++ = c;
if (!fgets(buf, size_buf-2, stdin))
exit(EXIT_FAILURE);
if (!strchr(buf, '\n'))
exit(EXIT_FAILURE);
while ((unsigned char)*buf > ' ')
buf++;
*buf = '\0';
}
static void motd(void)
{
int fd;
fd = open(bb_path_motd_file, O_RDONLY);
if (fd >= 0) {
fflush_all();
bb_copyfd_eof(fd, STDOUT_FILENO);
close(fd);
}
}
static void alarm_handler(int sig UNUSED_PARAM)
{
/* This is the escape hatch! Poor serial line users and the like
* arrive here when their connection is broken.
* We don't want to block here */
ndelay_on(STDOUT_FILENO);
/* Test for correct attr restoring:
* run "getty 0 -" from a shell, enter bogus username, stop at
* password prompt, let it time out. Without the tcsetattr below,
* when you are back at shell prompt, echo will be still off.
*/
tcsetattr_stdin_TCSANOW(&G.tty_attrs);
printf("\r\nLogin timed out after %u seconds\r\n", TIMEOUT);
fflush_all();
/* unix API is brain damaged regarding O_NONBLOCK,
* we should undo it, or else we can affect other processes */
ndelay_off(STDOUT_FILENO);
_exit(EXIT_SUCCESS);
}
int login_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int login_main(int argc UNUSED_PARAM, char **argv)
{
enum {
LOGIN_OPT_f = (1<<0),
LOGIN_OPT_h = (1<<1),
LOGIN_OPT_p = (1<<2),
};
char *fromhost;
char username[USERNAME_SIZE];
int run_by_root;
unsigned opt;
int count = 0;
struct passwd *pw;
char *opt_host = NULL;
char *opt_user = opt_user; /* for compiler */
char *full_tty;
char *short_tty;
IF_SELINUX(security_context_t user_sid = NULL;)
#if ENABLE_PAM
int pamret;
pam_handle_t *pamh;
const char *pamuser;
const char *failed_msg;
struct passwd pwdstruct;
char pwdbuf[256];
char **pamenv;
#endif
#if ENABLE_LOGIN_SESSION_AS_CHILD
pid_t child_pid;
#endif
INIT_G();
/* More of suid paranoia if called by non-root: */
/* Clear dangerous stuff, set PATH */
run_by_root = !sanitize_env_if_suid();
/* Mandatory paranoia for suid applet:
* ensure that fd# 0,1,2 are opened (at least to /dev/null)
* and any extra open fd's are closed.
* (The name of the function is misleading. Not daemonizing here.) */
bb_daemonize_or_rexec(DAEMON_ONLY_SANITIZE | DAEMON_CLOSE_EXTRA_FDS, NULL);
username[0] = '\0';
opt = getopt32(argv, "f:h:p", &opt_user, &opt_host);
if (opt & LOGIN_OPT_f) {
if (!run_by_root)
bb_error_msg_and_die("-f is for root only");
safe_strncpy(username, opt_user, sizeof(username));
}
argv += optind;
if (argv[0]) /* user from command line (getty) */
safe_strncpy(username, argv[0], sizeof(username));
/* Save tty attributes - and by doing it, check that it's indeed a tty */
if (tcgetattr(STDIN_FILENO, &G.tty_attrs) < 0
|| !isatty(STDOUT_FILENO)
/*|| !isatty(STDERR_FILENO) - no, guess some people might want to redirect this */
) {
return EXIT_FAILURE; /* Must be a terminal */
}
/* We install timeout handler only _after_ we saved G.tty_attrs */
signal(SIGALRM, alarm_handler);
alarm(TIMEOUT);
/* Find out and memorize our tty name */
full_tty = xmalloc_ttyname(STDIN_FILENO);
if (!full_tty)
full_tty = xstrdup("UNKNOWN");
short_tty = skip_dev_pfx(full_tty);
if (opt_host) {
fromhost = xasprintf(" on '%s' from '%s'", short_tty, opt_host);
} else {
fromhost = xasprintf(" on '%s'", short_tty);
}
/* Was breaking "login <username>" from shell command line: */
/*bb_setpgrp();*/
openlog(applet_name, LOG_PID | LOG_CONS, LOG_AUTH);
while (1) {
/* flush away any type-ahead (as getty does) */
tcflush(0, TCIFLUSH);
if (!username[0])
get_username_or_die(username, sizeof(username));
#if ENABLE_PAM
pamret = pam_start("login", username, &conv, &pamh);
if (pamret != PAM_SUCCESS) {
failed_msg = "start";
goto pam_auth_failed;
}
/* set TTY (so things like securetty work) */
pamret = pam_set_item(pamh, PAM_TTY, short_tty);
if (pamret != PAM_SUCCESS) {
failed_msg = "set_item(TTY)";
goto pam_auth_failed;
}
/* set RHOST */
if (opt_host) {
pamret = pam_set_item(pamh, PAM_RHOST, opt_host);
if (pamret != PAM_SUCCESS) {
failed_msg = "set_item(RHOST)";
goto pam_auth_failed;
}
}
if (!(opt & LOGIN_OPT_f)) {
pamret = pam_authenticate(pamh, 0);
if (pamret != PAM_SUCCESS) {
failed_msg = "authenticate";
goto pam_auth_failed;
/* TODO: or just "goto auth_failed"
* since user seems to enter wrong password
* (in this case pamret == 7)
*/
}
}
/* check that the account is healthy */
pamret = pam_acct_mgmt(pamh, 0);
if (pamret != PAM_SUCCESS) {
failed_msg = "acct_mgmt";
goto pam_auth_failed;
}
/* read user back */
pamuser = NULL;
/* gcc: "dereferencing type-punned pointer breaks aliasing rules..."
* thus we cast to (void*) */
if (pam_get_item(pamh, PAM_USER, (void*)&pamuser) != PAM_SUCCESS) {
failed_msg = "get_item(USER)";
goto pam_auth_failed;
}
if (!pamuser || !pamuser[0])
goto auth_failed;
safe_strncpy(username, pamuser, sizeof(username));
/* Don't use "pw = getpwnam(username);",
* PAM is said to be capable of destroying static storage
* used by getpwnam(). We are using safe(r) function */
pw = NULL;
getpwnam_r(username, &pwdstruct, pwdbuf, sizeof(pwdbuf), &pw);
if (!pw)
goto auth_failed;
pamret = pam_open_session(pamh, 0);
if (pamret != PAM_SUCCESS) {
failed_msg = "open_session";
goto pam_auth_failed;
}
pamret = pam_setcred(pamh, PAM_ESTABLISH_CRED);
if (pamret != PAM_SUCCESS) {
failed_msg = "setcred";
goto pam_auth_failed;
}
break; /* success, continue login process */
pam_auth_failed:
/* syslog, because we don't want potential attacker
* to know _why_ login failed */
syslog(LOG_WARNING, "pam_%s call failed: %s (%d)", failed_msg,
pam_strerror(pamh, pamret), pamret);
safe_strncpy(username, "UNKNOWN", sizeof(username));
#else /* not PAM */
pw = getpwnam(username);
if (!pw) {
strcpy(username, "UNKNOWN");
goto fake_it;
}
if (pw->pw_passwd[0] == '!' || pw->pw_passwd[0] == '*')
goto auth_failed;
if (opt & LOGIN_OPT_f)
break; /* -f USER: success without asking passwd */
if (pw->pw_uid == 0 && !check_securetty(short_tty))
goto auth_failed;
/* Don't check the password if password entry is empty (!) */
if (!pw->pw_passwd[0])
break;
fake_it:
/* Password reading and authorization takes place here.
* Note that reads (in no-echo mode) trash tty attributes.
* If we get interrupted by SIGALRM, we need to restore attrs.
*/
if (ask_and_check_password(pw) > 0)
break;
#endif /* ENABLE_PAM */
auth_failed:
opt &= ~LOGIN_OPT_f;
bb_do_delay(LOGIN_FAIL_DELAY);
/* TODO: doesn't sound like correct English phrase to me */
puts("Login incorrect");
if (++count == 3) {
syslog(LOG_WARNING, "invalid password for '%s'%s",
username, fromhost);
if (ENABLE_FEATURE_CLEAN_UP)
free(fromhost);
return EXIT_FAILURE;
}
username[0] = '\0';
} /* while (1) */
alarm(0);
/* We can ignore /etc/nologin if we are logging in as root,
* it doesn't matter whether we are run by root or not */
if (pw->pw_uid != 0)
die_if_nologin();
#if ENABLE_LOGIN_SESSION_AS_CHILD
child_pid = vfork();
if (child_pid != 0) {
if (child_pid < 0)
bb_perror_msg("vfork");
else {
if (safe_waitpid(child_pid, NULL, 0) == -1)
bb_perror_msg("waitpid");
update_utmp_DEAD_PROCESS(child_pid);
}
IF_PAM(login_pam_end(pamh);)
return 0;
}
#endif
IF_SELINUX(initselinux(username, full_tty, &user_sid);)
/* Try these, but don't complain if they fail.
* _f_chown is safe wrt race t=ttyname(0);...;chown(t); */
fchown(0, pw->pw_uid, pw->pw_gid);
fchmod(0, 0600);
update_utmp(getpid(), USER_PROCESS, short_tty, username, run_by_root ? opt_host : NULL);
/* We trust environment only if we run by root */
if (ENABLE_LOGIN_SCRIPTS && run_by_root)
run_login_script(pw, full_tty);
change_identity(pw);
setup_environment(pw->pw_shell,
(!(opt & LOGIN_OPT_p) * SETUP_ENV_CLEARENV) + SETUP_ENV_CHANGEENV,
pw);
#if ENABLE_PAM
/* Modules such as pam_env will setup the PAM environment,
* which should be copied into the new environment. */
pamenv = pam_getenvlist(pamh);
if (pamenv) while (*pamenv) {
putenv(*pamenv);
pamenv++;
}
#endif
if (access(".hushlogin", F_OK) != 0)
motd();
if (pw->pw_uid == 0)
syslog(LOG_INFO, "root login%s", fromhost);
if (ENABLE_FEATURE_CLEAN_UP)
free(fromhost);
/* well, a simple setexeccon() here would do the job as well,
* but let's play the game for now */
IF_SELINUX(set_current_security_context(user_sid);)
// util-linux login also does:
// /* start new session */
// setsid();
// /* TIOCSCTTY: steal tty from other process group */
// if (ioctl(0, TIOCSCTTY, 1)) error_msg...
// BBox login used to do this (see above):
// bb_setpgrp();
// If this stuff is really needed, add it and explain why!
/* Set signals to defaults */
/* Non-ignored signals revert to SIG_DFL on exec anyway */
/*signal(SIGALRM, SIG_DFL);*/
/* Is this correct? This way user can ctrl-c out of /etc/profile,
* potentially creating security breach (tested with bash 3.0).
* But without this, bash 3.0 will not enable ctrl-c either.
* Maybe bash is buggy?
* Need to find out what standards say about /bin/login -
* should we leave SIGINT etc enabled or disabled? */
signal(SIGINT, SIG_DFL);
/* Exec login shell with no additional parameters */
run_shell(pw->pw_shell, 1, NULL, NULL);
/* return EXIT_FAILURE; - not reached */
}
| gpl-2.0 |
saisai/phpmyadmin | libraries/navigation/Nodes/Node_View.class.php | 1597 | <?php
/* vim: set expandtab sw=4 ts=4 sts=4: */
/**
* Functionality for the navigation tree
*
* @package PhpMyAdmin-Navigation
*/
if (! defined('PHPMYADMIN')) {
exit;
}
require_once 'libraries/navigation/Nodes/Node_DatabaseChild.class.php';
/**
* Represents a view node in the navigation tree
*
* @package PhpMyAdmin-Navigation
*/
class Node_View extends Node_DatabaseChild
{
/**
* Initialises the class
*
* @param string $name An identifier for the new node
* @param int $type Type of node, may be one of CONTAINER or OBJECT
* @param bool $is_group Whether this object has been created
* while grouping nodes
*
* @return Node_View
*/
public function __construct($name, $type = Node::OBJECT, $is_group = false)
{
parent::__construct($name, $type, $is_group);
$this->icon = PMA_Util::getImage('b_props.png', __('View'));
$this->links = array(
'text' => 'sql.php?server=' . $GLOBALS['server']
. '&db=%2$s&table=%1$s&pos=0'
. '&token=' . $_SESSION[' PMA_token '],
'icon' => 'tbl_structure.php?server=' . $GLOBALS['server']
. '&db=%2$s&table=%1$s'
. '&token=' . $_SESSION[' PMA_token ']
);
$this->classes = 'view';
}
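/*
* The "%1$s" and "%2$s" placeholders above are positional sprintf()-style
* arguments; when the navigation tree renders this node they are filled in
* elsewhere with the URL-encoded table (%1$s) and database (%2$s) names.
*/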
/**
* Returns the type of the item represented by the node.
*
* @return string type of the item
*/
protected function getItemType()
{
return 'view';
}
}
| gpl-2.0 |
LMOspaceace/ajaxchat | styles/comboot/template/chat_body_chatrow.html | 1933 | {% for chatrow in loops.chatrow %}
<div id="p{{ chatrow.MESSAGE_ID }}" class="chatpost {% if not S_AJAX_CHAT_CHATROW_FULL %}original {% endif %}bg{{ chatrow.CLASS }}{% if not S_AJAX_CHAT_MESSAGES_DOWN %} chat-messages-down{% endif %}">
<div class="chat-postbody">
<div class="{% if S_AJAX_CHAT_AVATARS %}chat-postprofile{% else %}chat-postprofile-no-avatar{% endif %}" id="profile{{ chatrow.MESSAGE_ID }}">
<div {% if S_AJAX_CHAT_AVATARS %}class="chat-avatars"{% endif %}>
{% if S_AJAX_CHAT_AVATARS %}
{% if chatrow.USER_AVATAR %}
{% if S_AJAX_CHAT_AVATAR_HOVER %}
<span>{{ chatrow.USER_AVATAR }}</span>
{% endif %}
{{ chatrow.USER_AVATAR_THUMB }}
{% else %}
{% if S_AJAX_CHAT_AVATAR_HOVER %}
<span>
<img src="{{ STYLE_PATH }}/theme/images/no_avatar.gif" height="128" alt="" />
</span>
{% endif %}
<img src="{{ STYLE_PATH }}/theme/images/no_avatar.gif" height="20" alt="" />
{% endif %}
{% endif %}
</div>
<div class="chat-usernames">
{% if not S_IS_BOT and S_USER_LOGGED_IN and CHAT_PAGE neq 'archive' %}
{% if chatrow.USER_COLOR %}
<a href="#" style="color: #{{ chatrow.USER_COLOR }}" onclick="insert_text('[color=#{{ chatrow.USER_COLOR }}][b]@{{ chatrow.USERNAME_A }} : [/color][/b]', true);return false;" title="{{ lang('RESPOND') }}">@</a>
{% else %}
<a href="#" onclick="insert_text('[b]@{{ chatrow.USERNAME_A }} : [/b]', true);return false;" title="{{ lang('RESPOND') }}">@</a>
{% endif %}
{% else %}
@
{% endif %}
{{ chatrow.USERNAME_FULL }} <b class="chat-time">« {{ chatrow.TIME }} »</b>
</div>
</div>
{{ SMILIES_PATH }}
<div class="message-comboot">
{% INCLUDE 'chatrow_message_icons.html' %}
{{ chatrow.MESSAGE }}
</div>
</div>
</div>
{% endfor %} | gpl-2.0 |
DigitalMediaServer/DigitalMediaServer | src/main/java/net/pms/formats/audio/MPC.java | 1121 | /*
* Digital Media Server, for streaming digital media to UPnP AV or DLNA
* compatible devices based on PS3 Media Server and Universal Media Server.
* Copyright (C) 2016 Digital Media Server developers.
*
* This program is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation, either version 3 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see http://www.gnu.org/licenses/.
*/
package net.pms.formats.audio;
public class MPC extends AudioBase {
@Override
public Identifier getIdentifier() {
return Identifier.MPC;
}
@Override
public String[] getSupportedExtensions() {
return new String[] {
"mpc",
"mp+",
"mpp",
};
}
}
| gpl-2.0 |
adqio/revive-adserver | etc/changes/migration_tables_core_542.php | 1282 | <?php
/*
+---------------------------------------------------------------------------+
| Revive Adserver |
| http://www.revive-adserver.com |
| |
| Copyright: See the COPYRIGHT.txt file. |
| License: GPLv2 or later, see the LICENSE.txt file. |
+---------------------------------------------------------------------------+
*/
require_once(MAX_PATH.'/lib/OA/Upgrade/Migration.php');
class Migration_542 extends Migration
{
function Migration_542()
{
//$this->__construct();
$this->aTaskList_constructive[] = 'beforeAddField__campaigns__as_reject_reason';
$this->aTaskList_constructive[] = 'afterAddField__campaigns__as_reject_reason';
$this->aObjectMap['campaigns']['as_reject_reason'] = array('fromTable'=>'campaigns', 'fromField'=>'as_reject_reason');
}
function beforeAddField__campaigns__as_reject_reason()
{
return $this->beforeAddField('campaigns', 'as_reject_reason');
}
function afterAddField__campaigns__as_reject_reason()
{
return $this->afterAddField('campaigns', 'as_reject_reason');
}
}
?> | gpl-2.0 |
printerpam/Elgg | engine/classes/Elgg/I18n/Translator.php | 15087 | <?php
namespace Elgg\I18n;
/**
* WARNING: API IN FLUX. DO NOT USE DIRECTLY.
*
* @access private
*
* @since 1.10.0
*/
class Translator {
/**
* Global Elgg configuration
*
* @var \stdClass
*/
	private $CONFIG;
	/** @var string Path of the core language files, set in the constructor */
	private $defaultPath;
/**
* Initializes new translator
*/
public function __construct() {
global $CONFIG;
$this->CONFIG = $CONFIG;
$this->defaultPath = dirname(dirname(dirname(dirname(__DIR__)))) . "/languages/";
}
/**
* Given a message key, returns an appropriately translated full-text string
*
* @param string $message_key The short message code
* @param array $args An array of arguments to pass through vsprintf().
* @param string $language Optionally, the standard language code
* (defaults to site/user default, then English)
*
* @return string Either the translated string, the English string,
* or the original language string.
*/
function translate($message_key, $args = array(), $language = "") {
static $CURRENT_LANGUAGE;
// old param order is deprecated
if (!is_array($args)) {
elgg_deprecated_notice(
'As of Elgg 1.8, the 2nd arg to elgg_echo() is an array of string replacements and the 3rd arg is the language.',
1.8
);
$language = $args;
$args = array();
}
if (!isset($GLOBALS['_ELGG']->translations)) {
// this means we probably had an exception before translations were initialized
$this->registerTranslations($this->defaultPath);
}
if (!$CURRENT_LANGUAGE) {
$CURRENT_LANGUAGE = $this->getLanguage();
}
if (!$language) {
$language = $CURRENT_LANGUAGE;
}
if (!isset($GLOBALS['_ELGG']->translations[$language])) {
// The language being requested is not the same as the language of the
// logged in user, so we will have to load it separately. (Most likely
// we're sending a notification and the recipient is using a different
// language than the logged in user.)
_elgg_load_translations_for_language($language);
}
if (isset($GLOBALS['_ELGG']->translations[$language][$message_key])) {
$string = $GLOBALS['_ELGG']->translations[$language][$message_key];
} else if (isset($GLOBALS['_ELGG']->translations["en"][$message_key])) {
$string = $GLOBALS['_ELGG']->translations["en"][$message_key];
_elgg_services()->logger->notice(sprintf('Missing %s translation for "%s" language key', $language, $message_key));
} else {
$string = $message_key;
_elgg_services()->logger->notice(sprintf('Missing English translation for "%s" language key', $message_key));
}
// only pass through if we have arguments to allow backward compatibility
// with manual sprintf() calls.
if ($args) {
$string = vsprintf($string, $args);
}
return $string;
}
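	// A minimal usage sketch -- hypothetical key/strings, and it assumes a
	// booted Elgg core where _elgg_services() is available:
	//   $t = new \Elgg\I18n\Translator();
	//   $t->addTranslation('en', array('greeting:user' => 'Hello, %s!'));
	//   echo $t->translate('greeting:user', array('Nina'), 'en'); // "Hello, Nina!"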
/**
* Add a translation.
*
* Translations are arrays in the Zend Translation array format, eg:
*
* $english = array('message1' => 'message1', 'message2' => 'message2');
* $german = array('message1' => 'Nachricht1','message2' => 'Nachricht2');
*
* @param string $country_code Standard country code (eg 'en', 'nl', 'es')
* @param array $language_array Formatted array of strings
*
* @return bool Depending on success
*/
function addTranslation($country_code, $language_array) {
if (!isset($GLOBALS['_ELGG']->translations)) {
$GLOBALS['_ELGG']->translations = array();
}
$country_code = strtolower($country_code);
$country_code = trim($country_code);
if (is_array($language_array) && $country_code != "") {
if (sizeof($language_array) > 0) {
if (!isset($GLOBALS['_ELGG']->translations[$country_code])) {
$GLOBALS['_ELGG']->translations[$country_code] = $language_array;
} else {
$GLOBALS['_ELGG']->translations[$country_code] = $language_array + $GLOBALS['_ELGG']->translations[$country_code];
}
}
return true;
}
return false;
}
/**
* Detect the current language being used by the current site or logged in user.
*
* @return string The language code for the site/user or "en" if not set
*/
function getCurrentLanguage() {
$language = $this->getLanguage();
if (!$language) {
$language = 'en';
}
return $language;
}
/**
* Gets the current language in use by the system or user.
*
* @return string The language code (eg "en") or false if not set
*/
function getLanguage() {
$url_lang = _elgg_services()->input->get('hl');
if ($url_lang) {
return $url_lang;
}
$user = _elgg_services()->session->getLoggedInUser();
$language = false;
if (($user) && ($user->language)) {
$language = $user->language;
}
if ((!$language) && (isset($this->CONFIG->language)) && ($this->CONFIG->language)) {
$language = $this->CONFIG->language;
}
if ($language) {
return $language;
}
return false;
}
/**
* @access private
*/
function loadTranslations() {
if ($this->CONFIG->system_cache_enabled) {
$loaded = true;
$languages = array_unique(array('en', $this->getCurrentLanguage()));
foreach ($languages as $language) {
$data = elgg_load_system_cache("$language.lang");
if ($data) {
$this->addTranslation($language, unserialize($data));
} else {
$loaded = false;
}
}
if ($loaded) {
$GLOBALS['_ELGG']->i18n_loaded_from_cache = true;
			// this is here to force the core languages path to remain
			// registered even when translations come from the cache
$GLOBALS['_ELGG']->language_paths[$this->defaultPath] = true;
return;
}
}
// load core translations from languages directory
$this->registerTranslations($this->defaultPath);
}
/**
* Registers translations in a directory assuming the standard plugin layout.
*
* @param string $path Without the trailing slash.
*
* @return bool Success
*/
function registerPluginTranslations($path) {
$languages_path = rtrim($path, "\\/") . "/languages";
// don't need to have translations
if (!is_dir($languages_path)) {
return true;
}
return $this->registerTranslations($languages_path);
}
/**
* When given a full path, finds translation files and loads them
*
* @param string $path Full path
* @param bool $load_all If true all languages are loaded, if
* false only the current language + en are loaded
*
* @return bool success
*/
function registerTranslations($path, $load_all = false) {
$path = sanitise_filepath($path);
		// Make a note of this path just in case we need to register this language later
if (!isset($GLOBALS['_ELGG']->language_paths)) {
$GLOBALS['_ELGG']->language_paths = array();
}
$GLOBALS['_ELGG']->language_paths[$path] = true;
// Get the current language based on site defaults and user preference
$current_language = $this->getCurrentLanguage();
_elgg_services()->logger->info("Translations loaded from: $path");
// only load these files unless $load_all is true.
$load_language_files = array(
'en.php',
"$current_language.php"
);
$load_language_files = array_unique($load_language_files);
$handle = opendir($path);
if (!$handle) {
_elgg_services()->logger->error("Could not open language path: $path");
return false;
}
$return = true;
while (false !== ($language = readdir($handle))) {
// ignore bad files
if (substr($language, 0, 1) == '.' || substr($language, -4) !== '.php') {
continue;
}
if (in_array($language, $load_language_files) || $load_all) {
$result = include_once($path . $language);
if ($result === false) {
$return = false;
continue;
} elseif (is_array($result)) {
$this->addTranslation(basename($language, '.php'), $result);
}
}
}
return $return;
}
/**
* Reload all translations from all registered paths.
*
* This is only called by functions which need to know all possible translations.
*
* @todo Better on demand loading based on language_paths array
*
* @return void
*/
function reloadAllTranslations() {
static $LANG_RELOAD_ALL_RUN;
if ($LANG_RELOAD_ALL_RUN) {
return;
}
if ($GLOBALS['_ELGG']->i18n_loaded_from_cache) {
$cache = elgg_get_system_cache();
$cache_dir = $cache->getVariable("cache_path");
$filenames = elgg_get_file_list($cache_dir, array(), array(), array(".lang"));
foreach ($filenames as $filename) {
// Look for files matching for example 'en.lang', 'cmn.lang' or 'pt_br.lang'.
// Note that this regex is just for the system cache. The original language
// files are allowed to have uppercase letters (e.g. pt_BR.php).
if (preg_match('/(([a-z]{2,3})(_[a-z]{2})?)\.lang$/', $filename, $matches)) {
$language = $matches[1];
$data = elgg_load_system_cache("$language.lang");
if ($data) {
$this->addTranslation($language, unserialize($data));
}
}
}
} else {
foreach ($GLOBALS['_ELGG']->language_paths as $path => $dummy) {
$this->registerTranslations($path, true);
}
}
$LANG_RELOAD_ALL_RUN = true;
}
/**
* Return an array of installed translations as an associative
* array "two letter code" => "native language name".
*
* @return array
*/
function getInstalledTranslations() {
// Ensure that all possible translations are loaded
$this->reloadAllTranslations();
$installed = array();
$admin_logged_in = _elgg_services()->session->isAdminLoggedIn();
foreach ($GLOBALS['_ELGG']->translations as $k => $v) {
$installed[$k] = $this->translate($k, array(), $k);
if ($admin_logged_in && ($k != 'en')) {
$completeness = $this->getLanguageCompleteness($k);
if ($completeness < 100) {
$installed[$k] .= " (" . $completeness . "% " . $this->translate('complete') . ")";
}
}
}
return $installed;
}
/**
* Return the level of completeness for a given language code (compared to english)
*
* @param string $language Language
*
* @return int
*/
function getLanguageCompleteness($language) {
// Ensure that all possible translations are loaded
$this->reloadAllTranslations();
$language = sanitise_string($language);
$en = count($GLOBALS['_ELGG']->translations['en']);
$missing = $this->getMissingLanguageKeys($language);
if ($missing) {
$missing = count($missing);
} else {
$missing = 0;
}
//$lang = count($GLOBALS['_ELGG']->translations[$language]);
$lang = $en - $missing;
return round(($lang / $en) * 100, 2);
}
/**
* Return the translation keys missing from a given language,
* or those that are identical to the english version.
*
* @param string $language The language
*
* @return mixed
*/
function getMissingLanguageKeys($language) {
// Ensure that all possible translations are loaded
$this->reloadAllTranslations();
$missing = array();
foreach ($GLOBALS['_ELGG']->translations['en'] as $k => $v) {
if ((!isset($GLOBALS['_ELGG']->translations[$language][$k]))
|| ($GLOBALS['_ELGG']->translations[$language][$k] == $GLOBALS['_ELGG']->translations['en'][$k])) {
$missing[] = $k;
}
}
if (count($missing)) {
return $missing;
}
return false;
}
/**
* Check if a give language key exists
*
* @param string $key The translation key
* @param string $language The specific language to check
*
* @return bool
* @since 1.11
*/
function languageKeyExists($key, $language = 'en') {
if (empty($key)) {
return false;
}
if (($language !== 'en') && !array_key_exists($language, $GLOBALS['_ELGG']->translations)) {
// Ensure that all possible translations are loaded
$this->reloadAllTranslations();
}
if (!array_key_exists($language, $GLOBALS['_ELGG']->translations)) {
return false;
}
return array_key_exists($key, $GLOBALS['_ELGG']->translations[$language]);
}
/**
* Returns an array of language codes.
*
* @return array
*/
function getAllLanguageCodes() {
return array(
"aa", // "Afar"
"ab", // "Abkhazian"
"af", // "Afrikaans"
"am", // "Amharic"
"ar", // "Arabic"
"as", // "Assamese"
"ay", // "Aymara"
"az", // "Azerbaijani"
"ba", // "Bashkir"
"be", // "Byelorussian"
"bg", // "Bulgarian"
"bh", // "Bihari"
"bi", // "Bislama"
"bn", // "Bengali; Bangla"
"bo", // "Tibetan"
"br", // "Breton"
"ca", // "Catalan"
"cmn", // "Mandarin Chinese" // ISO 639-3
"co", // "Corsican"
"cs", // "Czech"
"cy", // "Welsh"
"da", // "Danish"
"de", // "German"
"dz", // "Bhutani"
"el", // "Greek"
"en", // "English"
"eo", // "Esperanto"
"es", // "Spanish"
"et", // "Estonian"
"eu", // "Basque"
"fa", // "Persian"
"fi", // "Finnish"
"fj", // "Fiji"
"fo", // "Faeroese"
"fr", // "French"
"fy", // "Frisian"
"ga", // "Irish"
"gd", // "Scots / Gaelic"
"gl", // "Galician"
"gn", // "Guarani"
"gu", // "Gujarati"
"he", // "Hebrew"
"ha", // "Hausa"
"hi", // "Hindi"
"hr", // "Croatian"
"hu", // "Hungarian"
"hy", // "Armenian"
"ia", // "Interlingua"
"id", // "Indonesian"
"ie", // "Interlingue"
"ik", // "Inupiak"
"is", // "Icelandic"
"it", // "Italian"
"iu", // "Inuktitut"
"iw", // "Hebrew (obsolete)"
"ja", // "Japanese"
"ji", // "Yiddish (obsolete)"
"jw", // "Javanese"
"ka", // "Georgian"
"kk", // "Kazakh"
"kl", // "Greenlandic"
"km", // "Cambodian"
"kn", // "Kannada"
"ko", // "Korean"
"ks", // "Kashmiri"
"ku", // "Kurdish"
"ky", // "Kirghiz"
"la", // "Latin"
"ln", // "Lingala"
"lo", // "Laothian"
"lt", // "Lithuanian"
"lv", // "Latvian/Lettish"
"mg", // "Malagasy"
"mi", // "Maori"
"mk", // "Macedonian"
"ml", // "Malayalam"
"mn", // "Mongolian"
"mo", // "Moldavian"
"mr", // "Marathi"
"ms", // "Malay"
"mt", // "Maltese"
"my", // "Burmese"
"na", // "Nauru"
"ne", // "Nepali"
"nl", // "Dutch"
"no", // "Norwegian"
"oc", // "Occitan"
"om", // "(Afan) Oromo"
"or", // "Oriya"
"pa", // "Punjabi"
"pl", // "Polish"
"ps", // "Pashto / Pushto"
"pt", // "Portuguese"
"pt_br", // 'Brazilian Portuguese'
"qu", // "Quechua"
"rm", // "Rhaeto-Romance"
"rn", // "Kirundi"
"ro", // "Romanian"
"ru", // "Russian"
"rw", // "Kinyarwanda"
"sa", // "Sanskrit"
"sd", // "Sindhi"
"sg", // "Sangro"
"sh", // "Serbo-Croatian"
"si", // "Singhalese"
"sk", // "Slovak"
"sl", // "Slovenian"
"sm", // "Samoan"
"sn", // "Shona"
"so", // "Somali"
"sq", // "Albanian"
"sr", // "Serbian"
"ss", // "Siswati"
"st", // "Sesotho"
"su", // "Sundanese"
"sv", // "Swedish"
"sw", // "Swahili"
"ta", // "Tamil"
"te", // "Tegulu"
"tg", // "Tajik"
"th", // "Thai"
"ti", // "Tigrinya"
"tk", // "Turkmen"
"tl", // "Tagalog"
"tn", // "Setswana"
"to", // "Tonga"
"tr", // "Turkish"
"ts", // "Tsonga"
"tt", // "Tatar"
"tw", // "Twi"
"ug", // "Uigur"
"uk", // "Ukrainian"
"ur", // "Urdu"
"uz", // "Uzbek"
"vi", // "Vietnamese"
"vo", // "Volapuk"
"wo", // "Wolof"
"xh", // "Xhosa"
"yi", // "Yiddish"
"yo", // "Yoruba"
"za", // "Zuang"
"zh", // "Chinese"
"zu", // "Zulu"
);
}
} | gpl-2.0 |
huoyao/TL_vegaFEM | vegafem/vegafem/homogeneousMooneyRivlinIsotropicMaterial.h | 3799 | /*************************************************************************
* *
* Vega FEM Simulation Library Version 2.1 *
* *
* "isotropic hyperelastic FEM" library , Copyright (C) 2014 USC *
* All rights reserved. *
* *
* Code authors: Jernej Barbic, Fun Shing Sin *
* http://www.jernejbarbic.com/code *
* *
* Research: Jernej Barbic, Fun Shing Sin, Daniel Schroeder, *
* Doug L. James, Jovan Popovic *
* *
* Funding: National Science Foundation, Link Foundation, *
* Singapore-MIT GAMBIT Game Lab, *
* Zumberge Research and Innovation Fund at USC *
* *
* This library is free software; you can redistribute it and/or *
* modify it under the terms of the BSD-style license that is *
* included with this library in the file LICENSE.txt *
* *
* This library is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the file *
* LICENSE.TXT for more details. *
* *
*************************************************************************/
#ifndef _HOMOGENEOUSMOONEYRIVLINISOTROPICMATERIAL_H_
#define _HOMOGENEOUSMOONEYRIVLINISOTROPICMATERIAL_H_
#include "isotropicMaterialWithCompressionResistance.h"
/*
The implemented compressible Mooney-Rivlin material has the following energy function:
energy = 0.5 * mu01 * ((Ic^2 - IIc) / pow(IIIc, 2/3) - 6) +
mu10 * (Ic / pow(IIIc, 1/3) - 3) +
v1 * (sqrt(IIIc) - 1)^2
This is a standard adaption of the material presented in:
R. S. Rivlin and D. W. Saunders:
Large elastic deformations of isotropic materials VII.
Experiments on the deformation of rubber.
Philosophical Transactions Royal Society London Series A,
243(865), 1951, pp. 251-288.
See the Wikipedia page, "Polynomial_(hyperelastic_model)".
A snapshot of the Wikipedia page is included in this folder.
*/
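/* For orientation only: a minimal, hypothetical sketch (not part of the
   library API) of evaluating this energy density directly from the three
   isotropic invariants Ic, IIc, IIIc of the deformation tensor:
     static double mooneyRivlinEnergy(double Ic, double IIc, double IIIc,
                                      double mu01, double mu10, double v1)
     {
       double s = sqrt(IIIc) - 1.0;  // volumetric deviation
       return 0.5 * mu01 * ((Ic * Ic - IIc) / pow(IIIc, 2.0 / 3.0) - 6.0)
            + mu10 * (Ic / pow(IIIc, 1.0 / 3.0) - 3.0)
            + v1 * s * s;            // volume-preservation penalty
     }
   ComputeEnergy() below presumably evaluates the same expression;
   ComputeEnergyGradient() and ComputeEnergyHessian() are its first and
   second derivatives with respect to (Ic, IIc, IIIc).
*/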
class HomogeneousMooneyRivlinIsotropicMaterial : public IsotropicMaterialWithCompressionResistance
{
public:
HomogeneousMooneyRivlinIsotropicMaterial(double mu01, double mu10, double v1, int enableCompressionResistance=0, double compressionResistance=0.0);
virtual ~HomogeneousMooneyRivlinIsotropicMaterial();
virtual double ComputeEnergy(int elementIndex, double * invariants);
virtual void ComputeEnergyGradient(int elementIndex, double * invariants, double * gradient); // invariants and gradient are 3-vectors
virtual void ComputeEnergyHessian(int elementIndex, double * invariants, double * hessian); // invariants is a 3-vector, hessian is a 3x3 symmetric matrix, unrolled into a 6-vector, in the following order: (11, 12, 13, 22, 23, 33).
protected:
double mu01, mu10;
double v1;
double compressionResistance;
double EdivNuFactor;
virtual double GetCompressionResistanceFactor(int elementIndex);
};
#endif
| gpl-2.0 |
joncampbell123/dosbox-x | src/libs/decoders/stb.h | 465685 | /* stb.h - v2.35 - Sean's Tool Box -- public domain -- http://nothings.org/stb.h
no warranty is offered or implied; use this code at your own risk
This is a single header file with a bunch of useful utilities
for getting stuff done in C/C++.
Documentation: http://nothings.org/stb/stb_h.html
Unit tests: http://nothings.org/stb/stb.c
============================================================================
You MUST
#define STB_DEFINE
in EXACTLY _one_ C or C++ file that includes this header, BEFORE the
include, like this:
#define STB_DEFINE
#include "stb.h"
All other files should just #include "stb.h" without the #define.
============================================================================
Version History
2.36 various fixes
2.35 fix clang-cl issues with swprintf
2.34 fix warnings
2.33 more fixes to random numbers
2.32 stb_intcmprev, stb_uidict, fix random numbers on Linux
2.31 stb_ucharcmp
2.30 MinGW fix
2.29 attempt to fix use of swprintf()
2.28 various new functionality
2.27 test _WIN32 not WIN32 in STB_THREADS
2.26 various warning & bugfixes
2.25 various warning & bugfixes
2.24 various warning & bugfixes
2.23 fix 2.22
2.22 64-bit fixes from '!='; fix stb_sdict_copy() to have preferred name
2.21 utf-8 decoder rejects "overlong" encodings; attempted 64-bit improvements
2.20 fix to hash "copy" function--reported by someone with handle "!="
2.19 ???
2.18 stb_readdir_subdirs_mask
2.17 stb_cfg_dir
2.16 fix stb_bgio_, add stb_bgio_stat(); begin a streaming wrapper
2.15 upgraded hash table template to allow:
- aggregate keys (explicit comparison func for EMPTY and DEL keys)
- "static" implementations (so they can be culled if unused)
2.14 stb_mprintf
2.13 reduce identifiable strings in STB_NO_STB_STRINGS
2.12 fix STB_ONLY -- lots of uint32s, TRUE/FALSE things had crept in
2.11 fix bug in stb_dirtree_get() which caused "c://path" sorts of stuff
2.10 STB_F(), STB_I() inline constants (also KI,KU,KF,KD)
2.09 stb_box_face_vertex_axis_side
2.08 bugfix stb_trimwhite()
2.07 colored printing in windows (why are we in 1985?)
2.06 comparison functions are now functions-that-return-functions and
accept a struct-offset as a parameter (not thread-safe)
2.05 compile and pass tests under Linux (but no threads); thread cleanup
2.04 stb_cubic_bezier_1d, smoothstep, avoid dependency on registry
2.03 ?
2.02 remove integrated documentation
2.01 integrate various fixes; stb_force_uniprocessor
2.00 revised stb_dupe to use multiple hashes
1.99 stb_charcmp
1.98 stb_arr_deleten, stb_arr_insertn
1.97 fix stb_newell_normal()
1.96 stb_hash_number()
1.95 hack stb__rec_max; clean up recursion code to use new functions
1.94 stb_dirtree; rename stb_extra to stb_ptrmap
1.93 stb_sem_new() API cleanup (no blockflag-starts blocked; use 'extra')
1.92 stb_threadqueue--multi reader/writer queue, fixed size or resizeable
1.91 stb_bgio_* for reading disk asynchronously
1.90 stb_mutex uses CRITICAL_REGION; new stb_sync primitive for thread
joining; workqueue supports stb_sync instead of stb_semaphore
1.89 support ';' in constant-string wildcards; stb_mutex wrapper (can
implement with EnterCriticalRegion eventually)
1.88 portable threading API (only for win32 so far); worker thread queue
1.87 fix wildcard handling in stb_readdir_recursive
1.86 support ';' in wildcards
1.85 make stb_regex work with non-constant strings;
beginnings of stb_introspect()
1.84 (forgot to make notes)
1.83 whoops, stb_keep_if_different wasn't deleting the temp file
1.82 bring back stb_compress from stb_file.h for cmirror
1.81 various bugfixes, STB_FASTMALLOC_INIT inits FASTMALLOC in release
1.80 stb_readdir returns utf8; write own utf8-utf16 because lib was wrong
1.79 stb_write
1.78 calloc() support for malloc wrapper, STB_FASTMALLOC
1.77 STB_FASTMALLOC
1.76 STB_STUA - Lua-like language; (stb_image, stb_csample, stb_bilinear)
1.75 alloc/free array of blocks; stb_hheap bug; a few stb_ps_ funcs;
hash*getkey, hash*copy; stb_bitset; stb_strnicmp; bugfix stb_bst
1.74 stb_replaceinplace; use stdlib C function to convert utf8 to UTF-16
1.73 fix performance bug & leak in stb_ischar (C++ port lost a 'static')
1.72 remove stb_block, stb_block_manager, stb_decompress (to stb_file.h)
1.71 stb_trimwhite, stb_tokens_nested, etc.
1.70 back out 1.69 because it might problemize mixed builds; stb_filec()
1.69 (stb_file returns 'char *' in C++)
1.68 add a special 'tree root' data type for stb_bst; stb_arr_end
1.67 full C++ port. (stb_block_manager)
1.66 stb_newell_normal
1.65 stb_lex_item_wild -- allow wildcard items which MUST match entirely
1.64 stb_data
1.63 stb_log_name
1.62 stb_define_sort; C++ cleanup
1.61 stb_hash_fast -- Paul Hsieh's hash function (beats Bob Jenkins'?)
1.60 stb_delete_directory_recursive
1.59 stb_readdir_recursive
1.58 stb_bst variant with parent pointer for O(1) iteration, not O(log N)
1.57 replace LCG random with Mersenne Twister (found a public domain one)
1.56 stb_perfect_hash, stb_ischar, stb_regex
1.55 new stb_bst API allows multiple BSTs per node (e.g. secondary keys)
1.54 bugfix: stb_define_hash, stb_wildmatch, regexp
1.53 stb_define_hash; recoded stb_extra, stb_sdict use it
1.52 stb_rand_define, stb_bst, stb_reverse
1.51 fix 'stb_arr_setlen(NULL, 0)'
1.50 stb_wordwrap
1.49 minor improvements to enable the scripting language
1.48 better approach for stb_arr using stb_malloc; more invasive, clearer
1.47 stb_lex (lexes stb.h at 1.5ML/s on 3Ghz P4; 60/70% of optimal/flex)
1.46 stb_wrapper_*, STB_MALLOC_WRAPPER
1.45 lightly tested DFA acceleration of regexp searching
1.44 wildcard matching & searching; regexp matching & searching
1.43 stb_temp
1.42 allow stb_arr to use stb_malloc/realloc; note this is global
1.41 make it compile in C++; (disable stb_arr in C++)
1.40 stb_dupe tweak; stb_swap; stb_substr
1.39 stb_dupe; improve stb_file_max to be less stupid
1.38 stb_sha1_file: generate sha1 for file, even > 4GB
1.37 stb_file_max; partial support for utf8 filenames in Windows
1.36 remove STB__NO_PREFIX - poor interaction with IDE, not worth it
streamline stb_arr to make it separately publishable
1.35 bugfixes for stb_sdict, stb_malloc(0), stristr
1.34 (streaming interfaces for stb_compress)
1.33 stb_alloc; bug in stb_getopt; remove stb_overflow
1.32 (stb_compress returns, smaller&faster; encode window & 64-bit len)
1.31 stb_prefix_count
1.30 (STB__NO_PREFIX - remove stb_ prefixes for personal projects)
1.29 stb_fput_varlen64, etc.
1.28 stb_sha1
1.27 ?
1.26 stb_extra
1.25 ?
1.24 stb_copyfile
1.23 stb_readdir
1.22 ?
1.21 ?
1.20 ?
1.19 ?
1.18 ?
1.17 ?
1.16 ?
1.15 stb_fixpath, stb_splitpath, stb_strchr2
1.14 stb_arr
1.13 ?stb, stb_log, stb_fatal
1.12 ?stb_hash2
1.11 miniML
1.10 stb_crc32, stb_adler32
1.09 stb_sdict
1.08 stb_bitreverse, stb_ispow2, stb_big32
stb_fopen, stb_fput_varlen, stb_fput_ranged
stb_fcmp, stb_feq
1.07 (stb_encompress)
1.06 stb_compress
1.05 stb_tokens, (stb_hheap)
1.04 stb_rand
1.03 ?(s-strings)
1.02 ?stb_filelen, stb_tokens
1.01 stb_tolower
1.00 stb_hash, stb_intcmp
stb_file, stb_stringfile, stb_fgets
stb_prefix, stb_strlower, stb_strtok
stb_image
(stb_array), (stb_arena)
Parenthesized items have since been removed.
LICENSE
See end of file for license information.
CREDITS
Written by Sean Barrett.
Fixes:
Philipp Wiesemann
Robert Nix
r-lyeh
blackpawn
github:Mojofreem
Ryan Whitworth
Vincent Isambart
Mike Sartain
Eugene Opalev
Tim Sjostrand
github:infatum
Dave Butler (Croepha)
Ethan Lee (flibitijibibo)
Brian Collins
*/
#include <stdarg.h>
#ifndef STB__INCLUDE_STB_H
#define STB__INCLUDE_STB_H
#define STB_VERSION 1
#ifdef STB_INTROSPECT
#define STB_DEFINE
#endif
#ifdef STB_DEFINE_THREADS
#ifndef STB_DEFINE
#define STB_DEFINE
#endif
#ifndef STB_THREADS
#define STB_THREADS
#endif
#endif
#if defined(_WIN32) && !defined(__MINGW32__)
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#ifndef _CRT_NONSTDC_NO_DEPRECATE
#define _CRT_NONSTDC_NO_DEPRECATE
#endif
#ifndef _CRT_NON_CONFORMING_SWPRINTFS
#define _CRT_NON_CONFORMING_SWPRINTFS
#endif
#if !defined(_MSC_VER) || _MSC_VER > 1700
#include <intrin.h> // _BitScanReverse
#endif
#endif
#include <stdlib.h> // stdlib could have min/max
#include <stdio.h> // need FILE
#include <string.h> // stb_define_hash needs memcpy/memset
#include <time.h> // stb_dirtree
#ifdef __MINGW32__
#include <fcntl.h> // O_RDWR
#endif
#ifdef STB_PERSONAL
typedef int Bool;
#define False 0
#define True 1
#endif
#ifdef STB_MALLOC_WRAPPER_PAGED
#define STB_MALLOC_WRAPPER_DEBUG
#endif
#ifdef STB_MALLOC_WRAPPER_DEBUG
#define STB_MALLOC_WRAPPER
#endif
#ifdef STB_MALLOC_WRAPPER_FASTMALLOC
#define STB_FASTMALLOC
#define STB_MALLOC_WRAPPER
#endif
#ifdef STB_FASTMALLOC
#ifndef _WIN32
#undef STB_FASTMALLOC
#endif
#endif
#ifdef STB_DEFINE
#include <assert.h>
#include <stdarg.h>
#include <stddef.h>
#include <ctype.h>
#include <math.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h> // _mktemp
#include <direct.h> // _rmdir
#endif
#include <sys/types.h> // stat()/_stat()
#include <sys/stat.h> // stat()/_stat()
#endif
#define stb_min(a,b) ((a) < (b) ? (a) : (b))
#define stb_max(a,b) ((a) > (b) ? (a) : (b))
#ifndef STB_ONLY
#if !defined(__cplusplus) && !defined(min) && !defined(max)
#define min(x,y) stb_min(x,y)
#define max(x,y) stb_max(x,y)
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#ifndef TRUE
#define TRUE 1
#define FALSE 0
#endif
#ifndef deg2rad
#define deg2rad(a) ((a)*(M_PI/180))
#endif
#ifndef rad2deg
#define rad2deg(a) ((a)*(180/M_PI))
#endif
#ifndef swap
#ifndef __cplusplus
#define swap(TYPE,a,b) \
do { TYPE stb__t; stb__t = (a); (a) = (b); (b) = stb__t; } while (0)
#endif
#endif
typedef unsigned char uint8 ;
typedef signed char int8 ;
typedef unsigned short uint16;
typedef signed short int16;
#if defined(STB_USE_LONG_FOR_32_BIT_INT) || defined(STB_LONG32)
typedef unsigned long uint32;
typedef signed long int32;
#else
typedef unsigned int uint32;
typedef signed int int32;
#endif
typedef unsigned char uchar ;
typedef unsigned short ushort;
typedef unsigned int uint ;
typedef unsigned long ulong ;
// produce compile errors if the sizes aren't right
typedef char stb__testsize16[sizeof(int16)==2];
typedef char stb__testsize32[sizeof(int32)==4];
#endif
#ifndef STB_TRUE
#define STB_TRUE 1
#define STB_FALSE 0
#endif
// if we're STB_ONLY, can't rely on uint32 or even uint, so all the
// variables we'll use herein need typenames prefixed with 'stb':
typedef unsigned char stb_uchar;
typedef unsigned char stb_uint8;
typedef unsigned int stb_uint;
typedef unsigned short stb_uint16;
typedef short stb_int16;
typedef signed char stb_int8;
#if defined(STB_USE_LONG_FOR_32_BIT_INT) || defined(STB_LONG32)
typedef unsigned long stb_uint32;
typedef long stb_int32;
#else
typedef unsigned int stb_uint32;
typedef int stb_int32;
#endif
typedef char stb__testsize2_16[sizeof(stb_uint16)==2 ? 1 : -1];
typedef char stb__testsize2_32[sizeof(stb_uint32)==4 ? 1 : -1];
#ifdef _MSC_VER
typedef unsigned __int64 stb_uint64;
typedef __int64 stb_int64;
#define STB_IMM_UINT64(literalui64) (literalui64##ui64)
#define STB_IMM_INT64(literali64) (literali64##i64)
#else
   // assume C99 long long is available on non-MSVC compilers
typedef unsigned long long stb_uint64;
typedef long long stb_int64;
#define STB_IMM_UINT64(literalui64) (literalui64##ULL)
#define STB_IMM_INT64(literali64) (literali64##LL)
#endif
typedef char stb__testsize2_64[sizeof(stb_uint64)==8 ? 1 : -1];
// add platform-specific ways of checking for sizeof(char*) == 8,
// and make those define STB_PTR64
#if defined(_WIN64) || defined(__x86_64__) || defined(__ia64__) || defined(__LP64__)
#define STB_PTR64
#endif
#ifdef STB_PTR64
typedef char stb__testsize2_ptr[sizeof(char *) == 8];
typedef stb_uint64 stb_uinta;
typedef stb_int64 stb_inta;
#else
typedef char stb__testsize2_ptr[sizeof(char *) == 4];
typedef stb_uint32 stb_uinta;
typedef stb_int32 stb_inta;
#endif
typedef char stb__testsize2_uinta[sizeof(stb_uinta)==sizeof(char*) ? 1 : -1];
// if so, we should define an int type that is the pointer size. until then,
// we'll have to make do with this (which is not the same at all!)
typedef union
{
unsigned int i;
void * p;
} stb_uintptr;
#ifdef __cplusplus
#define STB_EXTERN extern "C"
#else
#define STB_EXTERN extern
#endif
// check for well-known debug defines
#if defined(DEBUG) || defined(_DEBUG) || defined(DBG)
#ifndef NDEBUG
#define STB_DEBUG
#endif
#endif
#ifdef STB_DEBUG
#include <assert.h>
#endif
//////////////////////////////////////////////////////////////////////////////
//
// C library function platform handling
//
#ifdef STB_DEFINE
#if defined(_WIN32) && defined(__STDC_WANT_SECURE_LIB__)
static FILE * stb_p_fopen(const char *filename, const char *mode)
{
FILE *f;
if (0 == fopen_s(&f, filename, mode))
return f;
else
return NULL;
}
static FILE * stb_p_wfopen(const wchar_t *filename, const wchar_t *mode)
{
FILE *f;
if (0 == _wfopen_s(&f, filename, mode))
return f;
else
return NULL;
}
static char *stb_p_strcpy_s(char *a, size_t size, const char *b)
{
strcpy_s(a,size,b);
return a;
}
static char *stb_p_strncpy_s(char *a, size_t size, const char *b, size_t count)
{
strncpy_s(a,size,b,count);
return a;
}
#define stb_p_mktemp(s) (_mktemp_s(s, strlen(s)+1) == 0)
#define stb_p_sprintf sprintf_s
#define stb_p_size(x) ,(x)
#else
#define stb_p_fopen fopen
#define stb_p_wfopen _wfopen
#define stb_p_strcpy_s(a,s,b) strcpy(a,b)
#define stb_p_strncpy_s(a,s,b,c) strncpy(a,b,c)
#define stb_p_mktemp(s) (mktemp(s) != NULL)
#define stb_p_sprintf sprintf
#define stb_p_size(x)
#endif
#if defined(_WIN32)
#define stb_p_vsnprintf _vsnprintf
#else
#define stb_p_vsnprintf vsnprintf
#endif
#endif // STB_DEFINE
#if defined(_WIN32) && (_MSC_VER >= 1300)
#define stb_p_stricmp _stricmp
#define stb_p_strnicmp _strnicmp
#define stb_p_strdup _strdup
#else
#define stb_p_strdup strdup
#define stb_p_stricmp stricmp
#define stb_p_strnicmp strnicmp
#endif
STB_EXTERN void stb_wrapper_malloc(void *newp, size_t sz, char *file, int line);
STB_EXTERN void stb_wrapper_free(void *oldp, char *file, int line);
STB_EXTERN void stb_wrapper_realloc(void *oldp, void *newp, size_t sz, char *file, int line);
STB_EXTERN void stb_wrapper_calloc(size_t num, size_t sz, char *file, int line);
STB_EXTERN void stb_wrapper_listall(void (*func)(void *ptr, size_t sz, char *file, int line));
STB_EXTERN void stb_wrapper_dump(char *filename);
STB_EXTERN size_t stb_wrapper_allocsize(void *oldp);
STB_EXTERN void stb_wrapper_check(void *oldp);
#ifdef STB_DEFINE
// this is a special function used inside malloc wrapper
// to do allocations that aren't tracked (to avoid
// reentrancy). Of course if someone _else_ wraps realloc,
// this breaks, but if they're doing that AND the malloc
// wrapper they need to explicitly check for reentrancy.
//
// only define realloc_raw() and we do realloc(NULL,sz)
// for malloc() and realloc(p,0) for free().
static void * stb__realloc_raw(void *p, int sz)
{
if (p == NULL) return malloc(sz);
if (sz == 0) { free(p); return NULL; }
return realloc(p,sz);
}
#endif
#ifdef _WIN32
STB_EXTERN void * stb_smalloc(size_t sz);
STB_EXTERN void stb_sfree(void *p);
STB_EXTERN void * stb_srealloc(void *p, size_t sz);
STB_EXTERN void * stb_scalloc(size_t n, size_t sz);
STB_EXTERN char * stb_sstrdup(char *s);
#endif
#ifdef STB_FASTMALLOC
#define malloc stb_smalloc
#define free stb_sfree
#define realloc stb_srealloc
#define strdup stb_sstrdup
#define calloc stb_scalloc
#endif
#ifndef STB_MALLOC_ALLCHECK
#define stb__check(p) 1
#else
#ifndef STB_MALLOC_WRAPPER
#error STB_MALLOC_ALLCHECK requires STB_MALLOC_WRAPPER
#else
#define stb__check(p) stb_mcheck(p)
#endif
#endif
#ifdef STB_MALLOC_WRAPPER
STB_EXTERN void * stb__malloc(size_t, char *, int);
STB_EXTERN void * stb__realloc(void *, size_t, char *, int);
STB_EXTERN void * stb__calloc(size_t n, size_t s, char *, int);
STB_EXTERN void stb__free(void *, char *file, int);
STB_EXTERN char * stb__strdup(char *s, char *file, int);
STB_EXTERN void stb_malloc_checkall(void);
STB_EXTERN void stb_malloc_check_counter(int init_delay, int rep_delay);
#ifndef STB_MALLOC_WRAPPER_DEBUG
#define stb_mcheck(p) 1
#else
STB_EXTERN int stb_mcheck(void *);
#endif
#ifdef STB_DEFINE
#ifdef STB_MALLOC_WRAPPER_DEBUG
#define STB__PAD 32
#define STB__BIAS 16
#define STB__SIG 0x51b01234
#define STB__FIXSIZE(sz) (((sz+3) & ~3) + STB__PAD)
#define STB__ptr(x,y) ((char *) (x) + (y))
#else
#define STB__ptr(x,y) (x)
#define STB__FIXSIZE(sz) (sz)
#endif
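// Debug-wrapper block layout (validated by stb_mcheck below): a 16-byte
// header of [raw size][STB__SIG x3] sits just below the user pointer, and
// four STB__SIG+1 sentinels trail the 4-byte-aligned payload, so both
// buffer underruns and overruns trip the asserts in stb_mcheck().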
#ifdef STB_MALLOC_WRAPPER_DEBUG
int stb_mcheck(void *p)
{
unsigned int sz;
if (p == NULL) return 1;
p = ((char *) p) - STB__BIAS;
sz = * (unsigned int *) p;
assert(* (unsigned int *) STB__ptr(p,4) == STB__SIG);
assert(* (unsigned int *) STB__ptr(p,8) == STB__SIG);
assert(* (unsigned int *) STB__ptr(p,12) == STB__SIG);
assert(* (unsigned int *) STB__ptr(p,sz-4) == STB__SIG+1);
assert(* (unsigned int *) STB__ptr(p,sz-8) == STB__SIG+1);
assert(* (unsigned int *) STB__ptr(p,sz-12) == STB__SIG+1);
assert(* (unsigned int *) STB__ptr(p,sz-16) == STB__SIG+1);
stb_wrapper_check(STB__ptr(p, STB__BIAS));
return 1;
}
static void stb__check2(void *p, size_t sz, char *file, int line)
{
stb_mcheck(p);
}
void stb_malloc_checkall(void)
{
stb_wrapper_listall(stb__check2);
}
#else
void stb_malloc_checkall(void) { }
#endif
static int stb__malloc_wait=(1 << 30), stb__malloc_next_wait = (1 << 30), stb__malloc_iter;
void stb_malloc_check_counter(int init_delay, int rep_delay)
{
stb__malloc_wait = init_delay;
stb__malloc_next_wait = rep_delay;
}
void stb_mcheck_all(void)
{
#ifdef STB_MALLOC_WRAPPER_DEBUG
++stb__malloc_iter;
if (--stb__malloc_wait <= 0) {
stb_malloc_checkall();
stb__malloc_wait = stb__malloc_next_wait;
}
#endif
}
#ifdef STB_MALLOC_WRAPPER_PAGED
#define STB__WINDOWS_PAGE (1 << 12)
#ifndef _WINDOWS_
STB_EXTERN __declspec(dllimport) void * __stdcall VirtualAlloc(void *p, unsigned long size, unsigned long type, unsigned long protect);
STB_EXTERN __declspec(dllimport) int __stdcall VirtualFree(void *p, unsigned long size, unsigned long freetype);
#endif
#endif
static void *stb__malloc_final(size_t sz)
{
#ifdef STB_MALLOC_WRAPPER_PAGED
size_t aligned = (sz + STB__WINDOWS_PAGE - 1) & ~(STB__WINDOWS_PAGE-1);
char *p = VirtualAlloc(NULL, aligned + STB__WINDOWS_PAGE, 0x2000, 0x04); // RESERVE, READWRITE
if (p == NULL) return p;
VirtualAlloc(p, aligned, 0x1000, 0x04); // COMMIT, READWRITE
return p;
#else
return malloc(sz);
#endif
}
static void stb__free_final(void *p)
{
#ifdef STB_MALLOC_WRAPPER_PAGED
VirtualFree(p, 0, 0x8000); // RELEASE
#else
free(p);
#endif
}
int stb__malloc_failure;
#ifdef STB_MALLOC_WRAPPER_PAGED
static void *stb__realloc_final(void *p, size_t sz, size_t old_sz)
{
void *q = stb__malloc_final(sz);
if (q == NULL)
return ++stb__malloc_failure, q;
// @TODO: deal with p being smaller!
memcpy(q, p, sz < old_sz ? sz : old_sz);
stb__free_final(p);
return q;
}
#endif
void stb__free(void *p, char *file, int line)
{
stb_mcheck_all();
if (!p) return;
#ifdef STB_MALLOC_WRAPPER_DEBUG
stb_mcheck(p);
#endif
stb_wrapper_free(p,file,line);
#ifdef STB_MALLOC_WRAPPER_DEBUG
p = STB__ptr(p,-STB__BIAS);
* (unsigned int *) STB__ptr(p,0) = 0xdeadbeef;
* (unsigned int *) STB__ptr(p,4) = 0xdeadbeef;
* (unsigned int *) STB__ptr(p,8) = 0xdeadbeef;
* (unsigned int *) STB__ptr(p,12) = 0xdeadbeef;
#endif
stb__free_final(p);
}
void * stb__malloc(size_t sz, char *file, int line)
{
void *p;
stb_mcheck_all();
if (sz == 0) return NULL;
p = stb__malloc_final(STB__FIXSIZE(sz));
if (p == NULL) p = stb__malloc_final(STB__FIXSIZE(sz));
if (p == NULL) p = stb__malloc_final(STB__FIXSIZE(sz));
if (p == NULL) {
++stb__malloc_failure;
#ifdef STB_MALLOC_WRAPPER_DEBUG
stb_malloc_checkall();
#endif
return p;
}
#ifdef STB_MALLOC_WRAPPER_DEBUG
* (int *) STB__ptr(p,0) = STB__FIXSIZE(sz);
* (unsigned int *) STB__ptr(p,4) = STB__SIG;
* (unsigned int *) STB__ptr(p,8) = STB__SIG;
* (unsigned int *) STB__ptr(p,12) = STB__SIG;
* (unsigned int *) STB__ptr(p,STB__FIXSIZE(sz)-4) = STB__SIG+1;
* (unsigned int *) STB__ptr(p,STB__FIXSIZE(sz)-8) = STB__SIG+1;
* (unsigned int *) STB__ptr(p,STB__FIXSIZE(sz)-12) = STB__SIG+1;
* (unsigned int *) STB__ptr(p,STB__FIXSIZE(sz)-16) = STB__SIG+1;
p = STB__ptr(p, STB__BIAS);
#endif
stb_wrapper_malloc(p,sz,file,line);
return p;
}
void * stb__realloc(void *p, size_t sz, char *file, int line)
{
void *q;
stb_mcheck_all();
if (p == NULL) return stb__malloc(sz,file,line);
if (sz == 0 ) { stb__free(p,file,line); return NULL; }
#ifdef STB_MALLOC_WRAPPER_DEBUG
stb_mcheck(p);
p = STB__ptr(p,-STB__BIAS);
#endif
#ifdef STB_MALLOC_WRAPPER_PAGED
{
size_t n = stb_wrapper_allocsize(STB__ptr(p,STB__BIAS));
if (!n)
stb_wrapper_check(STB__ptr(p,STB__BIAS));
q = stb__realloc_final(p, STB__FIXSIZE(sz), STB__FIXSIZE(n));
}
#else
q = realloc(p, STB__FIXSIZE(sz));
#endif
if (q == NULL)
return ++stb__malloc_failure, q;
#ifdef STB_MALLOC_WRAPPER_DEBUG
* (int *) STB__ptr(q,0) = STB__FIXSIZE(sz);
* (unsigned int *) STB__ptr(q,4) = STB__SIG;
* (unsigned int *) STB__ptr(q,8) = STB__SIG;
* (unsigned int *) STB__ptr(q,12) = STB__SIG;
* (unsigned int *) STB__ptr(q,STB__FIXSIZE(sz)-4) = STB__SIG+1;
* (unsigned int *) STB__ptr(q,STB__FIXSIZE(sz)-8) = STB__SIG+1;
* (unsigned int *) STB__ptr(q,STB__FIXSIZE(sz)-12) = STB__SIG+1;
* (unsigned int *) STB__ptr(q,STB__FIXSIZE(sz)-16) = STB__SIG+1;
q = STB__ptr(q, STB__BIAS);
p = STB__ptr(p, STB__BIAS);
#endif
stb_wrapper_realloc(p,q,sz,file,line);
return q;
}
STB_EXTERN int stb_log2_ceil(size_t);
static void *stb__calloc(size_t n, size_t sz, char *file, int line)
{
void *q;
stb_mcheck_all();
if (n == 0 || sz == 0) return NULL;
   if (stb_log2_ceil(n) + stb_log2_ceil(sz) >= 32) return NULL; // n*sz could overflow 32 bits
q = stb__malloc(n*sz, file, line);
if (q) memset(q, 0, n*sz);
return q;
}
char * stb__strdup(char *s, char *file, int line)
{
char *p;
stb_mcheck_all();
p = stb__malloc(strlen(s)+1, file, line);
if (!p) return p;
stb_p_strcpy_s(p, strlen(s)+1, s);
return p;
}
#endif // STB_DEFINE
#ifdef STB_FASTMALLOC
#undef malloc
#undef realloc
#undef free
#undef strdup
#undef calloc
#endif
// include everything that might define these, BEFORE making macros
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#define malloc(s) stb__malloc ( s, __FILE__, __LINE__)
#define realloc(p,s) stb__realloc(p,s, __FILE__, __LINE__)
#define calloc(n,s) stb__calloc (n,s, __FILE__, __LINE__)
#define free(p) stb__free (p, __FILE__, __LINE__)
#define strdup(p) stb__strdup (p, __FILE__, __LINE__)
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Windows pretty display
//
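// stbprint() is printf() plus a small inline color-markup language which,
// judging from stb__print() below, the Windows build renders with console
// text attributes:
//    {!text} light red   {@text} light blue   {$text} light green
//    {#Htext} custom 4-bit attribute H (one hex digit)   {text} dark grey
//    "{{" is intended as a literal-brace escape; ending a span with "}}"
//    instead of "}" skips the column re-alignment padding
// Non-Windows builds fall back to plain vprintf(), so markup prints verbatim.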
STB_EXTERN void stbprint(const char *fmt, ...);
STB_EXTERN char *stb_sprintf(const char *fmt, ...);
STB_EXTERN char *stb_mprintf(const char *fmt, ...);
STB_EXTERN int stb_snprintf(char *s, size_t n, const char *fmt, ...);
STB_EXTERN int stb_vsnprintf(char *s, size_t n, const char *fmt, va_list v);
#ifdef STB_DEFINE
int stb_vsnprintf(char *s, size_t n, const char *fmt, va_list v)
{
int res;
#ifdef _WIN32
#ifdef __STDC_WANT_SECURE_LIB__
res = _vsnprintf_s(s, n, _TRUNCATE, fmt, v);
#else
res = stb_p_vsnprintf(s,n,fmt,v);
#endif
#else
res = vsnprintf(s,n,fmt,v);
#endif
if (n) s[n-1] = 0;
// Unix returns length output would require, Windows returns negative when truncated.
return (res >= (int) n || res < 0) ? -1 : res;
}
int stb_snprintf(char *s, size_t n, const char *fmt, ...)
{
int res;
va_list v;
va_start(v,fmt);
res = stb_vsnprintf(s, n, fmt, v);
va_end(v);
return res;
}
char *stb_sprintf(const char *fmt, ...)
{
static char buffer[1024];
va_list v;
va_start(v,fmt);
stb_vsnprintf(buffer,1024,fmt,v);
va_end(v);
return buffer;
}
char *stb_mprintf(const char *fmt, ...)
{
static char buffer[1024];
va_list v;
va_start(v,fmt);
stb_vsnprintf(buffer,1024,fmt,v);
va_end(v);
return stb_p_strdup(buffer);
}
#ifdef _WIN32
#ifndef _WINDOWS_
STB_EXTERN __declspec(dllimport) int __stdcall WriteConsoleA(void *, const void *, unsigned int, unsigned int *, void *);
STB_EXTERN __declspec(dllimport) void * __stdcall GetStdHandle(unsigned int);
STB_EXTERN __declspec(dllimport) int __stdcall SetConsoleTextAttribute(void *, unsigned short);
#endif
static void stb__print_one(void *handle, char *s, ptrdiff_t len)
{
if (len)
if (0==WriteConsoleA(handle, s, (unsigned) len, NULL,NULL))
         // if it fails, output may be redirected and we would fall back to
         // fwrite() -- but WriteConsoleA spuriously reports failure on
         // Windows 7 and later, so the fallback stays disabled
         {}//fwrite(s, 1, (unsigned) len, stdout);
}
static void stb__print(char *s)
{
void *handle = GetStdHandle((unsigned int) -11); // STD_OUTPUT_HANDLE
int pad=0; // number of padding characters to add
char *t = s;
while (*s) {
int lpad;
while (*s && *s != '{') {
if (pad) {
if (*s == '\r' || *s == '\n')
pad = 0;
else if (s[0] == ' ' && s[1] == ' ') {
stb__print_one(handle, t, s-t);
t = s;
while (pad) {
stb__print_one(handle, t, 1);
--pad;
}
}
}
++s;
}
if (!*s) break;
stb__print_one(handle, t, s-t);
if (s[1] == '{') {
++s;
continue;
}
if (s[1] == '#') {
t = s+3;
if (isxdigit(s[2]))
if (isdigit(s[2]))
SetConsoleTextAttribute(handle, s[2] - '0');
else
SetConsoleTextAttribute(handle, tolower(s[2]) - 'a' + 10);
else {
SetConsoleTextAttribute(handle, 0x0f);
t=s+2;
}
} else if (s[1] == '!') {
SetConsoleTextAttribute(handle, 0x0c);
t = s+2;
} else if (s[1] == '@') {
SetConsoleTextAttribute(handle, 0x09);
t = s+2;
} else if (s[1] == '$') {
SetConsoleTextAttribute(handle, 0x0a);
t = s+2;
} else {
SetConsoleTextAttribute(handle, 0x08); // 0,7,8,15 => shades of grey
t = s+1;
}
lpad = (int) (t-s);
s = t;
while (*s && *s != '}') ++s;
if (!*s) break;
stb__print_one(handle, t, s-t);
if (s[1] == '}') {
t = s+2;
} else {
pad += 1+lpad;
t = s+1;
}
s=t;
SetConsoleTextAttribute(handle, 0x07);
}
stb__print_one(handle, t, s-t);
SetConsoleTextAttribute(handle, 0x07);
}
void stbprint(const char *fmt, ...)
{
int res;
char buffer[1024];
char *tbuf = buffer;
va_list v;
va_start(v,fmt);
res = stb_vsnprintf(buffer, sizeof(buffer), fmt, v);
va_end(v);
if (res < 0) {
tbuf = (char *) malloc(16384);
va_start(v,fmt);
res = stb_vsnprintf(tbuf,16384, fmt, v);
va_end(v);
tbuf[16383] = 0;
}
stb__print(tbuf);
if (tbuf != buffer)
free(tbuf);
}
#else // _WIN32
void stbprint(const char *fmt, ...)
{
va_list v;
va_start(v,fmt);
vprintf(fmt,v);
va_end(v);
}
#endif // _WIN32
#endif // STB_DEFINE
//////////////////////////////////////////////////////////////////////////////
//
// Windows UTF8 filename handling
//
// Windows stupidly treats 8-bit filenames as some dopey code page,
// rather than utf-8. If we want to use utf8 filenames, we have to
// convert them to WCHAR explicitly and call WCHAR versions of the
// file functions. So, ok, we do.
#ifdef _WIN32
#define stb__fopen(x,y) stb_p_wfopen((const wchar_t *)stb__from_utf8(x), (const wchar_t *)stb__from_utf8_alt(y))
#define stb__windows(x,y) x
#else
#define stb__fopen(x,y) stb_p_fopen(x,y)
#define stb__windows(x,y) y
#endif
typedef unsigned short stb__wchar;
STB_EXTERN stb__wchar * stb_from_utf8(stb__wchar *buffer, const char *str, int n);
STB_EXTERN char * stb_to_utf8 (char *buffer, const stb__wchar *str, int n);
STB_EXTERN stb__wchar *stb__from_utf8(const char *str);
STB_EXTERN stb__wchar *stb__from_utf8_alt(const char *str);
STB_EXTERN char *stb__to_utf8(const stb__wchar *str);
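// Round-trip sketch (hypothetical caller; both converters return NULL on
// malformed input or when the n-element output buffer would overflow):
//    stb__wchar wide[4096];
//    char       back[4096];
//    if (stb_from_utf8(wide, "caf\xc3\xa9.txt", 4096))    // "café.txt"
//       stb_to_utf8(back, wide, 4096);                    // round-trips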
#ifdef STB_DEFINE
stb__wchar * stb_from_utf8(stb__wchar *buffer, const char *ostr, int n)
{
unsigned char *str = (unsigned char *) ostr;
stb_uint32 c;
int i=0;
--n;
while (*str) {
if (i >= n)
return NULL;
if (!(*str & 0x80))
buffer[i++] = *str++;
else if ((*str & 0xe0) == 0xc0) {
if (*str < 0xc2) return NULL;
c = (*str++ & 0x1f) << 6;
if ((*str & 0xc0) != 0x80) return NULL;
buffer[i++] = c + (*str++ & 0x3f);
} else if ((*str & 0xf0) == 0xe0) {
if (*str == 0xe0 && (str[1] < 0xa0 || str[1] > 0xbf)) return NULL;
if (*str == 0xed && str[1] > 0x9f) return NULL; // str[1] < 0x80 is checked below
c = (*str++ & 0x0f) << 12;
if ((*str & 0xc0) != 0x80) return NULL;
c += (*str++ & 0x3f) << 6;
if ((*str & 0xc0) != 0x80) return NULL;
buffer[i++] = c + (*str++ & 0x3f);
} else if ((*str & 0xf8) == 0xf0) {
if (*str > 0xf4) return NULL;
if (*str == 0xf0 && (str[1] < 0x90 || str[1] > 0xbf)) return NULL;
if (*str == 0xf4 && str[1] > 0x8f) return NULL; // str[1] < 0x80 is checked below
c = (*str++ & 0x07) << 18;
if ((*str & 0xc0) != 0x80) return NULL;
c += (*str++ & 0x3f) << 12;
if ((*str & 0xc0) != 0x80) return NULL;
c += (*str++ & 0x3f) << 6;
if ((*str & 0xc0) != 0x80) return NULL;
c += (*str++ & 0x3f);
// utf-8 encodings of values used in surrogate pairs are invalid
if ((c & 0xFFFFF800) == 0xD800) return NULL;
if (c >= 0x10000) {
c -= 0x10000;
if (i + 2 > n) return NULL;
buffer[i++] = 0xD800 | (0x3ff & (c >> 10));
buffer[i++] = 0xDC00 | (0x3ff & (c ));
}
} else
return NULL;
}
buffer[i] = 0;
return buffer;
}
char * stb_to_utf8(char *buffer, const stb__wchar *str, int n)
{
int i=0;
--n;
while (*str) {
if (*str < 0x80) {
if (i+1 > n) return NULL;
buffer[i++] = (char) *str++;
} else if (*str < 0x800) {
if (i+2 > n) return NULL;
buffer[i++] = 0xc0 + (*str >> 6);
buffer[i++] = 0x80 + (*str & 0x3f);
str += 1;
} else if (*str >= 0xd800 && *str < 0xdc00) {
stb_uint32 c;
if (i+4 > n) return NULL;
c = ((str[0] - 0xd800) << 10) + ((str[1]) - 0xdc00) + 0x10000;
buffer[i++] = 0xf0 + (c >> 18);
buffer[i++] = 0x80 + ((c >> 12) & 0x3f);
buffer[i++] = 0x80 + ((c >> 6) & 0x3f);
buffer[i++] = 0x80 + ((c ) & 0x3f);
str += 2;
} else if (*str >= 0xdc00 && *str < 0xe000) {
return NULL;
} else {
if (i+3 > n) return NULL;
buffer[i++] = 0xe0 + (*str >> 12);
buffer[i++] = 0x80 + ((*str >> 6) & 0x3f);
buffer[i++] = 0x80 + ((*str ) & 0x3f);
str += 1;
}
}
buffer[i] = 0;
return buffer;
}
stb__wchar *stb__from_utf8(const char *str)
{
static stb__wchar buffer[4096];
return stb_from_utf8(buffer, str, 4096);
}
stb__wchar *stb__from_utf8_alt(const char *str)
{
static stb__wchar buffer[4096];
return stb_from_utf8(buffer, str, 4096);
}
char *stb__to_utf8(const stb__wchar *str)
{
static char buffer[4096];
return stb_to_utf8(buffer, str, 4096);
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Miscellany
//
STB_EXTERN void stb_fatal(const char *fmt, ...);
STB_EXTERN void stb_(char *fmt, ...);
STB_EXTERN void stb_append_to_file(char *file, char *fmt, ...);
STB_EXTERN void stb_log(int active);
STB_EXTERN void stb_log_fileline(int active);
STB_EXTERN void stb_log_name(char *filename);
STB_EXTERN void stb_swap(void *p, void *q, size_t sz);
STB_EXTERN void *stb_copy(void *p, size_t sz);
STB_EXTERN void stb_pointer_array_free(void *p, int len);
STB_EXTERN void **stb_array_block_alloc(int count, int blocksize);
#define stb_arrcount(x) (sizeof(x)/sizeof((x)[0]))
STB_EXTERN int stb__record_fileline(const char *f, int n);
#ifdef STB_DEFINE
static char *stb__file;
static int stb__line;
int stb__record_fileline(const char *f, int n)
{
stb__file = (char*) f;
stb__line = n;
return 0;
}
void stb_fatal(const char *s, ...)
{
va_list a;
if (stb__file)
fprintf(stderr, "[%s:%d] ", stb__file, stb__line);
va_start(a,s);
fputs("Fatal error: ", stderr);
vfprintf(stderr, s, a);
va_end(a);
fputs("\n", stderr);
#ifdef STB_DEBUG
#ifdef _MSC_VER
#ifndef STB_PTR64
__asm int 3; // trap to debugger!
#else
__debugbreak();
#endif
#else
__builtin_trap();
#endif
#endif
exit(1);
}
static int stb__log_active=1, stb__log_fileline=1;
void stb_log(int active)
{
stb__log_active = active;
}
void stb_log_fileline(int active)
{
stb__log_fileline = active;
}
#ifdef STB_NO_STB_STRINGS
const char *stb__log_filename = "temp.log";
#else
const char *stb__log_filename = "stb.log";
#endif
void stb_log_name(char *s)
{
stb__log_filename = s;
}
void stb_(char *s, ...)
{
if (stb__log_active) {
FILE *f = stb_p_fopen(stb__log_filename, "a");
if (f) {
va_list a;
if (stb__log_fileline && stb__file)
fprintf(f, "[%s:%4d] ", stb__file, stb__line);
va_start(a,s);
vfprintf(f, s, a);
va_end(a);
fputs("\n", f);
fclose(f);
}
}
}
void stb_append_to_file(char *filename, char *s, ...)
{
FILE *f = stb_p_fopen(filename, "a");
if (f) {
va_list a;
va_start(a,s);
vfprintf(f, s, a);
va_end(a);
fputs("\n", f);
fclose(f);
}
}
typedef struct { char d[4]; } stb__4;
typedef struct { char d[8]; } stb__8;
// optimize the small cases, though you shouldn't be calling this for those!
void stb_swap(void *p, void *q, size_t sz)
{
char buffer[256];
if (p == q) return;
if (sz == 4) {
stb__4 temp = * ( stb__4 *) p;
* (stb__4 *) p = * ( stb__4 *) q;
* (stb__4 *) q = temp;
return;
} else if (sz == 8) {
stb__8 temp = * ( stb__8 *) p;
* (stb__8 *) p = * ( stb__8 *) q;
* (stb__8 *) q = temp;
return;
}
while (sz > sizeof(buffer)) {
stb_swap(p, q, sizeof(buffer));
p = (char *) p + sizeof(buffer);
q = (char *) q + sizeof(buffer);
sz -= sizeof(buffer);
}
memcpy(buffer, p , sz);
memcpy(p , q , sz);
memcpy(q , buffer, sz);
}
void *stb_copy(void *p, size_t sz)
{
void *q = malloc(sz);
memcpy(q, p, sz);
return q;
}
void stb_pointer_array_free(void *q, int len)
{
void **p = (void **) q;
int i;
for (i=0; i < len; ++i)
free(p[i]);
}
void **stb_array_block_alloc(int count, int blocksize)
{
int i;
char *p = (char *) malloc(sizeof(void *) * count + count * blocksize);
void **q;
if (p == NULL) return NULL;
q = (void **) p;
p += sizeof(void *) * count;
for (i=0; i < count; ++i)
q[i] = p + i * blocksize;
return q;
}
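// e.g. (hypothetical caller) one malloc backing 10 rows of 20 ints each,
// all released with the single free(rows); blocksize should keep each
// block suitably aligned after the pointer table:
//    int **rows = (int **) stb_array_block_alloc(10, 20 * sizeof(int));
//    if (rows) { rows[3][7] = 42; free(rows); }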
#endif
#ifdef STB_DEBUG
// tricky hack to allow recording FILE,LINE even in varargs functions
#define STB__RECORD_FILE(x) (stb__record_fileline(__FILE__, __LINE__),(x))
#define stb_log STB__RECORD_FILE(stb_log)
#define stb_ STB__RECORD_FILE(stb_)
#ifndef STB_FATAL_CLEAN
#define stb_fatal STB__RECORD_FILE(stb_fatal)
#endif
#define STB__DEBUG(x) x
#else
#define STB__DEBUG(x)
#endif
//////////////////////////////////////////////////////////////////////////////
//
// stb_temp
//
#define stb_temp(block, sz) stb__temp(block, sizeof(block), (sz))
STB_EXTERN void * stb__temp(void *b, int b_sz, int want_sz);
STB_EXTERN void stb_tempfree(void *block, void *ptr);
#ifdef STB_DEFINE
void * stb__temp(void *b, int b_sz, int want_sz)
{
if (b_sz >= want_sz)
return b;
else
return malloc(want_sz);
}
void stb_tempfree(void *b, void *p)
{
if (p != b)
free(p);
}
#endif
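// Typical stb_temp pattern (hypothetical caller): use the stack buffer when
// it is big enough, silently fall back to malloc otherwise, and free only
// in the heap case:
//    char scratch[256];
//    char *p = (char *) stb_temp(scratch, needed);
//    /* ... use p[0..needed-1] ... */
//    stb_tempfree(scratch, p);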
//////////////////////////////////////////////////////////////////////////////
//
// math/sampling operations
//
#define stb_lerp(t,a,b) ( (a) + (t) * (float) ((b)-(a)) )
#define stb_unlerp(t,a,b) ( ((t) - (a)) / (float) ((b) - (a)) )
#define stb_clamp(x,xmin,xmax) ((x) < (xmin) ? (xmin) : (x) > (xmax) ? (xmax) : (x))
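// e.g. stb_lerp(0.25f, 10, 20) == 12.5f and stb_unlerp(12.5f, 10, 20) == 0.25f
// (unlerp inverts lerp over [a,b]); stb_clamp(5, 0, 3) == 3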
STB_EXTERN void stb_newell_normal(float *normal, int num_vert, float **vert, int normalize);
STB_EXTERN int stb_box_face_vertex_axis_side(int face_number, int vertex_number, int axis);
STB_EXTERN void stb_linear_controller(float *curpos, float target_pos, float acc, float deacc, float dt);
STB_EXTERN int stb_float_eq(float x, float y, float delta, int max_ulps);
STB_EXTERN int stb_is_prime(unsigned int m);
STB_EXTERN unsigned int stb_power_of_two_nearest_prime(int n);
STB_EXTERN float stb_smoothstep(float t);
STB_EXTERN float stb_cubic_bezier_1d(float t, float p0, float p1, float p2, float p3);
STB_EXTERN double stb_linear_remap(double x, double a, double b,
double c, double d);
#ifdef STB_DEFINE
float stb_smoothstep(float t)
{
return (3 - 2*t)*(t*t);
}
float stb_cubic_bezier_1d(float t, float p0, float p1, float p2, float p3)
{
float it = 1-t;
return it*it*it*p0 + 3*it*it*t*p1 + 3*it*t*t*p2 + t*t*t*p3;
}
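// e.g. stb_smoothstep(0.5f) == 0.5f, with zero derivative at t == 0 and at
// t == 1; stb_cubic_bezier_1d() is the scalar Bernstein-form cubic, so
// stb_cubic_bezier_1d(t, p, p, p, p) == p for every t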
void stb_newell_normal(float *normal, int num_vert, float **vert, int normalize)
{
int i,j;
float p;
normal[0] = normal[1] = normal[2] = 0;
for (i=num_vert-1,j=0; j < num_vert; i=j++) {
float *u = vert[i];
float *v = vert[j];
normal[0] += (u[1] - v[1]) * (u[2] + v[2]);
normal[1] += (u[2] - v[2]) * (u[0] + v[0]);
normal[2] += (u[0] - v[0]) * (u[1] + v[1]);
}
if (normalize) {
p = normal[0]*normal[0] + normal[1]*normal[1] + normal[2]*normal[2];
p = (float) (1.0 / sqrt(p));
normal[0] *= p;
normal[1] *= p;
normal[2] *= p;
}
}
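// e.g. (hypothetical data) a CCW unit quad in the xy-plane yields +z:
//    float a[3]={0,0,0}, b[3]={1,0,0}, c[3]={1,1,0}, d[3]={0,1,0};
//    float *quad[4] = { a, b, c, d }, n[3];
//    stb_newell_normal(n, 4, quad, 1);   // n == {0,0,1}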
int stb_box_face_vertex_axis_side(int face_number, int vertex_number, int axis)
{
static int box_vertices[6][4][3] =
{
{ { 1,1,1 }, { 1,0,1 }, { 1,0,0 }, { 1,1,0 } },
{ { 0,0,0 }, { 0,0,1 }, { 0,1,1 }, { 0,1,0 } },
{ { 0,0,0 }, { 0,1,0 }, { 1,1,0 }, { 1,0,0 } },
{ { 0,0,0 }, { 1,0,0 }, { 1,0,1 }, { 0,0,1 } },
{ { 1,1,1 }, { 0,1,1 }, { 0,0,1 }, { 1,0,1 } },
{ { 1,1,1 }, { 1,1,0 }, { 0,1,0 }, { 0,1,1 } },
};
assert(face_number >= 0 && face_number < 6);
assert(vertex_number >= 0 && vertex_number < 4);
assert(axis >= 0 && axis < 3);
return box_vertices[face_number][vertex_number][axis];
}
void stb_linear_controller(float *curpos, float target_pos, float acc, float deacc, float dt)
{
float sign = 1, p, cp = *curpos;
if (cp == target_pos) return;
if (target_pos < cp) {
target_pos = -target_pos;
cp = -cp;
sign = -1;
}
// first decelerate
if (cp < 0) {
p = cp + deacc * dt;
if (p > 0) {
p = 0;
dt = dt - cp / deacc;
if (dt < 0) dt = 0;
} else {
dt = 0;
}
cp = p;
}
// now accelerate
p = cp + acc*dt;
if (p > target_pos) p = target_pos;
*curpos = p * sign;
// @TODO: testing
}
float stb_quadratic_controller(float target_pos, float curpos, float maxvel, float maxacc, float dt, float *curvel)
{
return 0; // @TODO
}
int stb_float_eq(float x, float y, float delta, int max_ulps)
{
if (fabs(x-y) <= delta) return 1;
// reinterpret the bits as ints to measure distance in ulps
// (assumes IEEE-754 floats of the same sign)
if (abs(*(int *)&x - *(int *)&y) <= max_ulps) return 1;
return 0;
}
int stb_is_prime(unsigned int m)
{
unsigned int i,j;
if (m < 2) return 0;
if (m == 2) return 1;
if (!(m & 1)) return 0;
if (m % 3 == 0) return (m == 3);
for (i=5; (j=i*i), j <= m && j > i; i += 6) { // 'j > i' guards against overflow of i*i
if (m % i == 0) return 0;
if (m % (i+2) == 0) return 0;
}
return 1;
}
unsigned int stb_power_of_two_nearest_prime(int n)
{
// tab[] stores small deltas; the full values are expanded into prime[]
// on first use (expanding in place would overflow the signed char table)
static signed char tab[32] = { 0,0,0,0,1,0,-1,0,1,-1,-1,3,-1,0,-1,2,1,
0,2,0,-1,-4,-1,5,-1,18,-2,15,2,-1,2,0 };
static unsigned int prime[32];
if (!prime[0]) {
int i;
for (i=0; i < 32; ++i)
prime[i] = (1u << i) + 2*tab[i] - 1;
prime[1] = 2;
prime[0] = 1;
}
if (n >= 32) return 0xfffffffb;
return prime[n];
}
double stb_linear_remap(double x, double x_min, double x_max,
double out_min, double out_max)
{
return stb_lerp(stb_unlerp(x,x_min,x_max),out_min,out_max);
}
#endif
// create a macro so it's faster, but you can get at the function pointer
#define stb_linear_remap(t,a,b,c,d) stb_lerp(stb_unlerp(t,a,b),c,d)
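// Illustrative example: stb_linear_remap maps x from [a,b] onto [c,d], so
// stb_linear_remap(5, 0,10, 100,200) == 150.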
//////////////////////////////////////////////////////////////////////////////
//
// bit operations
//
#define stb_big32(c) (((c)[0]<<24) + (c)[1]*65536 + (c)[2]*256 + (c)[3])
#define stb_little32(c) (((c)[3]<<24) + (c)[2]*65536 + (c)[1]*256 + (c)[0])
#define stb_big16(c) ((c)[0]*256 + (c)[1])
#define stb_little16(c) ((c)[1]*256 + (c)[0])
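// Illustrative example: decode a length field from a byte stream regardless
// of host endianness.
//
//    unsigned char buf[4] = { 0x00, 0x00, 0x01, 0x00 };
//    unsigned int n = stb_big32(buf);     // 0x00000100 == 256
//    unsigned int m = stb_little32(buf);  // 0x00010000 == 65536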
STB_EXTERN int stb_bitcount(unsigned int a);
STB_EXTERN unsigned int stb_bitreverse8(unsigned char n);
STB_EXTERN unsigned int stb_bitreverse(unsigned int n);
STB_EXTERN int stb_is_pow2(size_t);
STB_EXTERN int stb_log2_ceil(size_t);
STB_EXTERN int stb_log2_floor(size_t);
STB_EXTERN int stb_lowbit8(unsigned int n);
STB_EXTERN int stb_highbit8(unsigned int n);
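// Illustrative examples:
//
//    stb_is_pow2(64)   == 1   // note: also true for 0
//    stb_log2_floor(5) == 2
//    stb_log2_ceil(5)  == 3
//    stb_log2_floor(0) == -1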
#ifdef STB_DEFINE
int stb_bitcount(unsigned int a)
{
a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2
a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4
a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits
a = (a + (a >> 8)); // max 16 per 8 bits
a = (a + (a >> 16)); // max 32 per 8 bits
return a & 0xff;
}
unsigned int stb_bitreverse8(unsigned char n)
{
n = ((n & 0xAA) >> 1) + ((n & 0x55) << 1);
n = ((n & 0xCC) >> 2) + ((n & 0x33) << 2);
return (unsigned char) ((n >> 4) + (n << 4));
}
unsigned int stb_bitreverse(unsigned int n)
{
n = ((n & 0xAAAAAAAA) >> 1) | ((n & 0x55555555) << 1);
n = ((n & 0xCCCCCCCC) >> 2) | ((n & 0x33333333) << 2);
n = ((n & 0xF0F0F0F0) >> 4) | ((n & 0x0F0F0F0F) << 4);
n = ((n & 0xFF00FF00) >> 8) | ((n & 0x00FF00FF) << 8);
return (n >> 16) | (n << 16);
}
int stb_is_pow2(size_t n)
{
return (n & (n-1)) == 0;
}
// tricky use of 4-bit table to identify 5 bit positions (note the '-1')
// 3-bit table would require another tree level; 5-bit table wouldn't save one
#if defined(_WIN32) && !defined(__MINGW32__)
#pragma warning(push)
#pragma warning(disable: 4035) // disable warning about no return value
int stb_log2_floor(size_t n)
{
#if _MSC_VER > 1700
unsigned long i;
// _BitScanReverse returns nonzero iff any bit was set; the index goes in 'i'
#ifdef STB_PTR64
if (_BitScanReverse64(&i, n))
return (int) i;
#else
if (_BitScanReverse(&i, n))
return (int) i;
#endif
return -1; // n was 0, no bits set
#else
__asm {
bsr eax,n
jnz done
mov eax,-1
}
done:;
#endif
}
#pragma warning(pop)
#else
int stb_log2_floor(size_t n)
{
static signed char log2_4[16] = { -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3 };
#ifdef STB_PTR64
if (n >= ((size_t) 1u << 32))
return stb_log2_floor(n >> 32);
#endif
// 2 compares if n < 16, 3 compares otherwise
if (n < (1U << 14))
if (n < (1U << 4)) return 0 + log2_4[n ];
else if (n < (1U << 9)) return 5 + log2_4[n >> 5];
else return 10 + log2_4[n >> 10];
else if (n < (1U << 24))
if (n < (1U << 19)) return 15 + log2_4[n >> 15];
else return 20 + log2_4[n >> 20];
else if (n < (1U << 29)) return 25 + log2_4[n >> 25];
else return 30 + log2_4[n >> 30];
}
#endif
// define ceil from floor
int stb_log2_ceil(size_t n)
{
if (stb_is_pow2(n)) return stb_log2_floor(n);
else return 1 + stb_log2_floor(n);
}
int stb_highbit8(unsigned int n)
{
return stb_log2_ceil(n&255);
}
int stb_lowbit8(unsigned int n)
{
static signed char lowbit4[16] = { -1,0,1,0, 2,0,1,0, 3,0,1,0, 2,0,1,0 };
int k = lowbit4[n & 15];
if (k >= 0) return k;
k = lowbit4[(n >> 4) & 15];
if (k >= 0) return k+4;
return k;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// qsort Compare Routines
//
#ifdef _WIN32
#define stb_stricmp(a,b) stb_p_stricmp(a,b)
#define stb_strnicmp(a,b,n) stb_p_strnicmp(a,b,n)
#else
#define stb_stricmp(a,b) strcasecmp(a,b)
#define stb_strnicmp(a,b,n) strncasecmp(a,b,n)
#endif
STB_EXTERN int (*stb_intcmp(int offset))(const void *a, const void *b);
STB_EXTERN int (*stb_intcmprev(int offset))(const void *a, const void *b);
STB_EXTERN int (*stb_qsort_strcmp(int offset))(const void *a, const void *b);
STB_EXTERN int (*stb_qsort_stricmp(int offset))(const void *a, const void *b);
STB_EXTERN int (*stb_floatcmp(int offset))(const void *a, const void *b);
STB_EXTERN int (*stb_doublecmp(int offset))(const void *a, const void *b);
STB_EXTERN int (*stb_charcmp(int offset))(const void *a, const void *b);
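// Illustrative usage: these return a comparison function configured by a
// field offset (the offset is stored in a static, so this is not thread-safe).
// 'Item' is a hypothetical struct; offsetof requires <stddef.h>.
//
//    typedef struct { int key; float weight; } Item;
//    Item items[100];
//    ...
//    qsort(items, 100, sizeof(items[0]), stb_intcmp(offsetof(Item, key)));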
#ifdef STB_DEFINE
static int stb__intcmpoffset, stb__ucharcmpoffset, stb__strcmpoffset;
static int stb__floatcmpoffset, stb__doublecmpoffset;
static int stb__memcmpoffset, stb__memcmpsize;
int stb__intcmp(const void *a, const void *b)
{
const int p = *(const int *) ((const char *) a + stb__intcmpoffset);
const int q = *(const int *) ((const char *) b + stb__intcmpoffset);
return p < q ? -1 : p > q;
}
int stb__intcmprev(const void *a, const void *b)
{
const int p = *(const int *) ((const char *) a + stb__intcmpoffset);
const int q = *(const int *) ((const char *) b + stb__intcmpoffset);
return q < p ? -1 : q > p;
}
int stb__ucharcmp(const void *a, const void *b)
{
const int p = *(const unsigned char *) ((const char *) a + stb__ucharcmpoffset);
const int q = *(const unsigned char *) ((const char *) b + stb__ucharcmpoffset);
return p < q ? -1 : p > q;
}
int stb__floatcmp(const void *a, const void *b)
{
const float p = *(const float *) ((const char *) a + stb__floatcmpoffset);
const float q = *(const float *) ((const char *) b + stb__floatcmpoffset);
return p < q ? -1 : p > q;
}
int stb__doublecmp(const void *a, const void *b)
{
const double p = *(const double *) ((const char *) a + stb__doublecmpoffset);
const double q = *(const double *) ((const char *) b + stb__doublecmpoffset);
return p < q ? -1 : p > q;
}
int stb__qsort_strcmp(const void *a, const void *b)
{
const char *p = *(const char **) ((const char *) a + stb__strcmpoffset);
const char *q = *(const char **) ((const char *) b + stb__strcmpoffset);
return strcmp(p,q);
}
int stb__qsort_stricmp(const void *a, const void *b)
{
const char *p = *(const char **) ((const char *) a + stb__strcmpoffset);
const char *q = *(const char **) ((const char *) b + stb__strcmpoffset);
return stb_stricmp(p,q);
}
int stb__memcmp(const void *a, const void *b)
{
return memcmp((char *) a + stb__memcmpoffset, (char *) b + stb__memcmpoffset, stb__memcmpsize);
}
int (*stb_intcmp(int offset))(const void *, const void *)
{
stb__intcmpoffset = offset;
return &stb__intcmp;
}
int (*stb_intcmprev(int offset))(const void *, const void *)
{
stb__intcmpoffset = offset;
return &stb__intcmprev;
}
int (*stb_ucharcmp(int offset))(const void *, const void *)
{
stb__ucharcmpoffset = offset;
return &stb__ucharcmp;
}
int (*stb_qsort_strcmp(int offset))(const void *, const void *)
{
stb__strcmpoffset = offset;
return &stb__qsort_strcmp;
}
int (*stb_qsort_stricmp(int offset))(const void *, const void *)
{
stb__strcmpoffset = offset;
return &stb__qsort_stricmp;
}
int (*stb_floatcmp(int offset))(const void *, const void *)
{
stb__floatcmpoffset = offset;
return &stb__floatcmp;
}
int (*stb_doublecmp(int offset))(const void *, const void *)
{
stb__doublecmpoffset = offset;
return &stb__doublecmp;
}
int (*stb_memcmp(int offset, int size))(const void *, const void *)
{
stb__memcmpoffset = offset;
stb__memcmpsize = size;
return &stb__memcmp;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Binary Search Toolkit
//
typedef struct
{
int minval, maxval, guess;
int mode, step;
} stb_search;
STB_EXTERN int stb_search_binary(stb_search *s, int minv, int maxv, int find_smallest);
STB_EXTERN int stb_search_open(stb_search *s, int minv, int find_smallest);
STB_EXTERN int stb_probe(stb_search *s, int compare, int *result); // return 0 when done
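// Illustrative usage, assuming a sorted int array 'values' of length n > 0:
// find the smallest index whose value is >= 'key'. The compare passed to
// stb_probe is <= 0 when the answer is at or before the probe point.
//
//    stb_search s;
//    int probe = stb_search_binary(&s, 0, n-1, 1);
//    while (stb_probe(&s, values[probe] >= key ? 0 : 1, &probe))
//       ;
//    // 'probe' is now the smallest i with values[i] >= key, or n-1 if
//    // no element qualifies -- check values[probe] afterwards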
#ifdef STB_DEFINE
enum
{
STB_probe_binary_smallest,
STB_probe_binary_largest,
STB_probe_open_smallest,
STB_probe_open_largest,
};
static int stb_probe_guess(stb_search *s, int *result)
{
switch(s->mode) {
case STB_probe_binary_largest:
if (s->minval == s->maxval) {
*result = s->minval;
return 0;
}
assert(s->minval < s->maxval);
// if a < b, then a < p <= b
s->guess = s->minval + (((unsigned) s->maxval - s->minval + 1) >> 1);
break;
case STB_probe_binary_smallest:
if (s->minval == s->maxval) {
*result = s->minval;
return 0;
}
assert(s->minval < s->maxval);
// if a < b, then a <= p < b
s->guess = s->minval + (((unsigned) s->maxval - s->minval) >> 1);
break;
case STB_probe_open_smallest:
case STB_probe_open_largest:
s->guess = s->maxval; // guess the current maxval
break;
}
*result = s->guess;
return 1;
}
int stb_probe(stb_search *s, int compare, int *result)
{
switch(s->mode) {
case STB_probe_open_smallest:
case STB_probe_open_largest: {
if (compare <= 0) {
// then it lies within minval & maxval
if (s->mode == STB_probe_open_smallest)
s->mode = STB_probe_binary_smallest;
else
s->mode = STB_probe_binary_largest;
} else {
// otherwise, we need to probe larger
s->minval = s->maxval + 1;
s->maxval = s->minval + s->step;
s->step += s->step;
}
break;
}
case STB_probe_binary_smallest: {
// if compare < 0, then s->minval <= a < p
// if compare = 0, then s->minval <= a <= p
// if compare > 0, then p < a <= s->maxval
if (compare <= 0)
s->maxval = s->guess;
else
s->minval = s->guess+1;
break;
}
case STB_probe_binary_largest: {
// if compare < 0, then s->minval <= a < p
// if compare = 0, then p <= a <= s->maxval
// if compare > 0, then p < a <= s->maxval
if (compare < 0)
s->maxval = s->guess-1;
else
s->minval = s->guess;
break;
}
}
return stb_probe_guess(s, result);
}
int stb_search_binary(stb_search *s, int minv, int maxv, int find_smallest)
{
int r;
if (maxv < minv) return minv-1;
s->minval = minv;
s->maxval = maxv;
s->mode = find_smallest ? STB_probe_binary_smallest : STB_probe_binary_largest;
stb_probe_guess(s, &r);
return r;
}
int stb_search_open(stb_search *s, int minv, int find_smallest)
{
int r;
s->step = 4;
s->minval = minv;
s->maxval = minv+s->step;
s->mode = find_smallest ? STB_probe_open_smallest : STB_probe_open_largest;
stb_probe_guess(s, &r);
return r;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// String Processing
//
#define stb_prefixi(s,t) (0==stb_strnicmp((s),(t),strlen(t)))
enum stb_splitpath_flag
{
STB_PATH = 1,
STB_FILE = 2,
STB_EXT = 4,
STB_PATH_FILE = STB_PATH + STB_FILE,
STB_FILE_EXT = STB_FILE + STB_EXT,
STB_EXT_NO_PERIOD = 8,
};
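// Illustrative examples, assuming src = "c:/dir/file.txt":
//
//    stb_splitpath(out, src, STB_PATH)      // "c:/dir/"
//    stb_splitpath(out, src, STB_FILE)      // "file"
//    stb_splitpath(out, src, STB_FILE_EXT)  // "file.txt"
//    stb_splitpath(out, src, STB_EXT)       // ".txt"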
STB_EXTERN char * stb_skipwhite(char *s);
STB_EXTERN char * stb_trimwhite(char *s);
STB_EXTERN char * stb_skipnewline(char *s);
STB_EXTERN char * stb_strncpy(char *s, char *t, int n);
STB_EXTERN char * stb_substr(char *t, int n);
STB_EXTERN char * stb_duplower(char *s);
STB_EXTERN void stb_tolower (char *s);
STB_EXTERN char * stb_strchr2 (char *s, char p1, char p2);
STB_EXTERN char * stb_strrchr2(char *s, char p1, char p2);
STB_EXTERN char * stb_strtok(char *output, char *src, char *delimit);
STB_EXTERN char * stb_strtok_keep(char *output, char *src, char *delimit);
STB_EXTERN char * stb_strtok_invert(char *output, char *src, char *allowed);
STB_EXTERN char * stb_dupreplace(char *s, char *find, char *replace);
STB_EXTERN void stb_replaceinplace(char *s, char *find, char *replace);
STB_EXTERN char * stb_splitpath(char *output, char *src, int flag);
STB_EXTERN char * stb_splitpathdup(char *src, int flag);
STB_EXTERN char * stb_replacedir(char *output, char *src, char *dir);
STB_EXTERN char * stb_replaceext(char *output, char *src, char *ext);
STB_EXTERN void stb_fixpath(char *path);
STB_EXTERN char * stb_shorten_path_readable(char *path, int max_len);
STB_EXTERN int stb_suffix (char *s, char *t);
STB_EXTERN int stb_suffixi(char *s, char *t);
STB_EXTERN int stb_prefix (char *s, char *t);
STB_EXTERN char * stb_strichr(char *s, char t);
STB_EXTERN char * stb_stristr(char *s, char *t);
STB_EXTERN int stb_prefix_count(char *s, char *t);
STB_EXTERN const char * stb_plural(int n); // "s" or ""
STB_EXTERN size_t stb_strscpy(char *d, const char *s, size_t n);
STB_EXTERN char **stb_tokens(char *src, char *delimit, int *count);
STB_EXTERN char **stb_tokens_nested(char *src, char *delimit, int *count, char *nest_in, char *nest_out);
STB_EXTERN char **stb_tokens_nested_empty(char *src, char *delimit, int *count, char *nest_in, char *nest_out);
STB_EXTERN char **stb_tokens_allowempty(char *src, char *delimit, int *count);
STB_EXTERN char **stb_tokens_stripwhite(char *src, char *delimit, int *count);
STB_EXTERN char **stb_tokens_withdelim(char *src, char *delimit, int *count);
STB_EXTERN char **stb_tokens_quoted(char *src, char *delimit, int *count);
// with 'quoted', allow delimiters to appear inside quotation marks, and don't
// strip whitespace inside them (and we delete the quotation marks unless they
// appear back to back, in which case they're considered escaped)
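// Illustrative usage: the token pointers and the copied strings live in one
// allocation, so a single free() releases everything.
//
//    int i, n;
//    char **tok = stb_tokens("a,b,,c", ",", &n);   // n == 3: "a","b","c"
//    for (i=0; i < n; ++i)
//       printf("%s\n", tok[i]);
//    free(tok);                                    // one block frees it all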
#ifdef STB_DEFINE
size_t stb_strscpy(char *d, const char *s, size_t n)
{
size_t len = strlen(s);
if (len >= n) {
if (n) d[0] = 0;
return 0;
}
stb_p_strcpy_s(d,n+1,s);
return len + 1;
}
const char *stb_plural(int n)
{
return n == 1 ? "" : "s";
}
int stb_prefix(char *s, char *t)
{
while (*t)
if (*s++ != *t++)
return STB_FALSE;
return STB_TRUE;
}
int stb_prefix_count(char *s, char *t)
{
int c=0;
while (*t) {
if (*s++ != *t++)
break;
++c;
}
return c;
}
int stb_suffix(char *s, char *t)
{
size_t n = strlen(s);
size_t m = strlen(t);
if (m <= n)
return 0 == strcmp(s+n-m, t);
else
return 0;
}
int stb_suffixi(char *s, char *t)
{
size_t n = strlen(s);
size_t m = strlen(t);
if (m <= n)
return 0 == stb_stricmp(s+n-m, t);
else
return 0;
}
// originally I was using this table so that I could create known sentinel
// values--e.g. change whitetable[0] to be true if I was scanning for whitespace,
// and false if I was scanning for nonwhite. I don't appear to be using that
// functionality anymore (I do for tokentable, though), so just replace it
// with isspace()
char *stb_skipwhite(char *s)
{
while (isspace((unsigned char) *s)) ++s;
return s;
}
char *stb_skipnewline(char *s)
{
if (s[0] == '\r' || s[0] == '\n') {
// treat a "\r\n" or "\n\r" pair as a single newline
if (s[0]+s[1] == '\r' + '\n') ++s;
++s;
}
return s;
}
char *stb_trimwhite(char *s)
{
int i,n;
s = stb_skipwhite(s);
n = (int) strlen(s);
for (i=n-1; i >= 0; --i)
if (!isspace((unsigned char) s[i]))
break;
s[i+1] = 0;
return s;
}
char *stb_strncpy(char *s, char *t, int n)
{
stb_p_strncpy_s(s,n+1,t,n);
s[n] = 0;
return s;
}
char *stb_substr(char *t, int n)
{
char *a;
int z = (int) strlen(t);
if (z < n) n = z;
a = (char *) malloc(n+1);
stb_p_strncpy_s(a,n+1,t,n);
a[n] = 0;
return a;
}
char *stb_duplower(char *s)
{
char *p = stb_p_strdup(s), *q = p;
while (*q) {
*q = tolower(*q);
++q;
}
return p;
}
void stb_tolower(char *s)
{
while (*s) {
*s = tolower(*s);
++s;
}
}
char *stb_strchr2(char *s, char x, char y)
{
for(; *s; ++s)
if (*s == x || *s == y)
return s;
return NULL;
}
char *stb_strrchr2(char *s, char x, char y)
{
char *r = NULL;
for(; *s; ++s)
if (*s == x || *s == y)
r = s;
return r;
}
char *stb_strichr(char *s, char t)
{
if (tolower(t) == toupper(t))
return strchr(s,t);
return stb_strchr2(s, (char) tolower(t), (char) toupper(t));
}
char *stb_stristr(char *s, char *t)
{
size_t n = strlen(t);
char *z;
if (n==0) return s;
while ((z = stb_strichr(s, *t)) != NULL) {
if (0==stb_strnicmp(z, t, n))
return z;
s = z+1;
}
return NULL;
}
static char *stb_strtok_raw(char *output, char *src, char *delimit, int keep, int invert)
{
if (invert) {
while (*src && strchr(delimit, *src) != NULL) {
*output++ = *src++;
}
} else {
while (*src && strchr(delimit, *src) == NULL) {
*output++ = *src++;
}
}
*output = 0;
if (keep)
return src;
else
return *src ? src+1 : src;
}
char *stb_strtok(char *output, char *src, char *delimit)
{
return stb_strtok_raw(output, src, delimit, 0, 0);
}
char *stb_strtok_keep(char *output, char *src, char *delimit)
{
return stb_strtok_raw(output, src, delimit, 1, 0);
}
char *stb_strtok_invert(char *output, char *src, char *delimit)
{
return stb_strtok_raw(output, src, delimit, 1,1);
}
static char **stb_tokens_raw(char *src_, char *delimit, int *count,
int stripwhite, int allow_empty, char *start, char *end)
{
int nested = 0;
unsigned char *src = (unsigned char *) src_;
static char stb_tokentable[256]; // rely on static initialization to 0
static char stable[256],etable[256];
char *out;
char **result;
int num=0;
unsigned char *s;
s = (unsigned char *) delimit; while (*s) stb_tokentable[*s++] = 1;
if (start) {
s = (unsigned char *) start; while (*s) stable[*s++] = 1;
s = (unsigned char *) end; if (s) while (*s) stable[*s++] = 1;
s = (unsigned char *) end; if (s) while (*s) etable[*s++] = 1;
}
stable[0] = 1;
// two passes through: the first time, counting how many
s = (unsigned char *) src;
while (*s) {
// state: just found delimiter
// skip further delimiters
if (!allow_empty) {
stb_tokentable[0] = 0;
while (stb_tokentable[*s])
++s;
if (!*s) break;
}
++num;
// skip further non-delimiters
stb_tokentable[0] = 1;
if (stripwhite == 2) { // quoted strings
while (!stb_tokentable[*s]) {
if (*s != '"')
++s;
else {
++s;
if (*s == '"')
++s; // "" -> ", not start a string
else {
// begin a string
while (*s) {
if (s[0] == '"') {
if (s[1] == '"') s += 2; // "" -> "
else { ++s; break; } // terminating "
} else
++s;
}
}
}
}
} else
while (nested || !stb_tokentable[*s]) {
if (stable[*s]) {
if (!*s) break;
if (end ? etable[*s] : nested)
--nested;
else
++nested;
}
++s;
}
if (allow_empty) {
if (*s) ++s;
}
}
// now num has the actual count... malloc our output structure
// need space for all the strings: strings won't be any longer than
// original input, since for every '\0' there's at least one delimiter
result = (char **) malloc(sizeof(*result) * (num+1) + (s-src+1));
if (result == NULL) return result;
out = (char *) (result + (num+1));
// second pass: copy out the data
s = (unsigned char *) src;
num = 0;
nested = 0;
while (*s) {
char *last_nonwhite;
// state: just found delimiter
// skip further delimiters
if (!allow_empty) {
stb_tokentable[0] = 0;
if (stripwhite)
while (stb_tokentable[*s] || isspace(*s))
++s;
else
while (stb_tokentable[*s])
++s;
} else if (stripwhite) {
while (isspace(*s)) ++s;
}
if (!*s) break;
// we're past any leading delimiters and whitespace
result[num] = out;
++num;
// copy non-delimiters
stb_tokentable[0] = 1;
last_nonwhite = out-1;
if (stripwhite == 2) {
while (!stb_tokentable[*s]) {
if (*s != '"') {
if (!isspace(*s)) last_nonwhite = out;
*out++ = *s++;
} else {
++s;
if (*s == '"') {
if (!isspace(*s)) last_nonwhite = out;
*out++ = *s++; // "" -> ", not start string
} else {
// begin a quoted string
while (*s) {
if (s[0] == '"') {
if (s[1] == '"') { *out++ = *s; s += 2; }
else { ++s; break; } // terminating "
} else
*out++ = *s++;
}
last_nonwhite = out-1; // all in quotes counts as non-white
}
}
}
} else {
while (nested || !stb_tokentable[*s]) {
if (!isspace(*s)) last_nonwhite = out;
if (stable[*s]) {
if (!*s) break;
if (end ? etable[*s] : nested)
--nested;
else
++nested;
}
*out++ = *s++;
}
}
if (stripwhite) // rewind to last non-whitespace char
out = last_nonwhite+1;
*out++ = '\0';
if (*s) ++s; // skip delimiter
}
// reset the static tables so the next call starts clean
s = (unsigned char *) delimit; while (*s) stb_tokentable[*s++] = 0;
if (start) {
s = (unsigned char *) start; while (*s) stable[*s++] = 0;
s = (unsigned char *) end; if (s) while (*s) stable[*s++] = 0;
s = (unsigned char *) end; if (s) while (*s) etable[*s++] = 0;
}
if (count != NULL) *count = num;
result[num] = 0;
return result;
}
char **stb_tokens(char *src, char *delimit, int *count)
{
return stb_tokens_raw(src,delimit,count,0,0,0,0);
}
char **stb_tokens_nested(char *src, char *delimit, int *count, char *nest_in, char *nest_out)
{
return stb_tokens_raw(src,delimit,count,0,0,nest_in,nest_out);
}
char **stb_tokens_nested_empty(char *src, char *delimit, int *count, char *nest_in, char *nest_out)
{
return stb_tokens_raw(src,delimit,count,0,1,nest_in,nest_out);
}
char **stb_tokens_allowempty(char *src, char *delimit, int *count)
{
return stb_tokens_raw(src,delimit,count,0,1,0,0);
}
char **stb_tokens_stripwhite(char *src, char *delimit, int *count)
{
return stb_tokens_raw(src,delimit,count,1,1,0,0);
}
char **stb_tokens_quoted(char *src, char *delimit, int *count)
{
return stb_tokens_raw(src,delimit,count,2,1,0,0);
}
char *stb_dupreplace(char *src, char *find, char *replace)
{
size_t len_find = strlen(find);
size_t len_replace = strlen(replace);
int count = 0;
char *s,*p,*q;
s = strstr(src, find);
if (s == NULL) return stb_p_strdup(src);
do {
++count;
s = strstr(s + len_find, find);
} while (s != NULL);
p = (char *) malloc(strlen(src) + count * (len_replace - len_find) + 1);
if (p == NULL) return p;
q = p;
s = src;
for (;;) {
char *t = strstr(s, find);
if (t == NULL) {
stb_p_strcpy_s(q,strlen(src)+count*(len_replace-len_find)+1,s);
assert(strlen(p) == strlen(src) + count*(len_replace-len_find));
return p;
}
memcpy(q, s, t-s);
q += t-s;
memcpy(q, replace, len_replace);
q += len_replace;
s = t + len_find;
}
}
void stb_replaceinplace(char *src, char *find, char *replace)
{
size_t len_find = strlen(find);
size_t len_replace = strlen(replace);
int delta;
char *s,*p,*q;
delta = (int) (len_replace - len_find);
assert(delta <= 0); // in-place replacement must not grow the string
if (delta > 0) return;
p = strstr(src, find);
if (p == NULL) return;
s = q = p;
while (*s) {
memcpy(q, replace, len_replace);
p += len_find;
q += len_replace;
s = strstr(p, find);
if (s == NULL) s = p + strlen(p);
memmove(q, p, s-p);
q += s-p;
p = s;
}
*q = 0;
}
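// Illustrative examples:
//
//    char *s = stb_dupreplace("a-b-c", "-", "<->"); // new string "a<->b<->c"; free() it
//    stb_replaceinplace(buf, "<->", "-");           // in place; replacement must not be longer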
void stb_fixpath(char *path)
{
for(; *path; ++path)
if (*path == '\\')
*path = '/';
}
void stb__add_section(char *buffer, char *data, ptrdiff_t curlen, ptrdiff_t newlen)
{
if (newlen < curlen) {
ptrdiff_t z1 = newlen >> 1, z2 = newlen-z1;
memcpy(buffer, data, z1-1);
buffer[z1-1] = '.';
buffer[z1-0] = '.';
memcpy(buffer+z1+1, data+curlen-z2+1, z2-1);
} else
memcpy(buffer, data, curlen);
}
char * stb_shorten_path_readable(char *path, int len)
{
static char buffer[1024];
ptrdiff_t n = strlen(path),n1,n2,r1,r2;
char *s;
if (n <= len) return path;
if (len >= 1024) return path; // buffer[len] below requires len < sizeof(buffer)
s = stb_strrchr2(path, '/', '\\');
if (s) {
n1 = s - path + 1;
n2 = n - n1;
++s;
} else {
n1 = 0;
n2 = n;
s = path;
}
// now we need to reduce r1 and r2 so that they fit in len
if (n1 < len>>1) {
r1 = n1;
r2 = len - r1;
} else if (n2 < len >> 1) {
r2 = n2;
r1 = len - r2;
} else {
r1 = n1 * len / n;
r2 = n2 * len / n;
if (r1 < len>>2) r1 = len>>2, r2 = len-r1;
if (r2 < len>>2) r2 = len>>2, r1 = len-r2;
}
assert(r1 <= n1 && r2 <= n2);
if (n1)
stb__add_section(buffer, path, n1, r1);
stb__add_section(buffer+r1, s, n2, r2);
buffer[len] = 0;
return buffer;
}
static char *stb__splitpath_raw(char *buffer, char *path, int flag)
{
ptrdiff_t len=0,x,y, n = (int) strlen(path), f1,f2;
char *s = stb_strrchr2(path, '/', '\\');
char *t = strrchr(path, '.');
if (s && t && t < s) t = NULL;
if (s) ++s;
if (flag == STB_EXT_NO_PERIOD)
flag |= STB_EXT;
if (!(flag & (STB_PATH | STB_FILE | STB_EXT))) return NULL;
f1 = s == NULL ? 0 : s-path; // start of filename
f2 = t == NULL ? n : t-path; // just past end of filename
if (flag & STB_PATH) {
x = 0; if (f1 == 0 && flag == STB_PATH) len=2;
} else if (flag & STB_FILE) {
x = f1;
} else {
x = f2;
if (flag & STB_EXT_NO_PERIOD)
if (path[x] == '.')
++x;
}
if (flag & STB_EXT)
y = n;
else if (flag & STB_FILE)
y = f2;
else
y = f1;
if (buffer == NULL) {
buffer = (char *) malloc(y-x + len + 1);
if (!buffer) return NULL;
}
if (len) { stb_p_strcpy_s(buffer, 3, "./"); return buffer; }
stb_strncpy(buffer, path+(int)x, (int)(y-x));
return buffer;
}
char *stb_splitpath(char *output, char *src, int flag)
{
return stb__splitpath_raw(output, src, flag);
}
char *stb_splitpathdup(char *src, int flag)
{
return stb__splitpath_raw(NULL, src, flag);
}
char *stb_replacedir(char *output, char *src, char *dir)
{
char buffer[4096];
stb_splitpath(buffer, src, STB_FILE | STB_EXT);
if (dir)
stb_p_sprintf(output stb_p_size(9999), "%s/%s", dir, buffer);
else
stb_p_strcpy_s(output, sizeof(buffer), buffer); // @UNSAFE
return output;
}
char *stb_replaceext(char *output, char *src, char *ext)
{
char buffer[4096];
stb_splitpath(buffer, src, STB_PATH | STB_FILE);
if (ext)
stb_p_sprintf(output stb_p_size(9999), "%s.%s", buffer, ext[0] == '.' ? ext+1 : ext);
else
stb_p_strcpy_s(output, sizeof(buffer), buffer); // @UNSAFE
return output;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// stb_alloc - hierarchical allocator
//
// inspired by http://swapped.cc/halloc
//
//
// When you alloc a given block through stb_alloc, you have these choices:
//
// 1. does it have a parent?
// 2. can it have children?
// 3. can it be freed directly?
// 4. is it transferrable?
// 5. what is its alignment?
//
// Here are interesting combinations of those:
//
// children free transfer alignment
// arena Y Y N n/a
// no-overhead, chunked N N N normal
// string pool alloc N N N 1
// parent-ptr, chunked Y N N normal
// low-overhead, unchunked N Y Y normal
// general purpose alloc Y Y Y normal
//
// Unchunked allocations will probably return 16-aligned pointers. If
// we 16-align the results, we have room for 4 pointers. For smaller
// allocations that allow finer alignment, we can reduce the pointers.
//
// The strategy is that given a pointer, assuming it has a header (only
// the no-overhead allocations have no header), we can determine the
// type of the header fields, and the number of them, by stepping backwards
// through memory and looking at the tags in the bottom bits.
//
// Implementation strategy:
// chunked allocations come from the middle of chunks, and can't
// be freed. therefore they do not need to be on a sibling chain.
// they may need child pointers if they have children.
//
// chunked, with-children
// void *parent;
//
// unchunked, no-children -- reduced storage
// void *next_sibling;
// void *prev_sibling_nextp;
//
// unchunked, general
// void *first_child;
// void *next_sibling;
// void *prev_sibling_nextp;
// void *chunks;
//
// so, if we code each of these fields with different bit patterns
// (actually same one for next/prev/child), then we can identify which
// each one is from the last field.
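// Illustrative usage: allocations form a tree, and freeing a parent frees
// its whole subtree.
//
//    void *arena = stb_malloc_global(1);              // a parent node
//    char *a = (char *) stb_malloc(arena, 4096);      // child of arena
//    char *b = (char *) stb_malloc_nofree(arena, 64); // chunked; freed with parent
//    stb_free(arena);                                 // frees arena, a, and b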
STB_EXTERN void stb_free(void *p);
STB_EXTERN void *stb_malloc_global(size_t size);
STB_EXTERN void *stb_malloc(void *context, size_t size);
STB_EXTERN void *stb_malloc_nofree(void *context, size_t size);
STB_EXTERN void *stb_malloc_leaf(void *context, size_t size);
STB_EXTERN void *stb_malloc_raw(void *context, size_t size);
STB_EXTERN char *stb_malloc_string(void *context, size_t size);
STB_EXTERN void *stb_realloc(void *ptr, size_t newsize);
STB_EXTERN void stb_reassign(void *new_context, void *ptr);
STB_EXTERN void stb_malloc_validate(void *p, void *parent);
extern int stb_alloc_chunk_size ;
extern int stb_alloc_count_free ;
extern int stb_alloc_count_alloc;
extern int stb_alloc_alignment ;
#ifdef STB_DEFINE
int stb_alloc_chunk_size = 65536;
int stb_alloc_count_free = 0;
int stb_alloc_count_alloc = 0;
int stb_alloc_alignment = -16;
typedef struct stb__chunk
{
struct stb__chunk *next;
int data_left;
int alloc;
} stb__chunk;
typedef struct
{
void * next;
void ** prevn;
} stb__nochildren;
typedef struct
{
void ** prevn;
void * child;
void * next;
stb__chunk *chunks;
} stb__alloc;
typedef struct
{
stb__alloc *parent;
} stb__chunked;
#define STB__PARENT 1
#define STB__CHUNKS 2
typedef enum
{
STB__nochildren = 0,
STB__chunked = STB__PARENT,
STB__alloc = STB__CHUNKS,
STB__chunk_raw = 4,
} stb__alloc_type;
// these functions set the bottom bits of a pointer efficiently
#define STB__DECODE(x,v) ((void *) ((char *) (x) - (v)))
#define STB__ENCODE(x,v) ((void *) ((char *) (x) + (v)))
#define stb__parent(z) (stb__alloc *) STB__DECODE((z)->parent, STB__PARENT)
#define stb__chunks(z) (stb__chunk *) STB__DECODE((z)->chunks, STB__CHUNKS)
#define stb__setparent(z,p) (z)->parent = (stb__alloc *) STB__ENCODE((p), STB__PARENT)
#define stb__setchunks(z,c) (z)->chunks = (stb__chunk *) STB__ENCODE((c), STB__CHUNKS)
static stb__alloc stb__alloc_global =
{
NULL,
NULL,
NULL,
(stb__chunk *) STB__ENCODE(NULL, STB__CHUNKS)
};
static stb__alloc_type stb__identify(void *p)
{
void **q = (void **) p;
return (stb__alloc_type) ((stb_uinta) q[-1] & 3);
}
static void *** stb__prevn(void *p)
{
if (stb__identify(p) == STB__alloc) {
stb__alloc *s = (stb__alloc *) p - 1;
return &s->prevn;
} else {
stb__nochildren *s = (stb__nochildren *) p - 1;
return &s->prevn;
}
}
void stb_free(void *p)
{
if (p == NULL) return;
// count frees so that unit tests can see what's happening
++stb_alloc_count_free;
switch(stb__identify(p)) {
case STB__chunked:
// freeing a chunked-block with children does nothing;
// they only get freed when the parent does
// surely this is wrong, and it should free them immediately?
// otherwise how are they getting put on the right chain?
return;
case STB__nochildren: {
stb__nochildren *s = (stb__nochildren *) p - 1;
// unlink from sibling chain
*(s->prevn) = s->next;
if (s->next)
*stb__prevn(s->next) = s->prevn;
free(s);
return;
}
case STB__alloc: {
stb__alloc *s = (stb__alloc *) p - 1;
stb__chunk *c, *n;
void *q;
// unlink from sibling chain, if any
*(s->prevn) = s->next;
if (s->next)
*stb__prevn(s->next) = s->prevn;
// first free chunks
c = (stb__chunk *) stb__chunks(s);
while (c != NULL) {
n = c->next;
stb_alloc_count_free += c->alloc;
free(c);
c = n;
}
// validating
stb__setchunks(s,NULL);
s->prevn = NULL;
s->next = NULL;
// now free children
while ((q = s->child) != NULL) {
stb_free(q);
}
// now free self
free(s);
return;
}
default:
assert(0); /* NOTREACHED */
}
}
void stb_malloc_validate(void *p, void *parent)
{
if (p == NULL) return;
switch(stb__identify(p)) {
case STB__chunked:
return;
case STB__nochildren: {
stb__nochildren *n = (stb__nochildren *) p - 1;
if (n->prevn)
assert(*n->prevn == p);
if (n->next) {
assert(*stb__prevn(n->next) == &n->next);
stb_malloc_validate(n->next, parent);
}
return;
}
case STB__alloc: {
stb__alloc *s = (stb__alloc *) p - 1;
if (s->prevn)
assert(*s->prevn == p);
if (s->child) {
assert(*stb__prevn(s->child) == &s->child);
stb_malloc_validate(s->child, p);
}
if (s->next) {
assert(*stb__prevn(s->next) == &s->next);
stb_malloc_validate(s->next, parent);
}
return;
}
default:
assert(0); /* NOTREACHED */
}
}
static void * stb__try_chunk(stb__chunk *c, int size, int align, int pre_align)
{
char *memblock = (char *) (c+1), *q;
stb_inta iq;
int start_offset;
// we're going to allocate at the end of the chunk, not the start. confusing,
// but it means we don't need both a 'limit' and a 'cur', just a 'cur'.
// the block ends at: p + c->data_left
// then we move back by size
start_offset = c->data_left - size;
// now we need to check the alignment of that
q = memblock + start_offset;
iq = (stb_inta) q;
assert(sizeof(q) == sizeof(iq));
// suppose align = 2
// then we need to retreat iq far enough that (iq & (2-1)) == 0
// to get (iq & (align-1)) = 0 requires subtracting (iq & (align-1))
start_offset -= iq & (align-1);
assert(((stb_uinta) (memblock+start_offset) & (align-1)) == 0);
// now, if that + pre_align works, go for it!
start_offset -= pre_align;
if (start_offset >= 0) {
c->data_left = start_offset;
return memblock + start_offset;
}
return NULL;
}
static void stb__sort_chunks(stb__alloc *src)
{
// of the first two chunks, put the chunk with more data left in it first
stb__chunk *c = stb__chunks(src), *d;
if (c == NULL) return;
d = c->next;
if (d == NULL) return;
if (c->data_left > d->data_left) return;
c->next = d->next;
d->next = c;
stb__setchunks(src, d);
}
static void * stb__alloc_chunk(stb__alloc *src, int size, int align, int pre_align)
{
void *p;
stb__chunk *c = stb__chunks(src);
if (c && size <= stb_alloc_chunk_size) {
p = stb__try_chunk(c, size, align, pre_align);
if (p) { ++c->alloc; return p; }
// try a second chunk to reduce wastage
if (c->next) {
p = stb__try_chunk(c->next, size, align, pre_align);
if (p) { ++c->alloc; return p; }
// put the bigger chunk first, since the second will get buried
// the upshot of this is that, until it gets allocated from, chunk #2
// is always the largest remaining chunk. (could formalize
// this with a heap!)
stb__sort_chunks(src);
c = stb__chunks(src);
}
}
// allocate a new chunk
{
stb__chunk *n;
int chunk_size = stb_alloc_chunk_size;
// we're going to allocate a new chunk to put this in
if (size > chunk_size)
chunk_size = size;
assert(sizeof(*n) + pre_align <= 16);
// loop trying to allocate a large enough chunk
// the loop is because the alignment may cause problems if it's big...
// and we don't know what our chunk alignment is going to be
while (1) {
n = (stb__chunk *) malloc(16 + chunk_size);
if (n == NULL) return NULL;
n->data_left = chunk_size - sizeof(*n);
p = stb__try_chunk(n, size, align, pre_align);
if (p != NULL) {
n->next = c;
stb__setchunks(src, n);
// if we just used up the whole block immediately,
// move the following chunk up
n->alloc = 1;
if (size == chunk_size)
stb__sort_chunks(src);
return p;
}
free(n);
chunk_size += 16+align;
}
}
}
static stb__alloc * stb__get_context(void *context)
{
if (context == NULL) {
return &stb__alloc_global;
} else {
int u = stb__identify(context);
// if context is chunked, grab parent
if (u == STB__chunked) {
stb__chunked *s = (stb__chunked *) context - 1;
return stb__parent(s);
} else {
return (stb__alloc *) context - 1;
}
}
}
static void stb__insert_alloc(stb__alloc *src, stb__alloc *s)
{
s->prevn = &src->child;
s->next = src->child;
src->child = s+1;
if (s->next)
*stb__prevn(s->next) = &s->next;
}
static void stb__insert_nochild(stb__alloc *src, stb__nochildren *s)
{
s->prevn = &src->child;
s->next = src->child;
src->child = s+1;
if (s->next)
*stb__prevn(s->next) = &s->next;
}
static void * malloc_base(void *context, size_t size, stb__alloc_type t, int align)
{
void *p;
stb__alloc *src = stb__get_context(context);
if (align <= 0) {
// compute worst-case C packed alignment
// e.g. a 24-byte struct is 8-aligned
// stb_lowbit8 returns -1 when the low 8 bits of size are all zero
// (size is 0 or a multiple of 256), so avoid shifting by -1
int lowbit = stb_lowbit8((unsigned int) size);
int align_proposed = (lowbit < 0) ? 0 : (1 << lowbit);
if (align_proposed == 0) {
if (size == 0)
align_proposed = 1;
else
align_proposed = 256;
}
// a negative alignment means 'don't align any larger
// than this'; so -16 means we align 1,2,4,8, or 16
if (align < 0) {
if (align_proposed > -align)
align_proposed = -align;
}
align = align_proposed;
}
assert(stb_is_pow2(align));
// don't cause misalignment when allocating nochildren
if (t == STB__nochildren && align > 8)
t = STB__alloc;
switch (t) {
case STB__alloc: {
stb__alloc *s = (stb__alloc *) malloc(size + sizeof(*s));
if (s == NULL) return NULL;
p = s+1;
s->child = NULL;
stb__insert_alloc(src, s);
stb__setchunks(s,NULL);
break;
}
case STB__nochildren: {
stb__nochildren *s = (stb__nochildren *) malloc(size + sizeof(*s));
if (s == NULL) return NULL;
p = s+1;
stb__insert_nochild(src, s);
break;
}
case STB__chunk_raw: {
p = stb__alloc_chunk(src, (int) size, align, 0);
if (p == NULL) return NULL;
break;
}
case STB__chunked: {
stb__chunked *s;
if (align < sizeof(stb_uintptr)) align = sizeof(stb_uintptr);
s = (stb__chunked *) stb__alloc_chunk(src, (int) size, align, sizeof(*s));
if (s == NULL) return NULL;
stb__setparent(s, src);
p = s+1;
break;
}
default: p = NULL; assert(0); /* NOTREACHED */
}
++stb_alloc_count_alloc;
return p;
}
void *stb_malloc_global(size_t size)
{
return malloc_base(NULL, size, STB__alloc, stb_alloc_alignment);
}
void *stb_malloc(void *context, size_t size)
{
return malloc_base(context, size, STB__alloc, stb_alloc_alignment);
}
void *stb_malloc_nofree(void *context, size_t size)
{
return malloc_base(context, size, STB__chunked, stb_alloc_alignment);
}
void *stb_malloc_leaf(void *context, size_t size)
{
return malloc_base(context, size, STB__nochildren, stb_alloc_alignment);
}
void *stb_malloc_raw(void *context, size_t size)
{
return malloc_base(context, size, STB__chunk_raw, stb_alloc_alignment);
}
char *stb_malloc_string(void *context, size_t size)
{
return (char *) malloc_base(context, size, STB__chunk_raw, 1);
}
void *stb_realloc(void *ptr, size_t newsize)
{
stb__alloc_type t;
if (ptr == NULL) return stb_malloc(NULL, newsize);
if (newsize == 0) { stb_free(ptr); return NULL; }
t = stb__identify(ptr);
assert(t == STB__alloc || t == STB__nochildren);
if (t == STB__alloc) {
stb__alloc *s = (stb__alloc *) ptr - 1;
s = (stb__alloc *) realloc(s, newsize + sizeof(*s));
if (s == NULL) return NULL;
ptr = s+1;
// update pointers
(*s->prevn) = ptr;
if (s->next)
*stb__prevn(s->next) = &s->next;
if (s->child)
*stb__prevn(s->child) = &s->child;
return ptr;
} else {
stb__nochildren *s = (stb__nochildren *) ptr - 1;
s = (stb__nochildren *) realloc(s, newsize + sizeof(*s));
if (s == NULL) return NULL;
// update pointers
(*s->prevn) = s+1;
if (s->next)
*stb__prevn(s->next) = &s->next;
return s+1;
}
}
void *stb_realloc_c(void *context, void *ptr, size_t newsize)
{
if (ptr == NULL) return stb_malloc(context, newsize);
if (newsize == 0) { stb_free(ptr); return NULL; }
// @TODO: verify you haven't changed contexts
return stb_realloc(ptr, newsize);
}
void stb_reassign(void *new_context, void *ptr)
{
stb__alloc *src = stb__get_context(new_context);
stb__alloc_type t = stb__identify(ptr);
assert(t == STB__alloc || t == STB__nochildren);
if (t == STB__alloc) {
stb__alloc *s = (stb__alloc *) ptr - 1;
// unlink from old
*(s->prevn) = s->next;
if (s->next)
*stb__prevn(s->next) = s->prevn;
stb__insert_alloc(src, s);
} else {
stb__nochildren *s = (stb__nochildren *) ptr - 1;
// unlink from old
*(s->prevn) = s->next;
if (s->next)
*stb__prevn(s->next) = s->prevn;
stb__insert_nochild(src, s);
}
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// stb_arr
//
// An stb_arr is directly useable as a pointer (use the actual type in your
// definition), but when it resizes, it returns a new pointer and you can't
// use the old one, so you have to be careful to copy-in-out as necessary.
//
// Use a NULL pointer as a 0-length array.
//
// float *my_array = NULL, *temp;
//
// // add elements on the end one at a time
// stb_arr_push(my_array, 0.0f);
// stb_arr_push(my_array, 1.0f);
// stb_arr_push(my_array, 2.0f);
//
// assert(my_array[1] == 1.0f);
//
// // add an uninitialized element at the end, then assign it
// *stb_arr_add(my_array) = 3.0f;
//
// // add three uninitialized elements at the end
// temp = stb_arr_addn(my_array,3);
// temp[0] = 4.0f;
// temp[1] = 5.0f;
// temp[2] = 6.0f;
//
// assert(my_array[5] == 5.0f);
//
// // remove the last one
// stb_arr_pop(my_array);
//
// assert(stb_arr_len(my_array) == 6);
#ifdef STB_MALLOC_WRAPPER
#define STB__PARAMS , char *file, int line
#define STB__ARGS , file, line
#else
#define STB__PARAMS
#define STB__ARGS
#endif
// calling this function allocates an empty stb_arr attached to p
// (whereas NULL isn't attached to anything)
STB_EXTERN void stb_arr_malloc(void **target, void *context);
// call this function with a non-NULL value to have all successive
// stb_arrs that are created be attached to the associated parent. Note
// that once a given stb_arr is non-empty, it stays attached to its
// current parent, even if you call this function again.
// it returns the previous value, so you can restore it
STB_EXTERN void* stb_arr_malloc_parent(void *p);
// simple functions written on top of other functions
#define stb_arr_empty(a) ( stb_arr_len(a) == 0 )
#define stb_arr_add(a) ( stb_arr_addn((a),1) )
#define stb_arr_push(a,v) ( *stb_arr_add(a)=(v) )
typedef struct
{
int len, limit;
int stb_malloc;
unsigned int signature;
} stb__arr;
#define stb_arr_signature 0x51bada7b // ends with 0123 in decimal
// access the header block stored before the data
#define stb_arrhead(a) /*lint --e(826)*/ (((stb__arr *) (a)) - 1)
#define stb_arrhead2(a) /*lint --e(826)*/ (((stb__arr *) (a)) - 1)
#ifdef STB_DEBUG
#define stb_arr_check(a) assert(!a || stb_arrhead(a)->signature == stb_arr_signature)
#define stb_arr_check2(a) assert(!a || stb_arrhead2(a)->signature == stb_arr_signature)
#else
#define stb_arr_check(a) ((void) 0)
#define stb_arr_check2(a) ((void) 0)
#endif
// ARRAY LENGTH
// get the array length; special case if pointer is NULL
#define stb_arr_len(a) (a ? stb_arrhead(a)->len : 0)
#define stb_arr_len2(a) ((stb__arr *) (a) ? stb_arrhead2(a)->len : 0)
#define stb_arr_lastn(a) (stb_arr_len(a)-1)
// check whether a given index is valid -- tests 0 <= i < stb_arr_len(a)
#define stb_arr_valid(a,i) (a ? (int) (i) < stb_arrhead(a)->len : 0)
// change the array length so it is exactly N entries long, creating
// uninitialized entries as needed
#define stb_arr_setlen(a,n) \
(stb__arr_setlen((void **) &(a), sizeof(a[0]), (n)))
// change the array length so that N is a valid index (that is, so
// it is at least N+1 entries long), creating uninitialized entries as needed
#define stb_arr_makevalid(a,n) \
(stb_arr_len(a) < (n)+1 ? stb_arr_setlen(a,(n)+1),(a) : (a))
// remove the last element of the array, returning it
#define stb_arr_pop(a) ((stb_arr_check(a), (a))[--stb_arrhead(a)->len])
// access the last element in the array
#define stb_arr_last(a) ((stb_arr_check(a), (a))[stb_arr_len(a)-1])
// is iterator at end of list?
#define stb_arr_end(a,i) ((i) >= &(a)[stb_arr_len(a)])
// (internal) grow the stored length (capacity must already be sufficient)
#define stb_arr__grow(a,n) (stb_arr_check(a), stb_arrhead(a)->len += (n))
// add N new uninitialized elements to the end of the array
#define stb_arr__addn(a,n) /*lint --e(826)*/ \
((stb_arr_len(a)+(n) > stb_arrcurmax(a)) \
? (stb__arr_addlen((void **) &(a),sizeof(*a),(n)),0) \
: ((stb_arr__grow(a,n), 0)))
// add N new uninitialized elements to the end of the array, and return
// a pointer to the first new one
#define stb_arr_addn(a,n) (stb_arr__addn((a),n),(a)+stb_arr_len(a)-(n))
// add N new uninitialized elements starting at index 'i'
#define stb_arr_insertn(a,i,n) (stb__arr_insertn((void **) &(a), sizeof(*a), (i), (n)))
// insert an element at i
#define stb_arr_insert(a,i,v) (stb__arr_insertn((void **) &(a), sizeof(*a), (i), (1)), ((a)[i] = v))
// delete N elements from the middle starting at index 'i'
#define stb_arr_deleten(a,i,n) (stb__arr_deleten((void **) &(a), sizeof(*a), (i), (n)))
// delete the i'th element
#define stb_arr_delete(a,i) stb_arr_deleten(a,i,1)
// delete the i'th element, swapping down from the end
#define stb_arr_fastdelete(a,i) \
(stb_swap(&a[i], &a[stb_arrhead(a)->len-1], sizeof(*a)), stb_arr_pop(a))
// ARRAY STORAGE
// get the array maximum storage; special case if NULL
#define stb_arrcurmax(a) (a ? stb_arrhead(a)->limit : 0)
#define stb_arrcurmax2(a) (a ? stb_arrhead2(a)->limit : 0)
// set the maxlength of the array to n in anticipation of further growth
#define stb_arr_setsize(a,n) (stb_arr_check(a), stb__arr_setsize((void **) &(a),sizeof((a)[0]),n))
// make sure maxlength is large enough for at least N new allocations
#define stb_arr_atleast(a,n) (stb_arr_len(a)+(n) > stb_arrcurmax(a) \
? stb_arr_setsize((a), stb_arr_len(a)+(n)) : 0)
// make a copy of a given array (copies contents via 'memcpy'!)
#define stb_arr_copy(a) stb__arr_copy(a, sizeof((a)[0]))
// compute the storage needed to store all the elements of the array
#define stb_arr_storage(a) (stb_arr_len(a) * sizeof((a)[0]))
#define stb_arr_for(v,arr) for((v)=(arr); (v) < (arr)+stb_arr_len(arr); ++(v))
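// Illustrative usage of stb_arr_for: 'v' iterates by pointer over the array.
//
//    float *v;
//    stb_arr_for(v, my_array)
//       *v *= 2.0f;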
// IMPLEMENTATION
STB_EXTERN void stb_arr_free_(void **p);
STB_EXTERN void *stb__arr_copy_(void *p, int elem_size);
STB_EXTERN void stb__arr_setsize_(void **p, int size, int limit STB__PARAMS);
STB_EXTERN void stb__arr_setlen_(void **p, int size, int newlen STB__PARAMS);
STB_EXTERN void stb__arr_addlen_(void **p, int size, int addlen STB__PARAMS);
STB_EXTERN void stb__arr_deleten_(void **p, int size, int loc, int n STB__PARAMS);
STB_EXTERN void stb__arr_insertn_(void **p, int size, int loc, int n STB__PARAMS);
#define stb_arr_free(p) stb_arr_free_((void **) &(p))
#define stb__arr_copy stb__arr_copy_
#ifndef STB_MALLOC_WRAPPER
#define stb__arr_setsize stb__arr_setsize_
#define stb__arr_setlen stb__arr_setlen_
#define stb__arr_addlen stb__arr_addlen_
#define stb__arr_deleten stb__arr_deleten_
#define stb__arr_insertn stb__arr_insertn_
#else
#define stb__arr_addlen(p,s,n) stb__arr_addlen_(p,s,n,__FILE__,__LINE__)
#define stb__arr_setlen(p,s,n) stb__arr_setlen_(p,s,n,__FILE__,__LINE__)
#define stb__arr_setsize(p,s,n) stb__arr_setsize_(p,s,n,__FILE__,__LINE__)
#define stb__arr_deleten(p,s,i,n) stb__arr_deleten_(p,s,i,n,__FILE__,__LINE__)
#define stb__arr_insertn(p,s,i,n) stb__arr_insertn_(p,s,i,n,__FILE__,__LINE__)
#endif
#ifdef STB_DEFINE
static void *stb__arr_context;
void *stb_arr_malloc_parent(void *p)
{
void *q = stb__arr_context;
stb__arr_context = p;
return q;
}
void stb_arr_malloc(void **target, void *context)
{
stb__arr *q = (stb__arr *) stb_malloc(context, sizeof(*q));
q->len = q->limit = 0;
q->stb_malloc = 1;
q->signature = stb_arr_signature;
*target = (void *) (q+1);
}
static void * stb__arr_malloc(int size)
{
if (stb__arr_context)
return stb_malloc(stb__arr_context, size);
return malloc(size);
}
void * stb__arr_copy_(void *p, int elem_size)
{
stb__arr *q;
if (p == NULL) return p;
q = (stb__arr *) stb__arr_malloc(sizeof(*q) + elem_size * stb_arrhead2(p)->limit);
stb_arr_check2(p);
memcpy(q, stb_arrhead2(p), sizeof(*q) + elem_size * stb_arrhead2(p)->len);
q->stb_malloc = !!stb__arr_context;
return q+1;
}
void stb_arr_free_(void **pp)
{
void *p = *pp;
stb_arr_check2(p);
if (p) {
stb__arr *q = stb_arrhead2(p);
if (q->stb_malloc)
stb_free(q);
else
free(q);
}
*pp = NULL;
}
static void stb__arrsize_(void **pp, int size, int limit, int len STB__PARAMS)
{
void *p = *pp;
stb__arr *a;
stb_arr_check2(p);
if (p == NULL) {
if (len == 0 && size == 0) return;
a = (stb__arr *) stb__arr_malloc(sizeof(*a) + size*limit);
a->limit = limit;
a->len = len;
a->stb_malloc = !!stb__arr_context;
a->signature = stb_arr_signature;
} else {
a = stb_arrhead2(p);
a->len = len;
if (a->limit < limit) {
void *p;
if (a->limit >= 4 && limit < a->limit * 2)
limit = a->limit * 2;
if (a->stb_malloc)
p = stb_realloc(a, sizeof(*a) + limit*size);
else
#ifdef STB_MALLOC_WRAPPER
p = stb__realloc(a, sizeof(*a) + limit*size, file, line);
#else
p = realloc(a, sizeof(*a) + limit*size);
#endif
if (p) {
a = (stb__arr *) p;
a->limit = limit;
} else {
// throw an error!
}
}
}
a->len = stb_min(a->len, a->limit);
*pp = a+1;
}
void stb__arr_setsize_(void **pp, int size, int limit STB__PARAMS)
{
void *p = *pp;
stb_arr_check2(p);
stb__arrsize_(pp, size, limit, stb_arr_len2(p) STB__ARGS);
}
void stb__arr_setlen_(void **pp, int size, int newlen STB__PARAMS)
{
void *p = *pp;
stb_arr_check2(p);
if (stb_arrcurmax2(p) < newlen || p == NULL) {
stb__arrsize_(pp, size, newlen, newlen STB__ARGS);
} else {
stb_arrhead2(p)->len = newlen;
}
}
void stb__arr_addlen_(void **p, int size, int addlen STB__PARAMS)
{
stb__arr_setlen_(p, size, stb_arr_len2(*p) + addlen STB__ARGS);
}
void stb__arr_insertn_(void **pp, int size, int i, int n STB__PARAMS)
{
void *p = *pp;
if (n) {
int z;
if (p == NULL) {
stb__arr_addlen_(pp, size, n STB__ARGS);
return;
}
z = stb_arr_len2(p);
stb__arr_addlen_(&p, size, n STB__ARGS);
memmove((char *) p + (i+n)*size, (char *) p + i*size, size * (z-i));
}
*pp = p;
}
void stb__arr_deleten_(void **pp, int size, int i, int n STB__PARAMS)
{
void *p = *pp;
if (n) {
memmove((char *) p + i*size, (char *) p + (i+n)*size, size * (stb_arr_len2(p)-(i+n)));
stb_arrhead2(p)->len -= n;
}
*pp = p;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Hashing
//
// typical use for this is to make a power-of-two hash table.
//
// let N = size of table (2^n)
// let H = stb_hash(str)
// let S = stb_rehash(H) | 1
//
// then hash probe sequence P(i) for i=0..N-1
// P(i) = (H + S*i) & (N-1)
//
// the idea is that H has 32 bits of hash information, but the
// table has only, say, 2^20 entries so only uses 20 of the bits.
// then by rehashing the original H we get 2^12 different probe
// sequences for a given initial probe location. (So it's optimal
// for 64K tables and its optimality decreases past that.)
//
// ok, so I've added something that generates _two separate_
// 32-bit hashes simultaneously which should scale better to
// very large tables.
STB_EXTERN unsigned int stb_hash(char *str);
STB_EXTERN unsigned int stb_hashptr(void *p);
STB_EXTERN unsigned int stb_hashlen(char *str, int len);
STB_EXTERN unsigned int stb_rehash_improved(unsigned int v);
STB_EXTERN unsigned int stb_hash_fast(void *p, int len);
STB_EXTERN unsigned int stb_hash2(char *str, unsigned int *hash2_ptr);
STB_EXTERN unsigned int stb_hash_number(unsigned int hash);
#define stb_rehash(x) ((x) + ((x) >> 6) + ((x) >> 19))
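// Illustrative sketch of the probe loop described above, for a power-of-two
// table of size N; 'table' and 'slot_matches' are hypothetical application code.
//
//    unsigned int h = stb_hash(str);
//    unsigned int s = stb_rehash(h) | 1;   // odd step visits every slot mod N
//    unsigned int i;
//    for (i=0; i < N; ++i) {
//       unsigned int slot = (h + s*i) & (N-1);
//       if (slot_matches(table[slot], str))
//          break;
//    }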
#ifdef STB_DEFINE
unsigned int stb_hash(char *str)
{
unsigned int hash = 0;
while (*str)
hash = (hash << 7) + (hash >> 25) + *str++;
return hash + (hash >> 16);
}
unsigned int stb_hashlen(char *str, int len)
{
unsigned int hash = 0;
while (len-- > 0 && *str)
hash = (hash << 7) + (hash >> 25) + *str++;
return hash + (hash >> 16);
}
unsigned int stb_hashptr(void *p)
{
unsigned int x = (unsigned int)(size_t) p;
// typically lacking in low bits and high bits
x = stb_rehash(x);
x += x << 16;
// pearson's shuffle
x ^= x << 3;
x += x >> 5;
x ^= x << 2;
x += x >> 15;
x ^= x << 10;
return stb_rehash(x);
}
unsigned int stb_rehash_improved(unsigned int v)
{
return stb_hashptr((void *)(size_t) v);
}
unsigned int stb_hash2(char *str, unsigned int *hash2_ptr)
{
unsigned int hash1 = 0x3141592c;
unsigned int hash2 = 0x77f044ed;
while (*str) {
hash1 = (hash1 << 7) + (hash1 >> 25) + *str;
hash2 = (hash2 << 11) + (hash2 >> 21) + *str;
++str;
}
*hash2_ptr = hash2 + (hash1 >> 16);
return hash1 + (hash2 >> 16);
}
// Paul Hsieh hash
#define stb__get16(p) ((p)[0] | ((p)[1] << 8))
unsigned int stb_hash_fast(void *p, int len)
{
unsigned char *q = (unsigned char *) p;
unsigned int hash = len;
if (len <= 0 || q == NULL) return 0;
/* Main loop */
for (;len > 3; len -= 4) {
unsigned int val;
hash += stb__get16(q);
val = (stb__get16(q+2) << 11);
hash = (hash << 16) ^ hash ^ val;
q += 4;
hash += hash >> 11;
}
/* Handle end cases */
switch (len) {
case 3: hash += stb__get16(q);
hash ^= hash << 16;
hash ^= q[2] << 18;
hash += hash >> 11;
break;
case 2: hash += stb__get16(q);
hash ^= hash << 11;
hash += hash >> 17;
break;
case 1: hash += q[0];
hash ^= hash << 10;
hash += hash >> 1;
break;
case 0: break;
}
/* Force "avalanching" of final 127 bits */
hash ^= hash << 3;
hash += hash >> 5;
hash ^= hash << 4;
hash += hash >> 17;
hash ^= hash << 25;
hash += hash >> 6;
return hash;
}
unsigned int stb_hash_number(unsigned int hash)
{
hash ^= hash << 3;
hash += hash >> 5;
hash ^= hash << 4;
hash += hash >> 17;
hash ^= hash << 25;
hash += hash >> 6;
return hash;
}
#endif
#ifdef STB_PERFECT_HASH
//////////////////////////////////////////////////////////////////////////////
//
// Perfect hashing for ints/pointers
//
// This is mainly useful for making faster pointer-indexed tables
// that don't change frequently. E.g. for stb_ischar().
//
typedef struct
{
stb_uint32 addend;
stb_uint multiplicand;
stb_uint b_mask;
stb_uint8 small_bmap[16];
stb_uint16 *large_bmap;
stb_uint table_mask;
stb_uint32 *table;
} stb_perfect;
STB_EXTERN int stb_perfect_create(stb_perfect *,unsigned int*,int n);
STB_EXTERN void stb_perfect_destroy(stb_perfect *);
STB_EXTERN int stb_perfect_hash(stb_perfect *, unsigned int x);
extern int stb_perfect_hash_max_failures;
#ifdef STB_DEFINE
int stb_perfect_hash_max_failures;
int stb_perfect_hash(stb_perfect *p, unsigned int x)
{
stb_uint m = x * p->multiplicand;
stb_uint y = x >> 16;
stb_uint bv = (m >> 24) + y;
stb_uint av = (m + y) >> 12;
if (p->table == NULL) return -1; // uninitialized table fails
bv &= p->b_mask;
av &= p->table_mask;
if (p->large_bmap)
av ^= p->large_bmap[bv];
else
av ^= p->small_bmap[bv];
return p->table[av] == x ? av : -1;
}
static void stb__perfect_prehash(stb_perfect *p, stb_uint x, stb_uint16 *a, stb_uint16 *b)
{
stb_uint m = x * p->multiplicand;
stb_uint y = x >> 16;
stb_uint bv = (m >> 24) + y;
stb_uint av = (m + y) >> 12;
bv &= p->b_mask;
av &= p->table_mask;
*b = bv;
*a = av;
}
static unsigned long stb__perfect_rand(void)
{
static unsigned long stb__rand;
stb__rand = stb__rand * 2147001325 + 715136305;
return 0x31415926 ^ ((stb__rand >> 16) + (stb__rand << 16));
}
typedef struct {
unsigned short count;
unsigned short b;
unsigned short map;
unsigned short *entries;
} stb__slot;
static int stb__slot_compare(const void *p, const void *q)
{
stb__slot *a = (stb__slot *) p;
stb__slot *b = (stb__slot *) q;
return a->count > b->count ? -1 : a->count < b->count; // sort large to small
}
int stb_perfect_create(stb_perfect *p, unsigned int *v, int n)
{
unsigned int buffer1[64], buffer2[64], buffer3[64], buffer4[64], buffer5[32];
unsigned short *as = (unsigned short *) stb_temp(buffer1, sizeof(*v)*n);
unsigned short *bs = (unsigned short *) stb_temp(buffer2, sizeof(*v)*n);
unsigned short *entries = (unsigned short *) stb_temp(buffer4, sizeof(*entries) * n);
int size = 1 << stb_log2_ceil(n), bsize=8;
int failure = 0,i,j,k;
assert(n <= 32768);
p->large_bmap = NULL;
for(;;) {
stb__slot *bcount = (stb__slot *) stb_temp(buffer3, sizeof(*bcount) * bsize);
unsigned short *bloc = (unsigned short *) stb_temp(buffer5, sizeof(*bloc) * bsize);
unsigned short *e;
int bad=0;
p->addend = stb__perfect_rand();
p->multiplicand = stb__perfect_rand() | 1;
p->table_mask = size-1;
p->b_mask = bsize-1;
p->table = (stb_uint32 *) malloc(size * sizeof(*p->table));
for (i=0; i < bsize; ++i) {
bcount[i].b = i;
bcount[i].count = 0;
bcount[i].map = 0;
}
for (i=0; i < n; ++i) {
stb__perfect_prehash(p, v[i], as+i, bs+i);
++bcount[bs[i]].count;
}
qsort(bcount, bsize, sizeof(*bcount), stb__slot_compare);
e = entries; // now set up their entries index
for (i=0; i < bsize; ++i) {
bcount[i].entries = e;
e += bcount[i].count;
bcount[i].count = 0;
bloc[bcount[i].b] = i;
}
// now fill them out
for (i=0; i < n; ++i) {
int b = bs[i];
int w = bloc[b];
bcount[w].entries[bcount[w].count++] = i;
}
stb_tempfree(buffer5,bloc);
// verify
for (i=0; i < bsize; ++i)
for (j=0; j < bcount[i].count; ++j)
assert(bs[bcount[i].entries[j]] == bcount[i].b);
memset(p->table, 0, size*sizeof(*p->table));
// check if any b has duplicate a
for (i=0; i < bsize; ++i) {
if (bcount[i].count > 1) {
for (j=0; j < bcount[i].count; ++j) {
if (p->table[as[bcount[i].entries[j]]])
bad = 1;
p->table[as[bcount[i].entries[j]]] = 1;
}
for (j=0; j < bcount[i].count; ++j) {
p->table[as[bcount[i].entries[j]]] = 0;
}
if (bad) break;
}
}
if (!bad) {
// go through the bs and populate the table, first fit
for (i=0; i < bsize; ++i) {
if (bcount[i].count) {
// go through the candidate table[b] values
for (j=0; j < size; ++j) {
// go through the a values and see if they fit
for (k=0; k < bcount[i].count; ++k) {
int a = as[bcount[i].entries[k]];
if (p->table[(a^j)&p->table_mask]) {
break; // fails
}
}
// if succeeded, accept
if (k == bcount[i].count) {
bcount[i].map = j;
for (k=0; k < bcount[i].count; ++k) {
int a = as[bcount[i].entries[k]];
p->table[(a^j)&p->table_mask] = 1;
}
break;
}
}
if (j == size)
break; // no match for i'th entry, so break out in failure
}
}
if (i == bsize) {
// success... fill out map
if (bsize <= 16 && size <= 256) {
p->large_bmap = NULL;
for (i=0; i < bsize; ++i)
p->small_bmap[bcount[i].b] = (stb_uint8) bcount[i].map;
} else {
p->large_bmap = (unsigned short *) malloc(sizeof(*p->large_bmap) * bsize);
for (i=0; i < bsize; ++i)
p->large_bmap[bcount[i].b] = bcount[i].map;
}
// initialize table to v[0], so empty slots will fail
for (i=0; i < size; ++i)
p->table[i] = v[0];
for (i=0; i < n; ++i)
if (p->large_bmap)
p->table[as[i] ^ p->large_bmap[bs[i]]] = v[i];
else
p->table[as[i] ^ p->small_bmap[bs[i]]] = v[i];
// and now validate that none of them collided
for (i=0; i < n; ++i)
assert(stb_perfect_hash(p, v[i]) >= 0);
stb_tempfree(buffer3, bcount);
break;
}
}
free(p->table);
p->table = NULL;
stb_tempfree(buffer3, bcount);
++failure;
if (failure >= 4 && bsize < size) bsize *= 2;
if (failure >= 8 && (failure & 3) == 0 && size < 4*n) {
size *= 2;
bsize *= 2;
}
if (failure == 6) {
// make sure the input data is unique, so we don't infinite loop
unsigned int *data = (unsigned int *) stb_temp(buffer3, n * sizeof(*data));
memcpy(data, v, sizeof(*data) * n);
qsort(data, n, sizeof(*data), stb_intcmp(0));
for (i=1; i < n; ++i) {
if (data[i] == data[i-1])
size = 0; // size is return value, so 0 it
}
stb_tempfree(buffer3, data);
if (!size) break;
}
}
if (failure > stb_perfect_hash_max_failures)
stb_perfect_hash_max_failures = failure;
stb_tempfree(buffer1, as);
stb_tempfree(buffer2, bs);
stb_tempfree(buffer4, entries);
return size;
}
void stb_perfect_destroy(stb_perfect *p)
{
if (p->large_bmap) free(p->large_bmap);
if (p->table ) free(p->table);
p->large_bmap = NULL;
p->table = NULL;
p->b_mask = 0;
p->table_mask = 0;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Perfect hash clients
STB_EXTERN int stb_ischar(char c, char *set);
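// Example (illustrative): test membership of a character in a set given as
// a string. The set's *pointer* identifies it internally, so pass the same
// string pointer for a given set each time:
//
//    static char *hexdigits = "0123456789abcdef";
//    if (stb_ischar(ch, hexdigits)) { /* ch is a hex digit */ }
//    stb_ischar(0, NULL);   // frees all internal tables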
#ifdef STB_DEFINE
int stb_ischar(char c, char *set)
{
static unsigned char bit[8] = { 1,2,4,8,16,32,64,128 };
static stb_perfect p;
static unsigned char (*tables)[256];
static char ** sets = NULL;
int z = stb_perfect_hash(&p, (int)(size_t) set);
if (z < 0) {
int i,k,n,j,f;
// special code that means free all existing data
if (set == NULL) {
stb_arr_free(sets);
free(tables);
tables = NULL;
stb_perfect_destroy(&p);
return 0;
}
stb_arr_push(sets, set);
stb_perfect_destroy(&p);
n = stb_perfect_create(&p, (unsigned int *) (char **) sets, stb_arr_len(sets));
assert(n != 0);
k = (n+7) >> 3;
tables = (unsigned char (*)[256]) realloc(tables, sizeof(*tables) * k);
memset(tables, 0, sizeof(*tables) * k);
for (i=0; i < stb_arr_len(sets); ++i) {
k = stb_perfect_hash(&p, (int)(size_t) sets[i]);
assert(k >= 0);
n = k >> 3;
f = bit[k&7];
for (j=0; !j || sets[i][j]; ++j) {
tables[n][(unsigned char) sets[i][j]] |= f;
}
}
z = stb_perfect_hash(&p, (int)(size_t) set);
}
return tables[z >> 3][(unsigned char) c] & bit[z & 7];
}
#endif
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Instantiated data structures
//
// This is an attempt to implement a templated data structure.
//
// Hash table: call stb_define_hash(TYPE,N,KEY,EMPTY,DEL,HASH,VALUE)
// TYPE -- will define a structure type containing the hash table
// N -- the name, will prefix functions named:
// N create
// N destroy
// N get
// N set, N add, N update,
// N remove
// KEY -- the type of the key. 'x == y' must be valid
// EMPTY,DEL -- key values never used by the app, used as markers in the hashtable
// HASH -- a piece of code ending with 'return' that hashes key 'k'
// VALUE -- the type of the value. 'x = y' must be valid
//
// Note that stb_define_hash_base can be used to define more sophisticated
// hash tables, e.g. those that make copies of the key or use special
// comparisons (e.g. strcmp).
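//
// Example (illustrative sketch; 'intmap' and the key/value choices are
// hypothetical): an int -> float table, using two keys the app never
// stores as the EMPTY and DEL markers:
//
//    stb_declare_hash(STB_EXTERN, intmap, intmap_, int, float)
//    stb_define_hash(intmap, intmap_, int, 0x7ffffffe, 0x7fffffff,
//                    return stb_rehash(k);, float)
//
//    intmap *m = intmap_create();
//    intmap_set(m, 42, 3.14f);
//    { float v; if (intmap_get_flag(m, 42, &v)) { /* found */ } }
//    intmap_destroy(m);
//
// (intmap_get is only generated by stb_define_hash_vnull, which has a
// null value to return for missing keys.)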
#define STB_(prefix,name) stb__##prefix##name
#define STB__(prefix,name) prefix##name
#define STB__use(x) x
#define STB__skip(x)
#define stb_declare_hash(PREFIX,TYPE,N,KEY,VALUE) \
typedef struct stb__st_##TYPE TYPE;\
PREFIX int STB__(N, init)(TYPE *h, int count);\
PREFIX int STB__(N, memory_usage)(TYPE *h);\
PREFIX TYPE * STB__(N, create)(void);\
PREFIX TYPE * STB__(N, copy)(TYPE *h);\
PREFIX void STB__(N, destroy)(TYPE *h);\
PREFIX int STB__(N,get_flag)(TYPE *a, KEY k, VALUE *v);\
PREFIX VALUE STB__(N,get)(TYPE *a, KEY k);\
PREFIX int STB__(N, set)(TYPE *a, KEY k, VALUE v);\
PREFIX int STB__(N, add)(TYPE *a, KEY k, VALUE v);\
PREFIX int STB__(N, update)(TYPE*a,KEY k,VALUE v);\
PREFIX int STB__(N, remove)(TYPE *a, KEY k, VALUE *v);
#define STB_nocopy(x) (x)
#define STB_nodelete(x) 0
#define STB_nofields
#define STB_nonullvalue(x)
#define STB_nullvalue(x) x
#define STB_safecompare(x) x
#define STB_nosafe(x)
#define STB_noprefix
#ifdef __GNUC__
#define STB__nogcc(x)
#else
#define STB__nogcc(x) x
#endif
#define stb_define_hash_base(PREFIX,TYPE,FIELDS,N,NC,LOAD_FACTOR, \
KEY,EMPTY,DEL,COPY,DISPOSE,SAFE, \
VCOMPARE,CCOMPARE,HASH, \
VALUE,HASVNULL,VNULL) \
\
typedef struct \
{ \
KEY k; \
VALUE v; \
} STB_(N,_hashpair); \
\
STB__nogcc( typedef struct stb__st_##TYPE TYPE; ) \
struct stb__st_##TYPE { \
FIELDS \
STB_(N,_hashpair) *table; \
unsigned int mask; \
int count, limit; \
int deleted; \
\
int delete_threshhold; \
int grow_threshhold; \
int shrink_threshhold; \
unsigned char alloced, has_empty, has_del; \
VALUE ev; VALUE dv; \
}; \
\
static unsigned int STB_(N, hash)(KEY k) \
{ \
HASH \
} \
\
PREFIX int STB__(N, init)(TYPE *h, int count) \
{ \
int i; \
if (count < 4) count = 4; \
h->limit = count; \
h->count = 0; \
h->mask = count-1; \
h->deleted = 0; \
h->grow_threshhold = (int) (count * LOAD_FACTOR); \
h->has_empty = h->has_del = 0; \
h->alloced = 0; \
if (count <= 64) \
h->shrink_threshhold = 0; \
else \
h->shrink_threshhold = (int) (count * (LOAD_FACTOR/2.25)); \
h->delete_threshhold = (int) (count * (1-LOAD_FACTOR)/2); \
h->table = (STB_(N,_hashpair)*) malloc(sizeof(h->table[0]) * count); \
if (h->table == NULL) return 0; \
/* ideally this gets turned into a memset32 automatically */ \
for (i=0; i < count; ++i) \
h->table[i].k = EMPTY; \
return 1; \
} \
\
PREFIX int STB__(N, memory_usage)(TYPE *h) \
{ \
return sizeof(*h) + h->limit * sizeof(h->table[0]); \
} \
\
PREFIX TYPE * STB__(N, create)(void) \
{ \
TYPE *h = (TYPE *) malloc(sizeof(*h)); \
if (h) { \
if (STB__(N, init)(h, 16)) \
h->alloced = 1; \
else { free(h); h=NULL; } \
} \
return h; \
} \
\
PREFIX void STB__(N, destroy)(TYPE *a) \
{ \
int i; \
for (i=0; i < a->limit; ++i) \
if (!CCOMPARE(a->table[i].k,EMPTY) && !CCOMPARE(a->table[i].k, DEL)) \
DISPOSE(a->table[i].k); \
free(a->table); \
if (a->alloced) \
free(a); \
} \
\
static void STB_(N, rehash)(TYPE *a, int count); \
\
PREFIX int STB__(N,get_flag)(TYPE *a, KEY k, VALUE *v) \
{ \
unsigned int h = STB_(N, hash)(k); \
unsigned int n = h & a->mask, s; \
if (CCOMPARE(k,EMPTY)){ if (a->has_empty) *v = a->ev; return a->has_empty;}\
if (CCOMPARE(k,DEL)) { if (a->has_del ) *v = a->dv; return a->has_del; }\
if (CCOMPARE(a->table[n].k,EMPTY)) return 0; \
SAFE(if (!CCOMPARE(a->table[n].k,DEL))) \
if (VCOMPARE(a->table[n].k,k)) { *v = a->table[n].v; return 1; } \
s = stb_rehash(h) | 1; \
for(;;) { \
n = (n + s) & a->mask; \
if (CCOMPARE(a->table[n].k,EMPTY)) return 0; \
SAFE(if (CCOMPARE(a->table[n].k,DEL)) continue;) \
if (VCOMPARE(a->table[n].k,k)) \
{ *v = a->table[n].v; return 1; } \
} \
} \
\
HASVNULL( \
PREFIX VALUE STB__(N,get)(TYPE *a, KEY k) \
{ \
VALUE v; \
if (STB__(N,get_flag)(a,k,&v)) return v; \
else return VNULL; \
} \
) \
\
PREFIX int STB__(N,getkey)(TYPE *a, KEY k, KEY *kout) \
{ \
unsigned int h = STB_(N, hash)(k); \
unsigned int n = h & a->mask, s; \
if (CCOMPARE(k,EMPTY)||CCOMPARE(k,DEL)) return 0; \
if (CCOMPARE(a->table[n].k,EMPTY)) return 0; \
SAFE(if (!CCOMPARE(a->table[n].k,DEL))) \
if (VCOMPARE(a->table[n].k,k)) { *kout = a->table[n].k; return 1; } \
s = stb_rehash(h) | 1; \
for(;;) { \
n = (n + s) & a->mask; \
if (CCOMPARE(a->table[n].k,EMPTY)) return 0; \
SAFE(if (CCOMPARE(a->table[n].k,DEL)) continue;) \
if (VCOMPARE(a->table[n].k,k)) \
{ *kout = a->table[n].k; return 1; } \
} \
} \
\
static int STB_(N,addset)(TYPE *a, KEY k, VALUE v, \
int allow_new, int allow_old, int copy) \
{ \
unsigned int h = STB_(N, hash)(k); \
unsigned int n = h & a->mask; \
int b = -1; \
if (CCOMPARE(k,EMPTY)) { \
if (a->has_empty ? allow_old : allow_new) { \
n=a->has_empty; a->ev = v; a->has_empty = 1; return !n; \
} else return 0; \
} \
if (CCOMPARE(k,DEL)) { \
if (a->has_del ? allow_old : allow_new) { \
n=a->has_del; a->dv = v; a->has_del = 1; return !n; \
} else return 0; \
} \
if (!CCOMPARE(a->table[n].k, EMPTY)) { \
unsigned int s; \
if (CCOMPARE(a->table[n].k, DEL)) \
b = n; \
else if (VCOMPARE(a->table[n].k,k)) { \
if (allow_old) \
a->table[n].v = v; \
return !allow_new; \
} \
s = stb_rehash(h) | 1; \
for(;;) { \
n = (n + s) & a->mask; \
if (CCOMPARE(a->table[n].k, EMPTY)) break; \
if (CCOMPARE(a->table[n].k, DEL)) { \
if (b < 0) b = n; \
} else if (VCOMPARE(a->table[n].k,k)) { \
if (allow_old) \
a->table[n].v = v; \
return !allow_new; \
} \
} \
} \
if (!allow_new) return 0; \
if (b < 0) b = n; else --a->deleted; \
a->table[b].k = copy ? COPY(k) : k; \
a->table[b].v = v; \
++a->count; \
if (a->count > a->grow_threshhold) \
STB_(N,rehash)(a, a->limit*2); \
return 1; \
} \
\
PREFIX int STB__(N, set)(TYPE *a, KEY k, VALUE v){return STB_(N,addset)(a,k,v,1,1,1);}\
PREFIX int STB__(N, add)(TYPE *a, KEY k, VALUE v){return STB_(N,addset)(a,k,v,1,0,1);}\
PREFIX int STB__(N, update)(TYPE*a,KEY k,VALUE v){return STB_(N,addset)(a,k,v,0,1,1);}\
\
PREFIX int STB__(N, remove)(TYPE *a, KEY k, VALUE *v) \
{ \
unsigned int h = STB_(N, hash)(k); \
unsigned int n = h & a->mask, s; \
if (CCOMPARE(k,EMPTY)) { if (a->has_empty) { if(v)*v = a->ev; a->has_empty=0; return 1; } return 0; } \
if (CCOMPARE(k,DEL)) { if (a->has_del ) { if(v)*v = a->dv; a->has_del =0; return 1; } return 0; } \
if (CCOMPARE(a->table[n].k,EMPTY)) return 0; \
if (SAFE(CCOMPARE(a->table[n].k,DEL) || ) !VCOMPARE(a->table[n].k,k)) { \
s = stb_rehash(h) | 1; \
for(;;) { \
n = (n + s) & a->mask; \
if (CCOMPARE(a->table[n].k,EMPTY)) return 0; \
SAFE(if (CCOMPARE(a->table[n].k, DEL)) continue;) \
if (VCOMPARE(a->table[n].k,k)) break; \
} \
} \
DISPOSE(a->table[n].k); \
a->table[n].k = DEL; \
--a->count; \
++a->deleted; \
if (v != NULL) \
*v = a->table[n].v; \
if (a->count < a->shrink_threshhold) \
STB_(N, rehash)(a, a->limit >> 1); \
else if (a->deleted > a->delete_threshhold) \
STB_(N, rehash)(a, a->limit); \
return 1; \
} \
\
PREFIX TYPE * STB__(NC, copy)(TYPE *a) \
{ \
int i; \
TYPE *h = (TYPE *) malloc(sizeof(*h)); \
if (!h) return NULL; \
if (!STB__(N, init)(h, a->limit)) { free(h); return NULL; } \
h->count = a->count; \
h->deleted = a->deleted; \
h->alloced = 1; \
h->ev = a->ev; h->dv = a->dv; \
h->has_empty = a->has_empty; h->has_del = a->has_del; \
memcpy(h->table, a->table, h->limit * sizeof(h->table[0])); \
for (i=0; i < a->limit; ++i) \
if (!CCOMPARE(h->table[i].k,EMPTY) && !CCOMPARE(h->table[i].k,DEL)) \
h->table[i].k = COPY(h->table[i].k); \
return h; \
} \
\
static void STB_(N, rehash)(TYPE *a, int count) \
{ \
int i; \
TYPE b; \
STB__(N, init)(&b, count); \
for (i=0; i < a->limit; ++i) \
if (!CCOMPARE(a->table[i].k,EMPTY) && !CCOMPARE(a->table[i].k,DEL)) \
STB_(N,addset)(&b, a->table[i].k, a->table[i].v,1,1,0); \
free(a->table); \
a->table = b.table; \
a->mask = b.mask; \
a->count = b.count; \
a->limit = b.limit; \
a->deleted = b.deleted; \
a->delete_threshhold = b.delete_threshhold; \
a->grow_threshhold = b.grow_threshhold; \
a->shrink_threshhold = b.shrink_threshhold; \
}
#define STB_equal(a,b) ((a) == (b))
#define stb_define_hash(TYPE,N,KEY,EMPTY,DEL,HASH,VALUE) \
stb_define_hash_base(STB_noprefix, TYPE,STB_nofields,N,N,0.85f, \
KEY,EMPTY,DEL,STB_nocopy,STB_nodelete,STB_nosafe, \
STB_equal,STB_equal,HASH, \
VALUE,STB_nonullvalue,0)
#define stb_define_hash_vnull(TYPE,N,KEY,EMPTY,DEL,HASH,VALUE,VNULL) \
stb_define_hash_base(STB_noprefix, TYPE,STB_nofields,N,N,0.85f, \
KEY,EMPTY,DEL,STB_nocopy,STB_nodelete,STB_nosafe, \
STB_equal,STB_equal,HASH, \
VALUE,STB_nullvalue,VNULL)
//////////////////////////////////////////////////////////////////////////////
//
// stb_ptrmap
//
// An stb_ptrmap data structure is an O(1) hash table between pointers. One
// application is to let you store "extra" data associated with pointers,
// which is why it was originally called stb_extra.
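//
// Example (illustrative; 'obj' and 'extra' are hypothetical):
//
//    stb_ptrmap *m = stb_ptrmap_create();
//    stb_ptrmap_add(m, obj, extra);          // attach data to a pointer
//    void *e = stb_ptrmap_get(m, obj);       // NULL if not present
//    stb_ptrmap_destroy(m);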
stb_declare_hash(STB_EXTERN, stb_ptrmap, stb_ptrmap_, void *, void *)
stb_declare_hash(STB_EXTERN, stb_idict, stb_idict_, stb_int32, stb_int32)
stb_declare_hash(STB_EXTERN, stb_uidict, stb_uidict_, stb_uint32, stb_uint32)
STB_EXTERN void stb_ptrmap_delete(stb_ptrmap *e, void (*free_func)(void *));
STB_EXTERN stb_ptrmap *stb_ptrmap_new(void);
STB_EXTERN stb_idict * stb_idict_new_size(int size);
STB_EXTERN void stb_idict_remove_all(stb_idict *e);
STB_EXTERN void stb_uidict_reset(stb_uidict *e);
#ifdef STB_DEFINE
#define STB_EMPTY ((void *) 2)
#define STB_EDEL ((void *) 6)
stb_define_hash_base(STB_noprefix,stb_ptrmap, STB_nofields, stb_ptrmap_,stb_ptrmap_,0.85f,
void *,STB_EMPTY,STB_EDEL,STB_nocopy,STB_nodelete,STB_nosafe,
STB_equal,STB_equal,return stb_hashptr(k);,
void *,STB_nullvalue,NULL)
stb_ptrmap *stb_ptrmap_new(void)
{
return stb_ptrmap_create();
}
void stb_ptrmap_delete(stb_ptrmap *e, void (*free_func)(void *))
{
int i;
if (free_func)
for (i=0; i < e->limit; ++i)
if (e->table[i].k != STB_EMPTY && e->table[i].k != STB_EDEL) {
if (free_func == free)
free(e->table[i].v); // allow STB_MALLOC_WRAPPER to operate
else
free_func(e->table[i].v);
}
stb_ptrmap_destroy(e);
}
// extra fields needed for stua_dict
#define STB_IEMPTY ((int) 1)
#define STB_IDEL ((int) 3)
stb_define_hash_base(STB_noprefix, stb_idict, short type; short gc; STB_nofields, stb_idict_,stb_idict_,0.95f,
stb_int32,STB_IEMPTY,STB_IDEL,STB_nocopy,STB_nodelete,STB_nosafe,
STB_equal,STB_equal,
return stb_rehash_improved(k);,stb_int32,STB_nonullvalue,0)
stb_idict * stb_idict_new_size(int size)
{
stb_idict *e = (stb_idict *) malloc(sizeof(*e));
if (e) {
if (!stb_is_pow2(size))
size = 1 << stb_log2_ceil(size);
stb_idict_init(e, size);
e->alloced = 1;
}
return e;
}
void stb_idict_remove_all(stb_idict *e)
{
int n;
for (n=0; n < e->limit; ++n)
e->table[n].k = STB_IEMPTY;
e->has_empty = e->has_del = 0;
e->count = 0;
e->deleted = 0;
}
stb_define_hash_base(STB_noprefix, stb_uidict, STB_nofields, stb_uidict_,stb_uidict_,0.85f,
stb_uint32,0xffffffff,0xfffffffe,STB_nocopy,STB_nodelete,STB_nosafe,
STB_equal,STB_equal,
return stb_rehash_improved(k);,stb_uint32,STB_nonullvalue,0)
void stb_uidict_reset(stb_uidict *e)
{
int n;
for (n=0; n < e->limit; ++n)
e->table[n].k = 0xffffffff;
e->has_empty = e->has_del = 0;
e->count = 0;
e->deleted = 0;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// stb_sparse_ptr_matrix
//
// An stb_spmatrix is an O(1) hash table storing an arbitrary
// block of data for a given pair of pointers.
//
// If create=0, stb_sparse_ptr_matrix_get returns NULL when the pair has
// no entry; with create=1 it allocates a zero-filled value block for it.
typedef struct stb__st_stb_spmatrix stb_spmatrix;
STB_EXTERN stb_spmatrix * stb_sparse_ptr_matrix_new(int val_size);
STB_EXTERN void stb_sparse_ptr_matrix_free(stb_spmatrix *z);
STB_EXTERN void * stb_sparse_ptr_matrix_get(stb_spmatrix *z, void *a, void *b, int create);
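// Example (illustrative sketch): cache an int per (a,b) pointer pair:
//
//    stb_spmatrix *m = stb_sparse_ptr_matrix_new(sizeof(int));
//    int *cell = (int *) stb_sparse_ptr_matrix_get(m, p, q, 1); // zeroed on create
//    *cell += 1;
//    stb_sparse_ptr_matrix_free(m);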
#ifdef STB_DEFINE
typedef struct
{
void *a;
void *b;
} stb__ptrpair;
static stb__ptrpair stb__ptrpair_empty = { (void *) 1, (void *) 1 };
static stb__ptrpair stb__ptrpair_del = { (void *) 2, (void *) 2 };
#define STB__equal_ptrpair(x,y) ((x).a == (y).a && (x).b == (y).b)
stb_define_hash_base(STB_noprefix, stb_spmatrix, int val_size; void *arena;, stb__spmatrix_,stb__spmatrix_, 0.85f,
stb__ptrpair, stb__ptrpair_empty, stb__ptrpair_del,
STB_nocopy, STB_nodelete, STB_nosafe,
STB__equal_ptrpair, STB__equal_ptrpair, return stb_rehash(stb_hashptr(k.a))+stb_hashptr(k.b);,
void *, STB_nullvalue, 0)
stb_spmatrix *stb_sparse_ptr_matrix_new(int val_size)
{
stb_spmatrix *m = stb__spmatrix_create();
if (m) m->val_size = val_size;
if (m) m->arena = stb_malloc_global(1);
return m;
}
void stb_sparse_ptr_matrix_free(stb_spmatrix *z)
{
if (z->arena) stb_free(z->arena);
stb__spmatrix_destroy(z);
}
void *stb_sparse_ptr_matrix_get(stb_spmatrix *z, void *a, void *b, int create)
{
stb__ptrpair t = { a,b };
void *data = stb__spmatrix_get(z, t);
if (!data && create) {
data = stb_malloc_raw(z->arena, z->val_size);
if (!data) return NULL;
memset(data, 0, z->val_size);
stb__spmatrix_add(z, t, data);
}
return data;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// SDICT: Hash Table for Strings (symbol table)
//
// if "use_arena=1", then strings will be copied
// into blocks and never freed until the sdict is freed;
// otherwise they're malloc()ed and free()d on the fly.
// (specify use_arena=1 if you never stb_sdict_remove)
stb_declare_hash(STB_EXTERN, stb_sdict, stb_sdict_, char *, void *)
STB_EXTERN stb_sdict * stb_sdict_new(int use_arena);
STB_EXTERN stb_sdict * stb_sdict_copy(stb_sdict*);
STB_EXTERN void stb_sdict_delete(stb_sdict *);
STB_EXTERN void * stb_sdict_change(stb_sdict *, char *str, void *p);
STB_EXTERN int stb_sdict_count(stb_sdict *d);
STB_EXTERN int stb_sdict_internal_limit(stb_sdict *d);
STB_EXTERN char * stb_sdict_internal_key(stb_sdict *d, int n);
STB_EXTERN void * stb_sdict_internal_value(stb_sdict *d, int n);
#define stb_sdict_for(d,i,q,z) \
for(i=0; i < stb_sdict_internal_limit(d) ? (q=stb_sdict_internal_key(d,i),z=stb_sdict_internal_value(d,i),1) : 0; ++i) \
if (q==NULL||q==(void *) 1);else // reversed makes macro friendly
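// Example (illustrative; key/value names are hypothetical):
//
//    stb_sdict *d = stb_sdict_new(1);       // 1: copy keys into an arena
//    stb_sdict_set(d, "width", some_ptr);
//    void *v = stb_sdict_get(d, "width");   // NULL if absent
//    { int i; char *k; void *z;
//      stb_sdict_for(d, i, k, z) { /* visit each (k,z) pair */ } }
//    stb_sdict_delete(d);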
#ifdef STB_DEFINE
// if in same translation unit, for speed, don't call accessors
#undef stb_sdict_for
#define stb_sdict_for(d,i,q,z) \
for(i=0; i < (d)->limit ? (q=(d)->table[i].k,z=(d)->table[i].v,1) : 0; ++i) \
if (q==NULL||q==(void *) 1);else // reversed makes macro friendly
#define STB_DEL ((void *) 1)
#define STB_SDEL ((char *) 1)
#define stb_sdict__copy(x) \
stb_p_strcpy_s(a->arena ? stb_malloc_string(a->arena, strlen(x)+1) \
: (char *) malloc(strlen(x)+1), strlen(x)+1, x)
#define stb_sdict__dispose(x) if (!a->arena) free(x)
stb_define_hash_base(STB_noprefix, stb_sdict, void*arena;, stb_sdict_,stb_sdictinternal_, 0.85f,
char *, NULL, STB_SDEL, stb_sdict__copy, stb_sdict__dispose,
STB_safecompare, !strcmp, STB_equal, return stb_hash(k);,
void *, STB_nullvalue, NULL)
int stb_sdict_count(stb_sdict *a)
{
return a->count;
}
int stb_sdict_internal_limit(stb_sdict *a)
{
return a->limit;
}
char* stb_sdict_internal_key(stb_sdict *a, int n)
{
return a->table[n].k;
}
void* stb_sdict_internal_value(stb_sdict *a, int n)
{
return a->table[n].v;
}
stb_sdict * stb_sdict_new(int use_arena)
{
stb_sdict *d = stb_sdict_create();
if (d == NULL) return NULL;
d->arena = use_arena ? stb_malloc_global(1) : NULL;
return d;
}
stb_sdict* stb_sdict_copy(stb_sdict *old)
{
stb_sdict *n;
void *old_arena = old->arena;
void *new_arena = old_arena ? stb_malloc_global(1) : NULL;
old->arena = new_arena;
n = stb_sdictinternal_copy(old);
old->arena = old_arena;
if (n)
n->arena = new_arena;
else if (new_arena)
stb_free(new_arena);
return n;
}
void stb_sdict_delete(stb_sdict *d)
{
if (d->arena)
stb_free(d->arena);
stb_sdict_destroy(d);
}
void * stb_sdict_change(stb_sdict *d, char *str, void *p)
{
void *q = stb_sdict_get(d, str);
stb_sdict_set(d, str, p);
return q;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Instantiated data structures
//
// This is an attempt to implement a templated data structure.
// What you do is define a struct foo, and then include several
// pointer fields to struct foo in your struct. Then you call
// the instantiator, which creates the functions that implement
// the data structure. This requires massive undebuggable #defines,
// so we limit the cases where we do this.
//
// AA tree is an encoding of a 2-3 tree whereas RB trees encode a 2-3-4 tree;
// much simpler code due to fewer cases.
#define stb__bst_parent(x) x
#define stb__bst_noparent(x)
#define stb_bst_fields(N) \
*STB_(N,left), *STB_(N,right); \
unsigned char STB_(N,level)
#define stb_bst_fields_parent(N) \
*STB_(N,left), *STB_(N,right), *STB_(N,parent); \
unsigned char STB_(N,level)
#define STB__level(N,x) ((x) ? (x)->STB_(N,level) : 0)
#define stb_bst_base(TYPE, N, TREE, M, compare, PAR) \
\
static int STB_(N,_compare)(TYPE *p, TYPE *q) \
{ \
compare \
} \
\
static void STB_(N,setleft)(TYPE *q, TYPE *v) \
{ \
q->STB_(N,left) = v; \
PAR(if (v) v->STB_(N,parent) = q;) \
} \
\
static void STB_(N,setright)(TYPE *q, TYPE *v) \
{ \
q->STB_(N,right) = v; \
PAR(if (v) v->STB_(N,parent) = q;) \
} \
\
static TYPE *STB_(N,skew)(TYPE *q) \
{ \
if (q == NULL) return q; \
if (q->STB_(N,left) \
&& q->STB_(N,left)->STB_(N,level) == q->STB_(N,level)) { \
TYPE *p = q->STB_(N,left); \
STB_(N,setleft)(q, p->STB_(N,right)); \
STB_(N,setright)(p, q); \
return p; \
} \
return q; \
} \
\
static TYPE *STB_(N,split)(TYPE *p) \
{ \
TYPE *q = p->STB_(N,right); \
if (q && q->STB_(N,right) \
&& q->STB_(N,right)->STB_(N,level) == p->STB_(N,level)) { \
STB_(N,setright)(p, q->STB_(N,left)); \
STB_(N,setleft)(q,p); \
++q->STB_(N,level); \
return q; \
} \
return p; \
} \
\
TYPE *STB__(N,insert)(TYPE *tree, TYPE *item) \
{ \
int c; \
if (tree == NULL) { \
item->STB_(N,left) = NULL; \
item->STB_(N,right) = NULL; \
item->STB_(N,level) = 1; \
PAR(item->STB_(N,parent) = NULL;) \
return item; \
} \
c = STB_(N,_compare)(item,tree); \
if (c == 0) { \
if (item != tree) { \
STB_(N,setleft)(item, tree->STB_(N,left)); \
STB_(N,setright)(item, tree->STB_(N,right)); \
item->STB_(N,level) = tree->STB_(N,level); \
PAR(item->STB_(N,parent) = NULL;) \
} \
return item; \
} \
if (c < 0) \
STB_(N,setleft )(tree, STB__(N,insert)(tree->STB_(N,left), item)); \
else \
STB_(N,setright)(tree, STB__(N,insert)(tree->STB_(N,right), item)); \
tree = STB_(N,skew)(tree); \
tree = STB_(N,split)(tree); \
PAR(tree->STB_(N,parent) = NULL;) \
return tree; \
} \
\
TYPE *STB__(N,remove)(TYPE *tree, TYPE *item) \
{ \
static TYPE *delnode, *leaf, *restore; /* statics: remove is not re-entrant */ \
if (tree == NULL) return NULL; \
leaf = tree; \
if (STB_(N,_compare)(item, tree) < 0) { \
STB_(N,setleft)(tree, STB__(N,remove)(tree->STB_(N,left), item)); \
} else { \
TYPE *r; \
delnode = tree; \
r = STB__(N,remove)(tree->STB_(N,right), item); \
/* maybe move 'leaf' up to this location */ \
if (restore == tree) { tree = leaf; leaf = restore = NULL; } \
STB_(N,setright)(tree,r); \
assert(tree->STB_(N,right) != tree); \
} \
if (tree == leaf) { \
if (delnode == item) { \
tree = tree->STB_(N,right); \
assert(leaf->STB_(N,left) == NULL); \
/* move leaf (the right sibling) up to delnode */ \
STB_(N,setleft )(leaf, item->STB_(N,left )); \
STB_(N,setright)(leaf, item->STB_(N,right)); \
leaf->STB_(N,level) = item->STB_(N,level); \
if (leaf != item) \
restore = delnode; \
} \
delnode = NULL; \
} else { \
if (STB__level(N,tree->STB_(N,left) ) < tree->STB_(N,level)-1 || \
STB__level(N,tree->STB_(N,right)) < tree->STB_(N,level)-1) { \
--tree->STB_(N,level); \
if (STB__level(N,tree->STB_(N,right)) > tree->STB_(N,level)) \
tree->STB_(N,right)->STB_(N,level) = tree->STB_(N,level); \
tree = STB_(N,skew)(tree); \
STB_(N,setright)(tree, STB_(N,skew)(tree->STB_(N,right))); \
if (tree->STB_(N,right)) \
STB_(N,setright)(tree->STB_(N,right), \
STB_(N,skew)(tree->STB_(N,right)->STB_(N,right))); \
tree = STB_(N,split)(tree); \
if (tree->STB_(N,right)) \
STB_(N,setright)(tree, STB_(N,split)(tree->STB_(N,right))); \
} \
} \
PAR(if (tree) tree->STB_(N,parent) = NULL;) \
return tree; \
} \
\
TYPE *STB__(N,last)(TYPE *tree) \
{ \
if (tree) \
while (tree->STB_(N,right)) tree = tree->STB_(N,right); \
return tree; \
} \
\
TYPE *STB__(N,first)(TYPE *tree) \
{ \
if (tree) \
while (tree->STB_(N,left)) tree = tree->STB_(N,left); \
return tree; \
} \
\
TYPE *STB__(N,next)(TYPE *tree, TYPE *item) \
{ \
TYPE *next = NULL; \
if (item->STB_(N,right)) \
return STB__(N,first)(item->STB_(N,right)); \
PAR( \
while(item->STB_(N,parent)) { \
TYPE *up = item->STB_(N,parent); \
if (up->STB_(N,left) == item) return up; \
item = up; \
} \
return NULL; \
) \
while (tree != item) { \
if (STB_(N,_compare)(item, tree) < 0) { \
next = tree; \
tree = tree->STB_(N,left); \
} else { \
tree = tree->STB_(N,right); \
} \
} \
return next; \
} \
\
TYPE *STB__(N,prev)(TYPE *tree, TYPE *item) \
{ \
TYPE *next = NULL; \
if (item->STB_(N,left)) \
return STB__(N,last)(item->STB_(N,left)); \
PAR( \
while(item->STB_(N,parent)) { \
TYPE *up = item->STB_(N,parent); \
if (up->STB_(N,right) == item) return up; \
item = up; \
} \
return NULL; \
) \
while (tree != item) { \
if (STB_(N,_compare)(item, tree) < 0) { \
tree = tree->STB_(N,left); \
} else { \
next = tree; \
tree = tree->STB_(N,right); \
} \
} \
return next; \
} \
\
STB__DEBUG( \
void STB__(N,_validate)(TYPE *tree, int root) \
{ \
if (tree == NULL) return; \
PAR(if(root) assert(tree->STB_(N,parent) == NULL);) \
assert(STB__level(N,tree->STB_(N,left) ) == tree->STB_(N,level)-1); \
assert(STB__level(N,tree->STB_(N,right)) <= tree->STB_(N,level)); \
assert(STB__level(N,tree->STB_(N,right)) >= tree->STB_(N,level)-1); \
if (tree->STB_(N,right)) { \
assert(STB__level(N,tree->STB_(N,right)->STB_(N,right)) \
!= tree->STB_(N,level)); \
PAR(assert(tree->STB_(N,right)->STB_(N,parent) == tree);) \
} \
PAR(if(tree->STB_(N,left)) assert(tree->STB_(N,left)->STB_(N,parent) == tree);) \
STB__(N,_validate)(tree->STB_(N,left) ,0); \
STB__(N,_validate)(tree->STB_(N,right),0); \
} \
) \
\
typedef struct \
{ \
TYPE *root; \
} TREE; \
\
void STB__(M,Insert)(TREE *tree, TYPE *item) \
{ tree->root = STB__(N,insert)(tree->root, item); } \
void STB__(M,Remove)(TREE *tree, TYPE *item) \
{ tree->root = STB__(N,remove)(tree->root, item); } \
TYPE *STB__(M,Next)(TREE *tree, TYPE *item) \
{ return STB__(N,next)(tree->root, item); } \
TYPE *STB__(M,Prev)(TREE *tree, TYPE *item) \
{ return STB__(N,prev)(tree->root, item); } \
TYPE *STB__(M,First)(TREE *tree) { return STB__(N,first)(tree->root); } \
TYPE *STB__(M,Last) (TREE *tree) { return STB__(N,last) (tree->root); } \
void STB__(M,Init)(TREE *tree) { tree->root = NULL; }
#define stb_bst_find(N,tree,fcompare) \
{ \
int c; \
while (tree != NULL) { \
fcompare \
if (c == 0) return tree; \
if (c < 0) tree = tree->STB_(N,left); \
else tree = tree->STB_(N,right); \
} \
return NULL; \
}
#define stb_bst_raw(TYPE,N,TREE,M,vfield,VTYPE,compare,PAR) \
stb_bst_base(TYPE,N,TREE,M, \
VTYPE a = p->vfield; VTYPE b = q->vfield; return (compare);, PAR ) \
\
TYPE *STB__(N,find)(TYPE *tree, VTYPE a) \
stb_bst_find(N,tree,VTYPE b = tree->vfield; c = (compare);) \
TYPE *STB__(M,Find)(TREE *tree, VTYPE a) \
{ return STB__(N,find)(tree->root, a); }
#define stb_bst(TYPE,N,TREE,M,vfield,VTYPE,compare) \
stb_bst_raw(TYPE,N,TREE,M,vfield,VTYPE,compare,stb__bst_noparent)
#define stb_bst_parent(TYPE,N,TREE,M,vfield,VTYPE,compare) \
stb_bst_raw(TYPE,N,TREE,M,vfield,VTYPE,compare,stb__bst_parent)
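// Example (illustrative sketch; 'thing', 'tt_', 'ttree_' are hypothetical
// names): embed the links in your struct, then instantiate the tree:
//
//    typedef struct thing {
//       int key;
//       struct thing stb_bst_fields(tt_);
//    } thing;
//    stb_bst(thing, tt_, thing_tree, ttree_, key, int, a < b ? -1 : (a > b))
//
//    thing_tree t;    ttree_Init(&t);
//    thing x = { 5 }; ttree_Insert(&t, &x);
//    thing *p = ttree_Find(&t, 5);                          // NULL if absent
//    for (p = ttree_First(&t); p; p = ttree_Next(&t, p)) { /* in order */ }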
//////////////////////////////////////////////////////////////////////////////
//
// Pointer Nulling
//
// This lets you automatically NULL dangling pointers to "registered"
// objects. Note that you have to make sure you call the appropriate
// functions when you free or realloc blocks of memory that contain
// pointers or pointer targets. stb.h can automatically do this for
// stb_arr, or for all frees/reallocs if it's wrapping them.
//
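// Example (illustrative; 'my_obj' is hypothetical):
//
//    obj *p;
//    stb_nptr_set((void *) &p, my_obj);        // write the pointer and register it
//    stb_nptr_free(my_obj, sizeof(*my_obj));   // frees my_obj; p becomes NULL
//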
#ifdef STB_NPTR
STB_EXTERN void stb_nptr_set(void *address_of_pointer, void *value_to_write);
STB_EXTERN void stb_nptr_didset(void *address_of_pointer);
STB_EXTERN void stb_nptr_didfree(void *address_being_freed, int len);
STB_EXTERN void stb_nptr_free(void *address_being_freed, int len);
STB_EXTERN void stb_nptr_didrealloc(void *new_address, void *old_address, int len);
STB_EXTERN void stb_nptr_move(void *new_address, void *old_address);
STB_EXTERN void stb_nptr_recache(void); // recache all known pointers
// do this after pointer sets outside your control, slow
#ifdef STB_DEFINE
// for fast updating on free/realloc, we need to be able to find
// all the objects (pointers and targets) within a given block;
// this precludes hashing
// we use a three-level hierarchy of memory to minimize storage:
// level 1: 65536 pointers to stb__memory_node (always uses 256 KB)
// level 2: each stb__memory_node covers a 64K block of memory,
// split into STB__NPTR_NODE_NUM (32) stb__memory_leafs
// level 3: each stb__memory_leaf covers 2048 bytes of memory
// using a list of target locations and a list of pointers
// (which are hopefully fairly short normally!)
// this approach won't work in 64-bit, which has a much larger address
// space. need to redesign
#define STB__NPTR_ROOT_LOG2 16
#define STB__NPTR_ROOT_NUM (1 << STB__NPTR_ROOT_LOG2)
#define STB__NPTR_ROOT_SHIFT (32 - STB__NPTR_ROOT_LOG2)
#define STB__NPTR_NODE_LOG2 5
#define STB__NPTR_NODE_NUM (1 << STB__NPTR_NODE_LOG2)
#define STB__NPTR_NODE_MASK (STB__NPTR_NODE_NUM-1)
#define STB__NPTR_NODE_SHIFT (STB__NPTR_ROOT_SHIFT - STB__NPTR_NODE_LOG2)
#define STB__NPTR_NODE_OFFSET(x) (((x) >> STB__NPTR_NODE_SHIFT) & STB__NPTR_NODE_MASK)
typedef struct stb__st_nptr
{
void *ptr; // address of actual pointer
struct stb__st_nptr *next; // next pointer with same target
struct stb__st_nptr **prev; // prev pointer with same target, address of 'next' field (or first)
struct stb__st_nptr *next_in_block;
} stb__nptr;
typedef struct stb__st_nptr_target
{
void *ptr; // address of target
stb__nptr *first; // address of first nptr pointing to this
struct stb__st_nptr_target *next_in_block;
} stb__nptr_target;
typedef struct
{
stb__nptr *pointers;
stb__nptr_target *targets;
} stb__memory_leaf;
typedef struct
{
stb__memory_leaf *children[STB__NPTR_NODE_NUM];
} stb__memory_node;
stb__memory_node *stb__memtab_root[STB__NPTR_ROOT_NUM];
static stb__memory_leaf *stb__nptr_find_leaf(void *mem)
{
stb_uint32 address = (stb_uint32) mem;
stb__memory_node *z = stb__memtab_root[address >> STB__NPTR_ROOT_SHIFT];
if (z)
return z->children[STB__NPTR_NODE_OFFSET(address)];
else
return NULL;
}
static void * stb__nptr_alloc(int size)
{
return stb__realloc_raw(0,size);
}
static void stb__nptr_free(void *p)
{
stb__realloc_raw(p,0);
}
static stb__memory_leaf *stb__nptr_make_leaf(void *mem)
{
stb_uint32 address = (stb_uint32) mem;
stb__memory_node *z = stb__memtab_root[address >> STB__NPTR_ROOT_SHIFT];
stb__memory_leaf *f;
if (!z) {
int i;
z = (stb__memory_node *) stb__nptr_alloc(sizeof(*stb__memtab_root[0]));
stb__memtab_root[address >> STB__NPTR_ROOT_SHIFT] = z;
for (i=0; i < STB__NPTR_NODE_NUM; ++i)
z->children[i] = 0;
}
f = (stb__memory_leaf *) stb__nptr_alloc(sizeof(*f));
z->children[STB__NPTR_NODE_OFFSET(address)] = f;
f->pointers = NULL;
f->targets = NULL;
return f;
}
static stb__nptr_target *stb__nptr_find_target(void *target, int force)
{
stb__memory_leaf *p = stb__nptr_find_leaf(target);
if (p) {
stb__nptr_target *t = p->targets;
while (t) {
if (t->ptr == target)
return t;
t = t->next_in_block;
}
}
if (force) {
stb__nptr_target *t = (stb__nptr_target*) stb__nptr_alloc(sizeof(*t));
if (!p) p = stb__nptr_make_leaf(target);
t->ptr = target;
t->first = NULL;
t->next_in_block = p->targets;
p->targets = t;
return t;
} else
return NULL;
}
static stb__nptr *stb__nptr_find_pointer(void *ptr, int force)
{
stb__memory_leaf *p = stb__nptr_find_leaf(ptr);
if (p) {
stb__nptr *t = p->pointers;
while (t) {
if (t->ptr == ptr)
return t;
t = t->next_in_block;
}
}
if (force) {
stb__nptr *t = (stb__nptr *) stb__nptr_alloc(sizeof(*t));
if (!p) p = stb__nptr_make_leaf(ptr);
t->ptr = ptr;
t->next = NULL;
t->prev = NULL;
t->next_in_block = p->pointers;
p->pointers = t;
return t;
} else
return NULL;
}
void stb_nptr_set(void *address_of_pointer, void *value_to_write)
{
if (*(void **)address_of_pointer != value_to_write) {
*(void **) address_of_pointer = value_to_write;
stb_nptr_didset(address_of_pointer);
}
}
void stb_nptr_didset(void *address_of_pointer)
{
// first unlink from old chain
void *new_address;
stb__nptr *p = stb__nptr_find_pointer(address_of_pointer, 1); // force building if doesn't exist
if (p->prev) { // if p->prev is NULL, the pointer was just created or already unchained
*(p->prev) = p->next;
if (p->next) p->next->prev = p->prev;
}
// now add to new chain
new_address = *(void **)address_of_pointer;
if (new_address != NULL) {
stb__nptr_target *t = stb__nptr_find_target(new_address, 1);
p->next = t->first;
if (p->next) p->next->prev = &p->next;
p->prev = &t->first;
t->first = p;
} else {
p->prev = NULL;
p->next = NULL;
}
}
void stb__nptr_block(void *address, int len, void (*function)(stb__memory_leaf *f, int datum, void *start, void *end), int datum)
{
void *end_address = (void *) ((char *) address + len - 1);
stb__memory_node *n;
stb_uint32 start = (stb_uint32) address;
stb_uint32 end = start + len - 1;
int b0 = start >> STB__NPTR_ROOT_SHIFT;
int b1 = end >> STB__NPTR_ROOT_SHIFT;
int b=b0,i,e0,e1;
e0 = STB__NPTR_NODE_OFFSET(start);
if (datum <= 0) {
// first block
n = stb__memtab_root[b0];
if (n) {
if (b0 != b1)
e1 = STB__NPTR_NODE_NUM-1;
else
e1 = STB__NPTR_NODE_OFFSET(end);
for (i=e0; i <= e1; ++i)
if (n->children[i])
function(n->children[i], datum, address, end_address);
}
if (b1 > b0) {
// blocks other than the first and last block
for (b=b0+1; b < b1; ++b) {
n = stb__memtab_root[b];
if (n)
for (i=0; i <= STB__NPTR_NODE_NUM-1; ++i)
if (n->children[i])
function(n->children[i], datum, address, end_address);
}
// last block
n = stb__memtab_root[b1];
if (n) {
e1 = STB__NPTR_NODE_OFFSET(end);
for (i=0; i <= e1; ++i)
if (n->children[i])
function(n->children[i], datum, address, end_address);
}
}
} else {
if (b1 > b0) {
// last block
n = stb__memtab_root[b1];
if (n) {
e1 = STB__NPTR_NODE_OFFSET(end);
for (i=e1; i >= 0; --i)
if (n->children[i])
function(n->children[i], datum, address, end_address);
}
// blocks other than the first and last block
for (b=b1-1; b > b0; --b) {
n = stb__memtab_root[b];
if (n)
for (i=STB__NPTR_NODE_NUM-1; i >= 0; --i)
if (n->children[i])
function(n->children[i], datum, address, end_address);
}
}
// first block
n = stb__memtab_root[b0];
if (n) {
if (b0 != b1)
e1 = STB__NPTR_NODE_NUM-1;
else
e1 = STB__NPTR_NODE_OFFSET(end);
for (i=e1; i >= e0; --i)
if (n->children[i])
function(n->children[i], datum, address, end_address);
}
}
}
static void stb__nptr_delete_pointers(stb__memory_leaf *f, int offset, void *start, void *end)
{
stb__nptr **p = &f->pointers;
while (*p) {
stb__nptr *n = *p;
if (n->ptr >= start && n->ptr <= end) {
// unlink
if (n->prev) {
*(n->prev) = n->next;
if (n->next) n->next->prev = n->prev;
}
*p = n->next_in_block;
stb__nptr_free(n);
} else
p = &(n->next_in_block);
}
}
static void stb__nptr_delete_targets(stb__memory_leaf *f, int offset, void *start, void *end)
{
stb__nptr_target **p = &f->targets;
while (*p) {
stb__nptr_target *n = *p;
if (n->ptr >= start && n->ptr <= end) {
// null pointers
stb__nptr *z = n->first;
while (z) {
stb__nptr *y = z->next;
z->prev = NULL;
z->next = NULL;
*(void **) z->ptr = NULL;
z = y;
}
// unlink this target
*p = n->next_in_block;
stb__nptr_free(n);
} else
p = &(n->next_in_block);
}
}
void stb_nptr_didfree(void *address_being_freed, int len)
{
// step one: delete all pointers in this block
stb__nptr_block(address_being_freed, len, stb__nptr_delete_pointers, 0);
// step two: NULL all pointers to this block; do this second to avoid NULLing deleted pointers
stb__nptr_block(address_being_freed, len, stb__nptr_delete_targets, 0);
}
void stb_nptr_free(void *address_being_freed, int len)
{
free(address_being_freed);
stb_nptr_didfree(address_being_freed, len);
}
static void stb__nptr_move_targets(stb__memory_leaf *f, int offset, void *start, void *end)
{
stb__nptr_target **t = &f->targets;
while (*t) {
stb__nptr_target *n = *t;
if (n->ptr >= start && n->ptr <= end) {
stb__nptr *z;
stb__memory_leaf *lf; // leaf for the new address (don't shadow the parameter 'f')
// unlink n
*t = n->next_in_block;
// update n to new address
n->ptr = (void *) ((char *) n->ptr + offset);
lf = stb__nptr_find_leaf(n->ptr);
if (!lf) lf = stb__nptr_make_leaf(n->ptr);
n->next_in_block = lf->targets;
lf->targets = n;
// now go through all pointers and make them point here
z = n->first;
while (z) {
*(void**) z->ptr = n->ptr;
z = z->next;
}
} else
t = &(n->next_in_block);
}
}
static void stb__nptr_move_pointers(stb__memory_leaf *f, int offset, void *start, void *end)
{
stb__nptr **p = &f->pointers;
while (*p) {
stb__nptr *n = *p;
if (n->ptr >= start && n->ptr <= end) {
// unlink
*p = n->next_in_block;
n->ptr = (void *) ((char *) n->ptr + offset);
// move to new block
f = stb__nptr_find_leaf(n->ptr);
if (!f) f = stb__nptr_make_leaf(n->ptr);
n->next_in_block = f->pointers;
f->pointers = n;
} else
p = &(n->next_in_block);
}
}
void stb_nptr_didrealloc(void *new_address, void *old_address, int len)
{
if (new_address == old_address) return;
// have to move the pointers first, because moving the targets
// requires writing to the pointers-to-the-targets, and if some of those moved too,
// we need to make sure we don't write to the old memory
// step one: move all pointers within the block
stb__nptr_block(old_address, len, stb__nptr_move_pointers, (char *) new_address - (char *) old_address);
// step two: move all targets within the block
stb__nptr_block(old_address, len, stb__nptr_move_targets, (char *) new_address - (char *) old_address);
}
void stb_nptr_move(void *new_address, void *old_address)
{
stb_nptr_didrealloc(new_address, old_address, 1);
}
void stb_nptr_recache(void)
{
int i,j;
for (i=0; i < STB__NPTR_ROOT_NUM; ++i)
if (stb__memtab_root[i])
for (j=0; j < STB__NPTR_NODE_NUM; ++j)
if (stb__memtab_root[i]->children[j]) {
stb__nptr *p = stb__memtab_root[i]->children[j]->pointers;
while (p) {
stb_nptr_didset(p->ptr);
p = p->next_in_block;
}
}
}
#endif // STB_DEFINE
#endif // STB_NPTR
//////////////////////////////////////////////////////////////////////////////
//
// File Processing
//
#ifdef _WIN32
#define stb_rename(x,y) _wrename((const wchar_t *)stb__from_utf8(x), (const wchar_t *)stb__from_utf8_alt(y))
#else
#define stb_rename rename
#endif
STB_EXTERN void stb_fput_varlen64(FILE *f, stb_uint64 v);
STB_EXTERN stb_uint64 stb_fget_varlen64(FILE *f);
STB_EXTERN int stb_size_varlen64(stb_uint64 v);
#define stb_filec (char *) stb_file
#define stb_fileu (unsigned char *) stb_file
STB_EXTERN void * stb_file(char *filename, size_t *length);
STB_EXTERN void * stb_file_max(char *filename, size_t *length);
STB_EXTERN size_t stb_filelen(FILE *f);
STB_EXTERN int stb_filewrite(char *filename, void *data, size_t length);
STB_EXTERN int stb_filewritestr(char *filename, char *data);
STB_EXTERN char ** stb_stringfile(char *filename, int *len);
STB_EXTERN char ** stb_stringfile_trimmed(char *name, int *len, char comm);
STB_EXTERN char * stb_fgets(char *buffer, int buflen, FILE *f);
STB_EXTERN char * stb_fgets_malloc(FILE *f);
STB_EXTERN int stb_fexists(char *filename);
STB_EXTERN int stb_fcmp(char *s1, char *s2);
STB_EXTERN int stb_feq(char *s1, char *s2);
STB_EXTERN time_t stb_ftimestamp(char *filename);
STB_EXTERN int stb_fullpath(char *abs, int abs_size, char *rel);
STB_EXTERN FILE * stb_fopen(char *filename, const char *mode);
STB_EXTERN int stb_fclose(FILE *f, int keep);
enum
{
stb_keep_no = 0,
stb_keep_yes = 1,
stb_keep_if_different = 2,
};
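// Example (illustrative): stb_fopen writes through a temp file, and
// stb_fclose moves it over the target, so an interrupted write can't
// corrupt the existing file:
//
//    FILE *f = stb_fopen("settings.txt", "wb");
//    if (f) {
//       fprintf(f, "volume=%d\n", 11);
//       stb_fclose(f, stb_keep_if_different); // replace only if contents changed
//    }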
STB_EXTERN int stb_copyfile(char *src, char *dest);
STB_EXTERN void stb_fput_varlen64(FILE *f, stb_uint64 v);
STB_EXTERN stb_uint64 stb_fget_varlen64(FILE *f);
STB_EXTERN int stb_size_varlen64(stb_uint64 v);
STB_EXTERN void stb_fwrite32(FILE *f, stb_uint32 datum);
STB_EXTERN void stb_fput_varlen (FILE *f, int v);
STB_EXTERN void stb_fput_varlenu(FILE *f, unsigned int v);
STB_EXTERN int stb_fget_varlen (FILE *f);
STB_EXTERN stb_uint stb_fget_varlenu(FILE *f);
STB_EXTERN void stb_fput_ranged (FILE *f, int v, int b, stb_uint n);
STB_EXTERN int stb_fget_ranged (FILE *f, int b, stb_uint n);
STB_EXTERN int stb_size_varlen (int v);
STB_EXTERN int stb_size_varlenu(unsigned int v);
STB_EXTERN int stb_size_ranged (int b, stb_uint n);
STB_EXTERN int stb_fread(void *data, size_t len, size_t count, void *f);
STB_EXTERN int stb_fwrite(void *data, size_t len, size_t count, void *f);
#if 0
typedef struct
{
FILE *base_file;
char *buffer;
int buffer_size;
int buffer_off;
int buffer_left;
} STBF;
STB_EXTERN STBF *stb_tfopen(char *filename, char *mode);
STB_EXTERN int stb_tfread(void *data, size_t len, size_t count, STBF *f);
STB_EXTERN int stb_tfwrite(void *data, size_t len, size_t count, STBF *f);
#endif
#ifdef STB_DEFINE
#if 0
STBF *stb_tfopen(char *filename, char *mode)
{
STBF *z;
FILE *f = stb_p_fopen(filename, mode);
if (!f) return NULL;
z = (STBF *) malloc(sizeof(*z));
if (!z) { fclose(f); return NULL; }
z->base_file = f;
if (!strcmp(mode, "rb") || !strcmp(mode, "wb")) {
z->buffer_size = 4096;
z->buffer_off = z->buffer_size;
z->buffer_left = 0;
z->buffer = (char *) malloc(z->buffer_size);
if (!z->buffer) { free(z); fclose(f); return NULL; }
} else {
z->buffer = 0;
z->buffer_size = 0;
z->buffer_left = 0;
}
return z;
}
int stb_tfread(void *data, size_t len, size_t count, STBF *f)
{
int total = (int) (len*count), done=0;
if (!total) return 0;
if (total <= f->buffer_left) {
memcpy(data, f->buffer + f->buffer_off, total);
f->buffer_off += total;
f->buffer_left -= total;
return (int) count;
} else {
char *out = (char *) data;
// consume all buffered data first
memcpy(out, f->buffer + f->buffer_off, f->buffer_left);
done = f->buffer_left;
out += f->buffer_left;
f->buffer_left = 0;
// NOTE: the remainder is a completion sketch; the original draft stopped
// mid-statement here (this whole block is disabled by #if 0)
if (total-done > (f->buffer_size >> 1)) {
// large remainder: bypass the buffer, read straight from the file
done += (int) fread(out, 1, total-done, f->base_file);
} else {
// small remainder: refill the buffer, then copy out of it
int m;
f->buffer_left = (int) fread(f->buffer, 1, f->buffer_size, f->base_file);
f->buffer_off = 0;
m = total-done < f->buffer_left ? total-done : f->buffer_left;
memcpy(out, f->buffer + f->buffer_off, m);
f->buffer_off += m;
f->buffer_left -= m;
done += m;
}
return done / (int) len;
}
}
#endif
void stb_fwrite32(FILE *f, stb_uint32 x)
{
fwrite(&x, 4, 1, f);
}
#if defined(_WIN32)
#define stb__stat _stat
#else
#define stb__stat stat
#endif
int stb_fexists(char *filename)
{
struct stb__stat buf;
return stb__windows(
_wstat((const wchar_t *)stb__from_utf8(filename), &buf),
stat(filename,&buf)
) == 0;
}
time_t stb_ftimestamp(char *filename)
{
struct stb__stat buf;
if (stb__windows(
_wstat((const wchar_t *)stb__from_utf8(filename), &buf),
stat(filename,&buf)
) == 0)
{
return buf.st_mtime;
} else {
return 0;
}
}
size_t stb_filelen(FILE *f)
{
long len, pos;
pos = ftell(f);
fseek(f, 0, SEEK_END);
len = ftell(f);
fseek(f, pos, SEEK_SET);
return (size_t) len;
}
void *stb_file(char *filename, size_t *length)
{
FILE *f = stb__fopen(filename, "rb");
char *buffer;
size_t len, len2;
if (!f) return NULL;
len = stb_filelen(f);
buffer = (char *) malloc(len+2); // nul + extra
len2 = fread(buffer, 1, len, f);
if (len2 == len) {
if (length) *length = len;
buffer[len] = 0;
} else {
free(buffer);
buffer = NULL;
}
fclose(f);
return buffer;
}
int stb_filewrite(char *filename, void *data, size_t length)
{
FILE *f = stb_fopen(filename, "wb");
if (f) {
unsigned char *data_ptr = (unsigned char *) data;
size_t remaining = length;
while (remaining > 0) {
size_t len2 = remaining > 65536 ? 65536 : remaining;
size_t len3 = fwrite(data_ptr, 1, len2, f);
if (len2 != len3) {
fprintf(stderr, "Failed while writing %s\n", filename);
break;
}
remaining -= len2;
data_ptr += len2;
}
stb_fclose(f, stb_keep_if_different);
}
return f != NULL;
}
int stb_filewritestr(char *filename, char *data)
{
return stb_filewrite(filename, data, strlen(data));
}
void * stb_file_max(char *filename, size_t *length)
{
FILE *f = stb__fopen(filename, "rb");
char *buffer;
size_t len, maxlen;
if (!f) return NULL;
maxlen = *length;
buffer = (char *) malloc(maxlen+1);
len = fread(buffer, 1, maxlen, f);
buffer[len] = 0;
fclose(f);
*length = len;
return buffer;
}
char ** stb_stringfile(char *filename, int *plen)
{
FILE *f = stb__fopen(filename, "rb");
char *buffer, **list=NULL, *s;
size_t len, count, i;
if (!f) return NULL;
len = stb_filelen(f);
buffer = (char *) malloc(len+1);
len = fread(buffer, 1, len, f);
buffer[len] = 0;
fclose(f);
// two passes through: first time count lines, second time set them
for (i=0; i < 2; ++i) {
s = buffer;
if (i == 1)
list[0] = s;
count = 1;
while (*s) {
if (*s == '\n' || *s == '\r') {
// detect if both cr & lf are together
int crlf = (s[0] + s[1]) == ('\n' + '\r');
if (i == 1) *s = 0;
if (crlf) ++s;
if (s[1]) { // it's not over yet
if (i == 1) list[count] = s+1;
++count;
}
}
++s;
}
if (i == 0) {
list = (char **) malloc(sizeof(*list) * (count+1) + len+1);
if (!list) { free(buffer); return NULL; }
list[count] = 0;
// recopy the file so there's just a single allocation to free
memcpy(&list[count+1], buffer, len+1);
free(buffer);
buffer = (char *) &list[count+1];
if (plen) *plen = (int) count;
}
}
return list;
}
char ** stb_stringfile_trimmed(char *name, int *len, char comment)
{
int i,n,o=0;
char **s = stb_stringfile(name, &n);
if (s == NULL) return NULL;
for (i=0; i < n; ++i) {
char *p = stb_skipwhite(s[i]);
if (*p && *p != comment)
s[o++] = p;
}
s[o] = NULL;
if (len) *len = o;
return s;
}
char * stb_fgets(char *buffer, int buflen, FILE *f)
{
char *p;
buffer[0] = 0;
p = fgets(buffer, buflen, f);
if (p) {
int n = (int) (strlen(p)-1);
if (n >= 0)
if (p[n] == '\n')
p[n] = 0;
}
return p;
}
char * stb_fgets_malloc(FILE *f)
{
// avoid reallocing for small strings
char quick_buffer[800];
quick_buffer[sizeof(quick_buffer)-2] = 0;
if (!fgets(quick_buffer, sizeof(quick_buffer), f))
return NULL;
if (quick_buffer[sizeof(quick_buffer)-2] == 0) {
size_t n = strlen(quick_buffer);
if (n > 0 && quick_buffer[n-1] == '\n')
quick_buffer[n-1] = 0;
return stb_p_strdup(quick_buffer);
} else {
char *p;
char *a = stb_p_strdup(quick_buffer);
size_t len = sizeof(quick_buffer)-1;
while (!feof(f)) {
if (a[len-1] == '\n') break;
a = (char *) realloc(a, len*2);
p = &a[len];
p[len-2] = 0;
if (!fgets(p, (int) len, f))
break;
if (p[len-2] == 0) {
len += strlen(p);
break;
}
len = len + (len-1);
}
if (a[len-1] == '\n')
a[len-1] = 0;
return a;
}
}
int stb_fullpath(char *abs, int abs_size, char *rel)
{
#ifdef _WIN32
return _fullpath(abs, rel, abs_size) != NULL;
#else
if (rel[0] == '/' || rel[0] == '~') {
if ((int) strlen(rel) >= abs_size)
return STB_FALSE;
stb_p_strcpy_s(abs, abs_size, rel);
return STB_TRUE;
} else {
int n;
if (!getcwd(abs, abs_size))
return STB_FALSE;
n = (int) strlen(abs);
if (n+(int) strlen(rel)+2 <= abs_size) {
abs[n] = '/';
stb_p_strcpy_s(abs+n+1, abs_size-n-1, rel);
return STB_TRUE;
} else {
return STB_FALSE;
}
}
#endif
}
static int stb_fcmp_core(FILE *f, FILE *g)
{
char buf1[1024],buf2[1024];
int n1,n2, res=0;
while (1) {
n1 = (int) fread(buf1, 1, sizeof(buf1), f);
n2 = (int) fread(buf2, 1, sizeof(buf2), g);
res = memcmp(buf1,buf2,stb_min(n1,n2));
if (res)
break;
if (n1 != n2) {
res = n1 < n2 ? -1 : 1;
break;
}
if (n1 == 0)
break;
}
fclose(f);
fclose(g);
return res;
}
int stb_fcmp(char *s1, char *s2)
{
FILE *f = stb__fopen(s1, "rb");
FILE *g = stb__fopen(s2, "rb");
if (f == NULL || g == NULL) {
if (f) fclose(f);
if (g) {
fclose(g);
return STB_TRUE;
}
return f != NULL;
}
return stb_fcmp_core(f,g);
}
int stb_feq(char *s1, char *s2)
{
FILE *f = stb__fopen(s1, "rb");
FILE *g = stb__fopen(s2, "rb");
if (f == NULL || g == NULL) {
if (f) fclose(f);
if (g) fclose(g);
return f == g;
}
// feq is faster because it shortcuts if they're different length
if (stb_filelen(f) != stb_filelen(g)) {
fclose(f);
fclose(g);
return 0;
}
return !stb_fcmp_core(f,g);
}
static stb_ptrmap *stb__files;
typedef struct
{
char *temp_name;
char *name;
int errors;
} stb__file_data;
static FILE *stb__open_temp_file(char *temp_name, char *src_name, const char *mode)
{
size_t p;
#ifdef _MSC_VER
int j;
#endif
FILE *f;
// try to generate a temporary file in the same directory
p = strlen(src_name)-1;
while (p > 0 && src_name[p] != '/' && src_name[p] != '\\'
&& src_name[p] != ':' && src_name[p] != '~')
--p;
++p;
memcpy(temp_name, src_name, p);
#ifdef _MSC_VER
// try multiple times to make a temp file... just in
// case some other process makes the name first
for (j=0; j < 32; ++j) {
stb_p_strcpy_s(temp_name+p, 65536, "stmpXXXXXX");
if (!stb_p_mktemp(temp_name))
return 0;
f = stb_p_fopen(temp_name, mode);
if (f != NULL)
break;
}
#else
{
stb_p_strcpy_s(temp_name+p, 65536, "stmpXXXXXX");
#ifdef __MINGW32__
int fd = open(stb_p_mktemp(temp_name), O_RDWR);
#else
int fd = mkstemp(temp_name);
#endif
if (fd == -1) return NULL;
f = fdopen(fd, mode);
if (f == NULL) {
unlink(temp_name);
close(fd);
return NULL;
}
}
#endif
return f;
}
FILE * stb_fopen(char *filename, const char *mode)
{
FILE *f;
char name_full[4096];
char temp_full[sizeof(name_full) + 12];
// @TODO: if the file doesn't exist, we can also use the fastpath here
if (mode[0] != 'w' && !strchr(mode, '+'))
return stb__fopen(filename, mode);
// save away the full path to the file so if the program
// changes the cwd everything still works right! unix has
// better ways to do this, but we have to work in windows
name_full[0] = '\0'; // stb_fullpath reads name_full[0]
if (stb_fullpath(name_full, sizeof(name_full), filename)==0)
return 0;
f = stb__open_temp_file(temp_full, name_full, mode);
if (f != NULL) {
stb__file_data *d = (stb__file_data *) malloc(sizeof(*d));
if (!d) { assert(0); /* NOTREACHED */fclose(f); return NULL; }
if (stb__files == NULL) stb__files = stb_ptrmap_create();
d->temp_name = stb_p_strdup(temp_full);
d->name = stb_p_strdup(name_full);
d->errors = 0;
stb_ptrmap_add(stb__files, f, d);
return f;
}
return NULL;
}
int stb_fclose(FILE *f, int keep)
{
stb__file_data *d;
int ok = STB_FALSE;
if (f == NULL) return 0;
if (ferror(f))
keep = stb_keep_no;
fclose(f);
if (stb__files && stb_ptrmap_remove(stb__files, f, (void **) &d)) {
if (stb__files->count == 0) {
stb_ptrmap_destroy(stb__files);
stb__files = NULL;
}
} else
return STB_TRUE; // not special
if (keep == stb_keep_if_different) {
// check if the files are identical
if (stb_feq(d->name, d->temp_name)) {
keep = stb_keep_no;
ok = STB_TRUE; // report success if no change
}
}
if (keep == stb_keep_no) {
remove(d->temp_name);
} else {
if (!stb_fexists(d->name)) {
// old file doesn't exist, so just move the new file over it
stb_rename(d->temp_name, d->name);
} else {
// don't delete the old file yet in case there are troubles! First rename it!
char preserved_old_file[4096];
// generate a temp filename in the same directory (also creates it, which we don't need)
FILE *dummy = stb__open_temp_file(preserved_old_file, d->name, "wb");
if (dummy != NULL) {
// we don't actually want the open file
fclose(dummy);
// discard what we just created
remove(preserved_old_file); // if this fails, there's nothing we can do, and following logic handles it as best as possible anyway
// move the existing file to the preserved name
if (0 != stb_rename(d->name, preserved_old_file)) { // 0 on success
// failed, state is:
// filename -> old file
// tempname -> new file
// keep tempname around so we don't lose data
} else {
// state is:
// preserved -> old file
// tempname -> new file
// move the new file to the old name
if (0 == stb_rename(d->temp_name, d->name)) {
// state is:
// preserved -> old file
// filename -> new file
ok = STB_TRUE;
// 'filename -> new file' has always been the goal, so clean up
remove(preserved_old_file); // nothing to be done if it fails
} else {
// couldn't rename, so try renaming preserved file back
// state is:
// preserved -> old file
// tempname -> new file
stb_rename(preserved_old_file, d->name);
// if the rename failed, there's nothing more we can do
}
}
} else {
// we couldn't get a temp filename. do this the naive way; the worst case failure here
// leaves the filename pointing to nothing and the new file as a tempfile
remove(d->name);
stb_rename(d->temp_name, d->name);
}
}
}
free(d->temp_name);
free(d->name);
free(d);
return ok;
}
int stb_copyfile(char *src, char *dest)
{
char raw_buffer[1024];
char *buffer;
int buf_size = 65536;
FILE *f, *g;
// if file already exists at destination, do nothing
if (stb_feq(src, dest)) return STB_TRUE;
// open file
f = stb__fopen(src, "rb");
if (f == NULL) return STB_FALSE;
// open file for writing
g = stb__fopen(dest, "wb");
if (g == NULL) {
fclose(f);
return STB_FALSE;
}
buffer = (char *) malloc(buf_size);
if (buffer == NULL) {
buffer = raw_buffer;
buf_size = sizeof(raw_buffer);
}
while (!feof(f)) {
size_t n = fread(buffer, 1, buf_size, f);
if (n != 0)
fwrite(buffer, 1, n, g);
}
fclose(f);
if (buffer != raw_buffer)
free(buffer);
fclose(g);
return STB_TRUE;
}
// varlen:
// v' = v < 0 ? (~v << 1)+1 : (v << 1); // small abs(v) => small v'
// output v' big-endian, folding a length prefix into the first byte:
// 1 byte : v' < 0x00000080 ( -64 <= v < 64) 7 bits
// 2 bytes: v' < 0x00004000 (-8192 <= v < 8192) 14 bits
// 3 bytes: v' < 0x00200000 21 bits
// 4 bytes: v' < 0x10000000 28 bits
// the number of most significant 1-bits in the first byte
// equals the number of bytes after the first
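// worked example (v = 300): v' = 600 = 0x258; since 0x80 <= v' < 0x4000,
// two bytes are written: 0x80 + (0x258 >> 8) = 0x82, then 0x58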
#define stb__varlen_xform(v) (v<0 ? (~v << 1)+1 : (v << 1))
int stb_size_varlen(int v) { return stb_size_varlenu(stb__varlen_xform(v)); }
int stb_size_varlenu(unsigned int v)
{
if (v < 0x00000080) return 1;
if (v < 0x00004000) return 2;
if (v < 0x00200000) return 3;
if (v < 0x10000000) return 4;
return 5;
}
void stb_fput_varlen(FILE *f, int v) { stb_fput_varlenu(f, stb__varlen_xform(v)); }
void stb_fput_varlenu(FILE *f, unsigned int z)
{
if (z >= 0x10000000) fputc(0xF0,f);
if (z >= 0x00200000) fputc((z < 0x10000000 ? 0xE0 : 0)+(z>>24),f);
if (z >= 0x00004000) fputc((z < 0x00200000 ? 0xC0 : 0)+(z>>16),f);
if (z >= 0x00000080) fputc((z < 0x00004000 ? 0x80 : 0)+(z>> 8),f);
fputc(z,f);
}
#define stb_fgetc(f) ((unsigned char) fgetc(f))
int stb_fget_varlen(FILE *f)
{
unsigned int z = stb_fget_varlenu(f);
return (z & 1) ? ~(z>>1) : (z>>1);
}
unsigned int stb_fget_varlenu(FILE *f)
{
unsigned int z;
unsigned char d;
d = stb_fgetc(f);
if (d >= 0x80) {
if (d >= 0xc0) {
if (d >= 0xe0) {
if (d == 0xf0) z = stb_fgetc(f) << 24;
else z = (d - 0xe0) << 24;
z += stb_fgetc(f) << 16;
}
else
z = (d - 0xc0) << 16;
z += stb_fgetc(f) << 8;
} else
z = (d - 0x80) << 8;
z += stb_fgetc(f);
} else
z = d;
return z;
}
stb_uint64 stb_fget_varlen64(FILE *f)
{
stb_uint64 z;
unsigned char d;
d = stb_fgetc(f);
if (d >= 0x80) {
if (d >= 0xc0) {
if (d >= 0xe0) {
if (d >= 0xf0) {
if (d >= 0xf8) {
if (d >= 0xfc) {
if (d >= 0xfe) {
if (d >= 0xff)
z = (stb_uint64) stb_fgetc(f) << 56;
else
z = (stb_uint64) (d - 0xfe) << 56;
z |= (stb_uint64) stb_fgetc(f) << 48;
} else z = (stb_uint64) (d - 0xfc) << 48;
z |= (stb_uint64) stb_fgetc(f) << 40;
} else z = (stb_uint64) (d - 0xf8) << 40;
z |= (stb_uint64) stb_fgetc(f) << 32;
} else z = (stb_uint64) (d - 0xf0) << 32;
z |= (stb_uint) stb_fgetc(f) << 24;
} else z = (stb_uint) (d - 0xe0) << 24;
z |= (stb_uint) stb_fgetc(f) << 16;
} else z = (stb_uint) (d - 0xc0) << 16;
z |= (stb_uint) stb_fgetc(f) << 8;
} else z = (stb_uint) (d - 0x80) << 8;
z |= stb_fgetc(f);
} else
z = d;
return (z & 1) ? ~(z >> 1) : (z >> 1);
}
int stb_size_varlen64(stb_uint64 v)
{
if (v < 0x00000080) return 1;
if (v < 0x00004000) return 2;
if (v < 0x00200000) return 3;
if (v < 0x10000000) return 4;
if (v < STB_IMM_UINT64(0x0000000800000000)) return 5;
if (v < STB_IMM_UINT64(0x0000040000000000)) return 6;
if (v < STB_IMM_UINT64(0x0002000000000000)) return 7;
if (v < STB_IMM_UINT64(0x0100000000000000)) return 8;
return 9;
}
void stb_fput_varlen64(FILE *f, stb_uint64 v)
{
stb_uint64 z = stb__varlen_xform(v);
int first=1;
if (z >= STB_IMM_UINT64(0x100000000000000)) {
fputc(0xff,f);
first=0;
}
if (z >= STB_IMM_UINT64(0x02000000000000)) fputc((first ? 0xFE : 0)+(char)(z>>56),f), first=0;
if (z >= STB_IMM_UINT64(0x00040000000000)) fputc((first ? 0xFC : 0)+(char)(z>>48),f), first=0;
if (z >= STB_IMM_UINT64(0x00000800000000)) fputc((first ? 0xF8 : 0)+(char)(z>>40),f), first=0;
if (z >= STB_IMM_UINT64(0x00000010000000)) fputc((first ? 0xF0 : 0)+(char)(z>>32),f), first=0;
if (z >= STB_IMM_UINT64(0x00000000200000)) fputc((first ? 0xE0 : 0)+(char)(z>>24),f), first=0;
if (z >= STB_IMM_UINT64(0x00000000004000)) fputc((first ? 0xC0 : 0)+(char)(z>>16),f), first=0;
if (z >= STB_IMM_UINT64(0x00000000000080)) fputc((first ? 0x80 : 0)+(char)(z>> 8),f), first=0;
fputc((char)z,f);
}
void stb_fput_ranged(FILE *f, int v, int b, stb_uint n)
{
v -= b;
if (n <= (1u << 31))
assert((stb_uint) v < n);
if (n > (1 << 24)) fputc(v >> 24, f);
if (n > (1 << 16)) fputc(v >> 16, f);
if (n > (1 << 8)) fputc(v >> 8, f);
fputc(v,f);
}
int stb_fget_ranged(FILE *f, int b, stb_uint n)
{
unsigned int v=0;
if (n > (1 << 24)) v += stb_fgetc(f) << 24;
if (n > (1 << 16)) v += stb_fgetc(f) << 16;
if (n > (1 << 8)) v += stb_fgetc(f) << 8;
v += stb_fgetc(f);
return b+v;
}
int stb_size_ranged(int b, stb_uint n)
{
if (n > (1 << 24)) return 4;
if (n > (1 << 16)) return 3;
if (n > (1 << 8)) return 2;
return 1;
}
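// Example (illustrative): a value known to lie in [b, b+n) is written in
// just enough bytes for the range; n = 200 fits in one byte:
//
//    stb_fput_ranged(f, v, 10, 200);    // v in [10,210), writes 1 byte
//    v = stb_fget_ranged(f, 10, 200);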
void stb_fput_string(FILE *f, char *s)
{
size_t len = strlen(s);
stb_fput_varlenu(f, (unsigned int) len);
fwrite(s, 1, len, f);
}
// inverse of stb_fput_string above
char *stb_fget_string(FILE *f, void *p)
{
char *s;
int len = (int) stb_fget_varlenu(f);
if (len > 4096) return NULL; // sanity limit to catch corrupt data
s = p ? stb_malloc_string(p, len+1) : (char *) malloc(len+1);
fread(s, 1, len, f);
s[len] = 0;
return s;
}
char *stb_strdup(char *str, void *pool)
{
size_t len = strlen(str);
char *p = stb_malloc_string(pool, len+1);
stb_p_strcpy_s(p, len+1, str);
return p;
}
// strip the trailing '/' or '\\' from a directory so we can refer to it
// as a file for _stat()
char *stb_strip_final_slash(char *t)
{
if (t[0]) {
char *z = t + strlen(t) - 1;
// *z is the last character
if (*z == '\\' || *z == '/')
if (z != t+2 || t[1] != ':') // but don't strip it if it's e.g. "c:/"
*z = 0;
if (*z == '\\')
*z = '/'; // canonicalize to make sure it matches db
}
return t;
}
char *stb_strip_final_slash_regardless(char *t)
{
if (t[0]) {
char *z = t + strlen(t) - 1;
// *z is the last character
if (*z == '\\' || *z == '/')
*z = 0;
if (*z == '\\')
*z = '/'; // canonicalize to make sure it matches db
}
return t;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Options parsing
//
STB_EXTERN char **stb_getopt_param(int *argc, char **argv, char *param);
STB_EXTERN char **stb_getopt(int *argc, char **argv);
STB_EXTERN void stb_getopt_free(char **opts);
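// Example (illustrative; 'o' is a hypothetical option taking a parameter):
//
//    char **opts = stb_getopt_param(&argc, argv, "o");  // e.g. "-o file"
//    int i;
//    for (i=0; opts && opts[i]; ++i)
//       if (opts[i][0] == 'o') { char *outname = opts[i]+1; /* ... */ }
//    stb_getopt_free(opts);
//
// afterwards argv[1..argc-1] holds only the non-option arguments.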
#ifdef STB_DEFINE
void stb_getopt_free(char **opts)
{
int i;
char ** o2 = opts;
for (i=0; i < stb_arr_len(o2); ++i)
free(o2[i]);
stb_arr_free(o2);
}
char **stb_getopt(int *argc, char **argv)
{
return stb_getopt_param(argc, argv, (char*) "");
}
char **stb_getopt_param(int *argc, char **argv, char *param)
{
char ** opts=NULL;
int i,j=1;
for (i=1; i < *argc; ++i) {
if (argv[i][0] != '-') {
argv[j++] = argv[i];
} else {
if (argv[i][1] == 0) { // plain - == don't parse further options
++i;
while (i < *argc)
argv[j++] = argv[i++];
break;
} else if (argv[i][1] == '-') {
// copy argument through including initial '-' for clarity
stb_arr_push(opts, stb_p_strdup(argv[i]));
} else {
int k;
char *q = argv[i]; // traverse options list
for (k=1; q[k]; ++k) {
char *s;
if (strchr(param, q[k])) { // does it take a parameter?
char *t = &q[k+1], z = q[k];
size_t len=0;
if (*t == 0) {
if (i == *argc-1) { // takes a parameter, but none found
*argc = 0;
stb_getopt_free(opts);
return NULL;
}
t = argv[++i];
} else
k += (int) strlen(t);
len = strlen(t);
s = (char *) malloc(len+2);
if (!s) return NULL;
s[0] = z;
stb_p_strcpy_s(s+1, len+2, t);
} else {
// no parameter
s = (char *) malloc(2);
if (!s) return NULL;
s[0] = q[k];
s[1] = 0;
}
stb_arr_push(opts, s);
}
}
}
}
stb_arr_push(opts, NULL);
*argc = j;
return opts;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Portable directory reading
//
STB_EXTERN char **stb_readdir_files (char *dir);
STB_EXTERN char **stb_readdir_files_mask(char *dir, char *wild);
STB_EXTERN char **stb_readdir_subdirs(char *dir);
STB_EXTERN char **stb_readdir_subdirs_mask(char *dir, char *wild);
STB_EXTERN void stb_readdir_free (char **files);
STB_EXTERN char **stb_readdir_recursive(char *dir, char *filespec);
STB_EXTERN void stb_delete_directory_recursive(char *dir);
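//
// Usage sketch (illustrative only, hence disabled): list every .c file under
// a directory tree; "src" is a made-up path. The returned list is an stb_arr
// of strdup'd names, released with stb_readdir_free().
#if 0
static void example_readdir(void)
{
   int i;
   char **files = stb_readdir_recursive((char *) "src", (char *) "*.c");
   for (i=0; i < stb_arr_len(files); ++i)
      printf("%s\n", files[i]);
   stb_readdir_free(files);
}
#endif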
#ifdef STB_DEFINE
#ifdef _MSC_VER
#include <io.h>
#else
#include <unistd.h>
#include <dirent.h>
#endif
void stb_readdir_free(char **files)
{
char **f2 = files;
int i;
for (i=0; i < stb_arr_len(f2); ++i)
free(f2[i]);
stb_arr_free(f2);
}
static int isdotdirname(char *name)
{
if (name[0] == '.')
return (name[1] == '.') ? !name[2] : !name[1];
return 0;
}
STB_EXTERN int stb_wildmatchi(char *expr, char *candidate);
static char **readdir_raw(char *dir, int return_subdirs, char *mask)
{
char **results = NULL;
char buffer[4096], with_slash[4096];
size_t n;
#ifdef _MSC_VER
stb__wchar *ws;
struct _wfinddata_t data;
#ifdef _WIN64
const intptr_t none = -1;
intptr_t z;
#else
const long none = -1;
long z;
#endif
#else // !_MSC_VER
const DIR *none = NULL;
DIR *z;
#endif
n = stb_strscpy(buffer,dir,sizeof(buffer));
if (!n || n >= sizeof(buffer))
return NULL;
stb_fixpath(buffer);
n--;
if (n > 0 && (buffer[n-1] != '/')) {
buffer[n++] = '/';
}
buffer[n] = 0;
if (!stb_strscpy(with_slash,buffer,sizeof(with_slash)))
return NULL;
#ifdef _MSC_VER
if (!stb_strscpy(buffer+n,"*.*",sizeof(buffer)-n))
return NULL;
ws = stb__from_utf8(buffer);
z = _wfindfirst((const wchar_t *)ws, &data);
#else
z = opendir(dir);
#endif
if (z != none) {
int nonempty = STB_TRUE;
#ifndef _MSC_VER
struct dirent *data = readdir(z);
nonempty = (data != NULL);
#endif
if (nonempty) {
do {
int is_subdir;
#ifdef _MSC_VER
char *name = stb__to_utf8((stb__wchar *)data.name);
if (name == NULL) {
fprintf(stderr, "%s to convert '%S' to %s!\n", "Unable", data.name, "utf8");
continue;
}
is_subdir = !!(data.attrib & _A_SUBDIR);
#else
char *name = data->d_name;
if (!stb_strscpy(buffer+n,name,sizeof(buffer)-n))
break;
// Could follow DT_LNK, but would need to check for recursive links.
         // d_type values are enumerated, not bit flags, so compare for equality
         is_subdir = (data->d_type == DT_DIR);
#endif
if (is_subdir == return_subdirs) {
if (!is_subdir || !isdotdirname(name)) {
if (!mask || stb_wildmatchi(mask, name)) {
char buffer[4096],*p=buffer;
if ( stb_snprintf(buffer, sizeof(buffer), "%s%s", with_slash, name) < 0 )
break;
if (buffer[0] == '.' && buffer[1] == '/')
p = buffer+2;
stb_arr_push(results, stb_p_strdup(p));
}
}
}
}
#ifdef _MSC_VER
while (0 == _wfindnext(z, &data));
#else
while ((data = readdir(z)) != NULL);
#endif
}
#ifdef _MSC_VER
_findclose(z);
#else
closedir(z);
#endif
}
return results;
}
char **stb_readdir_files (char *dir) { return readdir_raw(dir, 0, NULL); }
char **stb_readdir_subdirs(char *dir) { return readdir_raw(dir, 1, NULL); }
char **stb_readdir_files_mask(char *dir, char *wild) { return readdir_raw(dir, 0, wild); }
char **stb_readdir_subdirs_mask(char *dir, char *wild) { return readdir_raw(dir, 1, wild); }
int stb__rec_max=0x7fffffff;
static char **stb_readdir_rec(char **sofar, char *dir, char *filespec)
{
char **files;
char ** dirs;
char **p;
if (stb_arr_len(sofar) >= stb__rec_max) return sofar;
files = stb_readdir_files_mask(dir, filespec);
stb_arr_for(p, files) {
stb_arr_push(sofar, stb_p_strdup(*p));
if (stb_arr_len(sofar) >= stb__rec_max) break;
}
stb_readdir_free(files);
if (stb_arr_len(sofar) >= stb__rec_max) return sofar;
dirs = stb_readdir_subdirs(dir);
stb_arr_for(p, dirs)
sofar = stb_readdir_rec(sofar, *p, filespec);
stb_readdir_free(dirs);
return sofar;
}
char **stb_readdir_recursive(char *dir, char *filespec)
{
return stb_readdir_rec(NULL, dir, filespec);
}
void stb_delete_directory_recursive(char *dir)
{
char **list = stb_readdir_subdirs(dir);
int i;
for (i=0; i < stb_arr_len(list); ++i)
stb_delete_directory_recursive(list[i]);
stb_arr_free(list);
list = stb_readdir_files(dir);
for (i=0; i < stb_arr_len(list); ++i)
      if (remove(list[i])) { // remove() returns nonzero on failure
// on windows, try again after making it writeable; don't ALWAYS
// do this first since that would be slow in the normal case
#ifdef _MSC_VER
_chmod(list[i], _S_IWRITE);
remove(list[i]);
#endif
}
stb_arr_free(list);
stb__windows(_rmdir,rmdir)(dir);
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// construct trees from filenames; useful for cmirror summaries
typedef struct stb_dirtree2 stb_dirtree2;
struct stb_dirtree2
{
stb_dirtree2 **subdirs;
// make convenient for stb_summarize_tree
int num_subdir;
float weight;
// actual data
char *fullpath;
char *relpath;
char **files;
};
STB_EXTERN stb_dirtree2 *stb_dirtree2_from_files_relative(char *src, char **filelist, int count);
STB_EXTERN stb_dirtree2 *stb_dirtree2_from_files(char **filelist, int count);
STB_EXTERN int stb_dir_is_prefix(char *dir, int dirlen, char *file);
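//
// Usage sketch (illustrative only, hence disabled): build a tree from a flat
// list of filenames and walk it recursively; the print formatting and
// example_* names are made up.
#if 0
static void example_print_tree(stb_dirtree2 *d, int depth)
{
   int i;
   printf("%*s%s: %d files\n", depth*2, "", d->relpath, stb_arr_len(d->files));
   for (i=0; i < d->num_subdir; ++i)
      example_print_tree(d->subdirs[i], depth+1);
}
static void example_dirtree2(char **filelist, int count)
{
   stb_dirtree2 *root = stb_dirtree2_from_files(filelist, count);
   if (root) example_print_tree(root, 0);
}
#endif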
#ifdef STB_DEFINE
int stb_dir_is_prefix(char *dir, int dirlen, char *file)
{
if (dirlen == 0) return STB_TRUE;
if (stb_strnicmp(dir, file, dirlen)) return STB_FALSE;
if (file[dirlen] == '/' || file[dirlen] == '\\') return STB_TRUE;
return STB_FALSE;
}
stb_dirtree2 *stb_dirtree2_from_files_relative(char *src, char **filelist, int count)
{
char buffer1[1024];
int i;
int dlen = (int) strlen(src), elen;
stb_dirtree2 *d;
char ** descendents = NULL;
char ** files = NULL;
char *s;
if (!count) return NULL;
   // first find all the ones that belong here... note this will take O(NM) time with N files and M subdirs
for (i=0; i < count; ++i) {
if (stb_dir_is_prefix(src, dlen, filelist[i])) {
stb_arr_push(descendents, filelist[i]);
}
}
if (descendents == NULL)
return NULL;
elen = dlen;
// skip a leading slash
if (elen == 0 && (descendents[0][0] == '/' || descendents[0][0] == '\\'))
++elen;
else if (elen)
++elen;
// now extract all the ones that have their root here
for (i=0; i < stb_arr_len(descendents);) {
if (!stb_strchr2(descendents[i]+elen, '/', '\\')) {
stb_arr_push(files, descendents[i]);
descendents[i] = descendents[stb_arr_len(descendents)-1];
stb_arr_pop(descendents);
} else
++i;
}
// now create a record
d = (stb_dirtree2 *) malloc(sizeof(*d));
d->files = files;
d->subdirs = NULL;
d->fullpath = stb_p_strdup(src);
s = stb_strrchr2(d->fullpath, '/', '\\');
if (s)
++s;
else
s = d->fullpath;
d->relpath = s;
// now create the children
qsort(descendents, stb_arr_len(descendents), sizeof(char *), stb_qsort_stricmp(0));
buffer1[0] = 0;
for (i=0; i < stb_arr_len(descendents); ++i) {
char buffer2[1024];
char *s = descendents[i] + elen, *t;
t = stb_strchr2(s, '/', '\\');
assert(t);
stb_strncpy(buffer2, descendents[i], (int) (t-descendents[i]+1));
if (stb_stricmp(buffer1, buffer2)) {
stb_dirtree2 *t = stb_dirtree2_from_files_relative(buffer2, descendents, stb_arr_len(descendents));
assert(t != NULL);
stb_p_strcpy_s(buffer1, sizeof(buffer1), buffer2);
stb_arr_push(d->subdirs, t);
}
}
d->num_subdir = stb_arr_len(d->subdirs);
d->weight = 0;
return d;
}
stb_dirtree2 *stb_dirtree2_from_files(char **filelist, int count)
{
return stb_dirtree2_from_files_relative((char*) "", filelist, count);
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Checksums: CRC-32, ADLER32, SHA-1
//
// CRC-32 and ADLER32 allow streaming blocks
// SHA-1 requires either a complete in-memory buffer (max size 2^32 - 73 bytes),
// or it can checksum directly from a file (max size 2^61 bytes)
#define STB_ADLER32_SEED 1
#define STB_CRC32_SEED 0 // note that the code bitwise-NOTs this internally
STB_EXTERN stb_uint
stb_adler32(stb_uint adler32, stb_uchar *buffer, stb_uint buflen);
STB_EXTERN stb_uint
stb_crc32_block(stb_uint crc32, stb_uchar *buffer, stb_uint len);
STB_EXTERN stb_uint stb_crc32(unsigned char *buffer, stb_uint len);
STB_EXTERN void stb_sha1(
unsigned char output[20], unsigned char *buffer, unsigned int len);
STB_EXTERN int stb_sha1_file(unsigned char output[20], char *file);
STB_EXTERN void stb_sha1_readable(char display[27], unsigned char sha[20]);
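//
// Usage sketch (illustrative only, hence disabled): because CRC-32 streams,
// feeding a buffer in two pieces produces the same value as one
// whole-buffer call.
#if 0
static stb_uint example_crc32_streamed(stb_uchar *data, stb_uint len)
{
   stb_uint half = len/2;
   stb_uint crc = STB_CRC32_SEED;
   crc = stb_crc32_block(crc, data, half);          // first piece
   crc = stb_crc32_block(crc, data+half, len-half); // rest
   return crc; // equals stb_crc32(data, len)
}
#endif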
#ifdef STB_DEFINE
stb_uint stb_crc32_block(stb_uint crc, unsigned char *buffer, stb_uint len)
{
static stb_uint crc_table[256];
stb_uint i,j,s;
crc = ~crc;
if (crc_table[1] == 0)
for(i=0; i < 256; i++) {
for (s=i, j=0; j < 8; ++j)
s = (s >> 1) ^ (s & 1 ? 0xedb88320 : 0);
crc_table[i] = s;
}
for (i=0; i < len; ++i)
crc = (crc >> 8) ^ crc_table[buffer[i] ^ (crc & 0xff)];
return ~crc;
}
stb_uint stb_crc32(unsigned char *buffer, stb_uint len)
{
return stb_crc32_block(0, buffer, len);
}
stb_uint stb_adler32(stb_uint adler32, stb_uchar *buffer, stb_uint buflen)
{
const unsigned long ADLER_MOD = 65521;
unsigned long s1 = adler32 & 0xffff, s2 = adler32 >> 16;
unsigned long blocklen, i;
blocklen = buflen % 5552;
while (buflen) {
for (i=0; i + 7 < blocklen; i += 8) {
s1 += buffer[0], s2 += s1;
s1 += buffer[1], s2 += s1;
s1 += buffer[2], s2 += s1;
s1 += buffer[3], s2 += s1;
s1 += buffer[4], s2 += s1;
s1 += buffer[5], s2 += s1;
s1 += buffer[6], s2 += s1;
s1 += buffer[7], s2 += s1;
buffer += 8;
}
for (; i < blocklen; ++i)
s1 += *buffer++, s2 += s1;
s1 %= ADLER_MOD, s2 %= ADLER_MOD;
buflen -= blocklen;
blocklen = 5552;
}
return (s2 << 16) + s1;
}
static void stb__sha1(stb_uchar *chunk, stb_uint h[5])
{
int i;
stb_uint a,b,c,d,e;
stb_uint w[80];
for (i=0; i < 16; ++i)
w[i] = stb_big32(&chunk[i*4]);
for (i=16; i < 80; ++i) {
stb_uint t;
t = w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16];
w[i] = (t + t) | (t >> 31);
}
a = h[0];
b = h[1];
c = h[2];
d = h[3];
e = h[4];
#define STB__SHA1(k,f) \
{ \
stb_uint temp = (a << 5) + (a >> 27) + (f) + e + (k) + w[i]; \
e = d; \
d = c; \
c = (b << 30) + (b >> 2); \
b = a; \
a = temp; \
}
i=0;
for (; i < 20; ++i) STB__SHA1(0x5a827999, d ^ (b & (c ^ d)) );
for (; i < 40; ++i) STB__SHA1(0x6ed9eba1, b ^ c ^ d );
for (; i < 60; ++i) STB__SHA1(0x8f1bbcdc, (b & c) + (d & (b ^ c)) );
for (; i < 80; ++i) STB__SHA1(0xca62c1d6, b ^ c ^ d );
#undef STB__SHA1
h[0] += a;
h[1] += b;
h[2] += c;
h[3] += d;
h[4] += e;
}
void stb_sha1(stb_uchar output[20], stb_uchar *buffer, stb_uint len)
{
unsigned char final_block[128];
stb_uint end_start, final_len, j;
int i;
stb_uint h[5];
h[0] = 0x67452301;
h[1] = 0xefcdab89;
h[2] = 0x98badcfe;
h[3] = 0x10325476;
h[4] = 0xc3d2e1f0;
// we need to write padding to the last one or two
// blocks, so build those first into 'final_block'
// we have to write one special byte, plus the 8-byte length
// compute the block where the data runs out
end_start = len & ~63;
// compute the earliest we can encode the length
if (((len+9) & ~63) == end_start) {
// it all fits in one block, so fill a second-to-last block
end_start -= 64;
}
final_len = end_start + 128;
// now we need to copy the data in
assert(end_start + 128 >= len+9);
assert(end_start < len || len < 64-9);
j = 0;
if (end_start > len)
j = (stb_uint) - (int) end_start;
for (; end_start + j < len; ++j)
final_block[j] = buffer[end_start + j];
final_block[j++] = 0x80;
   while (j < 128-5) // 5-byte length below; the 3 high (always-zero) bytes of the 8-byte length field come from this padding
final_block[j++] = 0;
// big-endian size
final_block[j++] = len >> 29;
final_block[j++] = len >> 21;
final_block[j++] = len >> 13;
final_block[j++] = len >> 5;
final_block[j++] = len << 3;
assert(j == 128 && end_start + j == final_len);
for (j=0; j < final_len; j += 64) { // 512-bit chunks
if (j+64 >= end_start+64)
stb__sha1(&final_block[j - end_start], h);
else
stb__sha1(&buffer[j], h);
}
for (i=0; i < 5; ++i) {
output[i*4 + 0] = h[i] >> 24;
output[i*4 + 1] = h[i] >> 16;
output[i*4 + 2] = h[i] >> 8;
output[i*4 + 3] = h[i] >> 0;
}
}
#ifdef _MSC_VER
int stb_sha1_file(stb_uchar output[20], char *file)
{
int i;
stb_uint64 length=0;
unsigned char buffer[128];
FILE *f = stb__fopen(file, "rb");
stb_uint h[5];
if (f == NULL) return 0; // file not found
h[0] = 0x67452301;
h[1] = 0xefcdab89;
h[2] = 0x98badcfe;
h[3] = 0x10325476;
h[4] = 0xc3d2e1f0;
for(;;) {
size_t n = fread(buffer, 1, 64, f);
if (n == 64) {
stb__sha1(buffer, h);
length += n;
} else {
int block = 64;
length += n;
buffer[n++] = 0x80;
// if there isn't enough room for the length, double the block
if (n + 8 > 64)
block = 128;
// pad to end
memset(buffer+n, 0, block-8-n);
i = block - 8;
buffer[i++] = (stb_uchar) (length >> 53);
buffer[i++] = (stb_uchar) (length >> 45);
buffer[i++] = (stb_uchar) (length >> 37);
buffer[i++] = (stb_uchar) (length >> 29);
buffer[i++] = (stb_uchar) (length >> 21);
buffer[i++] = (stb_uchar) (length >> 13);
buffer[i++] = (stb_uchar) (length >> 5);
buffer[i++] = (stb_uchar) (length << 3);
assert(i == block);
stb__sha1(buffer, h);
if (block == 128)
stb__sha1(buffer+64, h);
else
assert(block == 64);
break;
}
}
fclose(f);
for (i=0; i < 5; ++i) {
output[i*4 + 0] = h[i] >> 24;
output[i*4 + 1] = h[i] >> 16;
output[i*4 + 2] = h[i] >> 8;
output[i*4 + 3] = h[i] >> 0;
}
return 1;
}
#endif // _MSC_VER
// client can truncate this wherever they like
void stb_sha1_readable(char display[27], unsigned char sha[20])
{
char encoding[65] = "0123456789abcdefghijklmnopqrstuv"
"wxyzABCDEFGHIJKLMNOPQRSTUVWXYZ%$";
int num_bits = 0, acc=0;
int i=0,o=0;
while (o < 26) {
int v;
// expand the accumulator
if (num_bits < 6) {
assert(i != 20);
acc += sha[i++] << num_bits;
num_bits += 8;
}
v = acc & ((1 << 6) - 1);
display[o++] = encoding[v];
acc >>= 6;
num_bits -= 6;
}
assert(num_bits == 20*8 - 26*6);
display[o++] = encoding[acc];
}
#endif // STB_DEFINE
///////////////////////////////////////////////////////////
//
// simplified WINDOWS registry interface... hopefully
// we'll never actually use this?
#if defined(_WIN32)
STB_EXTERN void * stb_reg_open(const char *mode, const char *where); // mode: "rHKLM" or "rHKCU" or "w.."
STB_EXTERN void stb_reg_close(void *reg);
STB_EXTERN int stb_reg_read(void *zreg, const char *str, void *data, unsigned long len);
STB_EXTERN int stb_reg_read_string(void *zreg, const char *str, char *data, int len);
STB_EXTERN void stb_reg_write(void *zreg, const char *str, const void *data, unsigned long len);
STB_EXTERN void stb_reg_write_string(void *zreg, const char *str, const char *data);
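//
// Usage sketch (illustrative only, hence disabled): read a string value from
// HKEY_CURRENT_USER; the key path and value name here are made up.
#if 0
static void example_registry(void)
{
   char buf[256];
   void *reg = stb_reg_open("rHKCU", "Software\\Example\\Settings"); // hypothetical key
   if (reg) {
      if (stb_reg_read_string(reg, "install_dir", buf, sizeof(buf))) // hypothetical value name
         printf("install_dir = %s\n", buf);
      stb_reg_close(reg);
   }
}
#endif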
#if defined(STB_DEFINE) && !defined(STB_NO_REGISTRY)
#define STB_HAS_REGISTRY
#ifndef _WINDOWS_
#define HKEY void *
STB_EXTERN __declspec(dllimport) long __stdcall RegCloseKey ( HKEY hKey );
STB_EXTERN __declspec(dllimport) long __stdcall RegCreateKeyExA ( HKEY hKey, const char * lpSubKey,
int Reserved, char * lpClass, int dwOptions,
int samDesired, void *lpSecurityAttributes, HKEY * phkResult, int * lpdwDisposition );
STB_EXTERN __declspec(dllimport) long __stdcall RegDeleteKeyA ( HKEY hKey, const char * lpSubKey );
STB_EXTERN __declspec(dllimport) long __stdcall RegQueryValueExA ( HKEY hKey, const char * lpValueName,
int * lpReserved, unsigned long * lpType, unsigned char * lpData, unsigned long * lpcbData );
STB_EXTERN __declspec(dllimport) long __stdcall RegSetValueExA ( HKEY hKey, const char * lpValueName,
int Reserved, int dwType, const unsigned char* lpData, int cbData );
STB_EXTERN __declspec(dllimport) long __stdcall RegOpenKeyExA ( HKEY hKey, const char * lpSubKey,
int ulOptions, int samDesired, HKEY * phkResult );
#endif // _WINDOWS_
#define STB__REG_OPTION_NON_VOLATILE 0
#define STB__REG_KEY_ALL_ACCESS 0x000f003f
#define STB__REG_KEY_READ 0x00020019
#ifdef _M_AMD64
#define STB__HKEY_CURRENT_USER 0x80000001ull
#define STB__HKEY_LOCAL_MACHINE 0x80000002ull
#else
#define STB__HKEY_CURRENT_USER 0x80000001
#define STB__HKEY_LOCAL_MACHINE 0x80000002
#endif
void *stb_reg_open(const char *mode, const char *where)
{
long res;
HKEY base;
HKEY zreg;
if (!stb_stricmp(mode+1, "cu") || !stb_stricmp(mode+1, "hkcu"))
base = (HKEY) STB__HKEY_CURRENT_USER;
else if (!stb_stricmp(mode+1, "lm") || !stb_stricmp(mode+1, "hklm"))
base = (HKEY) STB__HKEY_LOCAL_MACHINE;
else
return NULL;
if (mode[0] == 'r')
res = RegOpenKeyExA(base, where, 0, STB__REG_KEY_READ, &zreg);
else if (mode[0] == 'w')
res = RegCreateKeyExA(base, where, 0, NULL, STB__REG_OPTION_NON_VOLATILE, STB__REG_KEY_ALL_ACCESS, NULL, &zreg, NULL);
else
return NULL;
return res ? NULL : zreg;
}
void stb_reg_close(void *reg)
{
RegCloseKey((HKEY) reg);
}
#define STB__REG_SZ 1
#define STB__REG_BINARY 3
#define STB__REG_DWORD 4
int stb_reg_read(void *zreg, const char *str, void *data, unsigned long len)
{
unsigned long type;
unsigned long alen = len;
if (0 == RegQueryValueExA((HKEY) zreg, str, 0, &type, (unsigned char *) data, &len))
if (type == STB__REG_BINARY || type == STB__REG_SZ || type == STB__REG_DWORD) {
if (len < alen)
*((char *) data + len) = 0;
return 1;
}
return 0;
}
void stb_reg_write(void *zreg, const char *str, const void *data, unsigned long len)
{
if (zreg)
RegSetValueExA((HKEY) zreg, str, 0, STB__REG_BINARY, (const unsigned char *) data, len);
}
int stb_reg_read_string(void *zreg, const char *str, char *data, int len)
{
if (!stb_reg_read(zreg, str, data, len)) return 0;
data[len-1] = 0; // force a 0 at the end of the string no matter what
return 1;
}
void stb_reg_write_string(void *zreg, const char *str, const char *data)
{
if (zreg)
RegSetValueExA((HKEY) zreg, str, 0, STB__REG_SZ, (const unsigned char *) data, (int) strlen(data)+1);
}
#endif // STB_DEFINE
#endif // _WIN32
//////////////////////////////////////////////////////////////////////////////
//
// stb_cfg - This is like the registry, but the config info
// is all stored in plain old files where we can
// back them up and restore them easily. The LOCATION of
// the config files is gotten from... the registry!
#ifndef STB_NO_STB_STRINGS
typedef struct stb_cfg_st stb_cfg;
STB_EXTERN stb_cfg * stb_cfg_open(char *config, const char *mode); // mode = "r", "w"
STB_EXTERN void stb_cfg_close(stb_cfg *cfg);
STB_EXTERN int stb_cfg_read(stb_cfg *cfg, char *key, void *value, int len);
STB_EXTERN void stb_cfg_write(stb_cfg *cfg, char *key, void *value, int len);
STB_EXTERN int stb_cfg_read_string(stb_cfg *cfg, char *key, char *value, int len);
STB_EXTERN void stb_cfg_write_string(stb_cfg *cfg, char *key, char *value);
STB_EXTERN int stb_cfg_delete(stb_cfg *cfg, char *key);
STB_EXTERN void stb_cfg_set_directory(char *dir);
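//
// Usage sketch (illustrative only, hence disabled): write a value and read
// it back; the config name "myapp" and key "volume" are made up.
#if 0
static void example_cfg(void)
{
   int volume = 7;
   stb_cfg *c = stb_cfg_open((char *) "myapp", "w");
   if (c) {
      stb_cfg_write(c, (char *) "volume", &volume, sizeof(volume));
      stb_cfg_close(c); // data is written to disk on close
   }
   c = stb_cfg_open((char *) "myapp", "r");
   if (c) {
      if (stb_cfg_read(c, (char *) "volume", &volume, sizeof(volume)))
         printf("volume = %d\n", volume);
      stb_cfg_close(c);
   }
}
#endif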
#ifdef STB_DEFINE
typedef struct
{
char *key;
void *value;
int value_len;
} stb__cfg_item;
struct stb_cfg_st
{
stb__cfg_item *data;
char *loaded_file; // this needs to be freed
FILE *f; // write the data to this file on close
};
static const char *stb__cfg_sig = "sTbCoNfIg!\0\0";
static char stb__cfg_dir[512];
STB_EXTERN void stb_cfg_set_directory(char *dir)
{
stb_p_strcpy_s(stb__cfg_dir, sizeof(stb__cfg_dir), dir);
}
STB_EXTERN stb_cfg * stb_cfg_open(char *config, const char *mode)
{
size_t len;
stb_cfg *z;
char file[512];
if (mode[0] != 'r' && mode[0] != 'w') return NULL;
if (!stb__cfg_dir[0]) {
#ifdef _WIN32
stb_p_strcpy_s(stb__cfg_dir, sizeof(stb__cfg_dir), "c:/stb");
#else
strcpy(stb__cfg_dir, "~/.stbconfig");
#endif
#ifdef STB_HAS_REGISTRY
{
void *reg = stb_reg_open("rHKLM", "Software\\SilverSpaceship\\stb");
if (reg) {
stb_reg_read_string(reg, "config_dir", stb__cfg_dir, sizeof(stb__cfg_dir));
stb_reg_close(reg);
}
}
#endif
}
stb_p_sprintf(file stb_p_size(sizeof(file)), "%s/%s.cfg", stb__cfg_dir, config);
z = (stb_cfg *) stb_malloc(0, sizeof(*z));
z->data = NULL;
z->loaded_file = stb_filec(file, &len);
if (z->loaded_file) {
char *s = z->loaded_file;
if (!memcmp(s, stb__cfg_sig, 12)) {
char *s = z->loaded_file + 12;
while (s < z->loaded_file + len) {
stb__cfg_item a;
int n = *(stb_int16 *) s;
a.key = s+2;
s = s+2 + n;
a.value_len = *(int *) s;
s += 4;
a.value = s;
s += a.value_len;
stb_arr_push(z->data, a);
}
assert(s == z->loaded_file + len);
}
}
if (mode[0] == 'w')
z->f = stb_p_fopen(file, "wb");
else
z->f = NULL;
return z;
}
void stb_cfg_close(stb_cfg *z)
{
if (z->f) {
int i;
// write the file out
fwrite(stb__cfg_sig, 12, 1, z->f);
for (i=0; i < stb_arr_len(z->data); ++i) {
stb_int16 n = (stb_int16) strlen(z->data[i].key)+1;
fwrite(&n, 2, 1, z->f);
fwrite(z->data[i].key, n, 1, z->f);
fwrite(&z->data[i].value_len, 4, 1, z->f);
fwrite(z->data[i].value, z->data[i].value_len, 1, z->f);
}
fclose(z->f);
}
stb_arr_free(z->data);
stb_free(z);
}
int stb_cfg_read(stb_cfg *z, char *key, void *value, int len)
{
int i;
for (i=0; i < stb_arr_len(z->data); ++i) {
if (!stb_stricmp(z->data[i].key, key)) {
int n = stb_min(len, z->data[i].value_len);
memcpy(value, z->data[i].value, n);
if (n < len)
*((char *) value + n) = 0;
return 1;
}
}
return 0;
}
void stb_cfg_write(stb_cfg *z, char *key, void *value, int len)
{
int i;
for (i=0; i < stb_arr_len(z->data); ++i)
if (!stb_stricmp(z->data[i].key, key))
break;
if (i == stb_arr_len(z->data)) {
stb__cfg_item p;
p.key = stb_strdup(key, z);
p.value = NULL;
p.value_len = 0;
stb_arr_push(z->data, p);
}
z->data[i].value = stb_malloc(z, len);
z->data[i].value_len = len;
memcpy(z->data[i].value, value, len);
}
int stb_cfg_delete(stb_cfg *z, char *key)
{
int i;
for (i=0; i < stb_arr_len(z->data); ++i)
if (!stb_stricmp(z->data[i].key, key)) {
stb_arr_fastdelete(z->data, i);
return 1;
}
return 0;
}
int stb_cfg_read_string(stb_cfg *z, char *key, char *value, int len)
{
if (!stb_cfg_read(z, key, value, len)) return 0;
value[len-1] = 0;
return 1;
}
void stb_cfg_write_string(stb_cfg *z, char *key, char *value)
{
stb_cfg_write(z, key, value, (int) strlen(value)+1);
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// stb_dirtree - load a description of a directory tree
// uses a cache and stat()s the directories for changes
// MUCH faster on NTFS, _wrong_ on FAT32, so should
// ignore the db on FAT32
#ifdef _WIN32
typedef struct
{
char * path; // full path from passed-in root
time_t last_modified;
int num_files;
int flag;
} stb_dirtree_dir;
typedef struct
{
char *name; // name relative to path
int dir; // index into dirs[] array
stb_int64 size; // size in bytes (db version 02 supports files > 4GB)
time_t last_modified;
int flag;
} stb_dirtree_file;
typedef struct
{
stb_dirtree_dir *dirs;
stb_dirtree_file *files;
// internal use
void * string_pool; // used to free data en masse
} stb_dirtree;
extern void stb_dirtree_free ( stb_dirtree *d );
extern stb_dirtree *stb_dirtree_get ( char *dir);
extern stb_dirtree *stb_dirtree_get_dir ( char *dir, char *cache_dir);
extern stb_dirtree *stb_dirtree_get_with_file ( char *dir, char *cache_file);
// get a list of all the files recursively underneath 'dir'
//
// cache_file is used to store a copy of the directory tree to speed up
// later calls. It must be unique to 'dir' and the current working
// directory! Otherwise who knows what will happen (a good solution
// is to put it _in_ dir, but this API doesn't force that).
//
// Also, it might be possible to break this if you have two different processes
// do a call to stb_dirtree_get() with the same cache file at about the same
// time, but I _think_ it might just work.
// I needed to build an identical data structure representing the state of
// a mirrored copy WITHOUT bothering to rescan it (i.e. we're mirroring to
// it WITHOUT scanning it, e.g. it's over the net), so this requires access
// to all of the innards.
extern void stb_dirtree_db_add_dir(stb_dirtree *active, char *path, time_t last);
extern void stb_dirtree_db_add_file(stb_dirtree *active, char *name, int dir, stb_int64 size, time_t last);
extern void stb_dirtree_db_read(stb_dirtree *target, char *filename, char *dir);
extern void stb_dirtree_db_write(stb_dirtree *target, char *filename, char *dir);
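//
// Usage sketch (illustrative only, hence disabled): scan a tree with an
// explicit cache file (both paths are made up), then resolve each file's
// directory through its 'dir' index.
#if 0
static void example_dirtree(void)
{
   int i;
   stb_dirtree *t = stb_dirtree_get_with_file((char *) "c:/projects",
                                              (char *) "c:/projects/tree.bin");
   for (i=0; i < stb_arr_len(t->files); ++i)
      printf("%s/%s (%d bytes)\n", t->dirs[t->files[i].dir].path,
             t->files[i].name, (int) t->files[i].size);
   stb_dirtree_free(t);
}
#endif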
#ifdef STB_DEFINE
static void stb__dirtree_add_dir(char *path, time_t last, stb_dirtree *active)
{
stb_dirtree_dir d;
d.last_modified = last;
d.num_files = 0;
d.path = stb_strdup(path, active->string_pool);
stb_arr_push(active->dirs, d);
}
static void stb__dirtree_add_file(char *name, int dir, stb_int64 size, time_t last, stb_dirtree *active)
{
stb_dirtree_file f;
f.dir = dir;
f.size = size;
f.last_modified = last;
f.name = stb_strdup(name, active->string_pool);
++active->dirs[dir].num_files;
stb_arr_push(active->files, f);
}
// version 02 supports > 4GB files
static char stb__signature[12] = { 's', 'T', 'b', 'D', 'i', 'R', 't', 'R', 'e', 'E', '0', '2' };
static void stb__dirtree_save_db(char *filename, stb_dirtree *data, char *root)
{
int i, num_dirs_final=0, num_files_final;
char *info = root ? root : (char*)"";
int *remap;
FILE *f = stb_p_fopen(filename, "wb");
if (!f) return;
fwrite(stb__signature, sizeof(stb__signature), 1, f);
fwrite(info, strlen(info)+1, 1, f);
// need to be slightly tricky and not write out NULLed directories, nor the root
// build remapping table of all dirs we'll be writing out
remap = (int *) malloc(sizeof(remap[0]) * stb_arr_len(data->dirs));
for (i=0; i < stb_arr_len(data->dirs); ++i) {
if (data->dirs[i].path == NULL || (root && 0==stb_stricmp(data->dirs[i].path, root))) {
remap[i] = -1;
} else {
remap[i] = num_dirs_final++;
}
}
fwrite(&num_dirs_final, 4, 1, f);
for (i=0; i < stb_arr_len(data->dirs); ++i) {
if (remap[i] >= 0) {
fwrite(&data->dirs[i].last_modified, 4, 1, f);
stb_fput_string(f, data->dirs[i].path);
}
}
num_files_final = 0;
for (i=0; i < stb_arr_len(data->files); ++i)
if (remap[data->files[i].dir] >= 0 && data->files[i].name)
++num_files_final;
fwrite(&num_files_final, 4, 1, f);
for (i=0; i < stb_arr_len(data->files); ++i) {
if (remap[data->files[i].dir] >= 0 && data->files[i].name) {
stb_fput_ranged(f, remap[data->files[i].dir], 0, num_dirs_final);
stb_fput_varlen64(f, data->files[i].size);
fwrite(&data->files[i].last_modified, 4, 1, f);
stb_fput_string(f, data->files[i].name);
}
}
fclose(f);
}
// note: stomps any existing data, rather than appending
static void stb__dirtree_load_db(char *filename, stb_dirtree *data, char *dir)
{
char sig[2048];
int i,n;
FILE *f = stb_p_fopen(filename, "rb");
if (!f) return;
data->string_pool = stb_malloc(0,1);
fread(sig, sizeof(stb__signature), 1, f);
if (memcmp(stb__signature, sig, sizeof(stb__signature))) { fclose(f); return; }
if (!fread(sig, strlen(dir)+1, 1, f)) { fclose(f); return; }
if (stb_stricmp(sig,dir)) { fclose(f); return; }
// we can just read them straight in, because they're guaranteed to be valid
fread(&n, 4, 1, f);
stb_arr_setlen(data->dirs, n);
for(i=0; i < stb_arr_len(data->dirs); ++i) {
fread(&data->dirs[i].last_modified, 4, 1, f);
data->dirs[i].path = stb_fget_string(f, data->string_pool);
if (data->dirs[i].path == NULL) goto bail;
}
fread(&n, 4, 1, f);
stb_arr_setlen(data->files, n);
for (i=0; i < stb_arr_len(data->files); ++i) {
data->files[i].dir = stb_fget_ranged(f, 0, stb_arr_len(data->dirs));
data->files[i].size = stb_fget_varlen64(f);
fread(&data->files[i].last_modified, 4, 1, f);
data->files[i].name = stb_fget_string(f, data->string_pool);
if (data->files[i].name == NULL) goto bail;
}
if (0) {
bail:
stb_arr_free(data->dirs);
stb_arr_free(data->files);
}
fclose(f);
}
FILE *hlog;
static int stb__dircount, stb__dircount_mask, stb__showfile;
static void stb__dirtree_scandir(char *path, time_t last_time, stb_dirtree *active)
{
// this is dumb depth first; theoretically it might be faster
// to fully traverse each directory before visiting its children,
// but it's complicated and didn't seem like a gain in the test app
int n;
struct _wfinddatai64_t c_file;
long hFile;
stb__wchar full_path[1024];
int has_slash;
if (stb__showfile) printf("<");
has_slash = (path[0] && path[strlen(path)-1] == '/');
// @TODO: do this concatenation without using swprintf to avoid this mess:
#if (defined(_MSC_VER) && _MSC_VER < 1400) // || (defined(__clang__))
// confusingly, Windows Kits\10 needs to go down this path?!?
// except now it doesn't, I don't know what changed
if (has_slash)
swprintf(full_path, L"%s*", stb__from_utf8(path));
else
swprintf(full_path, L"%s/*", stb__from_utf8(path));
#else
if (has_slash)
swprintf((wchar_t *) full_path, (size_t) 1024, L"%s*", (wchar_t *) stb__from_utf8(path));
else
swprintf((wchar_t *) full_path, (size_t) 1024, L"%s/*", (wchar_t *) stb__from_utf8(path));
#endif
// it's possible this directory is already present: that means it was in the
// cache, but its parent wasn't... in that case, we're done with it
if (stb__showfile) printf("C[%d]", stb_arr_len(active->dirs));
for (n=0; n < stb_arr_len(active->dirs); ++n)
if (0 == stb_stricmp(active->dirs[n].path, path)) {
if (stb__showfile) printf("D");
return;
}
if (stb__showfile) printf("E");
// otherwise, we need to add it
stb__dirtree_add_dir(path, last_time, active);
n = stb_arr_lastn(active->dirs);
if (stb__showfile) printf("[");
if( (hFile = (long) _wfindfirsti64( (wchar_t *) full_path, &c_file )) != -1L ) {
do {
if (stb__showfile) printf(")");
if (c_file.attrib & _A_SUBDIR) {
// ignore subdirectories starting with '.', e.g. "." and ".."
if (c_file.name[0] != '.') {
char *new_path = (char *) full_path;
char *temp = stb__to_utf8((stb__wchar *) c_file.name);
if (has_slash)
stb_p_sprintf(new_path stb_p_size(sizeof(full_path)), "%s%s", path, temp);
else
stb_p_sprintf(new_path stb_p_size(sizeof(full_path)), "%s/%s", path, temp);
if (stb__dircount_mask) {
++stb__dircount;
if (!(stb__dircount & stb__dircount_mask)) {
char dummy_path[128], *pad;
stb_strncpy(dummy_path, new_path, sizeof(dummy_path)-1);
if (strlen(dummy_path) > 96) {
stb_p_strcpy_s(dummy_path+96/2-1,128, "...");
stb_p_strcpy_s(dummy_path+96/2+2,128, new_path + strlen(new_path)-96/2+2);
}
pad = dummy_path + strlen(dummy_path);
while (pad < dummy_path+98)
*pad++ = ' ';
*pad = 0;
printf("%s\r", dummy_path);
#if 0
if (hlog == 0) {
hlog = stb_p_fopen("c:/x/temp.log", "w");
fprintf(hlog, "%s\n", dummy_path);
}
#endif
}
}
stb__dirtree_scandir(new_path, c_file.time_write, active);
}
} else {
char *temp = stb__to_utf8((stb__wchar *) c_file.name);
stb__dirtree_add_file(temp, n, c_file.size, c_file.time_write, active);
}
if (stb__showfile) printf("(");
} while( _wfindnexti64( hFile, &c_file ) == 0 );
if (stb__showfile) printf("]");
_findclose( hFile );
}
if (stb__showfile) printf(">\n");
}
// scan the database and see if it's all valid
static int stb__dirtree_update_db(stb_dirtree *db, stb_dirtree *active)
{
int changes_detected = STB_FALSE;
int i;
int *remap;
int *rescan=NULL;
remap = (int *) malloc(sizeof(remap[0]) * stb_arr_len(db->dirs));
memset(remap, 0, sizeof(remap[0]) * stb_arr_len(db->dirs));
rescan = NULL;
for (i=0; i < stb_arr_len(db->dirs); ++i) {
struct _stat info;
if (stb__dircount_mask) {
++stb__dircount;
if (!(stb__dircount & stb__dircount_mask)) {
printf(".");
}
}
if (0 == _stat(db->dirs[i].path, &info)) {
if (info.st_mode & _S_IFDIR) {
// it's still a directory, as expected
int n = abs((int) (info.st_mtime - db->dirs[i].last_modified));
if (n > 1 && n != 3600) { // the 3600 is a hack because sometimes this jumps for no apparent reason, even when no time zone or DST issues are at play
// it's changed! force a rescan
// we don't want to scan it until we've stat()d its
// subdirs, though, so we queue it
if (stb__showfile) printf("Changed: %s - %08x:%08x\n", db->dirs[i].path, (unsigned int) db->dirs[i].last_modified, (unsigned int) info.st_mtime);
stb_arr_push(rescan, i);
// update the last_mod time
db->dirs[i].last_modified = info.st_mtime;
// ignore existing files in this dir
remap[i] = -1;
changes_detected = STB_TRUE;
} else {
// it hasn't changed, just copy it through unchanged
stb__dirtree_add_dir(db->dirs[i].path, db->dirs[i].last_modified, active);
remap[i] = stb_arr_lastn(active->dirs);
}
} else {
// this path used to refer to a directory, but now it's a file!
// assume that the parent directory is going to be forced to rescan anyway
goto delete_entry;
}
} else {
delete_entry:
// directory no longer exists, so don't copy it
// we don't free it because it's in the string pool now
db->dirs[i].path = NULL;
remap[i] = -1;
changes_detected = STB_TRUE;
}
}
// at this point, we have:
//
// <rescan> holds a list of directory indices that need to be scanned due to being out of date
// <remap> holds the directory index in <active> for each dir in <db>, if it exists; -1 if not
// directories in <rescan> are not in <active> yet
// so we can go ahead and remap all the known files right now
for (i=0; i < stb_arr_len(db->files); ++i) {
int dir = db->files[i].dir;
if (remap[dir] >= 0) {
stb__dirtree_add_file(db->files[i].name, remap[dir], db->files[i].size, db->files[i].last_modified, active);
}
}
// at this point we're done with db->files, and done with remap
free(remap);
// now scan those directories using the standard scan
for (i=0; i < stb_arr_len(rescan); ++i) {
int z = rescan[i];
stb__dirtree_scandir(db->dirs[z].path, db->dirs[z].last_modified, active);
}
stb_arr_free(rescan);
return changes_detected;
}
static void stb__dirtree_free_raw(stb_dirtree *d)
{
stb_free(d->string_pool);
stb_arr_free(d->dirs);
stb_arr_free(d->files);
}
stb_dirtree *stb_dirtree_get_with_file(char *dir, char *cache_file)
{
stb_dirtree *output = (stb_dirtree *) malloc(sizeof(*output));
stb_dirtree db,active;
int prev_dir_count, cache_mismatch;
char *stripped_dir; // store the directory name without a trailing '/' or '\\'
// load the database of last-known state on disk
db.string_pool = NULL;
db.files = NULL;
db.dirs = NULL;
stripped_dir = stb_strip_final_slash(stb_p_strdup(dir));
if (cache_file != NULL)
stb__dirtree_load_db(cache_file, &db, stripped_dir);
else if (stb__showfile)
printf("No cache file\n");
active.files = NULL;
active.dirs = NULL;
active.string_pool = stb_malloc(0,1); // @TODO: share string pools between both?
// check all the directories in the database; make note if
// anything we scanned had changed, and rescan those things
cache_mismatch = stb__dirtree_update_db(&db, &active);
// check the root tree
prev_dir_count = stb_arr_len(active.dirs); // record how many directories we've seen
stb__dirtree_scandir(stripped_dir, 0, &active); // no last_modified time available for root
if (stb__dircount_mask)
printf(" \r");
// done with the DB; write it back out if any changes, i.e. either
// 1. any inconsistency found between cached information and actual disk
// or 2. if scanning the root found any new directories--which we detect because
// more than one directory got added to the active db during that scan
   // (don't attempt a save if no cache file was supplied)
   if (cache_file != NULL && (cache_mismatch || stb_arr_len(active.dirs) > prev_dir_count+1))
      stb__dirtree_save_db(cache_file, &active, stripped_dir);
free(stripped_dir);
stb__dirtree_free_raw(&db);
*output = active;
return output;
}
stb_dirtree *stb_dirtree_get_dir(char *dir, char *cache_dir)
{
int i;
stb_uint8 sha[20];
char dir_lower[1024];
char cache_file[1024],*s;
if (cache_dir == NULL)
return stb_dirtree_get_with_file(dir, NULL);
stb_p_strcpy_s(dir_lower, sizeof(dir_lower), dir);
stb_tolower(dir_lower);
stb_sha1(sha, (unsigned char *) dir_lower, (unsigned int) strlen(dir_lower));
stb_p_strcpy_s(cache_file, sizeof(cache_file), cache_dir);
s = cache_file + strlen(cache_file);
if (s[-1] != '/' && s[-1] != '\\') *s++ = '/';
stb_p_strcpy_s(s, sizeof(cache_file), "dirtree_");
s += strlen(s);
for (i=0; i < 8; ++i) {
char *hex = (char*)"0123456789abcdef";
stb_uint z = sha[i];
*s++ = hex[z >> 4];
*s++ = hex[z & 15];
}
stb_p_strcpy_s(s, sizeof(cache_file), ".bin");
return stb_dirtree_get_with_file(dir, cache_file);
}
stb_dirtree *stb_dirtree_get(char *dir)
{
char cache_dir[256];
stb_p_strcpy_s(cache_dir, sizeof(cache_dir), "c:/bindata");
#ifdef STB_HAS_REGISTRY
{
void *reg = stb_reg_open("rHKLM", "Software\\SilverSpaceship\\stb");
if (reg) {
stb_reg_read(reg, "dirtree", cache_dir, sizeof(cache_dir));
stb_reg_close(reg);
}
}
#endif
return stb_dirtree_get_dir(dir, cache_dir);
}
void stb_dirtree_free(stb_dirtree *d)
{
stb__dirtree_free_raw(d);
free(d);
}
void stb_dirtree_db_add_dir(stb_dirtree *active, char *path, time_t last)
{
stb__dirtree_add_dir(path, last, active);
}
void stb_dirtree_db_add_file(stb_dirtree *active, char *name, int dir, stb_int64 size, time_t last)
{
stb__dirtree_add_file(name, dir, size, last, active);
}
void stb_dirtree_db_read(stb_dirtree *target, char *filename, char *dir)
{
char *s = stb_strip_final_slash(stb_p_strdup(dir));
target->dirs = 0;
target->files = 0;
target->string_pool = 0;
stb__dirtree_load_db(filename, target, s);
free(s);
}
void stb_dirtree_db_write(stb_dirtree *target, char *filename, char *dir)
{
stb__dirtree_save_db(filename, target, 0); // don't strip out any directories
}
#endif // STB_DEFINE
#endif // _WIN32
#endif // STB_NO_STB_STRINGS
//////////////////////////////////////////////////////////////////////////////
//
// STB_MALLOC_WRAPPER
//
// you can use the wrapper functions with your own malloc wrapper,
// or define STB_MALLOC_WRAPPER project-wide to have
// malloc/free/realloc/strdup all get vectored to it
// note: this emits very specific error messages that can be googled and
// traced back to stb.h, so don't use it if you don't want any
// stb.h-identifiable strings in your binary
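//
// Usage sketch (illustrative only, hence disabled): route your own
// allocator's traffic through the wrapper so stb_wrapper_dump() can report
// leaks with file/line info; the example_* names are made up.
#if 0
static void *example_tracked_malloc(size_t sz, char *file, int line)
{
   void *p = malloc(sz);
   stb_wrapper_malloc(p, sz, file, line); // no-op if p is NULL
   return p;
}
static void example_tracked_free(void *p, char *file, int line)
{
   stb_wrapper_free(p, file, line); // stb_fatal()s on double-free
   free(p);
}
#define example_malloc(sz) example_tracked_malloc((sz), (char *) __FILE__, __LINE__)
#define example_free(p)    example_tracked_free((p), (char *) __FILE__, __LINE__)
#endif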
#if defined(STB_DEFINE) && !defined(STB_NO_STB_STRINGS)
typedef struct
{
void *p;
char *file;
int line;
size_t size;
} stb_malloc_record;
#ifndef STB_MALLOC_HISTORY_COUNT
#define STB_MALLOC_HISTORY_COUNT 50 // 800 bytes
#endif
stb_malloc_record *stb__allocations;
static int stb__alloc_size, stb__alloc_limit, stb__alloc_mask;
int stb__alloc_count;
stb_malloc_record stb__alloc_history[STB_MALLOC_HISTORY_COUNT];
int stb__history_pos;
static int stb__hashfind(void *p)
{
stb_uint32 h = stb_hashptr(p);
int s,n = h & stb__alloc_mask;
if (stb__allocations[n].p == p)
return n;
s = stb_rehash(h)|1;
for(;;) {
if (stb__allocations[n].p == NULL)
return -1;
n = (n+s) & stb__alloc_mask;
if (stb__allocations[n].p == p)
return n;
}
}
size_t stb_wrapper_allocsize(void *p)
{
int n = stb__hashfind(p);
if (n < 0) return 0;
return stb__allocations[n].size;
}
static int stb__historyfind(void *p)
{
int n = stb__history_pos;
int i;
for (i=0; i < STB_MALLOC_HISTORY_COUNT; ++i) {
if (--n < 0) n = STB_MALLOC_HISTORY_COUNT-1;
if (stb__alloc_history[n].p == p)
return n;
}
return -1;
}
static void stb__add_alloc(void *p, size_t sz, char *file, int line);
static void stb__grow_alloc(void)
{
int i,old_num = stb__alloc_size;
stb_malloc_record *old = stb__allocations;
if (stb__alloc_size == 0)
stb__alloc_size = 64;
else
stb__alloc_size *= 2;
stb__allocations = (stb_malloc_record *) stb__realloc_raw(NULL, stb__alloc_size * sizeof(stb__allocations[0]));
if (stb__allocations == NULL)
stb_fatal("Internal error: couldn't grow malloc wrapper table");
memset(stb__allocations, 0, stb__alloc_size * sizeof(stb__allocations[0]));
stb__alloc_limit = (stb__alloc_size*3)>>2;
stb__alloc_mask = stb__alloc_size-1;
stb__alloc_count = 0;
for (i=0; i < old_num; ++i)
if (old[i].p > STB_DEL) {
stb__add_alloc(old[i].p, old[i].size, old[i].file, old[i].line);
assert(stb__hashfind(old[i].p) >= 0);
}
for (i=0; i < old_num; ++i)
if (old[i].p > STB_DEL)
assert(stb__hashfind(old[i].p) >= 0);
stb__realloc_raw(old, 0);
}
static void stb__add_alloc(void *p, size_t sz, char *file, int line)
{
stb_uint32 h;
int n;
if (stb__alloc_count >= stb__alloc_limit)
stb__grow_alloc();
h = stb_hashptr(p);
n = h & stb__alloc_mask;
if (stb__allocations[n].p > STB_DEL) {
int s = stb_rehash(h)|1;
do {
n = (n+s) & stb__alloc_mask;
} while (stb__allocations[n].p > STB_DEL);
}
assert(stb__allocations[n].p == NULL || stb__allocations[n].p == STB_DEL);
stb__allocations[n].p = p;
stb__allocations[n].size = sz;
stb__allocations[n].line = line;
stb__allocations[n].file = file;
++stb__alloc_count;
}
static void stb__remove_alloc(int n, char *file, int line)
{
stb__alloc_history[stb__history_pos] = stb__allocations[n];
stb__alloc_history[stb__history_pos].file = file;
stb__alloc_history[stb__history_pos].line = line;
if (++stb__history_pos == STB_MALLOC_HISTORY_COUNT)
stb__history_pos = 0;
stb__allocations[n].p = STB_DEL;
--stb__alloc_count;
}
void stb_wrapper_malloc(void *p, size_t sz, char *file, int line)
{
if (!p) return;
stb__add_alloc(p,sz,file,line);
}
void stb_wrapper_free(void *p, char *file, int line)
{
int n;
if (p == NULL) return;
n = stb__hashfind(p);
if (n >= 0)
stb__remove_alloc(n, file, line);
else {
// tried to free something we hadn't allocated!
n = stb__historyfind(p);
assert(0); /* NOTREACHED */
if (n >= 0)
stb_fatal("Attempted to free %d-byte block %p at %s:%d previously freed/realloced at %s:%d",
stb__alloc_history[n].size, p,
file, line,
stb__alloc_history[n].file, stb__alloc_history[n].line);
else
stb_fatal("Attempted to free unknown block %p at %s:%d", p, file,line);
}
}
void stb_wrapper_check(void *p)
{
int n;
if (p == NULL) return;
n = stb__hashfind(p);
if (n >= 0) return;
for (n=0; n < stb__alloc_size; ++n)
if (stb__allocations[n].p == p)
stb_fatal("Internal error: pointer %p was allocated, but hash search failed", p);
// tried to free something that wasn't allocated!
n = stb__historyfind(p);
if (n >= 0)
stb_fatal("Checked %d-byte block %p previously freed/realloced at %s:%d",
stb__alloc_history[n].size, p,
stb__alloc_history[n].file, stb__alloc_history[n].line);
stb_fatal("Checked unknown block %p");
}
void stb_wrapper_realloc(void *p, void *q, size_t sz, char *file, int line)
{
int n;
if (p == NULL) { stb_wrapper_malloc(q, sz, file, line); return; }
if (q == NULL) return; // nothing happened
n = stb__hashfind(p);
if (n == -1) {
// tried to free something we hadn't allocated!
// this is weird, though, because we got past the realloc!
n = stb__historyfind(p);
assert(0); /* NOTREACHED */
if (n >= 0)
stb_fatal("Attempted to realloc %d-byte block %p at %s:%d previously freed/realloced at %s:%d",
stb__alloc_history[n].size, p,
file, line,
stb__alloc_history[n].file, stb__alloc_history[n].line);
else
stb_fatal("Attempted to realloc unknown block %p at %s:%d", p, file,line);
} else {
if (q == p) {
stb__allocations[n].size = sz;
stb__allocations[n].file = file;
stb__allocations[n].line = line;
} else {
stb__remove_alloc(n, file, line);
stb__add_alloc(q,sz,file,line);
}
}
}
void stb_wrapper_listall(void (*func)(void *ptr, size_t sz, char *file, int line))
{
int i;
for (i=0; i < stb__alloc_size; ++i)
if (stb__allocations[i].p > STB_DEL)
func(stb__allocations[i].p , stb__allocations[i].size,
stb__allocations[i].file, stb__allocations[i].line);
}
void stb_wrapper_dump(char *filename)
{
int i;
FILE *f = stb_p_fopen(filename, "w");
if (!f) return;
for (i=0; i < stb__alloc_size; ++i)
if (stb__allocations[i].p > STB_DEL)
fprintf(f, "%p %7d - %4d %s\n",
stb__allocations[i].p , (int) stb__allocations[i].size,
stb__allocations[i].line, stb__allocations[i].file);
}
#endif // STB_DEFINE
//////////////////////////////////////////////////////////////////////////////
//
// stb_pointer_set
//
//
// For data structures that support querying by key, data structure
// classes always hand-wave away the issue of what to do if two entries
// have the same key: basically, store a linked list of all the nodes
// which have the same key (a LISP-style list).
//
// The thing is, it's not that trivial. If you have an O(log n)
// lookup data structure, but then n/4 items have the same value,
// you don't want to spend O(n) time scanning that list when
// deleting an item if you already have a pointer to the item.
// (You have to spend O(n) time enumerating all the items with
// a given key, sure, and you can't accelerate deleting a particular
// item if you only have the key, not a pointer to the item.)
//
// I'm going to call this data structure, whatever it turns out to
// be, a "pointer set", because we don't store any associated data for
// items in this data structure, we just answer the question of
// whether an item is in it or not (it's effectively one bit per pointer).
// Technically they don't have to be pointers; you could cast ints
// to (void *) if you want, but you can't store 0 or 1 because of the
// hash table.
//
// Since the fastest data structure we might want to add support for
// identical-keys to is a hash table with O(1)-ish lookup time,
// that means that the conceptual "linked list of all items with
// the same indexed value" that we build needs to have the same
// performance; that way when we index a table we think is arbitrary
// ints, but in fact half of them are 0, we don't get screwed.
//
// Therefore, it needs to be a hash table, at least when it gets
// large. On the other hand, when the data has totally arbitrary ints
// or floats, there won't be many collisions, and we'll have tons of
// 1-item bitmaps. Those would be grossly inefficient as hash tables:
// a hash table is reasonably efficient per-item when it's large, but
// not when it's small. So we need to do something
// Judy-like and use different strategies depending on the size.
//
// Like Judy, we'll use the bottom bit to encode the strategy:
//
// bottom bits:
// 00 - direct pointer
// 01 - 4-item bucket (16 bytes, no length, NULLs)
// 10 - N-item array
// 11 - hash table
typedef struct stb_ps stb_ps;
STB_EXTERN int stb_ps_find (stb_ps *ps, void *value);
STB_EXTERN stb_ps * stb_ps_add (stb_ps *ps, void *value);
STB_EXTERN stb_ps * stb_ps_remove(stb_ps *ps, void *value);
STB_EXTERN stb_ps * stb_ps_remove_any(stb_ps *ps, void **value);
STB_EXTERN void stb_ps_delete(stb_ps *ps);
STB_EXTERN int stb_ps_count (stb_ps *ps);
STB_EXTERN stb_ps * stb_ps_copy (stb_ps *ps);
STB_EXTERN int stb_ps_subset(stb_ps *bigger, stb_ps *smaller);
STB_EXTERN int stb_ps_eq (stb_ps *p0, stb_ps *p1);
STB_EXTERN void ** stb_ps_getlist (stb_ps *ps, int *count);
STB_EXTERN int stb_ps_writelist(stb_ps *ps, void **list, int size );
// enum and fastlist don't allocate storage; fastlist returns the set's
// internal storage, so you must consume the list before the set is
// modified in any way
STB_EXTERN int stb_ps_enum (stb_ps *ps, void *data,
int (*func)(void *value, void*data) );
STB_EXTERN void ** stb_ps_fastlist(stb_ps *ps, int *count);
// result:
// returns a list, *count is the length of that list,
// but some entries of the list may be invalid;
// test with 'stb_ps_fastlist_valid(x)'
#define stb_ps_fastlist_valid(x) ((stb_uinta) (x) > 1)
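//
// Usage sketch (illustrative only, hence disabled): a set starts out as
// NULL; add/remove may re-encode the set, so always assign their result
// back. Items are assumed to be distinct non-NULL pointers.
#if 0
static void example_pointer_set(void **items, int n)
{
   int i, count;
   stb_ps *set = NULL;
   for (i=0; i < n; ++i)
      set = stb_ps_add(set, items[i]);
   // fastlist returns internal storage: skip invalid slots, and don't
   // modify the set while using the list
   {
      void **list = stb_ps_fastlist(set, &count);
      for (i=0; i < count; ++i)
         if (stb_ps_fastlist_valid(list[i]))
            printf("%p\n", list[i]);
   }
   while (stb_ps_count(set)) {
      void *v;
      set = stb_ps_remove_any(set, &v);
   }
   stb_ps_delete(set); // safe on the now-NULL (direct) encoding
}
#endif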
#ifdef STB_DEFINE
enum
{
STB_ps_direct = 0,
STB_ps_bucket = 1,
STB_ps_array = 2,
STB_ps_hash = 3,
};
#define STB_BUCKET_SIZE 4
typedef struct
{
void *p[STB_BUCKET_SIZE];
} stb_ps_bucket;
#define GetBucket(p) ((stb_ps_bucket *) ((char *) (p) - STB_ps_bucket))
#define EncodeBucket(p) ((stb_ps *) ((char *) (p) + STB_ps_bucket))
static void stb_bucket_free(stb_ps_bucket *b)
{
free(b);
}
static stb_ps_bucket *stb_bucket_create2(void *v0, void *v1)
{
stb_ps_bucket *b = (stb_ps_bucket*) malloc(sizeof(*b));
b->p[0] = v0;
b->p[1] = v1;
b->p[2] = NULL;
b->p[3] = NULL;
return b;
}
static stb_ps_bucket * stb_bucket_create3(void **v)
{
stb_ps_bucket *b = (stb_ps_bucket*) malloc(sizeof(*b));
b->p[0] = v[0];
b->p[1] = v[1];
b->p[2] = v[2];
b->p[3] = NULL;
return b;
}
// could use stb_arr, but this will save us memory
typedef struct
{
int count;
void *p[1];
} stb_ps_array;
#define GetArray(p) ((stb_ps_array *) ((char *) (p) - STB_ps_array))
#define EncodeArray(p) ((stb_ps *) ((char *) (p) + STB_ps_array))
static int stb_ps_array_max = 13;
typedef struct
{
int size, mask;
int count, count_deletes;
int grow_threshhold;
int shrink_threshhold;
int rehash_threshhold;
int any_offset;
void *table[1];
} stb_ps_hash;
#define GetHash(p) ((stb_ps_hash *) ((char *) (p) - STB_ps_hash))
#define EncodeHash(p) ((stb_ps *) ((char *) (p) + STB_ps_hash))
#define stb_ps_empty(v) (((stb_uinta) (v)) <= 1) // pointer-sized cast so 64-bit pointers aren't truncated
static stb_ps_hash *stb_ps_makehash(int size, int old_size, void **old_data)
{
int i;
stb_ps_hash *h = (stb_ps_hash *) malloc(sizeof(*h) + (size-1) * sizeof(h->table[0]));
assert(stb_is_pow2(size));
h->size = size;
h->mask = size-1;
h->shrink_threshhold = (int) (0.3f * size);
h-> grow_threshhold = (int) (0.8f * size);
h->rehash_threshhold = (int) (0.9f * size);
h->count = 0;
h->count_deletes = 0;
h->any_offset = 0;
memset(h->table, 0, size * sizeof(h->table[0]));
for (i=0; i < old_size; ++i)
if (!stb_ps_empty((size_t)old_data[i]))
stb_ps_add(EncodeHash(h), old_data[i]);
return h;
}
void stb_ps_delete(stb_ps *ps)
{
switch (3 & (int)(size_t) ps) {
case STB_ps_direct: break;
case STB_ps_bucket: stb_bucket_free(GetBucket(ps)); break;
case STB_ps_array : free(GetArray(ps)); break;
case STB_ps_hash : free(GetHash(ps)); break;
}
}
stb_ps *stb_ps_copy(stb_ps *ps)
{
int i;
   // cases ordered by expected frequency (power-law distribution of set sizes)
switch (3 & (int)(size_t) ps) {
case STB_ps_direct: return ps;
case STB_ps_bucket: {
stb_ps_bucket *n = (stb_ps_bucket *) malloc(sizeof(*n));
*n = *GetBucket(ps);
return EncodeBucket(n);
}
case STB_ps_array: {
stb_ps_array *a = GetArray(ps);
stb_ps_array *n = (stb_ps_array *) malloc(sizeof(*n) + stb_ps_array_max * sizeof(n->p[0]));
n->count = a->count;
for (i=0; i < a->count; ++i)
n->p[i] = a->p[i];
return EncodeArray(n);
}
case STB_ps_hash: {
stb_ps_hash *h = GetHash(ps);
stb_ps_hash *n = stb_ps_makehash(h->size, h->size, h->table);
return EncodeHash(n);
}
}
assert(0); /* NOTREACHED */
return NULL;
}
int stb_ps_find(stb_ps *ps, void *value)
{
int i, code = 3 & (int)(size_t) ps;
assert((3 & (int)(size_t) value) == STB_ps_direct);
assert(stb_ps_fastlist_valid(value));
// not a switch: order based on expected performance/power-law distribution
if (code == STB_ps_direct)
return value == ps;
if (code == STB_ps_bucket) {
stb_ps_bucket *b = GetBucket(ps);
assert(STB_BUCKET_SIZE == 4);
if (b->p[0] == value || b->p[1] == value ||
b->p[2] == value || b->p[3] == value)
return STB_TRUE;
return STB_FALSE;
}
if (code == STB_ps_array) {
stb_ps_array *a = GetArray(ps);
for (i=0; i < a->count; ++i)
if (a->p[i] == value)
return STB_TRUE;
return STB_FALSE;
} else {
stb_ps_hash *h = GetHash(ps);
stb_uint32 hash = stb_hashptr(value);
stb_uint32 s, n = hash & h->mask;
void **t = h->table;
if (t[n] == value) return STB_TRUE;
if (t[n] == NULL) return STB_FALSE;
s = stb_rehash(hash) | 1;
do {
n = (n + s) & h->mask;
if (t[n] == value) return STB_TRUE;
} while (t[n] != NULL);
return STB_FALSE;
}
}
stb_ps * stb_ps_add (stb_ps *ps, void *value)
{
#ifdef STB_DEBUG
assert(!stb_ps_find(ps,value));
#endif
if (value == NULL) return ps; // ignore NULL adds to avoid bad breakage
assert((3 & (int)(size_t) value) == STB_ps_direct);
assert(stb_ps_fastlist_valid(value));
assert(value != STB_DEL); // STB_DEL is less likely
switch (3 & (int)(size_t) ps) {
case STB_ps_direct:
if (ps == NULL) return (stb_ps *) value;
return EncodeBucket(stb_bucket_create2(ps,value));
case STB_ps_bucket: {
stb_ps_bucket *b = GetBucket(ps);
stb_ps_array *a;
assert(STB_BUCKET_SIZE == 4);
if (b->p[0] == NULL) { b->p[0] = value; return ps; }
if (b->p[1] == NULL) { b->p[1] = value; return ps; }
if (b->p[2] == NULL) { b->p[2] = value; return ps; }
if (b->p[3] == NULL) { b->p[3] = value; return ps; }
a = (stb_ps_array *) malloc(sizeof(*a) + 7 * sizeof(a->p[0])); // 8 slots, must be 2^k
memcpy(a->p, b, sizeof(*b));
a->p[4] = value;
a->count = 5;
stb_bucket_free(b);
return EncodeArray(a);
}
case STB_ps_array: {
stb_ps_array *a = GetArray(ps);
if (a->count == stb_ps_array_max) {
// promote from array to hash
stb_ps_hash *h = stb_ps_makehash(2 << stb_log2_ceil(a->count), a->count, a->p);
free(a);
return stb_ps_add(EncodeHash(h), value);
}
// do we need to resize the array? the array doubles in size when it
// crosses a power-of-two
if ((a->count & (a->count-1))==0) {
int newsize = a->count*2;
// clamp newsize to max if:
// 1. it's larger than max
// 2. newsize*1.5 is larger than max (to avoid extra resizing)
if (newsize + a->count > stb_ps_array_max)
newsize = stb_ps_array_max;
a = (stb_ps_array *) realloc(a, sizeof(*a) + (newsize-1) * sizeof(a->p[0]));
}
a->p[a->count++] = value;
return EncodeArray(a);
}
case STB_ps_hash: {
stb_ps_hash *h = GetHash(ps);
stb_uint32 hash = stb_hashptr(value);
stb_uint32 n = hash & h->mask;
void **t = h->table;
// find first NULL or STB_DEL entry
if (!stb_ps_empty((size_t)t[n])) {
stb_uint32 s = stb_rehash(hash) | 1;
do {
n = (n + s) & h->mask;
} while (!stb_ps_empty((size_t)t[n]));
}
if (t[n] == STB_DEL)
-- h->count_deletes;
t[n] = value;
++ h->count;
if (h->count == h->grow_threshhold) {
stb_ps_hash *h2 = stb_ps_makehash(h->size*2, h->size, t);
free(h);
return EncodeHash(h2);
}
if (h->count + h->count_deletes == h->rehash_threshhold) {
stb_ps_hash *h2 = stb_ps_makehash(h->size, h->size, t);
free(h);
return EncodeHash(h2);
}
return ps;
}
}
return NULL; /* NOTREACHED */
}
stb_ps *stb_ps_remove(stb_ps *ps, void *value)
{
#ifdef STB_DEBUG
assert(stb_ps_find(ps, value));
#endif
assert((3 & (int)(size_t) value) == STB_ps_direct);
if (value == NULL) return ps; // ignore NULL removes to avoid bad breakage
switch (3 & (int)(size_t) ps) {
case STB_ps_direct:
return ps == value ? NULL : ps;
case STB_ps_bucket: {
stb_ps_bucket *b = GetBucket(ps);
int count=0;
assert(STB_BUCKET_SIZE == 4);
if (b->p[0] == value) b->p[0] = NULL; else count += (b->p[0] != NULL);
if (b->p[1] == value) b->p[1] = NULL; else count += (b->p[1] != NULL);
if (b->p[2] == value) b->p[2] = NULL; else count += (b->p[2] != NULL);
if (b->p[3] == value) b->p[3] = NULL; else count += (b->p[3] != NULL);
if (count == 1) { // shrink bucket at size 1
value = b->p[0];
if (value == NULL) value = b->p[1];
if (value == NULL) value = b->p[2];
if (value == NULL) value = b->p[3];
assert(value != NULL);
stb_bucket_free(b);
return (stb_ps *) value; // return STB_ps_direct of value
}
return ps;
}
case STB_ps_array: {
stb_ps_array *a = GetArray(ps);
int i;
for (i=0; i < a->count; ++i) {
if (a->p[i] == value) {
a->p[i] = a->p[--a->count];
if (a->count == 3) { // shrink to bucket!
stb_ps_bucket *b = stb_bucket_create3(a->p);
free(a);
return EncodeBucket(b);
}
return ps;
}
}
return ps;
}
case STB_ps_hash: {
stb_ps_hash *h = GetHash(ps);
stb_uint32 hash = stb_hashptr(value);
stb_uint32 s, n = hash & h->mask;
void **t = h->table;
if (t[n] != value) {
s = stb_rehash(hash) | 1;
do {
n = (n + s) & h->mask;
} while (t[n] != value);
}
t[n] = STB_DEL;
-- h->count;
++ h->count_deletes;
// should we shrink down to an array?
if (h->count < stb_ps_array_max) {
int n = 1 << stb_log2_floor(stb_ps_array_max);
if (h->count < n) {
stb_ps_array *a = (stb_ps_array *) malloc(sizeof(*a) + (n-1) * sizeof(a->p[0]));
int i,j=0;
for (i=0; i < h->size; ++i)
if (!stb_ps_empty((size_t)t[i]))
a->p[j++] = t[i];
assert(j == h->count);
a->count = j;
free(h);
return EncodeArray(a);
}
}
if (h->count == h->shrink_threshhold) {
stb_ps_hash *h2 = stb_ps_makehash(h->size >> 1, h->size, t);
free(h);
return EncodeHash(h2);
}
return ps;
}
}
return ps; /* NOTREACHED */
}
stb_ps *stb_ps_remove_any(stb_ps *ps, void **value)
{
assert(ps != NULL);
switch (3 & (int)(size_t) ps) {
case STB_ps_direct:
*value = ps;
return NULL;
case STB_ps_bucket: {
stb_ps_bucket *b = GetBucket(ps);
int count=0, slast=0, last=0;
assert(STB_BUCKET_SIZE == 4);
if (b->p[0]) { ++count; last = 0; }
if (b->p[1]) { ++count; slast = last; last = 1; }
if (b->p[2]) { ++count; slast = last; last = 2; }
if (b->p[3]) { ++count; slast = last; last = 3; }
*value = b->p[last];
b->p[last] = 0;
if (count == 2) {
void *leftover = b->p[slast]; // second to last
stb_bucket_free(b);
return (stb_ps *) leftover;
}
return ps;
}
case STB_ps_array: {
stb_ps_array *a = GetArray(ps);
*value = a->p[a->count-1];
if (a->count == 4)
return stb_ps_remove(ps, *value);
--a->count;
return ps;
}
case STB_ps_hash: {
stb_ps_hash *h = GetHash(ps);
void **t = h->table;
stb_uint32 n = h->any_offset;
while (stb_ps_empty((size_t)t[n]))
n = (n + 1) & h->mask;
*value = t[n];
h->any_offset = (n+1) & h->mask;
// check if we need to skip down to the previous type
if (h->count-1 < stb_ps_array_max || h->count-1 == h->shrink_threshhold)
return stb_ps_remove(ps, *value);
t[n] = STB_DEL;
-- h->count;
++ h->count_deletes;
return ps;
}
}
return ps; /* NOTREACHED */
}
void ** stb_ps_getlist(stb_ps *ps, int *count)
{
int i,n=0;
void **p = NULL;
switch (3 & (int)(size_t) ps) {
case STB_ps_direct:
if (ps == NULL) { *count = 0; return NULL; }
p = (void **) malloc(sizeof(*p) * 1);
p[0] = ps;
*count = 1;
return p;
case STB_ps_bucket: {
stb_ps_bucket *b = GetBucket(ps);
p = (void **) malloc(sizeof(*p) * STB_BUCKET_SIZE);
for (i=0; i < STB_BUCKET_SIZE; ++i)
if (b->p[i] != NULL)
p[n++] = b->p[i];
break;
}
case STB_ps_array: {
stb_ps_array *a = GetArray(ps);
p = (void **) malloc(sizeof(*p) * a->count);
memcpy(p, a->p, sizeof(*p) * a->count);
*count = a->count;
return p;
}
case STB_ps_hash: {
stb_ps_hash *h = GetHash(ps);
p = (void **) malloc(sizeof(*p) * h->count);
for (i=0; i < h->size; ++i)
if (!stb_ps_empty((size_t)h->table[i]))
p[n++] = h->table[i];
break;
}
}
*count = n;
return p;
}
int stb_ps_writelist(stb_ps *ps, void **list, int size )
{
int i,n=0;
switch (3 & (int)(size_t) ps) {
case STB_ps_direct:
if (ps == NULL || size <= 0) return 0;
list[0] = ps;
return 1;
case STB_ps_bucket: {
stb_ps_bucket *b = GetBucket(ps);
for (i=0; i < STB_BUCKET_SIZE; ++i)
if (b->p[i] != NULL && n < size)
list[n++] = b->p[i];
return n;
}
case STB_ps_array: {
stb_ps_array *a = GetArray(ps);
n = stb_min(size, a->count);
memcpy(list, a->p, sizeof(*list) * n);
return n;
}
case STB_ps_hash: {
stb_ps_hash *h = GetHash(ps);
if (size <= 0) return 0;
         for (i=0; i < h->size; ++i) {   // scan the whole table; entries can occupy any slot
if (!stb_ps_empty((size_t)h->table[i])) {
list[n++] = h->table[i];
if (n == size) break;
}
}
return n;
}
}
return 0; /* NOTREACHED */
}
int stb_ps_enum(stb_ps *ps, void *data, int (*func)(void *value, void *data))
{
int i;
switch (3 & (int)(size_t) ps) {
case STB_ps_direct:
if (ps == NULL) return STB_TRUE;
return func(ps, data);
case STB_ps_bucket: {
stb_ps_bucket *b = GetBucket(ps);
for (i=0; i < STB_BUCKET_SIZE; ++i)
if (b->p[i] != NULL)
if (!func(b->p[i], data))
return STB_FALSE;
return STB_TRUE;
}
case STB_ps_array: {
stb_ps_array *a = GetArray(ps);
for (i=0; i < a->count; ++i)
if (!func(a->p[i], data))
return STB_FALSE;
return STB_TRUE;
}
case STB_ps_hash: {
stb_ps_hash *h = GetHash(ps);
for (i=0; i < h->count; ++i)
if (!stb_ps_empty((size_t)h->table[i]))
if (!func(h->table[i], data))
return STB_FALSE;
return STB_TRUE;
}
}
return STB_TRUE; /* NOTREACHED */
}
int stb_ps_count (stb_ps *ps)
{
switch (3 & (int)(size_t) ps) {
case STB_ps_direct:
return ps != NULL;
case STB_ps_bucket: {
stb_ps_bucket *b = GetBucket(ps);
return (b->p[0] != NULL) + (b->p[1] != NULL) +
(b->p[2] != NULL) + (b->p[3] != NULL);
}
case STB_ps_array: {
stb_ps_array *a = GetArray(ps);
return a->count;
}
case STB_ps_hash: {
stb_ps_hash *h = GetHash(ps);
return h->count;
}
}
return 0;
}
void ** stb_ps_fastlist(stb_ps *ps, int *count)
{
   static void *storage;  // NB: shared static slot makes the direct case non-reentrant
switch (3 & (int)(size_t) ps) {
case STB_ps_direct:
if (ps == NULL) { *count = 0; return NULL; }
storage = ps;
*count = 1;
return &storage;
case STB_ps_bucket: {
stb_ps_bucket *b = GetBucket(ps);
*count = STB_BUCKET_SIZE;
return b->p;
}
case STB_ps_array: {
stb_ps_array *a = GetArray(ps);
*count = a->count;
return a->p;
}
case STB_ps_hash: {
stb_ps_hash *h = GetHash(ps);
*count = h->size;
return h->table;
}
}
return NULL; /* NOTREACHED */
}
int stb_ps_subset(stb_ps *bigger, stb_ps *smaller)
{
int i, listlen;
void **list = stb_ps_fastlist(smaller, &listlen);
for(i=0; i < listlen; ++i)
if (stb_ps_fastlist_valid(list[i]))
if (!stb_ps_find(bigger, list[i]))
return 0;
return 1;
}
int stb_ps_eq(stb_ps *p0, stb_ps *p1)
{
if (stb_ps_count(p0) != stb_ps_count(p1))
return 0;
return stb_ps_subset(p0, p1);
}
#undef GetBucket
#undef GetArray
#undef GetHash
#undef EncodeBucket
#undef EncodeArray
#undef EncodeHash
#endif
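// usage sketch for the pointer-set API above (illustrative only, not part of
// the library; 'widget' and 'process_widget' are hypothetical caller-side
// names). stb_ps_fastlist() returns internal storage that may contain empty
// slots, so each entry must be tested with stb_ps_fastlist_valid();
// stb_ps_getlist() instead returns a compacted malloc()ed copy.
#if 0
static void process_all(stb_ps *set)
{
   int i, count;
   void **fast = stb_ps_fastlist(set, &count);
   for (i=0; i < count; ++i)
      if (stb_ps_fastlist_valid(fast[i]))
         process_widget((widget *) fast[i]);
}
#endif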
//////////////////////////////////////////////////////////////////////////////
//
// Random Numbers via Mersenne Twister or LCG
//
STB_EXTERN unsigned int stb_srandLCG(unsigned int seed);
STB_EXTERN unsigned int stb_randLCG(void);
STB_EXTERN double stb_frandLCG(void);
STB_EXTERN void stb_srand(unsigned int seed);
STB_EXTERN unsigned int stb_rand(void);
STB_EXTERN double stb_frand(void);
STB_EXTERN void stb_shuffle(void *p, size_t n, size_t sz,
unsigned int seed);
STB_EXTERN void stb_reverse(void *p, size_t n, size_t sz);
STB_EXTERN unsigned int stb_randLCG_explicit(unsigned int seed);
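// usage sketch (illustrative): seed the Mersenne Twister, draw values, and
// shuffle an array in place. stb_shuffle is a Fisher-Yates shuffle driven by
// the LCG, temporarily installing 'seed' if it is nonzero.
#if 0
static void random_demo(void)
{
   int data[10] = {0,1,2,3,4,5,6,7,8,9};
   unsigned int u;
   double d;
   stb_srand(12345);
   u = stb_rand();     // uniform 32-bit value
   d = stb_frand();    // uniform double in [0,1)
   printf("%u %f\n", u, d);
   stb_shuffle(data, 10, sizeof(data[0]), 98765);
}
#endif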
#define stb_rand_define(x,y) \
\
unsigned int x(void) \
{ \
static unsigned int stb__rand = y; \
stb__rand = stb__rand * 2147001325 + 715136305; /* BCPL */ \
return 0x31415926 ^ ((stb__rand >> 16) + (stb__rand << 16)); \
}
#ifdef STB_DEFINE
unsigned int stb_randLCG_explicit(unsigned int seed)
{
return seed * 2147001325 + 715136305;
}
static unsigned int stb__rand_seed=0;
unsigned int stb_srandLCG(unsigned int seed)
{
unsigned int previous = stb__rand_seed;
stb__rand_seed = seed;
return previous;
}
unsigned int stb_randLCG(void)
{
stb__rand_seed = stb__rand_seed * 2147001325 + 715136305; // BCPL generator
// shuffle non-random bits to the middle, and xor to decorrelate with seed
return 0x31415926 ^ ((stb__rand_seed >> 16) + (stb__rand_seed << 16));
}
double stb_frandLCG(void)
{
return stb_randLCG() / ((double) (1 << 16) * (1 << 16));
}
void stb_shuffle(void *p, size_t n, size_t sz, unsigned int seed)
{
char *a;
unsigned int old_seed;
int i;
if (seed)
old_seed = stb_srandLCG(seed);
a = (char *) p + (n-1) * sz;
for (i=(int) n; i > 1; --i) {
int j = stb_randLCG() % i;
stb_swap(a, (char *) p + j * sz, sz);
a -= sz;
}
if (seed)
stb_srandLCG(old_seed);
}
void stb_reverse(void *p, size_t n, size_t sz)
{
size_t i,j = n-1;
for (i=0; i < j; ++i,--j) {
stb_swap((char *) p + i * sz, (char *) p + j * sz, sz);
}
}
// public domain Mersenne Twister by Michael Brundage
#define STB__MT_LEN 624
int stb__mt_index = STB__MT_LEN*sizeof(int)+1;
unsigned int stb__mt_buffer[STB__MT_LEN];
void stb_srand(unsigned int seed)
{
int i;
unsigned int old = stb_srandLCG(seed);
for (i = 0; i < STB__MT_LEN; i++)
stb__mt_buffer[i] = stb_randLCG();
stb_srandLCG(old);
stb__mt_index = STB__MT_LEN*sizeof(unsigned int);
}
#define STB__MT_IA 397
#define STB__MT_IB (STB__MT_LEN - STB__MT_IA)
#define STB__UPPER_MASK 0x80000000
#define STB__LOWER_MASK 0x7FFFFFFF
#define STB__MATRIX_A 0x9908B0DF
#define STB__TWIST(b,i,j) ((b)[i] & STB__UPPER_MASK) | ((b)[j] & STB__LOWER_MASK)
#define STB__MAGIC(s) (((s)&1)*STB__MATRIX_A)
unsigned int stb_rand(void)
{
unsigned int * b = stb__mt_buffer;
int idx = stb__mt_index;
unsigned int s,r;
int i;
if (idx >= STB__MT_LEN*sizeof(unsigned int)) {
      if (idx > STB__MT_LEN*sizeof(unsigned int))   // sentinel: stb_srand() was never called
         stb_srand(0);
idx = 0;
i = 0;
for (; i < STB__MT_IB; i++) {
s = STB__TWIST(b, i, i+1);
b[i] = b[i + STB__MT_IA] ^ (s >> 1) ^ STB__MAGIC(s);
}
for (; i < STB__MT_LEN-1; i++) {
s = STB__TWIST(b, i, i+1);
b[i] = b[i - STB__MT_IB] ^ (s >> 1) ^ STB__MAGIC(s);
}
s = STB__TWIST(b, STB__MT_LEN-1, 0);
b[STB__MT_LEN-1] = b[STB__MT_IA-1] ^ (s >> 1) ^ STB__MAGIC(s);
}
stb__mt_index = idx + sizeof(unsigned int);
r = *(unsigned int *)((unsigned char *)b + idx);
r ^= (r >> 11);
r ^= (r << 7) & 0x9D2C5680;
r ^= (r << 15) & 0xEFC60000;
r ^= (r >> 18);
return r;
}
double stb_frand(void)
{
return stb_rand() / ((double) (1 << 16) * (1 << 16));
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// stb_dupe
//
// stb_dupe is a duplicate-finding system for very, very large data
// structures--large enough that sorting is too slow, but not so large
// that we can't keep all the data in memory. using it works as follows:
//
// 1. create an stb_dupe:
// provide a hash function
// provide an equality function
// provide an estimate for the size
// optionally provide a comparison function
//
// 2. traverse your data, 'adding' pointers to the stb_dupe
//
// 3. finish and ask for duplicates
//
// the stb_dupe will discard its intermediate data and build
// a collection of sorted lists of duplicates, with non-duplicate
// entries omitted entirely
//
//
// Implementation strategy:
//
// while collecting the N items, we keep a hash table of approximate
// size sqrt(N). (if you tell us the N up front, the hash table is
// just that size exactly)
//
// each entry in the hash table is just an stb__arr of pointers (no need
// to use stb_ps, because we don't need to delete from these)
//
// for step 3, for each entry in the hash table, we apply stb_dupe to it
// recursively. once the size gets small enough (or doesn't decrease
// significantly), we switch to either using qsort() on the comparison
// function, or else we just do the icky N^2 gather
typedef struct stb_dupe stb_dupe;
typedef int (*stb_compare_func)(void *a, void *b);
typedef int (*stb_hash_func)(void *a, unsigned int seed);
STB_EXTERN void stb_dupe_free(stb_dupe *sd);
STB_EXTERN stb_dupe *stb_dupe_create(stb_hash_func hash,
stb_compare_func eq, int size, stb_compare_func ineq);
STB_EXTERN void stb_dupe_add(stb_dupe *sd, void *item);
STB_EXTERN void stb_dupe_finish(stb_dupe *sd);
STB_EXTERN int stb_dupe_numsets(stb_dupe *sd);
STB_EXTERN void **stb_dupe_set(stb_dupe *sd, int num);
STB_EXTERN int stb_dupe_set_count(stb_dupe *sd, int num);
struct stb_dupe
{
void ***hash_table;
int hash_size;
int size_log2;
int population;
int hash_shift;
stb_hash_func hash;
stb_compare_func eq;
stb_compare_func ineq;
void ***dupes;
};
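// usage sketch (illustrative; 'record' and the callbacks below are
// hypothetical caller-side code, while stb_rehash comes from this library).
// passing NULL for 'ineq' means non-recursive rows use the O(N^2) gather.
#if 0
typedef struct { int key; } record;
static int record_eq(void *a, void *b)
{
   return ((record *) a)->key == ((record *) b)->key;
}
static int record_hash(void *a, unsigned int seed)
{
   // mix the seed in before rehashing so recursive passes decorrelate
   return (int) stb_rehash(((record *) a)->key + seed);
}
static void report_dupes(record **items, int n)
{
   int i,j;
   stb_dupe *sd = stb_dupe_create(record_hash, record_eq, n, NULL);
   for (i=0; i < n; ++i)
      stb_dupe_add(sd, items[i]);
   stb_dupe_finish(sd);
   for (i=0; i < stb_dupe_numsets(sd); ++i)
      for (j=0; j < stb_dupe_set_count(sd, i); ++j)
         printf("set %d: key %d\n", i, ((record *) stb_dupe_set(sd, i)[j])->key);
   stb_dupe_free(sd);
}
#endif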
#ifdef STB_DEFINE
int stb_dupe_numsets(stb_dupe *sd)
{
assert(sd->hash_table == NULL);
return stb_arr_len(sd->dupes);
}
void **stb_dupe_set(stb_dupe *sd, int num)
{
assert(sd->hash_table == NULL);
return sd->dupes[num];
}
int stb_dupe_set_count(stb_dupe *sd, int num)
{
assert(sd->hash_table == NULL);
return stb_arr_len(sd->dupes[num]);
}
stb_dupe *stb_dupe_create(stb_hash_func hash, stb_compare_func eq, int size,
stb_compare_func ineq)
{
int i, hsize;
stb_dupe *sd = (stb_dupe *) malloc(sizeof(*sd));
sd->size_log2 = 4;
hsize = 1 << sd->size_log2;
while (hsize * hsize < size) {
++sd->size_log2;
hsize *= 2;
}
sd->hash = hash;
sd->eq = eq;
sd->ineq = ineq;
sd->hash_shift = 0;
sd->population = 0;
sd->hash_size = hsize;
sd->hash_table = (void ***) malloc(sizeof(*sd->hash_table) * hsize);
for (i=0; i < hsize; ++i)
sd->hash_table[i] = NULL;
sd->dupes = NULL;
return sd;
}
void stb_dupe_add(stb_dupe *sd, void *item)
{
stb_uint32 hash = sd->hash(item, sd->hash_shift);
int z = hash & (sd->hash_size-1);
stb_arr_push(sd->hash_table[z], item);
++sd->population;
}
void stb_dupe_free(stb_dupe *sd)
{
int i;
for (i=0; i < stb_arr_len(sd->dupes); ++i)
if (sd->dupes[i])
stb_arr_free(sd->dupes[i]);
stb_arr_free(sd->dupes);
free(sd);
}
static stb_compare_func stb__compare;
static int stb__dupe_compare(const void *a, const void *b)
{
void *p = *(void **) a;
void *q = *(void **) b;
return stb__compare(p,q);
}
void stb_dupe_finish(stb_dupe *sd)
{
int i,j,k;
assert(sd->dupes == NULL);
for (i=0; i < sd->hash_size; ++i) {
void ** list = sd->hash_table[i];
if (list != NULL) {
int n = stb_arr_len(list);
// @TODO: measure to find good numbers instead of just making them up!
int thresh = (sd->ineq ? 200 : 20);
// if n is large enough to be worth it, and n is smaller than
// before (so we can guarantee we'll use a smaller hash table);
// and there are enough hash bits left, assuming full 32-bit hash
if (n > thresh && n < (sd->population >> 3) && sd->hash_shift + sd->size_log2*2 < 32) {
// recursively process this row using stb_dupe, O(N log log N)
stb_dupe *d = stb_dupe_create(sd->hash, sd->eq, n, sd->ineq);
d->hash_shift = stb_randLCG_explicit(sd->hash_shift);
for (j=0; j < n; ++j)
stb_dupe_add(d, list[j]);
stb_arr_free(sd->hash_table[i]);
stb_dupe_finish(d);
for (j=0; j < stb_arr_len(d->dupes); ++j) {
stb_arr_push(sd->dupes, d->dupes[j]);
d->dupes[j] = NULL; // take over ownership
}
stb_dupe_free(d);
} else if (sd->ineq) {
// process this row using qsort(), O(N log N)
stb__compare = sd->ineq;
qsort(list, n, sizeof(list[0]), stb__dupe_compare);
// find equal subsequences of the list
for (j=0; j < n-1; ) {
// find a subsequence from j..k
for (k=j; k < n; ++k)
// only use ineq so eq can be left undefined
if (sd->ineq(list[j], list[k]))
break;
// k is the first one not in the subsequence
if (k-j > 1) {
void **mylist = NULL;
stb_arr_setlen(mylist, k-j);
memcpy(mylist, list+j, sizeof(list[j]) * (k-j));
stb_arr_push(sd->dupes, mylist);
}
j = k;
}
stb_arr_free(sd->hash_table[i]);
} else {
// process this row using eq(), O(N^2)
for (j=0; j < n; ++j) {
if (list[j] != NULL) {
void **output = NULL;
for (k=j+1; k < n; ++k) {
if (sd->eq(list[j], list[k])) {
if (output == NULL)
stb_arr_push(output, list[j]);
stb_arr_push(output, list[k]);
list[k] = NULL;
}
}
list[j] = NULL;
if (output)
stb_arr_push(sd->dupes, output);
}
}
stb_arr_free(sd->hash_table[i]);
}
}
}
free(sd->hash_table);
sd->hash_table = NULL;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// templatized Sort routine
//
// This is an attempt to implement a templated sorting algorithm.
// To use it, you have to explicitly instantiate it as a _function_,
// then you call that function. This allows the comparison to be inlined,
// giving the sort similar performance to C++ sorts.
//
// It implements quicksort with three-way-median partitioning (generally
// well-behaved), with a final insertion sort pass.
//
// When you define the compare expression, you should assume you have
// elements of your array pointed to by 'a' and 'b', and perform the comparison
// on those. OR you can use one or more statements; first say '0;', then
// write whatever code you want, and compute the result into a variable 'c'.
#define stb_declare_sort(FUNCNAME, TYPE) \
void FUNCNAME(TYPE *p, int n)
#define stb_define_sort(FUNCNAME,TYPE,COMPARE) \
stb__define_sort( void, FUNCNAME,TYPE,COMPARE)
#define stb_define_sort_static(FUNCNAME,TYPE,COMPARE) \
stb__define_sort(static void, FUNCNAME,TYPE,COMPARE)
#define stb__define_sort(MODE, FUNCNAME, TYPE, COMPARE) \
\
static void STB_(FUNCNAME,_ins_sort)(TYPE *p, int n) \
{ \
int i,j; \
for (i=1; i < n; ++i) { \
TYPE t = p[i], *a = &t; \
j = i; \
while (j > 0) { \
TYPE *b = &p[j-1]; \
int c = COMPARE; \
if (!c) break; \
p[j] = p[j-1]; \
--j; \
} \
if (i != j) \
p[j] = t; \
} \
} \
\
static void STB_(FUNCNAME,_quicksort)(TYPE *p, int n) \
{ \
/* threshold for transitioning to insertion sort */ \
while (n > 12) { \
TYPE *a,*b,t; \
int c01,c12,c,m,i,j; \
\
/* compute median of three */ \
m = n >> 1; \
a = &p[0]; \
b = &p[m]; \
c = COMPARE; \
c01 = c; \
a = &p[m]; \
b = &p[n-1]; \
c = COMPARE; \
c12 = c; \
/* if 0 >= mid >= end, or 0 < mid < end, then use mid */ \
if (c01 != c12) { \
/* otherwise, we'll need to swap something else to middle */ \
int z; \
a = &p[0]; \
b = &p[n-1]; \
c = COMPARE; \
/* 0>mid && mid<n: 0>n => n; 0<n => 0 */ \
/* 0<mid && mid>n: 0>n => 0; 0<n => n */ \
z = (c == c12) ? 0 : n-1; \
t = p[z]; \
p[z] = p[m]; \
p[m] = t; \
} \
/* now p[m] is the median-of-three */ \
/* swap it to the beginning so it won't move around */ \
t = p[0]; \
p[0] = p[m]; \
p[m] = t; \
\
/* partition loop */ \
i=1; \
j=n-1; \
for(;;) { \
/* handling of equality is crucial here */ \
/* for sentinels & efficiency with duplicates */ \
b = &p[0]; \
for (;;++i) { \
a=&p[i]; \
c = COMPARE; \
if (!c) break; \
} \
a = &p[0]; \
for (;;--j) { \
b=&p[j]; \
c = COMPARE; \
if (!c) break; \
} \
/* make sure we haven't crossed */ \
if (i >= j) break; \
t = p[i]; \
p[i] = p[j]; \
p[j] = t; \
\
++i; \
--j; \
} \
/* recurse on smaller side, iterate on larger */ \
if (j < (n-i)) { \
STB_(FUNCNAME,_quicksort)(p,j); \
p = p+i; \
n = n-i; \
} else { \
STB_(FUNCNAME,_quicksort)(p+i, n-i); \
n = j; \
} \
} \
} \
\
MODE FUNCNAME(TYPE *p, int n) \
{ \
STB_(FUNCNAME, _quicksort)(p, n); \
STB_(FUNCNAME, _ins_sort)(p, n); \
}
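// example instantiation (illustrative): this expands to a static function
// 'sort_floats' that sorts ascending. COMPARE sees pointers 'a' and 'b' to
// two elements and should be true when *a must precede *b.
#if 0
stb_define_sort_static(sort_floats, float, (*a < *b))
static void sort_demo(float *vals, int n)
{
   sort_floats(vals, n);
}
#endif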
//////////////////////////////////////////////////////////////////////////////
//
// stb_bitset an array of booleans indexed by integers
//
typedef stb_uint32 stb_bitset;
STB_EXTERN stb_bitset *stb_bitset_new(int value, int len);
#define stb_bitset_clearall(arr,len) (memset(arr, 0, 4 * (len)))
#define stb_bitset_setall(arr,len) (memset(arr, 255, 4 * (len)))
#define stb_bitset_setbit(arr,n)    ((arr)[(n) >> 5] |= (1 << ((n) & 31)))
#define stb_bitset_clearbit(arr,n)  ((arr)[(n) >> 5] &= ~(1 << ((n) & 31)))
#define stb_bitset_testbit(arr,n)   ((arr)[(n) >> 5] & (1 << ((n) & 31)))
STB_EXTERN stb_bitset *stb_bitset_union(stb_bitset *p0, stb_bitset *p1, int len);
STB_EXTERN int *stb_bitset_getlist(stb_bitset *out, int start, int end);
STB_EXTERN int stb_bitset_eq(stb_bitset *p0, stb_bitset *p1, int len);
STB_EXTERN int stb_bitset_disjoint(stb_bitset *p0, stb_bitset *p1, int len);
STB_EXTERN int stb_bitset_disjoint_0(stb_bitset *p0, stb_bitset *p1, int len);
STB_EXTERN int stb_bitset_subset(stb_bitset *bigger, stb_bitset *smaller, int len);
STB_EXTERN int stb_bitset_unioneq_changed(stb_bitset *p0, stb_bitset *p1, int len);
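// usage sketch (illustrative): 'len' is always a count of 32-bit words, so
// a set over 100 integers needs (100+31)>>5 = 4 words.
#if 0
static void bitset_demo(void)
{
   int len = (100 + 31) >> 5;                 // words for 100 bits
   stb_bitset *bs = stb_bitset_new(0, len);   // all bits clear
   stb_bitset_setbit(bs, 17);
   if (stb_bitset_testbit(bs, 17)) { /* bit 17 is set */ }
   stb_bitset_clearbit(bs, 17);
   free(bs);
}
#endif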
#ifdef STB_DEFINE
int stb_bitset_eq(stb_bitset *p0, stb_bitset *p1, int len)
{
int i;
for (i=0; i < len; ++i)
if (p0[i] != p1[i]) return 0;
return 1;
}
int stb_bitset_disjoint(stb_bitset *p0, stb_bitset *p1, int len)
{
int i;
for (i=0; i < len; ++i)
if (p0[i] & p1[i]) return 0;
return 1;
}
int stb_bitset_disjoint_0(stb_bitset *p0, stb_bitset *p1, int len)
{
int i;
for (i=0; i < len; ++i)
if ((p0[i] | p1[i]) != 0xffffffff) return 0;
return 1;
}
int stb_bitset_subset(stb_bitset *bigger, stb_bitset *smaller, int len)
{
int i;
for (i=0; i < len; ++i)
if ((bigger[i] & smaller[i]) != smaller[i]) return 0;
return 1;
}
stb_bitset *stb_bitset_union(stb_bitset *p0, stb_bitset *p1, int len)
{
int i;
stb_bitset *d = (stb_bitset *) malloc(sizeof(*d) * len);
for (i=0; i < len; ++i) d[i] = p0[i] | p1[i];
return d;
}
int stb_bitset_unioneq_changed(stb_bitset *p0, stb_bitset *p1, int len)
{
int i, changed=0;
for (i=0; i < len; ++i) {
stb_bitset d = p0[i] | p1[i];
if (d != p0[i]) {
p0[i] = d;
changed = 1;
}
}
return changed;
}
stb_bitset *stb_bitset_new(int value, int len)
{
int i;
stb_bitset *d = (stb_bitset *) malloc(sizeof(*d) * len);
if (value) value = 0xffffffff;
for (i=0; i < len; ++i) d[i] = value;
return d;
}
int *stb_bitset_getlist(stb_bitset *out, int start, int end)
{
int *list = NULL;
int i;
for (i=start; i < end; ++i)
if (stb_bitset_testbit(out, i))
stb_arr_push(list, i);
return list;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// stb_wordwrap quality word-wrapping for fixed-width fonts
//
STB_EXTERN int stb_wordwrap(int *pairs, int pair_max, int count, char *str);
STB_EXTERN int *stb_wordwrapalloc(int count, char *str);
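// usage sketch (illustrative; 'text' is the caller's string): wrap to 40
// columns. each output line is a (start,length) pair into the string, and
// the return value is the line count, or -1 if 'pair_max' was too small.
#if 0
static void wrap_demo(char *text)
{
   int pairs[2*64], i, lines;
   lines = stb_wordwrap(pairs, 64, 40, text);
   for (i=0; i < lines; ++i)
      printf("%.*s\n", pairs[2*i+1], text + pairs[2*i]);
}
#endif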
#ifdef STB_DEFINE
int stb_wordwrap(int *pairs, int pair_max, int count, char *str)
{
int n=0,i=0, start=0,nonwhite=0;
if (pairs == NULL) pair_max = 0x7ffffff0;
else pair_max *= 2;
// parse
for(;;) {
int s=i; // first whitespace char; last nonwhite+1
int w; // word start
// accept whitespace
while (isspace(str[i])) {
if (str[i] == '\n' || str[i] == '\r') {
            if (str[i] + str[i+1] == '\n' + '\r') ++i; // skip the second half of a CRLF or LFCR pair
if (n >= pair_max) return -1;
if (pairs) pairs[n] = start, pairs[n+1] = s-start;
n += 2;
nonwhite=0;
start = i+1;
s = start;
}
++i;
}
if (i >= start+count) {
// we've gone off the end using whitespace
if (nonwhite) {
if (n >= pair_max) return -1;
if (pairs) pairs[n] = start, pairs[n+1] = s-start;
n += 2;
start = s = i;
nonwhite=0;
} else {
// output all the whitespace
while (i >= start+count) {
if (n >= pair_max) return -1;
if (pairs) pairs[n] = start, pairs[n+1] = count;
n += 2;
start += count;
}
s = start;
}
}
if (str[i] == 0) break;
// now scan out a word and see if it fits
w = i;
while (str[i] && !isspace(str[i])) {
++i;
}
// wrapped?
if (i > start + count) {
// huge?
if (i-s <= count) {
if (n >= pair_max) return -1;
if (pairs) pairs[n] = start, pairs[n+1] = s-start;
n += 2;
start = w;
} else {
// This word is longer than one line. If we wrap it onto N lines
// there are leftover chars. do those chars fit on the cur line?
// But if we have leading whitespace, we force it to start here.
if ((w-start) + ((i-w) % count) <= count || !nonwhite) {
// output a full line
if (n >= pair_max) return -1;
if (pairs) pairs[n] = start, pairs[n+1] = count;
n += 2;
start += count;
w = start;
} else {
// output a partial line, trimming trailing whitespace
if (s != start) {
if (n >= pair_max) return -1;
if (pairs) pairs[n] = start, pairs[n+1] = s-start;
n += 2;
start = w;
}
}
// now output full lines as needed
while (start + count <= i) {
if (n >= pair_max) return -1;
if (pairs) pairs[n] = start, pairs[n+1] = count;
n += 2;
start += count;
}
}
}
nonwhite=1;
}
if (start < i) {
if (n >= pair_max) return -1;
if (pairs) pairs[n] = start, pairs[n+1] = i-start;
n += 2;
}
return n>>1;
}
int *stb_wordwrapalloc(int count, char *str)
{
int n = stb_wordwrap(NULL,0,count,str);
int *z = NULL;
stb_arr_setlen(z, n*2);
stb_wordwrap(z, n, count, str);
return z;
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// stb_match: wildcards and regexping
//
STB_EXTERN int stb_wildmatch (char *expr, char *candidate);
STB_EXTERN int stb_wildmatchi(char *expr, char *candidate);
STB_EXTERN int stb_wildfind (char *expr, char *candidate);
STB_EXTERN int stb_wildfindi (char *expr, char *candidate);
STB_EXTERN int stb_regex(char *regex, char *candidate);
typedef struct stb_matcher stb_matcher;
STB_EXTERN stb_matcher *stb_regex_matcher(char *regex);
STB_EXTERN int stb_matcher_match(stb_matcher *m, char *str);
STB_EXTERN int stb_matcher_find(stb_matcher *m, char *str);
STB_EXTERN void stb_matcher_free(stb_matcher *f);
STB_EXTERN stb_matcher *stb_lex_matcher(void);
STB_EXTERN int stb_lex_item(stb_matcher *m, const char *str, int result);
STB_EXTERN int stb_lex_item_wild(stb_matcher *matcher, const char *regex, int result);
STB_EXTERN int stb_lex(stb_matcher *m, char *str, int *len);
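// usage sketch (illustrative). the wildcard routines accept several patterns
// separated by ';'; the regex dialect implemented below is the small one:
// literals, '.', '[...]', '()', '|', '?', '*', '+', and a leading '^'.
#if 0
static void match_demo(void)
{
   stb_matcher *m, *lex;
   int len, tok;
   // wildcards: several patterns separated by ';'
   if (stb_wildmatch("*.txt;*.md", "notes.txt")) { /* matches */ }
   // one-shot regex matching with an explicitly compiled pattern
   m = stb_regex_matcher("ab+c");
   if (m) {
      if (stb_matcher_match(m, "abbbc")) { /* whole string matches */ }
      stb_matcher_free(m);
   }
   // lexing: register patterns with result ids, then take longest matches
   lex = stb_lex_matcher();
   stb_lex_item(lex, "[0-9]+", 1);       // 1 = integer token
   stb_lex_item(lex, "[a-z]+", 2);       // 2 = identifier token
   tok = stb_lex(lex, "foo123", &len);   // tok == 2, len == 3 ("foo")
   stb_matcher_free(lex);
}
#endif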
#ifdef STB_DEFINE
static int stb__match_qstring(char *candidate, char *qstring, int qlen, int insensitive)
{
int i;
if (insensitive) {
for (i=0; i < qlen; ++i)
if (qstring[i] == '?') {
if (!candidate[i]) return 0;
} else
if (tolower(qstring[i]) != tolower(candidate[i]))
return 0;
} else {
for (i=0; i < qlen; ++i)
if (qstring[i] == '?') {
if (!candidate[i]) return 0;
} else
if (qstring[i] != candidate[i])
return 0;
}
return 1;
}
static int stb__find_qstring(char *candidate, char *qstring, int qlen, int insensitive)
{
char c;
int offset=0;
while (*qstring == '?') {
++qstring;
--qlen;
++candidate;
if (qlen == 0) return 0;
if (*candidate == 0) return -1;
}
c = *qstring++;
--qlen;
if (insensitive) c = tolower(c);
while (candidate[offset]) {
if (c == (insensitive ? tolower(candidate[offset]) : candidate[offset]))
if (stb__match_qstring(candidate+offset+1, qstring, qlen, insensitive))
return offset;
++offset;
}
return -1;
}
int stb__wildmatch_raw2(char *expr, char *candidate, int search, int insensitive)
{
int where=0;
int start = -1;
if (!search) {
// parse to first '*'
if (*expr != '*')
start = 0;
while (*expr != '*') {
if (!*expr)
return *candidate == 0 ? 0 : -1;
if (*expr == '?') {
if (!*candidate) return -1;
} else {
if (insensitive) {
if (tolower(*candidate) != tolower(*expr))
return -1;
} else
if (*candidate != *expr)
return -1;
}
++candidate, ++expr, ++where;
}
} else {
// 0-length search string
if (!*expr)
return 0;
}
assert(search || *expr == '*');
if (!search)
++expr;
// implicit '*' at this point
while (*expr) {
int o=0;
// combine redundant * characters
while (expr[0] == '*') ++expr;
// ok, at this point, expr[-1] == '*',
// and expr[0] != '*'
if (!expr[0]) return start >= 0 ? start : 0;
// now find next '*'
o = 0;
while (expr[o] != '*') {
if (expr[o] == 0)
break;
++o;
}
// if no '*', scan to end, then match at end
if (expr[o] == 0 && !search) {
int z;
for (z=0; z < o; ++z)
if (candidate[z] == 0)
return -1;
while (candidate[z])
++z;
// ok, now check if they match
if (stb__match_qstring(candidate+z-o, expr, o, insensitive))
return start >= 0 ? start : 0;
return -1;
} else {
// if yes '*', then do stb__find_qmatch on the intervening chars
int n = stb__find_qstring(candidate, expr, o, insensitive);
if (n < 0)
return -1;
if (start < 0)
start = where + n;
expr += o;
candidate += n+o;
}
if (*expr == 0) {
assert(search);
return start;
}
assert(*expr == '*');
++expr;
}
return start >= 0 ? start : 0;
}
int stb__wildmatch_raw(char *expr, char *candidate, int search, int insensitive)
{
char buffer[256];
// handle multiple search strings
char *s = strchr(expr, ';');
char *last = expr;
while (s) {
int z;
// need to allow for non-writeable strings... assume they're small
if (s - last < 256) {
stb_strncpy(buffer, last, (int) (s-last+1));
z = stb__wildmatch_raw2(buffer, candidate, search, insensitive);
} else {
*s = 0;
z = stb__wildmatch_raw2(last, candidate, search, insensitive);
*s = ';';
}
if (z >= 0) return z;
last = s+1;
s = strchr(last, ';');
}
return stb__wildmatch_raw2(last, candidate, search, insensitive);
}
int stb_wildmatch(char *expr, char *candidate)
{
return stb__wildmatch_raw(expr, candidate, 0,0) >= 0;
}
int stb_wildmatchi(char *expr, char *candidate)
{
return stb__wildmatch_raw(expr, candidate, 0,1) >= 0;
}
int stb_wildfind(char *expr, char *candidate)
{
return stb__wildmatch_raw(expr, candidate, 1,0);
}
int stb_wildfindi(char *expr, char *candidate)
{
return stb__wildmatch_raw(expr, candidate, 1,1);
}
typedef struct
{
stb_int16 transition[256];
} stb_dfa;
// an NFA node represents a state you're in; it then has
// an arbitrary number of edges dangling off of it
// note this isn't utf8-y
typedef struct
{
stb_int16 match; // character/set to match
stb_uint16 node; // output node to go to
} stb_nfa_edge;
typedef struct
{
stb_int16 goal; // does reaching this win the prize?
stb_uint8 active; // is this in the active list
stb_nfa_edge *out;
stb_uint16 *eps; // list of epsilon closures
} stb_nfa_node;
#define STB__DFA_UNDEF -1
#define STB__DFA_GOAL -2
#define STB__DFA_END -3
#define STB__DFA_MGOAL -4
#define STB__DFA_VALID 0
#define STB__NFA_STOP_GOAL -1
// compiled regexp
struct stb_matcher
{
stb_uint16 start_node;
stb_int16 dfa_start;
stb_uint32 *charset;
int num_charset;
int match_start;
stb_nfa_node *nodes;
int does_lex;
// dfa matcher
stb_dfa * dfa;
stb_uint32 * dfa_mapping;
stb_int16 * dfa_result;
int num_words_per_dfa;
};
static int stb__add_node(stb_matcher *matcher)
{
stb_nfa_node z;
z.active = 0;
z.eps = 0;
z.goal = 0;
z.out = 0;
stb_arr_push(matcher->nodes, z);
return stb_arr_len(matcher->nodes)-1;
}
static void stb__add_epsilon(stb_matcher *matcher, int from, int to)
{
assert(from != to);
if (matcher->nodes[from].eps == NULL)
stb_arr_malloc((void **) &matcher->nodes[from].eps, matcher);
stb_arr_push(matcher->nodes[from].eps, to);
}
static void stb__add_edge(stb_matcher *matcher, int from, int to, int type)
{
stb_nfa_edge z = { (stb_int16)type, (stb_uint16)to };
if (matcher->nodes[from].out == NULL)
stb_arr_malloc((void **) &matcher->nodes[from].out, matcher);
stb_arr_push(matcher->nodes[from].out, z);
}
static char *stb__reg_parse_alt(stb_matcher *m, int s, char *r, stb_uint16 *e);
static char *stb__reg_parse(stb_matcher *matcher, int start, char *regex, stb_uint16 *end)
{
int n;
int last_start = -1;
stb_uint16 last_end = start;
while (*regex) {
switch (*regex) {
case '(':
last_start = last_end;
regex = stb__reg_parse_alt(matcher, last_end, regex+1, &last_end);
if (regex == NULL || *regex != ')')
return NULL;
++regex;
break;
case '|':
case ')':
*end = last_end;
return regex;
case '?':
if (last_start < 0) return NULL;
stb__add_epsilon(matcher, last_start, last_end);
++regex;
break;
case '*':
if (last_start < 0) return NULL;
stb__add_epsilon(matcher, last_start, last_end);
// fall through
case '+':
if (last_start < 0) return NULL;
stb__add_epsilon(matcher, last_end, last_start);
// prevent links back to last_end from chaining to last_start
n = stb__add_node(matcher);
stb__add_epsilon(matcher, last_end, n);
last_end = n;
++regex;
break;
case '{': // not supported!
// @TODO: given {n,m}, clone last_start to last_end m times,
// and include epsilons from start to first m-n blocks
return NULL;
case '\\':
++regex;
if (!*regex) return NULL;
// fallthrough
default: // match exactly this character
n = stb__add_node(matcher);
stb__add_edge(matcher, last_end, n, *regex);
last_start = last_end;
last_end = n;
++regex;
break;
case '$':
n = stb__add_node(matcher);
stb__add_edge(matcher, last_end, n, '\n');
last_start = last_end;
last_end = n;
++regex;
break;
case '.':
n = stb__add_node(matcher);
stb__add_edge(matcher, last_end, n, -1);
last_start = last_end;
last_end = n;
++regex;
break;
case '[': {
stb_uint8 flags[256];
int invert = 0,z;
++regex;
if (matcher->num_charset == 0) {
matcher->charset = (stb_uint *) stb_malloc(matcher, sizeof(*matcher->charset) * 256);
memset(matcher->charset, 0, sizeof(*matcher->charset) * 256);
}
memset(flags,0,sizeof(flags));
// leading ^ is special
if (*regex == '^')
++regex, invert = 1;
// leading ] is special
if (*regex == ']') {
flags[(int) ']'] = 1;
++regex;
}
while (*regex != ']') {
stb_uint a;
if (!*regex) return NULL;
a = *regex++;
if (regex[0] == '-' && regex[1] != ']') {
stb_uint i,b = regex[1];
regex += 2;
if (b == 0) return NULL;
if (a > b) return NULL;
for (i=a; i <= b; ++i)
flags[i] = 1;
} else
flags[a] = 1;
}
++regex;
if (invert) {
int i;
for (i=0; i < 256; ++i)
flags[i] = 1-flags[i];
}
// now check if any existing charset matches
for (z=0; z < matcher->num_charset; ++z) {
int i, k[2] = { 0, 1 << z};
for (i=0; i < 256; ++i) {
unsigned int f = k[flags[i]];
if ((matcher->charset[i] & k[1]) != f)
break;
}
if (i == 256) break;
}
if (z == matcher->num_charset) {
int i;
++matcher->num_charset;
if (matcher->num_charset > 32) {
assert(0); /* NOTREACHED */
return NULL; // too many charsets, oops
}
for (i=0; i < 256; ++i)
if (flags[i])
matcher->charset[i] |= (1 << z);
}
n = stb__add_node(matcher);
stb__add_edge(matcher, last_end, n, -2 - z);
last_start = last_end;
last_end = n;
break;
}
}
}
*end = last_end;
return regex;
}
static char *stb__reg_parse_alt(stb_matcher *matcher, int start, char *regex, stb_uint16 *end)
{
stb_uint16 last_end = start;
stb_uint16 main_end;
int head, tail;
head = stb__add_node(matcher);
stb__add_epsilon(matcher, start, head);
regex = stb__reg_parse(matcher, head, regex, &last_end);
if (regex == NULL) return NULL;
if (*regex == 0 || *regex == ')') {
*end = last_end;
return regex;
}
main_end = last_end;
tail = stb__add_node(matcher);
stb__add_epsilon(matcher, last_end, tail);
// start alternatives from the same starting node; use epsilon
// transitions to combine their endings
while(*regex && *regex != ')') {
assert(*regex == '|');
head = stb__add_node(matcher);
stb__add_epsilon(matcher, start, head);
regex = stb__reg_parse(matcher, head, regex+1, &last_end);
if (regex == NULL)
return NULL;
stb__add_epsilon(matcher, last_end, tail);
}
*end = tail;
return regex;
}
static char *stb__wild_parse(stb_matcher *matcher, int start, char *str, stb_uint16 *end)
{
int n;
stb_uint16 last_end;
last_end = stb__add_node(matcher);
stb__add_epsilon(matcher, start, last_end);
while (*str) {
switch (*str) {
// fallthrough
default: // match exactly this character
n = stb__add_node(matcher);
if (toupper(*str) == tolower(*str)) {
stb__add_edge(matcher, last_end, n, *str);
} else {
stb__add_edge(matcher, last_end, n, tolower(*str));
stb__add_edge(matcher, last_end, n, toupper(*str));
}
last_end = n;
++str;
break;
case '?':
n = stb__add_node(matcher);
stb__add_edge(matcher, last_end, n, -1);
last_end = n;
++str;
break;
case '*':
n = stb__add_node(matcher);
stb__add_edge(matcher, last_end, n, -1);
stb__add_epsilon(matcher, last_end, n);
stb__add_epsilon(matcher, n, last_end);
last_end = n;
++str;
break;
}
}
// now require end of string to match
n = stb__add_node(matcher);
stb__add_edge(matcher, last_end, n, 0);
last_end = n;
*end = last_end;
return str;
}
static int stb__opt(stb_matcher *m, int n)
{
for(;;) {
stb_nfa_node *p = &m->nodes[n];
if (p->goal) return n;
if (stb_arr_len(p->out)) return n;
if (stb_arr_len(p->eps) != 1) return n;
n = p->eps[0];
}
}
static void stb__optimize(stb_matcher *m)
{
// if the target of any edge is a node with exactly
// one out-epsilon, shorten it
int i,j;
for (i=0; i < stb_arr_len(m->nodes); ++i) {
stb_nfa_node *p = &m->nodes[i];
for (j=0; j < stb_arr_len(p->out); ++j)
p->out[j].node = stb__opt(m,p->out[j].node);
for (j=0; j < stb_arr_len(p->eps); ++j)
p->eps[j] = stb__opt(m,p->eps[j] );
}
m->start_node = stb__opt(m,m->start_node);
}
void stb_matcher_free(stb_matcher *f)
{
stb_free(f);
}
static stb_matcher *stb__alloc_matcher(void)
{
stb_matcher *matcher = (stb_matcher *) stb_malloc(0,sizeof(*matcher));
matcher->start_node = 0;
stb_arr_malloc((void **) &matcher->nodes, matcher);
matcher->num_charset = 0;
matcher->match_start = 0;
matcher->does_lex = 0;
matcher->dfa_start = STB__DFA_UNDEF;
stb_arr_malloc((void **) &matcher->dfa, matcher);
stb_arr_malloc((void **) &matcher->dfa_mapping, matcher);
stb_arr_malloc((void **) &matcher->dfa_result, matcher);
stb__add_node(matcher);
return matcher;
}
static void stb__lex_reset(stb_matcher *matcher)
{
// flush cached dfa data
stb_arr_setlen(matcher->dfa, 0);
stb_arr_setlen(matcher->dfa_mapping, 0);
stb_arr_setlen(matcher->dfa_result, 0);
matcher->dfa_start = STB__DFA_UNDEF;
}
stb_matcher *stb_regex_matcher(char *regex)
{
char *z;
stb_uint16 end;
stb_matcher *matcher = stb__alloc_matcher();
if (*regex == '^') {
matcher->match_start = 1;
++regex;
}
z = stb__reg_parse_alt(matcher, matcher->start_node, regex, &end);
if (!z || *z) {
stb_free(matcher);
return NULL;
}
((matcher->nodes)[(int) end]).goal = STB__NFA_STOP_GOAL;
return matcher;
}
stb_matcher *stb_lex_matcher(void)
{
stb_matcher *matcher = stb__alloc_matcher();
matcher->match_start = 1;
matcher->does_lex = 1;
return matcher;
}
int stb_lex_item(stb_matcher *matcher, const char *regex, int result)
{
char *z;
stb_uint16 end;
z = stb__reg_parse_alt(matcher, matcher->start_node, (char*) regex, &end);
if (z == NULL)
return 0;
stb__lex_reset(matcher);
matcher->nodes[(int) end].goal = result;
return 1;
}
int stb_lex_item_wild(stb_matcher *matcher, const char *regex, int result)
{
char *z;
stb_uint16 end;
z = stb__wild_parse(matcher, matcher->start_node, (char*) regex, &end);
if (z == NULL)
return 0;
stb__lex_reset(matcher);
matcher->nodes[(int) end].goal = result;
return 1;
}
static void stb__clear(stb_matcher *m, stb_uint16 *list)
{
int i;
for (i=0; i < stb_arr_len(list); ++i)
m->nodes[(int) list[i]].active = 0;
}
static int stb__clear_goalcheck(stb_matcher *m, stb_uint16 *list)
{
int i, t=0;
for (i=0; i < stb_arr_len(list); ++i) {
t += m->nodes[(int) list[i]].goal;
m->nodes[(int) list[i]].active = 0;
}
return t;
}
static stb_uint16 * stb__add_if_inactive(stb_matcher *m, stb_uint16 *list, int n)
{
if (!m->nodes[n].active) {
stb_arr_push(list, n);
m->nodes[n].active = 1;
}
return list;
}
static stb_uint16 * stb__eps_closure(stb_matcher *m, stb_uint16 *list)
{
int i,n = stb_arr_len(list);
for(i=0; i < n; ++i) {
stb_uint16 *e = m->nodes[(int) list[i]].eps;
if (e) {
int j,k = stb_arr_len(e);
for (j=0; j < k; ++j)
list = stb__add_if_inactive(m, list, e[j]);
n = stb_arr_len(list);
}
}
return list;
}
int stb_matcher_match(stb_matcher *m, char *str)
{
int result = 0;
int i,j,y,z;
stb_uint16 *previous = NULL;
stb_uint16 *current = NULL;
stb_uint16 *temp;
stb_arr_setsize(previous, 4);
stb_arr_setsize(current, 4);
previous = stb__add_if_inactive(m, previous, m->start_node);
previous = stb__eps_closure(m,previous);
stb__clear(m, previous);
while (*str && stb_arr_len(previous)) {
y = stb_arr_len(previous);
for (i=0; i < y; ++i) {
stb_nfa_node *n = &m->nodes[(int) previous[i]];
z = stb_arr_len(n->out);
for (j=0; j < z; ++j) {
if (n->out[j].match >= 0) {
if (n->out[j].match == *str)
current = stb__add_if_inactive(m, current, n->out[j].node);
} else if (n->out[j].match == -1) {
if (*str != '\n')
current = stb__add_if_inactive(m, current, n->out[j].node);
} else if (n->out[j].match < -1) {
int z = -n->out[j].match - 2;
if (m->charset[(stb_uint8) *str] & (1 << z))
current = stb__add_if_inactive(m, current, n->out[j].node);
}
}
}
stb_arr_setlen(previous, 0);
temp = previous;
previous = current;
current = temp;
previous = stb__eps_closure(m,previous);
stb__clear(m, previous);
++str;
}
// transition to pick up a '$' at the end
y = stb_arr_len(previous);
for (i=0; i < y; ++i)
m->nodes[(int) previous[i]].active = 1;
for (i=0; i < y; ++i) {
stb_nfa_node *n = &m->nodes[(int) previous[i]];
z = stb_arr_len(n->out);
for (j=0; j < z; ++j) {
if (n->out[j].match == '\n')
current = stb__add_if_inactive(m, current, n->out[j].node);
}
}
previous = stb__eps_closure(m,previous);
stb__clear(m, previous);
y = stb_arr_len(previous);
for (i=0; i < y; ++i)
if (m->nodes[(int) previous[i]].goal)
result = 1;
stb_arr_free(previous);
stb_arr_free(current);
return result && *str == 0;
}
stb_int16 stb__get_dfa_node(stb_matcher *m, stb_uint16 *list)
{
stb_uint16 node;
stb_uint32 data[8], *state, *newstate;
int i,j,n;
state = (stb_uint32 *) stb_temp(data, m->num_words_per_dfa * 4);
memset(state, 0, m->num_words_per_dfa*4);
n = stb_arr_len(list);
for (i=0; i < n; ++i) {
int x = list[i];
state[x >> 5] |= 1 << (x & 31);
}
// @TODO use a hash table
n = stb_arr_len(m->dfa_mapping);
i=j=0;
for(; j < n; ++i, j += m->num_words_per_dfa) {
// @TODO special case for <= 32
if (!memcmp(state, m->dfa_mapping + j, m->num_words_per_dfa*4)) {
node = i;
goto done;
}
}
assert(stb_arr_len(m->dfa) == i);
node = i;
newstate = stb_arr_addn(m->dfa_mapping, m->num_words_per_dfa);
memcpy(newstate, state, m->num_words_per_dfa*4);
// set all transitions to 'unknown'
stb_arr_add(m->dfa);
memset(m->dfa[i].transition, -1, sizeof(m->dfa[i].transition));
if (m->does_lex) {
int result = -1;
n = stb_arr_len(list);
for (i=0; i < n; ++i) {
if (m->nodes[(int) list[i]].goal > result)
result = m->nodes[(int) list[i]].goal;
}
stb_arr_push(m->dfa_result, result);
}
done:
stb_tempfree(data, state);
return node;
}
static int stb__matcher_dfa(stb_matcher *m, char *str_c, int *len)
{
stb_uint8 *str = (stb_uint8 *) str_c;
stb_int16 node,prevnode;
stb_dfa *trans;
int match_length = 0;
stb_int16 match_result=0;
if (m->dfa_start == STB__DFA_UNDEF) {
stb_uint16 *list;
m->num_words_per_dfa = (stb_arr_len(m->nodes)+31) >> 5;
stb__optimize(m);
list = stb__add_if_inactive(m, NULL, m->start_node);
list = stb__eps_closure(m,list);
if (m->does_lex) {
m->dfa_start = stb__get_dfa_node(m,list);
stb__clear(m, list);
// DON'T allow start state to be a goal state!
// this allows people to specify regexes that can match 0
// characters without them actually matching (also we don't
      // check _before_ advancing anyway)
if (m->dfa_start <= STB__DFA_MGOAL)
m->dfa_start = -(m->dfa_start - STB__DFA_MGOAL);
} else {
if (stb__clear_goalcheck(m, list))
m->dfa_start = STB__DFA_GOAL;
else
m->dfa_start = stb__get_dfa_node(m,list);
}
stb_arr_free(list);
}
prevnode = STB__DFA_UNDEF;
node = m->dfa_start;
trans = m->dfa;
if (m->dfa_start == STB__DFA_GOAL)
return 1;
for(;;) {
assert(node >= STB__DFA_VALID);
// fast inner DFA loop; especially if STB__DFA_VALID is 0
do {
prevnode = node;
node = trans[node].transition[*str++];
} while (node >= STB__DFA_VALID);
assert(node >= STB__DFA_MGOAL - stb_arr_len(m->dfa));
assert(node < stb_arr_len(m->dfa));
// special case for lex: need _longest_ match, so notice goal
// state without stopping
if (node <= STB__DFA_MGOAL) {
match_length = (int) (str - (stb_uint8 *) str_c);
node = -(node - STB__DFA_MGOAL);
match_result = node;
continue;
}
// slow NFA->DFA conversion
// or we hit the goal or the end of the string, but those
// can only happen once per search...
if (node == STB__DFA_UNDEF) {
// build a list -- @TODO special case <= 32 states
// heck, use a more compact data structure for <= 16 and <= 8 ?!
// @TODO keep states/newstates around instead of reallocating them
stb_uint16 *states = NULL;
stb_uint16 *newstates = NULL;
int i,j,y,z;
stb_uint32 *flags = &m->dfa_mapping[prevnode * m->num_words_per_dfa];
assert(prevnode != STB__DFA_UNDEF);
stb_arr_setsize(states, 4);
stb_arr_setsize(newstates,4);
for (j=0; j < m->num_words_per_dfa; ++j) {
for (i=0; i < 32; ++i) {
if (*flags & (1 << i))
stb_arr_push(states, j*32+i);
}
++flags;
}
// states is now the states we were in in the previous node;
// so now we can compute what node it transitions to on str[-1]
y = stb_arr_len(states);
for (i=0; i < y; ++i) {
stb_nfa_node *n = &m->nodes[(int) states[i]];
z = stb_arr_len(n->out);
for (j=0; j < z; ++j) {
if (n->out[j].match >= 0) {
if (n->out[j].match == str[-1] || (str[-1] == 0 && n->out[j].match == '\n'))
newstates = stb__add_if_inactive(m, newstates, n->out[j].node);
} else if (n->out[j].match == -1) {
if (str[-1] != '\n' && str[-1])
newstates = stb__add_if_inactive(m, newstates, n->out[j].node);
} else if (n->out[j].match < -1) {
int z = -n->out[j].match - 2;
if (m->charset[str[-1]] & (1 << z))
newstates = stb__add_if_inactive(m, newstates, n->out[j].node);
}
}
}
// AND add in the start state!
if (!m->match_start || (str[-1] == '\n' && !m->does_lex))
newstates = stb__add_if_inactive(m, newstates, m->start_node);
// AND epsilon close it
newstates = stb__eps_closure(m, newstates);
// if it's a goal state, then that's all there is to it
if (stb__clear_goalcheck(m, newstates)) {
if (m->does_lex) {
match_length = (int) (str - (stb_uint8 *) str_c);
node = stb__get_dfa_node(m,newstates);
match_result = node;
node = -node + STB__DFA_MGOAL;
trans = m->dfa; // could have gotten realloc()ed
} else
node = STB__DFA_GOAL;
} else if (str[-1] == 0 || stb_arr_len(newstates) == 0) {
node = STB__DFA_END;
} else {
node = stb__get_dfa_node(m,newstates);
trans = m->dfa; // could have gotten realloc()ed
}
trans[prevnode].transition[str[-1]] = node;
if (node <= STB__DFA_MGOAL)
node = -(node - STB__DFA_MGOAL);
stb_arr_free(newstates);
stb_arr_free(states);
}
if (node == STB__DFA_GOAL) {
return 1;
}
if (node == STB__DFA_END) {
if (m->does_lex) {
if (match_result) {
if (len) *len = match_length;
return m->dfa_result[(int) match_result];
}
}
return 0;
}
assert(node != STB__DFA_UNDEF);
}
}
int stb_matcher_find(stb_matcher *m, char *str)
{
assert(m->does_lex == 0);
return stb__matcher_dfa(m, str, NULL);
}
int stb_lex(stb_matcher *m, char *str, int *len)
{
assert(m->does_lex);
return stb__matcher_dfa(m, str, len);
}
#ifdef STB_PERFECT_HASH
int stb_regex(char *regex, char *str)
{
static stb_perfect p;
static stb_matcher ** matchers;
static char ** regexps;
static char ** regexp_cache;
static unsigned short *mapping;
int z = stb_perfect_hash(&p, (int)(size_t) regex);
if (z >= 0) {
if (strcmp(regex, regexp_cache[(int) mapping[z]])) {
int i = mapping[z];
stb_matcher_free(matchers[i]);
free(regexp_cache[i]);
regexps[i] = regex;
regexp_cache[i] = stb_p_strdup(regex);
matchers[i] = stb_regex_matcher(regex);
}
} else {
int i,n;
if (regex == NULL) {
for (i=0; i < stb_arr_len(matchers); ++i) {
stb_matcher_free(matchers[i]);
free(regexp_cache[i]);
}
stb_arr_free(matchers);
stb_arr_free(regexps);
stb_arr_free(regexp_cache);
stb_perfect_destroy(&p);
free(mapping); mapping = NULL;
return -1;
}
stb_arr_push(regexps, regex);
stb_arr_push(regexp_cache, stb_p_strdup(regex));
stb_arr_push(matchers, stb_regex_matcher(regex));
stb_perfect_destroy(&p);
n = stb_perfect_create(&p, (unsigned int *) (char **) regexps, stb_arr_len(regexps));
mapping = (unsigned short *) realloc(mapping, n * sizeof(*mapping));
for (i=0; i < stb_arr_len(regexps); ++i)
mapping[stb_perfect_hash(&p, (int)(size_t) regexps[i])] = i;
z = stb_perfect_hash(&p, (int)(size_t) regex);
}
return stb_matcher_find(matchers[(int) mapping[z]], str);
}
#endif
#endif // STB_DEFINE
#if 0
//////////////////////////////////////////////////////////////////////////////
//
// C source-code introspection
//
// runtime structure
typedef struct
{
char *name;
char *type; // base type
char *comment; // content of comment field
int size; // size of base type
int offset; // field offset
int arrcount[8]; // array sizes; -1 = pointer indirection; 0 = end of list
} stb_info_field;
typedef struct
{
char *structname;
int size;
int num_fields;
stb_info_field *fields;
} stb_info_struct;
extern stb_info_struct stb__introspect_output[];
//
STB_EXTERN void stb_introspect_precompiled(stb_info_struct *compiled);
STB_EXTERN void stb__introspect(char *path, char *file, stb_info_struct *compiled);
#define stb_introspect_ship() stb__introspect(NULL, NULL, stb__introspect_output)
#ifdef STB_SHIP
#define stb_introspect() stb_introspect_ship()
#define stb_introspect_path(p) stb_introspect_ship()
#else
// bootstrapping: define stb_introspect() (or 'path') the first time
#define stb_introspect() stb__introspect(NULL, __FILE__, NULL)
#define stb_introspect_auto() stb__introspect(NULL, __FILE__, stb__introspect_output)
#define stb_introspect_path(p) stb__introspect(p, __FILE__, NULL)
#endif
#ifdef STB_DEFINE
#ifndef STB_INTROSPECT_CPP
#ifdef __cplusplus
#define STB_INTROSPECT_CPP 1
#else
#define STB_INTROSPECT_CPP 0
#endif
#endif
void stb_introspect_precompiled(stb_info_struct *compiled)
{
}
static void stb__introspect_filename(char *buffer, char *path)
{
#if STB_INTROSPECT_CPP
stb_p_sprintf(buffer stb_p_size(9999), "%s/stb_introspect.cpp", path);
#else
stb_p_sprintf(buffer stb_p_size(9999), "%s/stb_introspect.c", path);
#endif
}
static void stb__introspect_compute(char *path, char *file)
{
int i;
char ** include_list = NULL;
char ** introspect_list = NULL;
FILE *f;
f = stb_p_fopen(file, "w");
if (!f) return;
fputs("// if you get compiler errors, change the following 0 to a 1:\n", f);
fputs("#define STB_INTROSPECT_INVALID 0\n\n", f);
fputs("// this will force the code to compile, and force the introspector\n", f);
fputs("// to run and then exit, allowing you to recompile\n\n\n", f);
fputs("#include \"stb.h\"\n\n",f );
fputs("#if STB_INTROSPECT_INVALID\n", f);
fputs(" stb_info_struct stb__introspect_output[] = { (void *) 1 }\n", f);
fputs("#else\n\n", f);
for (i=0; i < stb_arr_len(include_list); ++i)
fprintf(f, " #include \"%s\"\n", include_list[i]);
fputs(" stb_info_struct stb__introspect_output[] =\n{\n", f);
for (i=0; i < stb_arr_len(introspect_list); ++i)
fprintf(f, " stb_introspect_%s,\n", introspect_list[i]);
fputs(" };\n", f);
fputs("#endif\n", f);
fclose(f);
}
static stb_info_struct *stb__introspect_info;
#ifndef STB_SHIP
#endif
void stb__introspect(char *path, char *file, stb_info_struct *compiled)
{
static int first=1;
if (!first) return;
first=0;
stb__introspect_info = compiled;
#ifndef STB_SHIP
if (path || file) {
int bail_flag = compiled && compiled[0].structname == (void *) 1;
int needs_building = bail_flag;
struct stb__stat st;
char buffer[1024], buffer2[1024];
if (!path) {
stb_splitpath(buffer, file, STB_PATH);
path = buffer;
}
// bail if the source path doesn't exist
if (!stb_fexists(path)) return;
stb__introspect_filename(buffer2, path);
// get source/include files timestamps, compare to output-file timestamp;
// if mismatched, regenerate
if (stb__stat(buffer2, &st))
needs_building = STB_TRUE;
{
// find any file that contains an introspection command and is newer
// if needs_building is already true, we don't need to do this test,
// but we still need these arrays, so go ahead and get them
         char **all[3];
         char **introspect_h = NULL;
         int i,j;
         all[0] = stb_readdir_files_mask(path, "*.h");
         all[1] = stb_readdir_files_mask(path, "*.c");
         all[2] = stb_readdir_files_mask(path, "*.cpp");
if (needs_building) {
for (j=0; j < 3; ++j) {
for (i=0; i < stb_arr_len(all[j]); ++i) {
struct stb__stat st2;
if (!stb__stat(all[j][i], &st2)) {
if (st.st_mtime < st2.st_mtime) {
                        char *z = stb_filec(all[j][i], NULL), *y = z;
                        int found=STB_FALSE;
                        while (y) {
                           y = strstr(y, "//si");
                           if (y && isspace(y[4])) {
                              found = STB_TRUE;
                              break;
                           }
                           if (y) y += 4;   // skip a "//si" that isn't a command
                        }
                        free(z);
                        if (found) {
                           needs_building = STB_TRUE;
                           goto done;
                        }
}
}
}
}
done:;
}
         // collect every file that contains an introspection command, so
         // their timestamps can be compared against the output file below
         for (j=0; j < 3; ++j) {
            for (i=0; i < stb_arr_len(all[j]); ++i) {
               char *z = stb_filec(all[j][i], NULL), *y = z;
               int found=STB_FALSE;
               while (y) {
                  y = strstr(y, "//si");
                  if (y && isspace(y[4])) {
                     found = STB_TRUE;
                     break;
                  }
                  if (y) y += 4;
               }
               if (found)
                  stb_arr_push(introspect_h, stb_p_strdup(all[j][i]));
               free(z);
            }
         }
stb_readdir_free(all);
if (!needs_building) {
for (i=0; i < stb_arr_len(introspect_h); ++i) {
struct stb__stat st2;
if (!stb__stat(introspect_h[i], &st2))
if (st.st_mtime < st2.st_mtime)
needs_building = STB_TRUE;
}
}
if (needs_building) {
stb__introspect_compute(path, buffer2);
}
}
}
#endif
}
#endif
#endif
#ifdef STB_INTROSPECT
// compile-time code-generator
#define INTROSPECT(x) int main(int argc, char **argv) { stb__introspect(__FILE__); return 0; }
#define FILE(x)
void stb__introspect(char *filename)
{
char *file = stb_file(filename, NULL);
char *s = file, *t, **p;
char *out_name = "stb_introspect.c";
char *out_path;
STB_ARR(char) filelist = NULL;
int i,n;
if (!file) stb_fatal("Couldn't open %s", filename);
out_path = stb_splitpathdup(filename, STB_PATH);
// search for the macros
while (*s) {
char buffer[256];
while (*s && !isupper(*s)) ++s;
s = stb_strtok_invert(buffer, s, "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
s = stb_skipwhite(s);
if (*s == '(') {
++s;
t = strchr(s, ')');
if (t == NULL) stb_fatal("Error parsing %s", filename);
}
}
}
#endif
//////////////////////////////////////////////////////////////////////////////
//
// STB-C sliding-window dictionary compression
//
// This uses a DEFLATE-style sliding window, but no bitwise entropy.
// Everything is on byte boundaries, so you could then apply a byte-wise
// entropy code, though that's nowhere near as effective.
//
// An STB-C stream begins with a 16-byte header:
// 4 bytes: 0x57 0xBC 0x00 0x00
// 8 bytes: big-endian size of decompressed data, 64-bits
// 4 bytes: big-endian size of window (how far back decompressor may need)
//
// The following symbols appear in the stream (these were determined ad hoc,
// not by analysis):
//
// [dict] 00000100 yyyyyyyy yyyyyyyy yyyyyyyy xxxxxxxx xxxxxxxx
// [END] 00000101 11111010 cccccccc cccccccc cccccccc cccccccc
// [dict] 00000110 yyyyyyyy yyyyyyyy yyyyyyyy xxxxxxxx
// [literals] 00000111 zzzzzzzz zzzzzzzz
// [literals] 00001zzz zzzzzzzz
// [dict] 00010yyy yyyyyyyy yyyyyyyy xxxxxxxx xxxxxxxx
// [dict] 00011yyy yyyyyyyy yyyyyyyy xxxxxxxx
// [literals] 001zzzzz
// [dict] 01yyyyyy yyyyyyyy xxxxxxxx
// [dict] 1xxxxxxx yyyyyyyy
//
// xxxxxxxx: match length - 1
// yyyyyyyy: backwards distance - 1
// zzzzzzzz: num literals - 1
// cccccccc: adler32 checksum of decompressed data
// (all big-endian)
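// worked example: the token bytes 0x82 0x05 have the form 1xxxxxxx yyyyyyyy,
// so xxxxxxx = 2 (match length 2+1 = 3) and yyyyyyyy = 5 (distance 5+1 = 6):
// "copy 3 bytes starting 6 bytes back in the decompressed output".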
STB_EXTERN stb_uint stb_decompress_length(stb_uchar *input);
STB_EXTERN stb_uint stb_decompress(stb_uchar *out,stb_uchar *in,stb_uint len);
STB_EXTERN stb_uint stb_compress (stb_uchar *out,stb_uchar *in,stb_uint len);
STB_EXTERN void stb_compress_window(int z);
STB_EXTERN void stb_compress_hashsize(unsigned int z);
STB_EXTERN int stb_compress_tofile(char *filename, char *in, stb_uint len);
STB_EXTERN int stb_compress_intofile(FILE *f, char *input, stb_uint len);
STB_EXTERN char *stb_decompress_fromfile(char *filename, stb_uint *len);
STB_EXTERN int stb_compress_stream_start(FILE *f);
STB_EXTERN void stb_compress_stream_end(int close);
STB_EXTERN void stb_write(char *data, int data_len);
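// usage sketch (illustrative; 'data' and 'len' are the caller's buffer, and
// the len + len/2 + 512 output pad is a guessed worst-case, not a bound the
// library promises).
#if 0
static void compress_demo(stb_uchar *data, stb_uint len)
{
   stb_uint clen, dlen;
   stb_uchar *cbuf = (stb_uchar *) malloc(len + len/2 + 512);
   stb_uchar *dbuf;
   clen = stb_compress(cbuf, data, len);
   dlen = stb_decompress_length(cbuf);         // reads the 16-byte header
   dbuf = (stb_uchar *) malloc(dlen);
   if (stb_decompress(dbuf, cbuf, clen) != dlen) { /* corrupt stream */ }
   free(dbuf);
   free(cbuf);
}
#endif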
#ifdef STB_DEFINE
stb_uint stb_decompress_length(stb_uchar *input)
{
return (input[8] << 24) + (input[9] << 16) + (input[10] << 8) + input[11];
}
//////////////////// decompressor ///////////////////////
// simple implementation that just writes whole thing into big block
static unsigned char *stb__barrier;
static unsigned char *stb__barrier2;
static unsigned char *stb__barrier3;
static unsigned char *stb__barrier4;
static stb_uchar *stb__dout;
static void stb__match(stb_uchar *data, stb_uint length)
{
// INVERSE of memmove... write each byte before copying the next...
assert (stb__dout + length <= stb__barrier);
if (stb__dout + length > stb__barrier) { stb__dout += length; return; }
if (data < stb__barrier4) { stb__dout = stb__barrier+1; return; }
while (length--) *stb__dout++ = *data++;
}
static void stb__lit(stb_uchar *data, stb_uint length)
{
assert (stb__dout + length <= stb__barrier);
if (stb__dout + length > stb__barrier) { stb__dout += length; return; }
if (data < stb__barrier2) { stb__dout = stb__barrier+1; return; }
memcpy(stb__dout, data, length);
stb__dout += length;
}
#define stb__in2(x) ((i[x] << 8) + i[(x)+1])
#define stb__in3(x) ((i[x] << 16) + stb__in2((x)+1))
#define stb__in4(x) ((i[x] << 24) + stb__in3((x)+1))
static stb_uchar *stb_decompress_token(stb_uchar *i)
{
if (*i >= 0x20) { // use fewer if's for cases that expand small
if (*i >= 0x80) stb__match(stb__dout-i[1]-1, i[0] - 0x80 + 1), i += 2;
else if (*i >= 0x40) stb__match(stb__dout-(stb__in2(0) - 0x4000 + 1), i[2]+1), i += 3;
else /* *i >= 0x20 */ stb__lit(i+1, i[0] - 0x20 + 1), i += 1 + (i[0] - 0x20 + 1);
} else { // more ifs for cases that expand large, since overhead is amortized
if (*i >= 0x18) stb__match(stb__dout-(stb__in3(0) - 0x180000 + 1), i[3]+1), i += 4;
else if (*i >= 0x10) stb__match(stb__dout-(stb__in3(0) - 0x100000 + 1), stb__in2(3)+1), i += 5;
else if (*i >= 0x08) stb__lit(i+2, stb__in2(0) - 0x0800 + 1), i += 2 + (stb__in2(0) - 0x0800 + 1);
else if (*i == 0x07) stb__lit(i+3, stb__in2(1) + 1), i += 3 + (stb__in2(1) + 1);
else if (*i == 0x06) stb__match(stb__dout-(stb__in3(1)+1), i[4]+1), i += 5;
else if (*i == 0x04) stb__match(stb__dout-(stb__in3(1)+1), stb__in2(4)+1), i += 6;
}
return i;
}
stb_uint stb_decompress(stb_uchar *output, stb_uchar *i, stb_uint length)
{
stb_uint olen;
if (stb__in4(0) != 0x57bC0000) return 0;
if (stb__in4(4) != 0) return 0; // error! stream is > 4GB
olen = stb_decompress_length(i);
stb__barrier2 = i;
stb__barrier3 = i+length;
stb__barrier = output + olen;
stb__barrier4 = output;
i += 16;
stb__dout = output;
while (1) {
stb_uchar *old_i = i;
i = stb_decompress_token(i);
if (i == old_i) {
if (*i == 0x05 && i[1] == 0xfa) {
assert(stb__dout == output + olen);
if (stb__dout != output + olen) return 0;
if (stb_adler32(1, output, olen) != (stb_uint) stb__in4(2))
return 0;
return olen;
} else {
assert(0); /* NOTREACHED */
return 0;
}
}
assert(stb__dout <= output + olen);
if (stb__dout > output + olen)
return 0;
}
}
char *stb_decompress_fromfile(char *filename, unsigned int *len)
{
unsigned int n;
char *q;
unsigned char *p;
FILE *f = stb_p_fopen(filename, "rb"); if (f == NULL) return NULL;
fseek(f, 0, SEEK_END);
n = ftell(f);
fseek(f, 0, SEEK_SET);
   p = (unsigned char *) malloc(n); if (p == NULL) { fclose(f); return NULL; }
   if (fread(p, 1, n, f) != n) { fclose(f); free(p); return NULL; }
   fclose(f);
   if (n < 16 || p[0] != 0x57 || p[1] != 0xBc || p[2] || p[3]) { free(p); return NULL; }
q = (char *) malloc(stb_decompress_length(p)+1);
if (!q) { free(p); return NULL; }
*len = stb_decompress((unsigned char *) q, p, n);
if (*len) q[*len] = 0;
free(p);
return q;
}
#if 0
// streaming decompressor
static struct
{
stb__uchar *in_buffer;
stb__uchar *match;
stb__uint pending_literals;
stb__uint pending_match;
} xx;
static void stb__match(stb_uchar *data, stb_uint length)
{
// INVERSE of memmove... write each byte before copying the next...
assert (stb__dout + length <= stb__barrier);
if (stb__dout + length > stb__barrier) { stb__dout += length; return; }
if (data < stb__barrier2) { stb__dout = stb__barrier+1; return; }
while (length--) *stb__dout++ = *data++;
}
static void stb__lit(stb_uchar *data, stb_uint length)
{
assert (stb__dout + length <= stb__barrier);
if (stb__dout + length > stb__barrier) { stb__dout += length; return; }
if (data < stb__barrier2) { stb__dout = stb__barrier+1; return; }
memcpy(stb__dout, data, length);
stb__dout += length;
}
static void sx_match(stb_uchar *data, stb_uint length)
{
xx.match = data;
xx.pending_match = length;
}
static void sx_lit(stb_uchar *data, stb_uint length)
{
xx.pending_lit = length;
}
static int stb_decompress_token_state(void)
{
stb__uchar *i = xx.in_buffer;
if (*i >= 0x20) { // use fewer if's for cases that expand small
if (*i >= 0x80) sx_match(stb__dout-i[1]-1, i[0] - 0x80 + 1), i += 2;
else if (*i >= 0x40) sx_match(stb__dout-(stb__in2(0) - 0x4000 + 1), i[2]+1), i += 3;
else /* *i >= 0x20 */ sx_lit(i+1, i[0] - 0x20 + 1), i += 1;
} else { // more ifs for cases that expand large, since overhead is amortized
if (*i >= 0x18) sx_match(stb__dout-(stb__in3(0) - 0x180000 + 1), i[3]+1), i += 4;
else if (*i >= 0x10) sx_match(stb__dout-(stb__in3(0) - 0x100000 + 1), stb__in2(3)+1), i += 5;
else if (*i >= 0x08) sx_lit(i+2, stb__in2(0) - 0x0800 + 1), i += 2;
else if (*i == 0x07) sx_lit(i+3, stb__in2(1) + 1), i += 3;
else if (*i == 0x06) sx_match(stb__dout-(stb__in3(1)+1), i[4]+1), i += 5;
else if (*i == 0x04) sx_match(stb__dout-(stb__in3(1)+1), stb__in2(4)+1), i += 6;
else return 0;
}
xx.in_buffer = i;
return 1;
}
#endif
//////////////////// compressor ///////////////////////
static unsigned int stb_matchlen(stb_uchar *m1, stb_uchar *m2, stb_uint maxlen)
{
stb_uint i;
for (i=0; i < maxlen; ++i)
if (m1[i] != m2[i]) return i;
return i;
}
// simple implementation that just takes the source data in a big block
static stb_uchar *stb__out;
static FILE *stb__outfile;
static stb_uint stb__outbytes;
static void stb__write(unsigned char v)
{
fputc(v, stb__outfile);
++stb__outbytes;
}
#define stb_out(v) (stb__out ? (void)(*stb__out++ = (stb_uchar) (v)) : stb__write((stb_uchar) (v)))
static void stb_out2(stb_uint v)
{
stb_out(v >> 8);
stb_out(v);
}
static void stb_out3(stb_uint v) { stb_out(v >> 16); stb_out(v >> 8); stb_out(v); }
static void stb_out4(stb_uint v) { stb_out(v >> 24); stb_out(v >> 16);
stb_out(v >> 8 ); stb_out(v); }
static void outliterals(stb_uchar *in, ptrdiff_t numlit)
{
while (numlit > 65536) {
outliterals(in,65536);
in += 65536;
numlit -= 65536;
}
if (numlit == 0) ;
else if (numlit <= 32) stb_out (0x000020 + (stb_uint) numlit-1);
else if (numlit <= 2048) stb_out2(0x000800 + (stb_uint) numlit-1);
else /* numlit <= 65536) */ stb_out3(0x070000 + (stb_uint) numlit-1);
if (stb__out) {
memcpy(stb__out,in,numlit);
stb__out += numlit;
} else
fwrite(in, 1, numlit, stb__outfile);
}
static int stb__window = 0x40000; // 256K
void stb_compress_window(int z)
{
if (z >= 0x1000000) z = 0x1000000; // limit of implementation
if (z < 0x100) z = 0x100; // insanely small
stb__window = z;
}
static int stb_not_crap(int best, int dist)
{
return ((best > 2 && dist <= 0x00100)
|| (best > 5 && dist <= 0x04000)
|| (best > 7 && dist <= 0x80000));
}
static stb_uint stb__hashsize = 32768;
void stb_compress_hashsize(unsigned int y)
{
unsigned int z = 1024;
while (z < y) z <<= 1;
stb__hashsize = z >> 2; // pass in bytes, store #pointers
}
// note that you can play with the hashing functions all you
// want without needing to change the decompressor
#define stb__hc(q,h,c) (((h) << 7) + ((h) >> 25) + q[c])
#define stb__hc2(q,h,c,d) (((h) << 14) + ((h) >> 18) + (q[c] << 7) + q[d])
#define stb__hc3(q,c,d,e) ((q[c] << 14) + (q[d] << 7) + q[e])
static stb_uint32 stb__running_adler;
static int stb_compress_chunk(stb_uchar *history,
stb_uchar *start,
stb_uchar *end,
int length,
int *pending_literals,
stb_uchar **chash,
stb_uint mask)
{
int window = stb__window;
stb_uint match_max;
stb_uchar *lit_start = start - *pending_literals;
stb_uchar *q = start;
#define STB__SCRAMBLE(h) (((h) + ((h) >> 16)) & mask)
// stop short of the end so we don't scan off the end doing
// the hashing; this means we won't compress the last few bytes
// unless they were part of something longer
while (q < start+length && q+12 < end) {
int m;
stb_uint h1,h2,h3,h4, h;
stb_uchar *t;
int best = 2, dist=0;
if (q+65536 > end)
match_max = (stb_uint) (end-q);
else
match_max = 65536u;
#define stb__nc(b,d) ((d) <= window && ((b) > 9 || stb_not_crap(b,d)))
#define STB__TRY(t,p) /* avoid retrying a match we already tried */ \
if (p ? dist != (int) (q-t) : 1) \
if ((m = (int) stb_matchlen(t, q, match_max)) > best)\
if (stb__nc(m,(int) (q-(t)))) \
best = m, dist = (int) (q - (t))
// rather than search for all matches, only try 4 candidate locations,
// chosen based on 4 different hash functions of different lengths.
// this strategy is inspired by LZO; hashing is unrolled here using the
// 'hc' macro
h = stb__hc3(q,0, 1, 2); h1 = STB__SCRAMBLE(h);
t = chash[h1]; if (t) STB__TRY(t,0);
h = stb__hc2(q,h, 3, 4); h2 = STB__SCRAMBLE(h);
h = stb__hc2(q,h, 5, 6); t = chash[h2]; if (t) STB__TRY(t,1);
h = stb__hc2(q,h, 7, 8); h3 = STB__SCRAMBLE(h);
h = stb__hc2(q,h, 9,10); t = chash[h3]; if (t) STB__TRY(t,1);
h = stb__hc2(q,h,11,12); h4 = STB__SCRAMBLE(h);
t = chash[h4]; if (t) STB__TRY(t,1);
// because we use a shared hash table, can only update it
// _after_ we've probed all of them
chash[h1] = chash[h2] = chash[h3] = chash[h4] = q;
if (best > 2)
assert(dist > 0);
// see if our best match qualifies
if (best < 3) { // fast path literals
++q;
} else if (best > 2 && best <= 0x80 && dist <= 0x100) {
outliterals(lit_start, q-lit_start); lit_start = (q += best);
stb_out(0x80 + best-1);
stb_out(dist-1);
} else if (best > 5 && best <= 0x100 && dist <= 0x4000) {
outliterals(lit_start, q-lit_start); lit_start = (q += best);
stb_out2(0x4000 + dist-1);
stb_out(best-1);
} else if (best > 7 && best <= 0x100 && dist <= 0x80000) {
outliterals(lit_start, q-lit_start); lit_start = (q += best);
stb_out3(0x180000 + dist-1);
stb_out(best-1);
} else if (best > 8 && best <= 0x10000 && dist <= 0x80000) {
outliterals(lit_start, q-lit_start); lit_start = (q += best);
stb_out3(0x100000 + dist-1);
stb_out2(best-1);
} else if (best > 9 && dist <= 0x1000000) {
if (best > 65536) best = 65536;
outliterals(lit_start, q-lit_start); lit_start = (q += best);
if (best <= 0x100) {
stb_out(0x06);
stb_out3(dist-1);
stb_out(best-1);
} else {
stb_out(0x04);
stb_out3(dist-1);
stb_out2(best-1);
}
} else { // fallback literals if no match was a balanced tradeoff
++q;
}
}
// if we didn't get all the way, add the rest to literals
if (q-start < length)
q = start+length;
// the literals are everything from lit_start to q
*pending_literals = (int) (q - lit_start);
stb__running_adler = stb_adler32(stb__running_adler, start, (int) (q - start));
return (int) (q - start);
}
static int stb_compress_inner(stb_uchar *input, stb_uint length)
{
int literals = 0;
stb_uint len,i;
stb_uchar **chash;
chash = (stb_uchar**) malloc(stb__hashsize * sizeof(stb_uchar*));
if (chash == NULL) return 0; // failure
for (i=0; i < stb__hashsize; ++i)
chash[i] = NULL;
// stream signature
stb_out(0x57); stb_out(0xbc);
stb_out2(0);
stb_out4(0); // 64-bit length requires 32-bit leading 0
stb_out4(length);
stb_out4(stb__window);
stb__running_adler = 1;
len = stb_compress_chunk(input, input, input+length, length, &literals, chash, stb__hashsize-1);
assert(len == length);
outliterals(input+length - literals, literals);
free(chash);
stb_out2(0x05fa); // end opcode
stb_out4(stb__running_adler);
return 1; // success
}
stb_uint stb_compress(stb_uchar *out, stb_uchar *input, stb_uint length)
{
stb__out = out;
stb__outfile = NULL;
stb_compress_inner(input, length);
return (stb_uint) (stb__out - out);
}
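// Illustrative usage sketch (not part of the library): compressing an
// in-memory buffer with stb_compress. stb_compress does no bounds checking
// on 'out', so the caller must over-allocate; the size guess below mirrors
// the commented-out estimate in stb_compress_tofile. 'example_compress' is
// a hypothetical name.
#if 0
static void example_compress(stb_uchar *input, stb_uint length)
{
   stb_uint maxlen = length + 512 + (length >> 2); // worst-case guess
   stb_uchar *buffer = (stb_uchar *) malloc(maxlen);
   if (buffer) {
      stb_uint clen = stb_compress(buffer, input, length);
      printf("%u -> %u bytes\n", length, clen);
      free(buffer);
   }
}
#endif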
int stb_compress_tofile(char *filename, char *input, unsigned int length)
{
//int maxlen = length + 512 + (length >> 2); // total guess
//char *buffer = (char *) malloc(maxlen);
//int blen = stb_compress((stb_uchar*)buffer, (stb_uchar*)input, length);
stb__out = NULL;
stb__outfile = stb_p_fopen(filename, "wb");
if (!stb__outfile) return 0;
stb__outbytes = 0;
   if (!stb_compress_inner((stb_uchar*)input, length)) {
      fclose(stb__outfile);
      return 0;
   }
fclose(stb__outfile);
return stb__outbytes;
}
int stb_compress_intofile(FILE *f, char *input, unsigned int length)
{
//int maxlen = length + 512 + (length >> 2); // total guess
//char *buffer = (char*)malloc(maxlen);
//int blen = stb_compress((stb_uchar*)buffer, (stb_uchar*)input, length);
stb__out = NULL;
stb__outfile = f;
if (!stb__outfile) return 0;
stb__outbytes = 0;
if (!stb_compress_inner((stb_uchar*)input, length))
return 0;
return stb__outbytes;
}
////////////////////// streaming I/O version /////////////////////
static size_t stb_out_backpatch_id(void)
{
if (stb__out)
return (size_t) stb__out;
else
return ftell(stb__outfile);
}
static void stb_out_backpatch(size_t id, stb_uint value)
{
stb_uchar data[4] = { (stb_uchar)(value >> 24), (stb_uchar)(value >> 16), (stb_uchar)(value >> 8), (stb_uchar)(value) };
if (stb__out) {
memcpy((void *) id, data, 4);
} else {
stb_uint where = ftell(stb__outfile);
fseek(stb__outfile, (long) id, SEEK_SET);
fwrite(data, 4, 1, stb__outfile);
fseek(stb__outfile, where, SEEK_SET);
}
}
// ok, the wraparound buffer was a total failure. let's instead
// use a copying-in-place buffer, which lets us share the code.
// This is way less efficient but it'll do for now.
static struct
{
stb_uchar *buffer;
int size; // physical size of buffer in bytes
int valid; // amount of valid data in bytes
int start; // bytes of data already output
int window;
int fsize;
int pending_literals; // bytes not-quite output but counted in start
int length_id;
stb_uint total_bytes;
stb_uchar **chash;
stb_uint hashmask;
} xtb;
static int stb_compress_streaming_start(void)
{
stb_uint i;
xtb.size = stb__window * 3;
xtb.buffer = (stb_uchar*)malloc(xtb.size);
if (!xtb.buffer) return 0;
xtb.chash = (stb_uchar**)malloc(sizeof(*xtb.chash) * stb__hashsize);
if (!xtb.chash) {
free(xtb.buffer);
return 0;
}
for (i=0; i < stb__hashsize; ++i)
xtb.chash[i] = NULL;
xtb.hashmask = stb__hashsize-1;
xtb.valid = 0;
xtb.start = 0;
xtb.window = stb__window;
xtb.fsize = stb__window;
xtb.pending_literals = 0;
xtb.total_bytes = 0;
// stream signature
stb_out(0x57); stb_out(0xbc); stb_out2(0);
stb_out4(0); // 64-bit length requires 32-bit leading 0
xtb.length_id = (int) stb_out_backpatch_id();
stb_out4(0); // we don't know the output length yet
stb_out4(stb__window);
stb__running_adler = 1;
return 1;
}
static int stb_compress_streaming_end(void)
{
// flush out any remaining data
stb_compress_chunk(xtb.buffer, xtb.buffer+xtb.start, xtb.buffer+xtb.valid,
xtb.valid-xtb.start, &xtb.pending_literals, xtb.chash, xtb.hashmask);
// write out pending literals
outliterals(xtb.buffer + xtb.valid - xtb.pending_literals, xtb.pending_literals);
stb_out2(0x05fa); // end opcode
stb_out4(stb__running_adler);
stb_out_backpatch(xtb.length_id, xtb.total_bytes);
free(xtb.buffer);
free(xtb.chash);
return 1;
}
void stb_write(char *data, int data_len)
{
stb_uint i;
// @TODO: fast path for filling the buffer and doing nothing else
// if (xtb.valid + data_len < xtb.size)
xtb.total_bytes += data_len;
while (data_len) {
// fill buffer
if (xtb.valid < xtb.size) {
int amt = xtb.size - xtb.valid;
if (data_len < amt) amt = data_len;
memcpy(xtb.buffer + xtb.valid, data, amt);
data_len -= amt;
data += amt;
xtb.valid += amt;
}
if (xtb.valid < xtb.size)
return;
// at this point, the buffer is full
// if we can process some data, go for it; make sure
// we leave an 'fsize's worth of data, though
if (xtb.start + xtb.fsize < xtb.valid) {
int amount = (xtb.valid - xtb.fsize) - xtb.start;
int n;
assert(amount > 0);
n = stb_compress_chunk(xtb.buffer, xtb.buffer + xtb.start, xtb.buffer + xtb.valid,
amount, &xtb.pending_literals, xtb.chash, xtb.hashmask);
xtb.start += n;
}
assert(xtb.start + xtb.fsize >= xtb.valid);
// at this point, our future size is too small, so we
// need to flush some history. we, in fact, flush exactly
// one window's worth of history
{
int flush = xtb.window;
assert(xtb.start >= flush);
assert(xtb.valid >= flush);
// if 'pending literals' extends back into the shift region,
// write them out
if (xtb.start - xtb.pending_literals < flush) {
outliterals(xtb.buffer + xtb.start - xtb.pending_literals, xtb.pending_literals);
xtb.pending_literals = 0;
}
// now shift the window
memmove(xtb.buffer, xtb.buffer + flush, xtb.valid - flush);
xtb.start -= flush;
xtb.valid -= flush;
for (i=0; i <= xtb.hashmask; ++i)
if (xtb.chash[i] < xtb.buffer + flush)
xtb.chash[i] = NULL;
else
xtb.chash[i] -= flush;
}
// and now that we've made room for more data, go back to the top
}
}
int stb_compress_stream_start(FILE *f)
{
stb__out = NULL;
stb__outfile = f;
if (f == NULL)
return 0;
if (!stb_compress_streaming_start())
return 0;
return 1;
}
void stb_compress_stream_end(int close)
{
stb_compress_streaming_end();
if (close && stb__outfile) {
fclose(stb__outfile);
}
}
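// Illustrative usage sketch (not part of the library): driving the streaming
// compressor; stb_write may be called any number of times between start and
// end. 'example_compress_stream' is a hypothetical name.
#if 0
static int example_compress_stream(FILE *src, FILE *dst)
{
   char buf[4096];
   size_t n;
   if (!stb_compress_stream_start(dst))
      return 0;
   while ((n = fread(buf, 1, sizeof(buf), src)) > 0)
      stb_write(buf, (int) n);
   stb_compress_stream_end(1); // 1 => fclose(dst) on our behalf
   return 1;
}
#endif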
#endif // STB_DEFINE
//////////////////////////////////////////////////////////////////////////////
//
// File abstraction... tired of not having this... we can write
// compressors to be layers over these that auto-close their children.
typedef struct stbfile
{
int (*getbyte)(struct stbfile *); // -1 on EOF
unsigned int (*getdata)(struct stbfile *, void *block, unsigned int len);
int (*putbyte)(struct stbfile *, int byte);
unsigned int (*putdata)(struct stbfile *, void *block, unsigned int len);
unsigned int (*size)(struct stbfile *);
unsigned int (*tell)(struct stbfile *);
void (*backpatch)(struct stbfile *, unsigned int tell, void *block, unsigned int len);
void (*close)(struct stbfile *);
FILE *f; // file to fread/fwrite
unsigned char *buffer; // input/output buffer
unsigned char *indata, *inend; // input buffer
union {
int various;
void *ptr;
};
} stbfile;
STB_EXTERN unsigned int stb_getc(stbfile *f); // read
STB_EXTERN int stb_putc(stbfile *f, int ch); // write
STB_EXTERN unsigned int stb_getdata(stbfile *f, void *buffer, unsigned int len); // read
STB_EXTERN unsigned int stb_putdata(stbfile *f, void *buffer, unsigned int len); // write
STB_EXTERN unsigned int stb_tell(stbfile *f); // read
STB_EXTERN unsigned int stb_size(stbfile *f); // read/write
STB_EXTERN void stb_backpatch(stbfile *f, unsigned int tell, void *buffer, unsigned int len); // write
#ifdef STB_DEFINE
unsigned int stb_getc(stbfile *f) { return f->getbyte(f); }
int stb_putc(stbfile *f, int ch) { return f->putbyte(f, ch); }
unsigned int stb_getdata(stbfile *f, void *buffer, unsigned int len)
{
return f->getdata(f, buffer, len);
}
unsigned int stb_putdata(stbfile *f, void *buffer, unsigned int len)
{
return f->putdata(f, buffer, len);
}
void stb_close(stbfile *f)
{
f->close(f);
free(f);
}
unsigned int stb_tell(stbfile *f) { return f->tell(f); }
unsigned int stb_size(stbfile *f) { return f->size(f); }
void stb_backpatch(stbfile *f, unsigned int tell, void *buffer, unsigned int len)
{
f->backpatch(f,tell,buffer,len);
}
// FILE * implementation
static int stb__fgetbyte(stbfile *f) { return fgetc(f->f); }
static int stb__fputbyte(stbfile *f, int ch) { return fputc(ch, f->f)==0; }
static unsigned int stb__fgetdata(stbfile *f, void *buffer, unsigned int len) { return (unsigned int) fread(buffer,1,len,f->f); }
static unsigned int stb__fputdata(stbfile *f, void *buffer, unsigned int len) { return (unsigned int) fwrite(buffer,1,len,f->f); }
static unsigned int stb__fsize(stbfile *f) { return (unsigned int) stb_filelen(f->f); }
static unsigned int stb__ftell(stbfile *f) { return (unsigned int) ftell(f->f); }
static void stb__fbackpatch(stbfile *f, unsigned int where, void *buffer, unsigned int len)
{
fseek(f->f, where, SEEK_SET);
fwrite(buffer, 1, len, f->f);
fseek(f->f, 0, SEEK_END);
}
static void stb__fclose(stbfile *f) { fclose(f->f); }
stbfile *stb_openf(FILE *f)
{
stbfile m = { stb__fgetbyte, stb__fgetdata,
stb__fputbyte, stb__fputdata,
stb__fsize, stb__ftell, stb__fbackpatch, stb__fclose,
0,0,0, };
stbfile *z = (stbfile *) malloc(sizeof(*z));
if (z) {
*z = m;
z->f = f;
}
return z;
}
static int stb__nogetbyte(stbfile *f) { assert(0); return -1; }
static unsigned int stb__nogetdata(stbfile *f, void *buffer, unsigned int len) { assert(0); return 0; }
static int stb__noputbyte(stbfile *f, int ch) { assert(0); return 0; }
static unsigned int stb__noputdata(stbfile *f, void *buffer, unsigned int len) { assert(0); return 0; }
static void stb__nobackpatch(stbfile *f, unsigned int where, void *buffer, unsigned int len) { assert(0); }
static int stb__bgetbyte(stbfile *s)
{
if (s->indata < s->inend)
return *s->indata++;
else
return -1;
}
static unsigned int stb__bgetdata(stbfile *s, void *buffer, unsigned int len)
{
if (s->indata + len > s->inend)
len = (unsigned int) (s->inend - s->indata);
memcpy(buffer, s->indata, len);
s->indata += len;
return len;
}
static unsigned int stb__bsize(stbfile *s) { return (unsigned int) (s->inend - s->buffer); }
static unsigned int stb__btell(stbfile *s) { return (unsigned int) (s->indata - s->buffer); }
static void stb__bclose(stbfile *s)
{
if (s->various)
free(s->buffer);
}
stbfile *stb_open_inbuffer(void *buffer, unsigned int len)
{
stbfile m = { stb__bgetbyte, stb__bgetdata,
stb__noputbyte, stb__noputdata,
stb__bsize, stb__btell, stb__nobackpatch, stb__bclose };
stbfile *z = (stbfile *) malloc(sizeof(*z));
if (z) {
*z = m;
z->buffer = (unsigned char *) buffer;
z->indata = z->buffer;
z->inend = z->indata + len;
}
return z;
}
stbfile *stb_open_inbuffer_free(void *buffer, unsigned int len)
{
stbfile *z = stb_open_inbuffer(buffer, len);
if (z)
z->various = 1; // free
return z;
}
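// Illustrative usage sketch (not part of the library): reading a memory
// block through the stbfile interface; the same loop works unchanged on a
// FILE * opened via stb_openf. 'example_count_zeros' is a hypothetical name.
#if 0
static unsigned int example_count_zeros(void *block, unsigned int len)
{
   unsigned int zeros = 0;
   int c;
   stbfile *f = stb_open_inbuffer(block, len);
   if (!f) return 0;
   while ((c = (int) stb_getc(f)) != -1) // getbyte returns -1 at end of data
      if (c == 0) ++zeros;
   stb_close(f);
   return zeros;
}
#endif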
#ifndef STB_VERSION
// if we've been cut-and-pasted elsewhere, you get a limited
// version of stb_open, without the 'k' flag and utf8 support
static void stb__fclose2(stbfile *f)
{
fclose(f->f);
}
stbfile *stb_open(char *filename, char *mode)
{
FILE *f = stb_p_fopen(filename, mode);
stbfile *s;
if (f == NULL) return NULL;
s = stb_openf(f);
if (s)
s->close = stb__fclose2;
return s;
}
#else
// the full version depends on some code in stb.h; this
// also includes the memory buffer output format implemented with stb_arr
static void stb__fclose2(stbfile *f)
{
stb_fclose(f->f, f->various);
}
stbfile *stb_open(char *filename, char *mode)
{
FILE *f = stb_fopen(filename, mode[0] == 'k' ? mode+1 : mode);
stbfile *s;
if (f == NULL) return NULL;
s = stb_openf(f);
if (s) {
s->close = stb__fclose2;
s->various = mode[0] == 'k' ? stb_keep_if_different : stb_keep_yes;
}
return s;
}
static int stb__aputbyte(stbfile *f, int ch)
{
stb_arr_push(f->buffer, ch);
return 1;
}
static unsigned int stb__aputdata(stbfile *f, void *data, unsigned int len)
{
memcpy(stb_arr_addn(f->buffer, (int) len), data, len);
return len;
}
static unsigned int stb__asize(stbfile *f) { return stb_arr_len(f->buffer); }
static void stb__abackpatch(stbfile *f, unsigned int where, void *data, unsigned int len)
{
memcpy(f->buffer+where, data, len);
}
static void stb__aclose(stbfile *f)
{
*(unsigned char **) f->ptr = f->buffer;
}
stbfile *stb_open_outbuffer(unsigned char **update_on_close)
{
stbfile m = { stb__nogetbyte, stb__nogetdata,
stb__aputbyte, stb__aputdata,
stb__asize, stb__asize, stb__abackpatch, stb__aclose };
stbfile *z = (stbfile *) malloc(sizeof(*z));
if (z) {
      *z = m;
      z->ptr = update_on_close; // set after '*z = m', which would otherwise zero the union
}
return z;
}
#endif
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Arithmetic coder... based on cbloom's notes on the subject, should be
// less code than a huffman code.
typedef struct
{
unsigned int range_low;
unsigned int range_high;
unsigned int code, range; // decode
int buffered_u8;
int pending_ffs;
stbfile *output;
} stb_arith;
STB_EXTERN void stb_arith_init_encode(stb_arith *a, stbfile *out);
STB_EXTERN void stb_arith_init_decode(stb_arith *a, stbfile *in);
STB_EXTERN stbfile *stb_arith_encode_close(stb_arith *a);
STB_EXTERN stbfile *stb_arith_decode_close(stb_arith *a);
STB_EXTERN void stb_arith_encode(stb_arith *a, unsigned int totalfreq, unsigned int freq, unsigned int cumfreq);
STB_EXTERN void stb_arith_encode_log2(stb_arith *a, unsigned int totalfreq2, unsigned int freq, unsigned int cumfreq);
STB_EXTERN unsigned int stb_arith_decode_value(stb_arith *a, unsigned int totalfreq);
STB_EXTERN void stb_arith_decode_advance(stb_arith *a, unsigned int totalfreq, unsigned int freq, unsigned int cumfreq);
STB_EXTERN unsigned int stb_arith_decode_value_log2(stb_arith *a, unsigned int totalfreq2);
STB_EXTERN void stb_arith_decode_advance_log2(stb_arith *a, unsigned int totalfreq2, unsigned int freq, unsigned int cumfreq);
STB_EXTERN void stb_arith_encode_byte(stb_arith *a, int byte);
STB_EXTERN int stb_arith_decode_byte(stb_arith *a);
// this is a memory-inefficient way of doing things, but it's
// fast(?) and simple
typedef struct
{
unsigned short cumfreq;
unsigned short samples;
} stb_arith_symstate_item;
typedef struct
{
int num_sym;
unsigned int pow2;
int countdown;
stb_arith_symstate_item data[1];
} stb_arith_symstate;
#ifdef STB_DEFINE
void stb_arith_init_encode(stb_arith *a, stbfile *out)
{
a->range_low = 0;
a->range_high = 0xffffffff;
a->pending_ffs = -1; // means no buffered character currently, to speed up normal case
a->output = out;
}
static void stb__arith_carry(stb_arith *a)
{
   int i;
   assert(a->pending_ffs != -1); // can't carry with no data
   stb_putc(a->output, a->buffered_u8 + 1); // the carry increments the buffered byte
   for (i=0; i < a->pending_ffs; ++i)
      stb_putc(a->output, 0);   // and turns each pending 0xff into 0x00
   a->pending_ffs = -1;         // the buffer is empty again
}
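// Worked example of the carry: with buffered_u8 == 0x3f and two pending
// 0xff bytes, a carry out of range_low emits 0x40 0x00 0x00 -- the
// increment ripples back through every buffered 0xff.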
static void stb__arith_putbyte(stb_arith *a, int byte)
{
if (a->pending_ffs) {
if (a->pending_ffs == -1) { // means no buffered data; encoded for fast path efficiency
if (byte == 0xff)
stb_putc(a->output, byte); // just write it immediately
else {
a->buffered_u8 = byte;
a->pending_ffs = 0;
}
      } else if (byte == 0xff) {
         ++a->pending_ffs;
      } else {
         int i;
         stb_putc(a->output, a->buffered_u8);
         for (i=0; i < a->pending_ffs; ++i)
            stb_putc(a->output, 0xff);
         a->buffered_u8 = byte; // start buffering the new byte
         a->pending_ffs = 0;
      }
} else if (byte == 0xff) {
++a->pending_ffs;
} else {
// fast path
stb_putc(a->output, a->buffered_u8);
a->buffered_u8 = byte;
}
}
static void stb__arith_flush(stb_arith *a)
{
if (a->pending_ffs >= 0) {
int i;
stb_putc(a->output, a->buffered_u8);
for (i=0; i < a->pending_ffs; ++i)
stb_putc(a->output, 0xff);
}
}
static void stb__renorm_encoder(stb_arith *a)
{
stb__arith_putbyte(a, a->range_low >> 24);
a->range_low <<= 8;
a->range_high = (a->range_high << 8) | 0xff;
}
static void stb__renorm_decoder(stb_arith *a)
{
   int c = stb_getc(a->output);
   a->range <<= 8; // keep range in step with code, or the renorm loops never terminate
   a->code = (a->code << 8) + (c >= 0 ? c : 0); // if EOF, insert 0
}
void stb_arith_encode(stb_arith *a, unsigned int totalfreq, unsigned int freq, unsigned int cumfreq)
{
unsigned int range = a->range_high - a->range_low;
unsigned int old = a->range_low;
range /= totalfreq;
a->range_low += range * cumfreq;
a->range_high = a->range_low + range*freq;
if (a->range_low < old)
stb__arith_carry(a);
while (a->range_high - a->range_low < 0x1000000)
stb__renorm_encoder(a);
}
void stb_arith_encode_log2(stb_arith *a, unsigned int totalfreq2, unsigned int freq, unsigned int cumfreq)
{
unsigned int range = a->range_high - a->range_low;
unsigned int old = a->range_low;
range >>= totalfreq2;
a->range_low += range * cumfreq;
a->range_high = a->range_low + range*freq;
if (a->range_low < old)
stb__arith_carry(a);
while (a->range_high - a->range_low < 0x1000000)
stb__renorm_encoder(a);
}
unsigned int stb_arith_decode_value(stb_arith *a, unsigned int totalfreq)
{
unsigned int freqsize = a->range / totalfreq;
unsigned int z = a->code / freqsize;
return z >= totalfreq ? totalfreq-1 : z;
}
void stb_arith_decode_advance(stb_arith *a, unsigned int totalfreq, unsigned int freq, unsigned int cumfreq)
{
unsigned int freqsize = a->range / totalfreq; // @OPTIMIZE, share with above divide somehow?
a->code -= freqsize * cumfreq;
a->range = freqsize * freq;
while (a->range < 0x1000000)
stb__renorm_decoder(a);
}
unsigned int stb_arith_decode_value_log2(stb_arith *a, unsigned int totalfreq2)
{
unsigned int freqsize = a->range >> totalfreq2;
unsigned int z = a->code / freqsize;
return z >= (1U<<totalfreq2) ? (1U<<totalfreq2)-1 : z;
}
void stb_arith_decode_advance_log2(stb_arith *a, unsigned int totalfreq2, unsigned int freq, unsigned int cumfreq)
{
unsigned int freqsize = a->range >> totalfreq2;
a->code -= freqsize * cumfreq;
a->range = freqsize * freq;
while (a->range < 0x1000000)
stb__renorm_decoder(a);
}
stbfile *stb_arith_encode_close(stb_arith *a)
{
// put exactly as many bytes as we'll read, so we can turn on/off arithmetic coding in a stream
stb__arith_putbyte(a, a->range_low >> 24);
stb__arith_putbyte(a, a->range_low >> 16);
stb__arith_putbyte(a, a->range_low >> 8);
stb__arith_putbyte(a, a->range_low >> 0);
stb__arith_flush(a);
return a->output;
}
stbfile *stb_arith_decode_close(stb_arith *a)
{
return a->output;
}
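// stb_arith_init_decode is declared above but never defined here. A minimal
// sketch consistent with stb_arith_encode_close (which flushes exactly four
// bytes of range_low) might look like the following -- an assumption about
// the intended design, not tested code:
#if 0
void stb_arith_init_decode(stb_arith *a, stbfile *in)
{
   int i;
   a->output = in; // 'output' doubles as the input stream when decoding
   a->code = 0;
   a->range = 0xffffffff;
   for (i=0; i < 4; ++i) { // prime 'code' with the bytes the encoder flushed
      int c = (int) stb_getc(a->output);
      a->code = (a->code << 8) + (c >= 0 ? c : 0);
   }
}
#endif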
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Threads
//
#ifndef _WIN32
#ifdef STB_THREADS
#error "threads not implemented except for Windows"
#endif
#endif
// call this function to free any global variables for memory testing
STB_EXTERN void stb_thread_cleanup(void);
typedef void * (*stb_thread_func)(void *);
// do not rely on these types, this is an implementation detail.
// compare against STB_THREAD_NULL and STB_SEMAPHORE_NULL
typedef void *stb_thread;
typedef void *stb_semaphore;
typedef void *stb_mutex;
typedef struct stb__sync *stb_sync;
#define STB_SEMAPHORE_NULL NULL
#define STB_THREAD_NULL NULL
#define STB_MUTEX_NULL NULL
#define STB_SYNC_NULL NULL
// get the number of processors (limited to those in the affinity mask for this process).
STB_EXTERN int stb_processor_count(void);
// force to run on a single core -- needed for RDTSC to work, e.g. for iprof
STB_EXTERN void stb_force_uniprocessor(void);
// stb_work functions: queue up work to be done by some worker threads
// set number of threads to serve the queue; you can change this on the fly,
// but if you decrease it, it won't decrease until things currently on the
// queue are finished
STB_EXTERN void stb_work_numthreads(int n);
// set maximum number of units in the queue; you can only set this BEFORE running any work functions
STB_EXTERN int stb_work_maxunits(int n);
// enqueue some work to be done (can do this from any thread, or even from a piece of work);
// return value of f is stored in *return_code if non-NULL
STB_EXTERN int stb_work(stb_thread_func f, void *d, volatile void **return_code);
// as above, but stb_sync_reach is called on 'rel' after work is complete
STB_EXTERN int stb_work_reach(stb_thread_func f, void *d, volatile void **return_code, stb_sync rel);
// necessary to call this when using volatile to order writes/reads
STB_EXTERN void stb_barrier(void);
// support for independent queues with their own threads
typedef struct stb__workqueue stb_workqueue;
STB_EXTERN stb_workqueue*stb_workq_new(int numthreads, int max_units);
STB_EXTERN stb_workqueue*stb_workq_new_flags(int numthreads, int max_units, int no_add_mutex, int no_remove_mutex);
STB_EXTERN void stb_workq_delete(stb_workqueue *q);
STB_EXTERN void stb_workq_numthreads(stb_workqueue *q, int n);
STB_EXTERN int stb_workq(stb_workqueue *q, stb_thread_func f, void *d, volatile void **return_code);
STB_EXTERN int stb_workq_reach(stb_workqueue *q, stb_thread_func f, void *d, volatile void **return_code, stb_sync rel);
STB_EXTERN int stb_workq_length(stb_workqueue *q);
STB_EXTERN stb_thread stb_create_thread (stb_thread_func f, void *d);
STB_EXTERN stb_thread stb_create_thread2(stb_thread_func f, void *d, volatile void **return_code, stb_semaphore rel);
STB_EXTERN void stb_destroy_thread(stb_thread t);
STB_EXTERN stb_semaphore stb_sem_new(int max_val);
STB_EXTERN stb_semaphore stb_sem_new_extra(int max_val, int start_val);
STB_EXTERN void stb_sem_delete (stb_semaphore s);
STB_EXTERN void stb_sem_waitfor(stb_semaphore s);
STB_EXTERN void stb_sem_release(stb_semaphore s);
STB_EXTERN stb_mutex stb_mutex_new(void);
STB_EXTERN void stb_mutex_delete(stb_mutex m);
STB_EXTERN void stb_mutex_begin(stb_mutex m);
STB_EXTERN void stb_mutex_end(stb_mutex m);
STB_EXTERN stb_sync stb_sync_new(void);
STB_EXTERN void stb_sync_delete(stb_sync s);
STB_EXTERN int stb_sync_set_target(stb_sync s, int count);
STB_EXTERN void stb_sync_reach_and_wait(stb_sync s); // wait for 'target' reachers
STB_EXTERN int stb_sync_reach(stb_sync s);
typedef struct stb__threadqueue stb_threadqueue;
#define STB_THREADQ_DYNAMIC 0
STB_EXTERN stb_threadqueue *stb_threadq_new(int item_size, int num_items, int many_add, int many_remove);
STB_EXTERN void stb_threadq_delete(stb_threadqueue *tq);
STB_EXTERN int stb_threadq_get(stb_threadqueue *tq, void *output);
STB_EXTERN void stb_threadq_get_block(stb_threadqueue *tq, void *output);
STB_EXTERN int stb_threadq_add(stb_threadqueue *tq, void *input);
// can return FALSE if STB_THREADQ_DYNAMIC and attempt to grow fails
STB_EXTERN int stb_threadq_add_block(stb_threadqueue *tq, void *input);
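// Illustrative usage sketch (not part of the library): fanning n jobs out to
// the default work queue and waiting for all of them at an stb_sync merge
// point. 'example_job' and 'example_fan_out' are hypothetical names.
#if 0
static void *example_job(void *d) { /* ... process d ... */ return d; }
static void example_fan_out(void **items, int n)
{
   int i;
   stb_sync s = stb_sync_new();
   if (s == STB_SYNC_NULL) return;
   stb_sync_set_target(s, n + 1);   // n workers plus this thread
   for (i=0; i < n; ++i)
      stb_work_reach(example_job, items[i], NULL, s);
   stb_sync_reach_and_wait(s);      // blocks until every job has reached
   stb_sync_delete(s);
}
#endif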
#ifdef STB_THREADS
#ifdef STB_DEFINE
typedef struct
{
stb_thread_func f;
void *d;
volatile void **return_val;
stb_semaphore sem;
} stb__thread;
// this is initialized along all possible paths to create threads, therefore
// it's always initialized before any other threads are created, therefore
// it's free of races AS LONG AS you only create threads through stb_*
static stb_mutex stb__threadmutex, stb__workmutex;
static void stb__threadmutex_init(void)
{
   if (stb__threadmutex == STB_MUTEX_NULL) {
stb__threadmutex = stb_mutex_new();
stb__workmutex = stb_mutex_new();
}
}
#ifdef STB_THREAD_TEST
volatile float stb__t1=1, stb__t2;
static void stb__wait(int n)
{
float z = 0;
int i;
for (i=0; i < n; ++i)
z += 1 / (stb__t1+i);
stb__t2 = z;
}
#else
#define stb__wait(x)
#endif
#ifdef _WIN32
// avoid including windows.h -- note that our definitions aren't
// exactly the same (we don't define the security descriptor struct)
// so if you want to include windows.h, make sure you do it first.
#include <process.h>
#ifndef _WINDOWS_ // check windows.h guard
#define STB__IMPORT STB_EXTERN __declspec(dllimport)
#define STB__DW unsigned long
STB__IMPORT int __stdcall TerminateThread(void *, STB__DW);
STB__IMPORT void * __stdcall CreateSemaphoreA(void *sec, long,long,char*);
STB__IMPORT int __stdcall CloseHandle(void *);
STB__IMPORT STB__DW __stdcall WaitForSingleObject(void *, STB__DW);
STB__IMPORT int __stdcall ReleaseSemaphore(void *, long, long *);
STB__IMPORT void __stdcall Sleep(STB__DW);
#endif
// necessary to call this when using volatile to order writes/reads
void stb_barrier(void)
{
#ifdef MemoryBarrier
MemoryBarrier();
#else
long temp;
__asm xchg temp,eax;
#endif
}
static void stb__thread_run(void *t)
{
void *res;
stb__thread info = * (stb__thread *) t;
free(t);
res = info.f(info.d);
if (info.return_val)
*info.return_val = res;
if (info.sem != STB_SEMAPHORE_NULL)
stb_sem_release(info.sem);
}
static stb_thread stb_create_thread_raw(stb_thread_func f, void *d, volatile void **return_code, stb_semaphore rel)
{
#ifdef _MT
#if defined(STB_FASTMALLOC) && !defined(STB_FASTMALLOC_ITS_OKAY_I_ONLY_MALLOC_IN_ONE_THREAD)
stb_fatal("Error! Cannot use STB_FASTMALLOC with threads.\n");
return STB_THREAD_NULL;
#else
unsigned long id;
stb__thread *data = (stb__thread *) malloc(sizeof(*data));
if (!data) return NULL;
stb__threadmutex_init();
data->f = f;
data->d = d;
data->return_val = return_code;
data->sem = rel;
id = _beginthread(stb__thread_run, 0, data);
   if (id == (unsigned long) -1) return NULL; // _beginthread returns -1 on failure
return (void *) id;
#endif
#else
#ifdef STB_NO_STB_STRINGS
stb_fatal("Invalid compilation");
#else
stb_fatal("Must compile mult-threaded to use stb_thread/stb_work.");
#endif
return NULL;
#endif
}
// trivial win32 wrappers
void stb_destroy_thread(stb_thread t) { TerminateThread(t,0); }
stb_semaphore stb_sem_new(int maxv) {return CreateSemaphoreA(NULL,0,maxv,NULL); }
stb_semaphore stb_sem_new_extra(int maxv,int start){return CreateSemaphoreA(NULL,start,maxv,NULL); }
void stb_sem_delete(stb_semaphore s) { if (s != NULL) CloseHandle(s); }
void stb_sem_waitfor(stb_semaphore s) { WaitForSingleObject(s, 0xffffffff); } // INFINITE
void stb_sem_release(stb_semaphore s) { ReleaseSemaphore(s,1,NULL); }
static void stb__thread_sleep(int ms) { Sleep(ms); }
#ifndef _WINDOWS_
STB__IMPORT int __stdcall GetProcessAffinityMask(void *, STB__DW *, STB__DW *);
STB__IMPORT void * __stdcall GetCurrentProcess(void);
STB__IMPORT int __stdcall SetProcessAffinityMask(void *, STB__DW);
#endif
int stb_processor_count(void)
{
unsigned long proc,sys;
GetProcessAffinityMask(GetCurrentProcess(), &proc, &sys);
return stb_bitcount(proc);
}
void stb_force_uniprocessor(void)
{
unsigned long proc,sys;
GetProcessAffinityMask(GetCurrentProcess(), &proc, &sys);
if (stb_bitcount(proc) > 1) {
int z;
for (z=0; z < 32; ++z)
if (proc & (1 << z))
break;
if (z < 32) {
proc = 1 << z;
SetProcessAffinityMask(GetCurrentProcess(), proc);
}
}
}
#ifdef _WINDOWS_
#define STB_MUTEX_NATIVE
void *stb_mutex_new(void)
{
CRITICAL_SECTION *p = (CRITICAL_SECTION *) malloc(sizeof(*p));
if (p)
#if _WIN32_WINNT >= 0x0500
InitializeCriticalSectionAndSpinCount(p, 500);
#else
InitializeCriticalSection(p);
#endif
return p;
}
void stb_mutex_delete(void *p)
{
if (p) {
DeleteCriticalSection((CRITICAL_SECTION *) p);
free(p);
}
}
void stb_mutex_begin(void *p)
{
stb__wait(500);
if (p)
EnterCriticalSection((CRITICAL_SECTION *) p);
}
void stb_mutex_end(void *p)
{
if (p)
LeaveCriticalSection((CRITICAL_SECTION *) p);
stb__wait(500);
}
#endif // _WINDOWS_
#if 0
// for future reference,
// InterlockedCompareExchange for x86:
int cas64_mp(void * dest, void * xcmp, void * xxchg) {
__asm
{
mov esi, [xxchg] ; exchange
mov ebx, [esi + 0]
mov ecx, [esi + 4]
mov esi, [xcmp] ; comparand
mov eax, [esi + 0]
mov edx, [esi + 4]
mov edi, [dest] ; destination
lock cmpxchg8b [edi]
jz yyyy;
mov [esi + 0], eax;
mov [esi + 4], edx;
   yyyy:
      setz al;        // ZF still holds the cmpxchg8b result here
      movzx eax, al;  // widen to return 0/1 in eax
   };
}
inline unsigned __int64 _InterlockedCompareExchange64(volatile unsigned __int64 *dest
,unsigned __int64 exchange
,unsigned __int64 comperand)
{
//value returned in eax::edx
__asm {
lea esi,comperand;
lea edi,exchange;
mov eax,[esi];
mov edx,4[esi];
mov ebx,[edi];
mov ecx,4[edi];
mov esi,dest;
lock CMPXCHG8B [esi];
   }
}
#endif // #if 0
#endif // _WIN32
stb_thread stb_create_thread2(stb_thread_func f, void *d, volatile void **return_code, stb_semaphore rel)
{
return stb_create_thread_raw(f,d,return_code,rel);
}
stb_thread stb_create_thread(stb_thread_func f, void *d)
{
return stb_create_thread2(f,d,NULL,STB_SEMAPHORE_NULL);
}
// mutex implemented by wrapping semaphore
#ifndef STB_MUTEX_NATIVE
stb_mutex stb_mutex_new(void) { return stb_sem_new_extra(1,1); }
void stb_mutex_delete(stb_mutex m) { stb_sem_delete (m); }
void stb_mutex_begin(stb_mutex m) { stb__wait(500); if (m) stb_sem_waitfor(m); }
void stb_mutex_end(stb_mutex m) { if (m) stb_sem_release(m); stb__wait(500); }
#endif
// thread merge operation
struct stb__sync
{
int target; // target number of threads to hit it
int sofar; // total threads that hit it
int waiting; // total threads waiting
stb_mutex start; // mutex to prevent starting again before finishing previous
stb_mutex mutex; // mutex while tweaking state
stb_semaphore release; // semaphore wake up waiting threads
// we have to wake them up one at a time, rather than using a single release
// call, because win32 semaphores don't let you dynamically change the max count!
};
stb_sync stb_sync_new(void)
{
stb_sync s = (stb_sync) malloc(sizeof(*s));
if (!s) return s;
s->target = s->sofar = s->waiting = 0;
s->mutex = stb_mutex_new();
s->start = stb_mutex_new();
s->release = stb_sem_new(1);
   if (s->mutex == STB_MUTEX_NULL || s->release == STB_SEMAPHORE_NULL || s->start == STB_MUTEX_NULL) {
      stb_mutex_delete(s->mutex);
      stb_mutex_delete(s->start);
      stb_sem_delete(s->release);
      free(s);
      return NULL;
   }
return s;
}
void stb_sync_delete(stb_sync s)
{
if (s->waiting) {
// it's bad to delete while there are threads waiting!
// shall we wait for them to reach, or just bail? just bail
assert(0);
}
   stb_mutex_delete(s->mutex);
   stb_mutex_delete(s->start);
   stb_sem_delete(s->release); // 'release' is a semaphore, not a mutex
free(s);
}
int stb_sync_set_target(stb_sync s, int count)
{
// don't allow setting a target until the last one is fully released;
// note that this can lead to inefficient pipelining, and maybe we'd
// be better off ping-ponging between two internal syncs?
// I tried seeing how often this happened using TryEnterCriticalSection
// and could _never_ get it to happen in imv(stb), even with more threads
// than processors. So who knows!
stb_mutex_begin(s->start);
// this mutex is pointless, since it's not valid for threads
// to call reach() before anyone calls set_target() anyway
stb_mutex_begin(s->mutex);
assert(s->target == 0); // enforced by start mutex
s->target = count;
s->sofar = 0;
s->waiting = 0;
stb_mutex_end(s->mutex);
return STB_TRUE;
}
void stb__sync_release(stb_sync s)
{
if (s->waiting)
stb_sem_release(s->release);
else {
s->target = 0;
stb_mutex_end(s->start);
}
}
int stb_sync_reach(stb_sync s)
{
int n;
stb_mutex_begin(s->mutex);
assert(s->sofar < s->target);
n = ++s->sofar; // record this value to avoid possible race if we did 'return s->sofar';
if (s->sofar == s->target)
stb__sync_release(s);
stb_mutex_end(s->mutex);
return n;
}
void stb_sync_reach_and_wait(stb_sync s)
{
stb_mutex_begin(s->mutex);
assert(s->sofar < s->target);
++s->sofar;
if (s->sofar == s->target) {
stb__sync_release(s);
stb_mutex_end(s->mutex);
} else {
++s->waiting; // we're waiting, so one more waiter
stb_mutex_end(s->mutex); // release the mutex to other threads
stb_sem_waitfor(s->release); // wait for merge completion
stb_mutex_begin(s->mutex); // on merge completion, grab the mutex
--s->waiting; // we're done waiting
stb__sync_release(s); // restart the next waiter
stb_mutex_end(s->mutex); // and now we're done
// this ends the same as the first case, but it's a lot
// clearer to understand without sharing the code
}
}
struct stb__threadqueue
{
stb_mutex add, remove;
stb_semaphore nonempty, nonfull;
int head_blockers; // number of threads blocking--used to know whether to release(avail)
int tail_blockers;
int head, tail, array_size, growable;
int item_size;
char *data;
};
static int stb__tq_wrap(volatile stb_threadqueue *z, int p)
{
if (p == z->array_size)
return p - z->array_size;
else
return p;
}
int stb__threadq_get_raw(stb_threadqueue *tq2, void *output, int block)
{
volatile stb_threadqueue *tq = (volatile stb_threadqueue *) tq2;
if (tq->head == tq->tail && !block) return 0;
stb_mutex_begin(tq->remove);
while (tq->head == tq->tail) {
if (!block) {
stb_mutex_end(tq->remove);
return 0;
}
++tq->head_blockers;
stb_mutex_end(tq->remove);
stb_sem_waitfor(tq->nonempty);
stb_mutex_begin(tq->remove);
--tq->head_blockers;
}
memcpy(output, tq->data + tq->head*tq->item_size, tq->item_size);
stb_barrier();
tq->head = stb__tq_wrap(tq, tq->head+1);
stb_sem_release(tq->nonfull);
if (tq->head_blockers) // can't check if actually non-empty due to race?
stb_sem_release(tq->nonempty); // if there are other blockers, wake one
stb_mutex_end(tq->remove);
return STB_TRUE;
}
int stb__threadq_grow(volatile stb_threadqueue *tq)
{
int n;
char *p;
assert(tq->remove != STB_MUTEX_NULL); // must have this to allow growth!
stb_mutex_begin(tq->remove);
n = tq->array_size * 2;
p = (char *) realloc(tq->data, n * tq->item_size);
   if (p == NULL) {
      stb_mutex_end(tq->remove);
      return STB_FALSE; // the caller still holds and releases tq->add
   }
if (tq->tail < tq->head) {
memcpy(p + tq->array_size * tq->item_size, p, tq->tail * tq->item_size);
tq->tail += tq->array_size;
}
tq->data = p;
tq->array_size = n;
stb_mutex_end(tq->remove);
return STB_TRUE;
}
int stb__threadq_add_raw(stb_threadqueue *tq2, void *input, int block)
{
int tail,pos;
volatile stb_threadqueue *tq = (volatile stb_threadqueue *) tq2;
stb_mutex_begin(tq->add);
for(;;) {
pos = tq->tail;
tail = stb__tq_wrap(tq, pos+1);
if (tail != tq->head) break;
// full
if (tq->growable) {
if (!stb__threadq_grow(tq)) {
stb_mutex_end(tq->add);
return STB_FALSE; // out of memory
}
} else if (!block) {
stb_mutex_end(tq->add);
return STB_FALSE;
} else {
++tq->tail_blockers;
stb_mutex_end(tq->add);
stb_sem_waitfor(tq->nonfull);
stb_mutex_begin(tq->add);
--tq->tail_blockers;
}
}
memcpy(tq->data + tq->item_size * pos, input, tq->item_size);
stb_barrier();
tq->tail = tail;
stb_sem_release(tq->nonempty);
if (tq->tail_blockers) // can't check if actually non-full due to race?
stb_sem_release(tq->nonfull);
stb_mutex_end(tq->add);
return STB_TRUE;
}
int stb_threadq_length(stb_threadqueue *tq2)
{
int a,b,n;
volatile stb_threadqueue *tq = (volatile stb_threadqueue *) tq2;
stb_mutex_begin(tq->add);
a = tq->head;
b = tq->tail;
n = tq->array_size;
stb_mutex_end(tq->add);
if (a > b) b += n;
return b-a;
}
int stb_threadq_get(stb_threadqueue *tq, void *output)
{
return stb__threadq_get_raw(tq, output, STB_FALSE);
}
void stb_threadq_get_block(stb_threadqueue *tq, void *output)
{
stb__threadq_get_raw(tq, output, STB_TRUE);
}
int stb_threadq_add(stb_threadqueue *tq, void *input)
{
return stb__threadq_add_raw(tq, input, STB_FALSE);
}
int stb_threadq_add_block(stb_threadqueue *tq, void *input)
{
return stb__threadq_add_raw(tq, input, STB_TRUE);
}
void stb_threadq_delete(stb_threadqueue *tq)
{
if (tq) {
free(tq->data);
stb_mutex_delete(tq->add);
stb_mutex_delete(tq->remove);
stb_sem_delete(tq->nonempty);
stb_sem_delete(tq->nonfull);
free(tq);
}
}
#define STB_THREADQUEUE_DYNAMIC 0
stb_threadqueue *stb_threadq_new(int item_size, int num_items, int many_add, int many_remove)
{
int error=0;
stb_threadqueue *tq = (stb_threadqueue *) malloc(sizeof(*tq));
if (tq == NULL) return NULL;
if (num_items == STB_THREADQUEUE_DYNAMIC) {
tq->growable = STB_TRUE;
num_items = 32;
} else
tq->growable = STB_FALSE;
tq->item_size = item_size;
tq->array_size = num_items+1;
tq->add = tq->remove = STB_MUTEX_NULL;
tq->nonempty = tq->nonfull = STB_SEMAPHORE_NULL;
tq->data = NULL;
if (many_add)
{ tq->add = stb_mutex_new(); if (tq->add == STB_MUTEX_NULL) goto error; }
if (many_remove || tq->growable)
{ tq->remove = stb_mutex_new(); if (tq->remove == STB_MUTEX_NULL) goto error; }
tq->nonempty = stb_sem_new(1); if (tq->nonempty == STB_SEMAPHORE_NULL) goto error;
tq->nonfull = stb_sem_new(1); if (tq->nonfull == STB_SEMAPHORE_NULL) goto error;
tq->data = (char *) malloc(tq->item_size * tq->array_size);
if (tq->data == NULL) goto error;
tq->head = tq->tail = 0;
tq->head_blockers = tq->tail_blockers = 0;
return tq;
error:
stb_threadq_delete(tq);
return NULL;
}
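// Illustrative usage sketch (not part of the library): a fixed-size queue of
// ints drained by one blocking consumer thread; pass STB_TRUE for many_add /
// many_remove when several threads share either end. Hypothetical names.
#if 0
static void *example_consumer(void *p)
{
   stb_threadqueue *q = (stb_threadqueue *) p;
   for (;;) {
      int item;
      stb_threadq_get_block(q, &item); // blocks until an item arrives
      if (item < 0) return NULL;       // negative item acts as a poison pill
   }
}
static void example_producer(void)
{
   int i, stop = -1;
   stb_threadqueue *q = stb_threadq_new(sizeof(int), 64, STB_TRUE, STB_TRUE);
   stb_thread t;
   if (!q) return;
   t = stb_create_thread(example_consumer, q);
   for (i=0; i < 100; ++i)
      stb_threadq_add_block(q, &i);
   stb_threadq_add_block(q, &stop);   // tell the consumer to exit
   (void) t; // joining is left out; see stb_sync for a completion handshake
}
#endif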
typedef struct
{
stb_thread_func f;
void *d;
volatile void **retval;
stb_sync sync;
} stb__workinfo;
//static volatile stb__workinfo *stb__work;
struct stb__workqueue
{
int numthreads;
stb_threadqueue *tq;
};
static stb_workqueue *stb__work_global;
static void *stb__thread_workloop(void *p)
{
volatile stb_workqueue *q = (volatile stb_workqueue *) p;
for(;;) {
void *z;
stb__workinfo w;
stb_threadq_get_block(q->tq, &w);
if (w.f == NULL) // null work is a signal to end the thread
return NULL;
z = w.f(w.d);
if (w.retval) { stb_barrier(); *w.retval = z; }
if (w.sync != STB_SYNC_NULL) stb_sync_reach(w.sync);
}
}
stb_workqueue *stb_workq_new(int num_threads, int max_units)
{
return stb_workq_new_flags(num_threads, max_units, 0,0);
}
stb_workqueue *stb_workq_new_flags(int numthreads, int max_units, int no_add_mutex, int no_remove_mutex)
{
stb_workqueue *q = (stb_workqueue *) malloc(sizeof(*q));
if (q == NULL) return NULL;
q->tq = stb_threadq_new(sizeof(stb__workinfo), max_units, !no_add_mutex, !no_remove_mutex);
if (q->tq == NULL) { free(q); return NULL; }
q->numthreads = 0;
stb_workq_numthreads(q, numthreads);
return q;
}
void stb_workq_delete(stb_workqueue *q)
{
while (stb_workq_length(q) != 0)
stb__thread_sleep(1);
stb_threadq_delete(q->tq);
free(q);
}
static int stb__work_maxitems = STB_THREADQUEUE_DYNAMIC;
static void stb_work_init(int num_threads)
{
if (stb__work_global == NULL) {
stb__threadmutex_init();
stb_mutex_begin(stb__workmutex);
stb_barrier();
if (*(stb_workqueue * volatile *) &stb__work_global == NULL)
stb__work_global = stb_workq_new(num_threads, stb__work_maxitems);
stb_mutex_end(stb__workmutex);
}
}
static int stb__work_raw(stb_workqueue *q, stb_thread_func f, void *d, volatile void **return_code, stb_sync rel)
{
stb__workinfo w;
if (q == NULL) {
stb_work_init(1);
q = stb__work_global;
}
w.f = f;
w.d = d;
w.retval = return_code;
w.sync = rel;
return stb_threadq_add(q->tq, &w);
}
int stb_workq_length(stb_workqueue *q)
{
return stb_threadq_length(q->tq);
}
int stb_workq(stb_workqueue *q, stb_thread_func f, void *d, volatile void **return_code)
{
if (f == NULL) return 0;
return stb_workq_reach(q, f, d, return_code, NULL);
}
int stb_workq_reach(stb_workqueue *q, stb_thread_func f, void *d, volatile void **return_code, stb_sync rel)
{
if (f == NULL) return 0;
return stb__work_raw(q, f, d, return_code, rel);
}
static void stb__workq_numthreads(stb_workqueue *q, int n)
{
while (q->numthreads < n) {
stb_create_thread(stb__thread_workloop, q);
++q->numthreads;
}
while (q->numthreads > n) {
stb__work_raw(q, NULL, NULL, NULL, NULL);
--q->numthreads;
}
}
void stb_workq_numthreads(stb_workqueue *q, int n)
{
stb_mutex_begin(stb__threadmutex);
stb__workq_numthreads(q,n);
stb_mutex_end(stb__threadmutex);
}
int stb_work_maxunits(int n)
{
if (stb__work_global == NULL) {
stb__work_maxitems = n;
stb_work_init(1);
}
return stb__work_maxitems;
}
int stb_work(stb_thread_func f, void *d, volatile void **return_code)
{
return stb_workq(stb__work_global, f,d,return_code);
}
int stb_work_reach(stb_thread_func f, void *d, volatile void **return_code, stb_sync rel)
{
return stb_workq_reach(stb__work_global, f,d,return_code,rel);
}
void stb_work_numthreads(int n)
{
if (stb__work_global == NULL)
stb_work_init(n);
else
stb_workq_numthreads(stb__work_global, n);
}
#endif // STB_DEFINE
//////////////////////////////////////////////////////////////////////////////
//
// Background disk I/O
//
//
#define STB_BGIO_READ_ALL (-1)
STB_EXTERN int stb_bgio_read (char *filename, int offset, int len, stb_uchar **result, int *olen);
STB_EXTERN int stb_bgio_readf (FILE *f , int offset, int len, stb_uchar **result, int *olen);
STB_EXTERN int stb_bgio_read_to (char *filename, int offset, int len, stb_uchar *buffer, int *olen);
STB_EXTERN int stb_bgio_readf_to(FILE *f , int offset, int len, stb_uchar *buffer, int *olen);
typedef struct
{
int have_data;
int is_valid;
int is_dir;
time_t filetime;
stb_int64 filesize;
} stb_bgstat;
STB_EXTERN int stb_bgio_stat (char *filename, stb_bgstat *result);
#ifdef STB_DEFINE
static stb_workqueue *stb__diskio;
static stb_mutex stb__diskio_mutex;
void stb_thread_cleanup(void)
{
if (stb__work_global) stb_workq_delete(stb__work_global); stb__work_global = NULL;
if (stb__threadmutex) stb_mutex_delete(stb__threadmutex); stb__threadmutex = NULL;
if (stb__workmutex) stb_mutex_delete(stb__workmutex); stb__workmutex = NULL;
if (stb__diskio) stb_workq_delete(stb__diskio); stb__diskio = NULL;
if (stb__diskio_mutex)stb_mutex_delete(stb__diskio_mutex);stb__diskio_mutex= NULL;
}
typedef struct
{
char *filename;
FILE *f;
int offset;
int len;
stb_bgstat *stat_out;
stb_uchar *output;
stb_uchar **result;
int *len_output;
int *flag;
} stb__disk_command;
#define STB__MAX_DISK_COMMAND 100
static stb__disk_command stb__dc_queue[STB__MAX_DISK_COMMAND];
static int stb__dc_offset;
void stb__io_init(void)
{
if (!stb__diskio) {
stb__threadmutex_init();
stb_mutex_begin(stb__threadmutex);
stb_barrier();
      if (*(stb_workqueue * volatile *) &stb__diskio == NULL) {
stb__diskio_mutex = stb_mutex_new();
// use many threads so OS can try to schedule seeks
stb__diskio = stb_workq_new_flags(16,STB__MAX_DISK_COMMAND,STB_FALSE,STB_FALSE);
}
stb_mutex_end(stb__threadmutex);
}
}
static void * stb__io_error(stb__disk_command *dc)
{
if (dc->len_output) *dc->len_output = 0;
if (dc->result) *dc->result = NULL;
if (dc->flag) *dc->flag = -1;
return NULL;
}
static void * stb__io_task(void *p)
{
stb__disk_command *dc = (stb__disk_command *) p;
int len;
FILE *f;
stb_uchar *buf;
if (dc->stat_out) {
struct _stati64 s;
if (!_stati64(dc->filename, &s)) {
dc->stat_out->filesize = s.st_size;
dc->stat_out->filetime = s.st_mtime;
dc->stat_out->is_dir = s.st_mode & _S_IFDIR;
dc->stat_out->is_valid = (s.st_mode & _S_IFREG) || dc->stat_out->is_dir;
} else
dc->stat_out->is_valid = 0;
stb_barrier();
dc->stat_out->have_data = 1;
free(dc->filename);
return 0;
}
if (dc->f) {
      #ifdef _WIN32
f = _fdopen(_dup(_fileno(dc->f)), "rb");
#else
f = fdopen(dup(fileno(dc->f)), "rb");
#endif
if (!f)
return stb__io_error(dc);
} else {
f = fopen(dc->filename, "rb");
free(dc->filename);
if (!f)
return stb__io_error(dc);
}
len = dc->len;
if (len < 0) {
fseek(f, 0, SEEK_END);
len = ftell(f) - dc->offset;
}
if (fseek(f, dc->offset, SEEK_SET)) {
fclose(f);
return stb__io_error(dc);
}
if (dc->output)
buf = dc->output;
else {
buf = (stb_uchar *) malloc(len);
if (buf == NULL) {
fclose(f);
return stb__io_error(dc);
}
}
len = fread(buf, 1, len, f);
fclose(f);
if (dc->len_output) *dc->len_output = len;
if (dc->result) *dc->result = buf;
if (dc->flag) *dc->flag = 1;
return NULL;
}
int stb__io_add(char *fname, FILE *f, int off, int len, stb_uchar *out, stb_uchar **result, int *olen, int *flag, stb_bgstat *stat)
{
int res;
stb__io_init();
// do memory allocation outside of mutex
if (fname) fname = stb_p_strdup(fname);
stb_mutex_begin(stb__diskio_mutex);
{
stb__disk_command *dc = &stb__dc_queue[stb__dc_offset];
dc->filename = fname;
dc->f = f;
dc->offset = off;
dc->len = len;
dc->output = out;
dc->result = result;
dc->len_output = olen;
dc->flag = flag;
dc->stat_out = stat;
res = stb_workq(stb__diskio, stb__io_task, dc, NULL);
if (res)
stb__dc_offset = (stb__dc_offset + 1 == STB__MAX_DISK_COMMAND ? 0 : stb__dc_offset+1);
}
stb_mutex_end(stb__diskio_mutex);
return res;
}
int stb_bgio_read(char *filename, int offset, int len, stb_uchar **result, int *olen)
{
return stb__io_add(filename,NULL,offset,len,NULL,result,olen,NULL,NULL);
}
int stb_bgio_readf(FILE *f, int offset, int len, stb_uchar **result, int *olen)
{
return stb__io_add(NULL,f,offset,len,NULL,result,olen,NULL,NULL);
}
int stb_bgio_read_to(char *filename, int offset, int len, stb_uchar *buffer, int *olen)
{
return stb__io_add(filename,NULL,offset,len,buffer,NULL,olen,NULL,NULL);
}
int stb_bgio_readf_to(FILE *f, int offset, int len, stb_uchar *buffer, int *olen)
{
return stb__io_add(NULL,f,offset,len,buffer,NULL,olen,NULL,NULL);
}
STB_EXTERN int stb_bgio_stat (char *filename, stb_bgstat *result)
{
result->have_data = 0;
return stb__io_add(filename,NULL,0,0,0,NULL,0,NULL, result);
}
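// Illustrative usage sketch (not part of the library): issuing a background
// stat and polling for completion; 'have_data' is written last, behind
// stb_barrier, so it is safe to spin on. 'example_bgstat' is hypothetical.
#if 0
static void example_bgstat(char *filename)
{
   stb_bgstat s;
   if (!stb_bgio_stat(filename, &s)) return;
   while (!s.have_data)
      stb__thread_sleep(1);
   if (s.is_valid)
      printf("%s: %d bytes\n", filename, (int) s.filesize);
}
#endif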
#endif
#endif
//////////////////////////////////////////////////////////////////////////////
//
// Fast malloc implementation
//
// This is a clone of TCMalloc, but without the thread support.
// 1. large objects are allocated directly, page-aligned
// 2. small objects are allocated in homogeneous heaps, 0 overhead
//
// We keep an allocation table for pages a la TCMalloc. This would
// require 4MB for the entire address space, but we only allocate
// the parts that are in use. The overhead from using homogeneous heaps
// everywhere is 3MB. (That is, if you allocate 1 object of each size,
// you'll use 3MB.)
#if defined(STB_DEFINE) && ((defined(_WIN32) && !defined(_M_AMD64)) || defined(STB_FASTMALLOC))
#ifdef _WIN32
#ifndef _WINDOWS_
#ifndef STB__IMPORT
#define STB__IMPORT STB_EXTERN __declspec(dllimport)
#define STB__DW unsigned long
#endif
STB__IMPORT void * __stdcall VirtualAlloc(void *p, unsigned long size, unsigned long type, unsigned long protect);
STB__IMPORT int __stdcall VirtualFree(void *p, unsigned long size, unsigned long freetype);
#endif
#define stb__alloc_pages_raw(x) (stb_uint32) VirtualAlloc(NULL, (x), 0x3000, 0x04)
#define stb__dealloc_pages_raw(p) VirtualFree((void *) p, 0, 0x8000)
#else
#error "Platform not currently supported"
#endif
typedef struct stb__span
{
int start, len;
struct stb__span *next, *prev;
void *first_free;
unsigned short list; // 1..256 free; 257..511 sizeclass; 0=large block
short allocations; // # outstanding allocations for sizeclass
} stb__span; // 24
static stb__span **stb__span_for_page;
static int stb__firstpage, stb__lastpage;
static void stb__update_page_range(int first, int last)
{
stb__span **sfp;
int i, f,l;
if (first >= stb__firstpage && last <= stb__lastpage) return;
if (stb__span_for_page == NULL) {
f = first;
l = f+stb_max(last-f, 16384);
l = stb_min(l, 1<<20);
} else if (last > stb__lastpage) {
f = stb__firstpage;
l = f + (stb__lastpage - f) * 2;
l = stb_clamp(last, l,1<<20);
} else {
l = stb__lastpage;
f = l - (l - stb__firstpage) * 2;
f = stb_clamp(f, 0,first);
}
sfp = (stb__span **) stb__alloc_pages_raw(sizeof(void *) * (l-f));
for (i=f; i < stb__firstpage; ++i) sfp[i - f] = NULL;
for ( ; i < stb__lastpage ; ++i) sfp[i - f] = stb__span_for_page[i - stb__firstpage];
for ( ; i < l ; ++i) sfp[i - f] = NULL;
if (stb__span_for_page) stb__dealloc_pages_raw(stb__span_for_page);
stb__firstpage = f;
stb__lastpage = l;
stb__span_for_page = sfp;
}
static stb__span *stb__span_free=NULL;
static stb__span *stb__span_first, *stb__span_end;
static stb__span *stb__span_alloc(void)
{
stb__span *s = stb__span_free;
if (s)
stb__span_free = s->next;
else {
if (!stb__span_first) {
stb__span_first = (stb__span *) stb__alloc_pages_raw(65536);
if (stb__span_first == NULL) return NULL;
stb__span_end = stb__span_first + (65536 / sizeof(stb__span));
}
s = stb__span_first++;
if (stb__span_first == stb__span_end) stb__span_first = NULL;
}
return s;
}
static stb__span *stb__spanlist[512];
static void stb__spanlist_unlink(stb__span *s)
{
if (s->prev)
s->prev->next = s->next;
else {
int n = s->list;
assert(stb__spanlist[n] == s);
stb__spanlist[n] = s->next;
}
if (s->next)
s->next->prev = s->prev;
s->next = s->prev = NULL;
s->list = 0;
}
static void stb__spanlist_add(int n, stb__span *s)
{
s->list = n;
s->next = stb__spanlist[n];
s->prev = NULL;
stb__spanlist[n] = s;
if (s->next) s->next->prev = s;
}
#define stb__page_shift 12
#define stb__page_size (1 << stb__page_shift)
#define stb__page_number(x) ((x) >> stb__page_shift)
#define stb__page_address(x) ((x) << stb__page_shift)
static void stb__set_span_for_page(stb__span *s)
{
int i;
for (i=0; i < s->len; ++i)
stb__span_for_page[s->start + i - stb__firstpage] = s;
}
static stb__span *stb__coalesce(stb__span *a, stb__span *b)
{
assert(a->start + a->len == b->start);
if (a->list) stb__spanlist_unlink(a);
if (b->list) stb__spanlist_unlink(b);
a->len += b->len;
b->len = 0;
b->next = stb__span_free;
stb__span_free = b;
stb__set_span_for_page(a);
return a;
}
static void stb__free_span(stb__span *s)
{
stb__span *n = NULL;
if (s->start > stb__firstpage) {
n = stb__span_for_page[s->start-1 - stb__firstpage];
if (n && n->allocations == -2 && n->start + n->len == s->start) s = stb__coalesce(n,s);
}
if (s->start + s->len < stb__lastpage) {
n = stb__span_for_page[s->start + s->len - stb__firstpage];
if (n && n->allocations == -2 && s->start + s->len == n->start) s = stb__coalesce(s,n);
}
s->allocations = -2;
stb__spanlist_add(s->len > 256 ? 256 : s->len, s);
}
static stb__span *stb__alloc_pages(int num)
{
stb__span *s = stb__span_alloc();
int p;
if (!s) return NULL;
p = stb__alloc_pages_raw(num << stb__page_shift);
if (p == 0) { s->next = stb__span_free; stb__span_free = s; return 0; }
assert(stb__page_address(stb__page_number(p)) == p);
p = stb__page_number(p);
stb__update_page_range(p, p+num);
s->start = p;
s->len = num;
s->next = NULL;
s->prev = NULL;
stb__set_span_for_page(s);
return s;
}
static stb__span *stb__alloc_span(int pagecount)
{
int i;
stb__span *p = NULL;
for(i=pagecount; i < 256; ++i)
if (stb__spanlist[i]) {
p = stb__spanlist[i];
break;
}
if (!p) {
p = stb__spanlist[256];
while (p && p->len < pagecount)
p = p->next;
}
if (!p) {
p = stb__alloc_pages(pagecount < 16 ? 16 : pagecount);
if (p == NULL) return 0;
} else
stb__spanlist_unlink(p);
if (p->len > pagecount) {
stb__span *q = stb__span_alloc();
if (q) {
q->start = p->start + pagecount;
q->len = p->len - pagecount;
p->len = pagecount;
for (i=0; i < q->len; ++i)
stb__span_for_page[q->start+i - stb__firstpage] = q;
stb__spanlist_add(q->len > 256 ? 256 : q->len, q);
}
}
return p;
}
#define STB__MAX_SMALL_SIZE 32768
#define STB__MAX_SIZE_CLASSES 256
static unsigned char stb__class_base[32];
static unsigned char stb__class_shift[32];
static unsigned char stb__pages_for_class[STB__MAX_SIZE_CLASSES];
static int stb__size_for_class[STB__MAX_SIZE_CLASSES];
stb__span *stb__get_nonempty_sizeclass(int c)
{
int s = c + 256, i, size, tsize; // remap to span-list index
char *z;
void *q;
stb__span *p = stb__spanlist[s];
if (p) {
if (p->first_free) return p; // fast path: it's in the first one in list
for (p=p->next; p; p=p->next)
if (p->first_free) {
// move to front for future queries
stb__spanlist_unlink(p);
stb__spanlist_add(s, p);
return p;
}
}
// no non-empty ones, so allocate a new one
p = stb__alloc_span(stb__pages_for_class[c]);
if (!p) return NULL;
// create the free list up front
size = stb__size_for_class[c];
tsize = stb__pages_for_class[c] << stb__page_shift;
i = 0;
z = (char *) stb__page_address(p->start);
q = NULL;
while (i + size <= tsize) {
* (void **) z = q; q = z;
z += size;
i += size;
}
p->first_free = q;
p->allocations = 0;
stb__spanlist_add(s,p);
return p;
}
static int stb__sizeclass(size_t sz)
{
int z = stb_log2_floor(sz); // -1 below to group e.g. 13,14,15,16 correctly
return stb__class_base[z] + ((sz-1) >> stb__class_shift[z]);
}
static void stb__init_sizeclass(void)
{
int i, size, overhead;
int align_shift = 2; // allow 4-byte and 12-byte blocks as well, vs. TCMalloc
int next_class = 1;
int last_log = 0;
for (i = 0; i < align_shift; i++) {
stb__class_base [i] = next_class;
stb__class_shift[i] = align_shift;
}
for (size = 1 << align_shift; size <= STB__MAX_SMALL_SIZE; size += 1 << align_shift) {
i = stb_log2_floor(size);
if (i > last_log) {
if (size == 16) ++align_shift; // switch from 4-byte to 8-byte alignment
else if (size >= 128 && align_shift < 8) ++align_shift;
stb__class_base[i] = next_class - ((size-1) >> align_shift);
stb__class_shift[i] = align_shift;
last_log = i;
}
stb__size_for_class[next_class++] = size;
}
for (i=1; i <= STB__MAX_SMALL_SIZE; ++i)
assert(i <= stb__size_for_class[stb__sizeclass(i)]);
overhead = 0;
for (i = 1; i < next_class; i++) {
int s = stb__size_for_class[i];
size = stb__page_size;
while (size % s > size >> 3)
size += stb__page_size;
stb__pages_for_class[i] = (unsigned char) (size >> stb__page_shift);
overhead += size;
}
assert(overhead < (4 << 20)); // make sure it's under 4MB of overhead
}
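// Illustrative sketch (not part of the library): dumping part of the
// size-class table, mirroring the invariant asserted above (every request
// maps to a class at least that large). Hypothetical name.
#if 0
static void example_dump_sizeclasses(void)
{
   int sz;
   if (stb__size_for_class[1] == 0) stb__init_sizeclass();
   for (sz = 1; sz <= STB__MAX_SMALL_SIZE; sz <<= 1)
      printf("request %5d -> class %3d (block size %5d)\n",
             sz, stb__sizeclass(sz), stb__size_for_class[stb__sizeclass(sz)]);
}
#endif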
#ifdef STB_DEBUG
#define stb__smemset(a,b,c) memset((void *) a, b, c)
#elif defined(STB_FASTMALLOC_INIT)
#define stb__smemset(a,b,c) memset((void *) a, b, c)
#else
#define stb__smemset(a,b,c)
#endif
void *stb_smalloc(size_t sz)
{
stb__span *s;
if (sz == 0) return NULL;
if (stb__size_for_class[1] == 0) stb__init_sizeclass();
if (sz > STB__MAX_SMALL_SIZE) {
s = stb__alloc_span((sz + stb__page_size - 1) >> stb__page_shift);
if (s == NULL) return NULL;
s->list = 0;
s->next = s->prev = NULL;
s->allocations = -32767;
stb__smemset(stb__page_address(s->start), 0xcd, (sz+3)&~3);
return (void *) stb__page_address(s->start);
} else {
void *p;
int c = stb__sizeclass(sz);
s = stb__spanlist[256+c];
if (!s || !s->first_free)
s = stb__get_nonempty_sizeclass(c);
if (s == NULL) return NULL;
p = s->first_free;
s->first_free = * (void **) p;
++s->allocations;
stb__smemset(p,0xcd, sz);
return p;
}
}
int stb_ssize(void *p)
{
stb__span *s;
if (p == NULL) return 0;
s = stb__span_for_page[stb__page_number((stb_uint) p) - stb__firstpage];
if (s->list >= 256) {
return stb__size_for_class[s->list - 256];
} else {
assert(s->list == 0);
return s->len << stb__page_shift;
}
}
void stb_sfree(void *p)
{
stb__span *s;
if (p == NULL) return;
s = stb__span_for_page[stb__page_number((stb_uint) p) - stb__firstpage];
if (s->list >= 256) {
stb__smemset(p, 0xfe, stb__size_for_class[s->list-256]);
* (void **) p = s->first_free;
s->first_free = p;
if (--s->allocations == 0) {
stb__spanlist_unlink(s);
stb__free_span(s);
}
} else {
assert(s->list == 0);
stb__smemset(p, 0xfe, stb_ssize(p));
stb__free_span(s);
}
}
void *stb_srealloc(void *p, size_t sz)
{
size_t cur_size;
if (p == NULL) return stb_smalloc(sz);
if (sz == 0) { stb_sfree(p); return NULL; }
cur_size = stb_ssize(p);
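   // reallocate only when growing, or when shrinking to at most half the
   // current size; grow requests are rounded up to double the current size
   // so repeated small grows cost amortized O(1) copies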
if (sz > cur_size || sz <= (cur_size >> 1)) {
void *q;
if (sz > cur_size && sz < (cur_size << 1)) sz = cur_size << 1;
q = stb_smalloc(sz); if (q == NULL) return NULL;
memcpy(q, p, sz < cur_size ? sz : cur_size);
stb_sfree(p);
return q;
}
return p;
}
void *stb_scalloc(size_t n, size_t sz)
{
void *p;
if (n == 0 || sz == 0) return NULL;
   if (stb_log2_ceil(n) + stb_log2_ceil(sz) >= 32) return NULL; // reject products that could overflow 32 bits
p = stb_smalloc(n*sz);
if (p) memset(p, 0, n*sz);
return p;
}
char *stb_sstrdup(char *s)
{
int n = strlen(s);
char *p = (char *) stb_smalloc(n+1);
if (p) stb_p_strcpy_s(p,n+1,s);
return p;
}
#endif // STB_DEFINE
//////////////////////////////////////////////////////////////////////////////
//
// Source code constants
//
// This is a trivial system that lets you specify constants in source code
// and then change those constants while the program is running.
//
// Note that you can't wrap the #defines, because we need to know their
// names. So we provide a pre-wrapped version without 'STB_' for convenience;
// to request it, #define STB_CONVENIENT_H, yielding:
// KI -- integer
// KU -- unsigned integer
// KF -- float
// KD -- double
// KS -- string constant
//
// Defaults to functioning in debug builds, not in release builds.
// To force on, define STB_ALWAYS_H.
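//
// A minimal usage sketch (assumes STB_DEBUG or STB_ALWAYS_H so the constants
// are live; the variable names are illustrative):
//
//     float speed = STB_F(3.5);   // edit the 3.5 in this file while the
//     int   lives = STB_I(3);     // program runs; the next evaluation of
//                                 // the macro picks up the saved value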
#ifdef STB_CONVENIENT_H
#define KI(x) STB_I(x)
#define KU(x) STB_UI(x)
#define KF(x) STB_F(x)
#define KD(x) STB_D(x)
#define KS(x) STB_S(x)
#endif
STB_EXTERN void stb_source_path(char *str);
#ifdef STB_DEFINE
char *stb__source_path;
void stb_source_path(char *path)
{
stb__source_path = path;
}
char *stb__get_sourcefile_path(char *file)
{
static char filebuf[512];
if (stb__source_path) {
stb_p_sprintf(filebuf stb_p_size(sizeof(filebuf)), "%s/%s", stb__source_path, file);
if (stb_fexists(filebuf)) return filebuf;
}
if (stb_fexists(file)) return file;
stb_p_sprintf(filebuf stb_p_size(sizeof(filebuf)), "../%s", file);
   if (stb_fexists(filebuf)) return filebuf;
return file;
}
#endif
#define STB_F(x) ((float) STB_H(x))
#define STB_UI(x) ((unsigned int) STB_I(x))
#if !defined(STB_DEBUG) && !defined(STB_ALWAYS_H)
#define STB_D(x) ((double) (x))
#define STB_I(x) ((int) (x))
#define STB_S(x) ((char *) (x))
#else
#define STB_D(x) stb__double_constant(__FILE__, __LINE__-1, (x))
#define STB_I(x) stb__int_constant(__FILE__, __LINE__-1, (x))
#define STB_S(x) stb__string_constant(__FILE__, __LINE__-1, (x))
STB_EXTERN double stb__double_constant(char *file, int line, double x);
STB_EXTERN int stb__int_constant(char *file, int line, int x);
STB_EXTERN char * stb__string_constant(char *file, int line, char *str);
#ifdef STB_DEFINE
enum
{
STB__CTYPE_int,
STB__CTYPE_uint,
STB__CTYPE_float,
STB__CTYPE_double,
STB__CTYPE_string,
};
typedef struct
{
int line;
int type;
union {
int ival;
double dval;
char *sval;
};
} stb__Entry;
typedef struct
{
stb__Entry *entries;
char *filename;
time_t timestamp;
char **file_data;
int file_len;
unsigned short *line_index;
} stb__FileEntry;
static void stb__constant_parse(stb__FileEntry *f, int i)
{
char *s;
int n;
if (!stb_arr_valid(f->entries, i)) return;
n = f->entries[i].line;
if (n >= f->file_len) return;
s = f->file_data[n];
switch (f->entries[i].type) {
case STB__CTYPE_float:
while (*s) {
if (!strncmp(s, "STB_D(", 6)) { s+=6; goto matched_float; }
if (!strncmp(s, "STB_F(", 6)) { s+=6; goto matched_float; }
if (!strncmp(s, "KD(", 3)) { s+=3; goto matched_float; }
if (!strncmp(s, "KF(", 3)) { s+=3; goto matched_float; }
++s;
}
break;
matched_float:
f->entries[i].dval = strtod(s, NULL);
break;
case STB__CTYPE_int:
while (*s) {
if (!strncmp(s, "STB_I(", 6)) { s+=6; goto matched_int; }
if (!strncmp(s, "STB_UI(", 7)) { s+=7; goto matched_int; }
if (!strncmp(s, "KI(", 3)) { s+=3; goto matched_int; }
if (!strncmp(s, "KU(", 3)) { s+=3; goto matched_int; }
++s;
}
break;
matched_int: {
int neg=0;
s = stb_skipwhite(s);
while (*s == '-') { neg = !neg; s = stb_skipwhite(s+1); } // handle '- - 5', pointlessly
if (s[0] == '0' && tolower(s[1]) == 'x')
f->entries[i].ival = strtol(s, NULL, 16);
else if (s[0] == '0')
f->entries[i].ival = strtol(s, NULL, 8);
else
f->entries[i].ival = strtol(s, NULL, 10);
if (neg) f->entries[i].ival = -f->entries[i].ival;
break;
}
case STB__CTYPE_string:
// @TODO
break;
}
}
static stb_sdict *stb__constant_file_hash;
stb__Entry *stb__constant_get_entry(char *filename, int line, int type)
{
int i;
stb__FileEntry *f;
if (stb__constant_file_hash == NULL)
stb__constant_file_hash = stb_sdict_new(STB_TRUE);
f = (stb__FileEntry*) stb_sdict_get(stb__constant_file_hash, filename);
if (f == NULL) {
char *s = stb__get_sourcefile_path(filename);
if (s == NULL || !stb_fexists(s)) return 0;
f = (stb__FileEntry *) malloc(sizeof(*f));
f->timestamp = stb_ftimestamp(s);
f->file_data = stb_stringfile(s, &f->file_len);
f->filename = stb_p_strdup(s); // cache the full path
f->entries = NULL;
f->line_index = 0;
stb_arr_setlen(f->line_index, f->file_len);
memset(f->line_index, 0xff, stb_arr_storage(f->line_index));
} else {
time_t t = stb_ftimestamp(f->filename);
if (f->timestamp != t) {
f->timestamp = t;
free(f->file_data);
f->file_data = stb_stringfile(f->filename, &f->file_len);
stb_arr_setlen(f->line_index, f->file_len);
for (i=0; i < stb_arr_len(f->entries); ++i)
stb__constant_parse(f, i);
}
}
if (line >= f->file_len) return 0;
if (f->line_index[line] >= stb_arr_len(f->entries)) {
// need a new entry
int n = stb_arr_len(f->entries);
stb__Entry e;
e.line = line;
if (line < f->file_len)
f->line_index[line] = n;
e.type = type;
stb_arr_push(f->entries, e);
stb__constant_parse(f, n);
}
return f->entries + f->line_index[line];
}
double stb__double_constant(char *file, int line, double x)
{
stb__Entry *e = stb__constant_get_entry(file, line, STB__CTYPE_float);
if (!e) return x;
return e->dval;
}
int stb__int_constant(char *file, int line, int x)
{
stb__Entry *e = stb__constant_get_entry(file, line, STB__CTYPE_int);
if (!e) return x;
return e->ival;
}
char * stb__string_constant(char *file, int line, char *x)
{
stb__Entry *e = stb__constant_get_entry(file, line, STB__CTYPE_string);
if (!e) return x;
return e->sval;
}
#endif // STB_DEFINE
#endif // !STB_DEBUG && !STB_ALWAYS_H
#ifdef STB_STUA
#error "STUA is no longer supported"
//////////////////////////////////////////////////////////////////////////
//
// stua: little scripting language
//
// define STB_STUA to compile it
//
// see http://nothings.org/stb/stb_stua.html for documentation
//
// basic parsing model:
//
// lexical analysis
// use stb_lex() to parse tokens; keywords get their own tokens
//
// parsing:
// recursive descent parser. too much of a hassle to make an unambiguous
// LR(1) grammar, and one-pass generation is clumsier (recursive descent
// makes it easier to e.g. compile nested functions). on the other hand,
// dictionary syntax required hackery to get extra lookahead.
//
// codegen:
// output into an evaluation tree, using array indices as 'pointers'
//
// run:
// traverse the tree; support for 'break/continue/return' is tricky
//
// garbage collection:
//       mark and sweep; an explicit stack holds roots outside the global scope
typedef stb_int32 stua_obj;
typedef stb_idict stua_dict;
STB_EXTERN void stua_run_script(char *s);
STB_EXTERN void stua_uninit(void);
extern stua_obj stua_globals;
STB_EXTERN double stua_number(stua_obj z);
STB_EXTERN stua_obj stua_getnil(void);
STB_EXTERN stua_obj stua_getfalse(void);
STB_EXTERN stua_obj stua_gettrue(void);
STB_EXTERN stua_obj stua_string(char *z);
STB_EXTERN stua_obj stua_make_number(double d);
STB_EXTERN stua_obj stua_box(int type, void *data, int size);
enum
{
STUA_op_negate=129,
STUA_op_shl, STUA_op_ge,
STUA_op_shr, STUA_op_le,
STUA_op_shru,
STUA_op_last
};
#define STUA_NO_VALUE 2 // equivalent to a tagged NULL
STB_EXTERN stua_obj (*stua_overload)(int op, stua_obj a, stua_obj b, stua_obj c);
STB_EXTERN stua_obj stua_error(char *err, ...);
STB_EXTERN stua_obj stua_pushroot(stua_obj o);
STB_EXTERN void stua_poproot ( void );
#ifdef STB_DEFINE
// INTERPRETER
// 31-bit floating point implementation
// force the (1 << 30) bit (2nd highest bit) to be zero by re-biasing the exponent;
// then shift and set the bottom bit
static stua_obj stu__floatp(float *f)
{
unsigned int n = *(unsigned int *) f;
unsigned int e = n & (0xff << 23);
assert(sizeof(int) == 4 && sizeof(float) == 4);
if (!e) // zero?
n = n; // no change
else if (e < (64 << 23)) // underflow of the packed encoding?
n = (n & 0x80000000); // signed 0
else if (e > (190 << 23)) // overflow of the encoding? (or INF or NAN)
n = (n & 0x80000000) + (127 << 23); // new INF encoding
else
n -= 0x20000000;
// now we need to shuffle the bits so that the spare bit is at the bottom
assert((n & 0x40000000) == 0);
return (n & 0x80000000) + (n << 1) + 1;
}
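// Worked example (illustrative): 1.0f has bits 0x3f800000; its biased
// exponent 127 lies inside [64,190], so n -= 0x20000000 gives 0x1f800000,
// and shifting left one and setting the low bit yields 0x3f000001, whose
// bottom bits 01 are a float tag under stu__tag() below.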
static unsigned char stu__getfloat_addend[256];
static float stu__getfloat(stua_obj v)
{
unsigned int n;
unsigned int e = ((unsigned int) v) >> 24;
n = (int) v >> 1; // preserve high bit
n += stu__getfloat_addend[e] << 24;
return *(float *) &n;
}
stua_obj stua_float(float f)
{
return stu__floatp(&f);
}
static void stu__float_init(void)
{
int i;
stu__getfloat_addend[0] = 0; // do nothing to biased exponent of 0
for (i=1; i < 127; ++i)
stu__getfloat_addend[i] = 32; // undo the -0x20000000
stu__getfloat_addend[127] = 64; // convert packed INF to INF (0x3f -> 0x7f)
for (i=0; i < 128; ++i) // for signed floats, remove the bit we just shifted down
stu__getfloat_addend[128+i] = stu__getfloat_addend[i] - 64;
}
// Tagged data type implementation
// TAGS:
#define stu__int_tag 0 // of 2 bits // 00 int
#define stu__float_tag 1 // of 1 bit // 01 float
#define stu__ptr_tag 2 // of 2 bits // 10 boxed
// 11 float
#define stu__tag(x) ((x) & 3)
#define stu__number(x) (stu__tag(x) != stu__ptr_tag)
#define stu__isint(x) (stu__tag(x) == stu__int_tag)
#define stu__int(x) ((x) >> 2)
#define stu__float(x) (stu__getfloat(x))
#define stu__makeint(v) ((v)*4+stu__int_tag)
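// Worked example (illustrative): stu__makeint(5) yields 5*4+0 == 20, whose
// low two bits 00 mark it as an int, and stu__int(20) shifts the tag back
// off to recover 5. Boxed objects are at least 4-byte aligned, so a pointer
// can carry the 10 tag in its low bits without losing information.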
// boxed data, and tag support for boxed data
enum
{
STU___float = 1, STU___int = 2,
STU___number = 3, STU___string = 4,
STU___function = 5, STU___dict = 6,
STU___boolean = 7, STU___error = 8,
};
// boxed data
#define STU__BOX short type, stua_gc
typedef struct stu__box { STU__BOX; } stu__box;
stu__box stu__nil = { 0, 1 };
stu__box stu__true = { STU___boolean, 1, };
stu__box stu__false = { STU___boolean, 1, };
#define stu__makeptr(v) ((stua_obj) (v) + stu__ptr_tag)
#define stua_nil stu__makeptr(&stu__nil)
#define stua_true stu__makeptr(&stu__true)
#define stua_false stu__makeptr(&stu__false)
stua_obj stua_getnil(void) { return stua_nil; }
stua_obj stua_getfalse(void) { return stua_false; }
stua_obj stua_gettrue(void) { return stua_true; }
#define stu__ptr(x) ((stu__box *) ((x) - stu__ptr_tag))
#define stu__checkt(t,x) ((t) == STU___float ? ((x) & 1) == stu__float_tag : \
(t) == STU___int ? stu__isint(x) : \
(t) == STU___number ? stu__number(x) : \
stu__tag(x) == stu__ptr_tag && stu__ptr(x)->type == (t))
typedef struct
{
STU__BOX;
void *ptr;
} stu__wrapper;
// implementation of a 'function' or function + closure
typedef struct stu__func
{
STU__BOX;
stua_obj closure_source; // 0 - regular function; 4 - C function
// if closure, pointer to source function
union {
stua_obj closure_data; // partial-application data
void *store; // pointer to free that holds 'code'
stua_obj (*func)(stua_dict *context);
} f;
// closure ends here
short *code;
int num_param;
stua_obj *param; // list of parameter strings
} stu__func;
// apply this to 'short *code' to get at data
#define stu__const(f) ((stua_obj *) (f))
static void stu__free_func(stu__func *f)
{
if (f->closure_source == 0) free(f->f.store);
if ((stb_uint) f->closure_source <= 4) free(f->param);
free(f);
}
#define stu__pd(x) ((stua_dict *) stu__ptr(x))
#define stu__pw(x) ((stu__wrapper *) stu__ptr(x))
#define stu__pf(x) ((stu__func *) stu__ptr(x))
// garbage-collection
static stu__box ** stu__gc_ptrlist;
static stua_obj * stu__gc_root_stack;
stua_obj stua_pushroot(stua_obj o) { stb_arr_push(stu__gc_root_stack, o); return o; }
void stua_poproot ( void ) { stb_arr_pop(stu__gc_root_stack); }
static stb_sdict *stu__strings;
static void stu__mark(stua_obj z)
{
int i;
stu__box *p = stu__ptr(z);
if (p->stua_gc == 1) return; // already marked
assert(p->stua_gc == 0);
p->stua_gc = 1;
switch(p->type) {
case STU___function: {
stu__func *f = (stu__func *) p;
if ((stb_uint) f->closure_source <= 4) {
if (f->closure_source == 0) {
for (i=1; i <= f->code[0]; ++i)
if (!stu__number(((stua_obj *) f->code)[-i]))
stu__mark(((stua_obj *) f->code)[-i]);
}
for (i=0; i < f->num_param; ++i)
stu__mark(f->param[i]);
} else {
stu__mark(f->closure_source);
stu__mark(f->f.closure_data);
}
break;
}
case STU___dict: {
stua_dict *e = (stua_dict *) p;
for (i=0; i < e->limit; ++i)
if (e->table[i].k != STB_IEMPTY && e->table[i].k != STB_IDEL) {
if (!stu__number(e->table[i].k)) stu__mark((int) e->table[i].k);
if (!stu__number(e->table[i].v)) stu__mark((int) e->table[i].v);
}
break;
}
}
}
static int stu__num_allocs, stu__size_allocs;
static stua_obj stu__flow_val = stua_nil; // used for break & return
static void stua_gc(int force)
{
int i;
if (!force && stu__num_allocs == 0 && stu__size_allocs == 0) return;
stu__num_allocs = stu__size_allocs = 0;
//printf("[gc]\n");
// clear marks
for (i=0; i < stb_arr_len(stu__gc_ptrlist); ++i)
stu__gc_ptrlist[i]->stua_gc = 0;
   // mark everything reachable
stu__nil.stua_gc = stu__true.stua_gc = stu__false.stua_gc = 1;
stu__mark(stua_globals);
if (!stu__number(stu__flow_val))
stu__mark(stu__flow_val);
for (i=0; i < stb_arr_len(stu__gc_root_stack); ++i)
if (!stu__number(stu__gc_root_stack[i]))
stu__mark(stu__gc_root_stack[i]);
// sweep unreachables
for (i=0; i < stb_arr_len(stu__gc_ptrlist);) {
stu__box *z = stu__gc_ptrlist[i];
if (!z->stua_gc) {
switch (z->type) {
case STU___dict: stb_idict_destroy((stua_dict *) z); break;
case STU___error: free(((stu__wrapper *) z)->ptr); break;
case STU___string: stb_sdict_remove(stu__strings, (char*) ((stu__wrapper *) z)->ptr, NULL); free(z); break;
case STU___function: stu__free_func((stu__func *) z); break;
}
// swap in the last item over this, and repeat
z = stb_arr_pop(stu__gc_ptrlist);
stu__gc_ptrlist[i] = z;
} else
++i;
}
}
static void stu__consider_gc(stua_obj x)
{
if (stu__size_allocs < 100000) return;
if (stu__num_allocs < 10 && stu__size_allocs < 1000000) return;
stb_arr_push(stu__gc_root_stack, x);
stua_gc(0);
stb_arr_pop(stu__gc_root_stack);
}
static stua_obj stu__makeobj(int type, void *data, int size, int safe_to_gc)
{
stua_obj x = stu__makeptr(data);
((stu__box *) data)->type = type;
stb_arr_push(stu__gc_ptrlist, (stu__box *) data);
stu__num_allocs += 1;
stu__size_allocs += size;
if (safe_to_gc) stu__consider_gc(x);
return x;
}
stua_obj stua_box(int type, void *data, int size)
{
stu__wrapper *p = (stu__wrapper *) malloc(sizeof(*p));
p->ptr = data;
return stu__makeobj(type, p, size, 0);
}
// a stu string can be compared directly for equality, because all strings
// are interned in a hash table
stua_obj stua_string(char *z)
{
stu__wrapper *b = (stu__wrapper *) stb_sdict_get(stu__strings, z);
if (b == NULL) {
int o = stua_box(STU___string, NULL, strlen(z) + sizeof(*b));
b = stu__pw(o);
stb_sdict_add(stu__strings, z, b);
stb_sdict_getkey(stu__strings, z, (char **) &b->ptr);
}
return stu__makeptr(b);
}
// stb_obj dictionary is just an stb_idict
static void stu__set(stua_dict *d, stua_obj k, stua_obj v)
{ if (stb_idict_set(d, k, v)) stu__size_allocs += 8; }
static stua_obj stu__get(stua_dict *d, stua_obj k, stua_obj res)
{
stb_idict_get_flag(d, k, &res);
return res;
}
static stua_obj make_string(char *z, int len)
{
stua_obj s;
char temp[256], *q = (char *) stb_temp(temp, len+1), *p = q;
while (len > 0) {
if (*z == '\\') {
if (z[1] == 'n') *p = '\n';
else if (z[1] == 'r') *p = '\r';
else if (z[1] == 't') *p = '\t';
else *p = z[1];
p += 1; z += 2; len -= 2;
} else {
*p++ = *z++; len -= 1;
}
}
*p = 0;
s = stua_string(q);
stb_tempfree(temp, q);
return s;
}
enum token_names
{
T__none=128,
ST_shl = STUA_op_shl, ST_ge = STUA_op_ge,
ST_shr = STUA_op_shr, ST_le = STUA_op_le,
ST_shru = STUA_op_shru, STU__negate = STUA_op_negate,
ST__reset_numbering = STUA_op_last,
ST_white,
ST_id, ST_float, ST_decimal, ST_hex, ST_char,ST_string, ST_number,
   // make sure the keywords come _AFTER_ ST_id, so stb_lex prefers them
ST_if, ST_while, ST_for, ST_eq, ST_nil,
ST_then, ST_do, ST_in, ST_ne, ST_true,
ST_else, ST_break, ST_let, ST_and, ST_false,
ST_elseif, ST_continue, ST_into, ST_or, ST_repeat,
ST_end, ST_as, ST_return, ST_var, ST_func,
ST_catch, ST__frame,
ST__max_terminals,
STU__defaultparm, STU__seq,
};
static stua_dict * stu__globaldict;
stua_obj stua_globals;
static enum
{
FLOW_normal, FLOW_continue, FLOW_break, FLOW_return, FLOW_error,
} stu__flow;
stua_obj stua_error(char *z, ...)
{
stua_obj a;
char temp[4096], *x;
va_list v; va_start(v,z); vsprintf(temp, z, v); va_end(v);
x = stb_p_strdup(temp);
a = stua_box(STU___error, x, strlen(x));
stu__flow = FLOW_error;
stu__flow_val = a;
return stua_nil;
}
double stua_number(stua_obj z)
{
return stu__tag(z) == stu__int_tag ? stu__int(z) : stu__float(z);
}
stua_obj stua_make_number(double d)
{
double e = floor(d);
if (e == d && e < (1 << 29) && e >= -(1 << 29))
return stu__makeint((int) e);
else
return stua_float((float) d);
}
stua_obj (*stua_overload)(int op, stua_obj a, stua_obj b, stua_obj c) = NULL;
static stua_obj stu__op(int op, stua_obj a, stua_obj b, stua_obj c)
{
stua_obj r = STUA_NO_VALUE;
if (op == '+') {
if (stu__checkt(STU___string, a) && stu__checkt(STU___string, b)) {
;// @TODO: string concatenation
} else if (stu__checkt(STU___function, a) && stu__checkt(STU___dict, b)) {
stu__func *f = (stu__func *) malloc(12);
assert(offsetof(stu__func, code)==12);
f->closure_source = a;
f->f.closure_data = b;
return stu__makeobj(STU___function, f, 16, 1);
}
}
if (stua_overload) r = stua_overload(op,a,b,c);
if (stu__flow != FLOW_error && r == STUA_NO_VALUE)
stua_error("Typecheck for operator %d", op), r=stua_nil;
return r;
}
#define STU__EVAL2(a,b) \
a = stu__eval(stu__f[n+1]); if (stu__flow) break; stua_pushroot(a); \
b = stu__eval(stu__f[n+2]); stua_poproot(); if (stu__flow) break;
#define STU__FB(op) \
STU__EVAL2(a,b) \
if (stu__tag(a) == stu__int_tag && stu__tag(b) == stu__int_tag) \
return ((a) op (b)); \
if (stu__number(a) && stu__number(b)) \
return stua_make_number(stua_number(a) op stua_number(b)); \
return stu__op(stu__f[n], a,b, stua_nil)
#define STU__F(op) \
STU__EVAL2(a,b) \
if (stu__number(a) && stu__number(b)) \
return stua_make_number(stua_number(a) op stua_number(b)); \
return stu__op(stu__f[n], a,b, stua_nil)
#define STU__I(op) \
STU__EVAL2(a,b) \
if (stu__tag(a) == stu__int_tag && stu__tag(b) == stu__int_tag) \
return stu__makeint(stu__int(a) op stu__int(b)); \
return stu__op(stu__f[n], a,b, stua_nil)
#define STU__C(op) \
STU__EVAL2(a,b) \
if (stu__number(a) && stu__number(b)) \
return (stua_number(a) op stua_number(b)) ? stua_true : stua_false; \
return stu__op(stu__f[n], a,b, stua_nil)
#define STU__CE(op) \
STU__EVAL2(a,b) \
return (a op b) ? stua_true : stua_false
static short *stu__f;
static stua_obj stu__f_obj;
static stua_dict *stu__c;
static stua_obj stu__funceval(stua_obj fo, stua_obj co);
static int stu__cond(stua_obj x)
{
if (stu__flow) return 0;
if (!stu__checkt(STU___boolean, x))
x = stu__op('!', x, stua_nil, stua_nil);
if (x == stua_true ) return 1;
if (x == stua_false) return 0;
stu__flow = FLOW_error;
return 0;
}
// tail-call recursion is eliminated manually (the goto below) to make debugging complex scripts easier
#define TAILCALL(x) n = (x); goto top;
static stua_obj stu__eval(int n)
{
top:
if (stu__flow >= FLOW_return) return stua_nil; // is this needed?
if (n < 0) return stu__const(stu__f)[n];
assert(n != 0 && n != 1);
switch (stu__f[n]) {
stua_obj a,b,c;
case ST_catch: a = stu__eval(stu__f[n+1]);
if (stu__flow == FLOW_error) { a=stu__flow_val; stu__flow = FLOW_normal; }
return a;
case ST_var: b = stu__eval(stu__f[n+2]); if (stu__flow) break;
stu__set(stu__c, stu__const(stu__f)[stu__f[n+1]], b);
return b;
case STU__seq: stu__eval(stu__f[n+1]); if (stu__flow) break;
TAILCALL(stu__f[n+2]);
case ST_if: if (!stu__cond(stu__eval(stu__f[n+1]))) return stua_nil;
TAILCALL(stu__f[n+2]);
case ST_else: a = stu__cond(stu__eval(stu__f[n+1]));
TAILCALL(stu__f[n + 2 + !a]);
#define STU__HANDLE_BREAK \
if (stu__flow >= FLOW_break) { \
if (stu__flow == FLOW_break) { \
a = stu__flow_val; \
stu__flow = FLOW_normal; \
stu__flow_val = stua_nil; \
return a; \
} \
return stua_nil; \
}
case ST_as: stu__eval(stu__f[n+3]);
STU__HANDLE_BREAK
// fallthrough!
case ST_while: a = stua_nil; stua_pushroot(a);
while (stu__cond(stu__eval(stu__f[n+1]))) {
stua_poproot();
a = stu__eval(stu__f[n+2]);
STU__HANDLE_BREAK
stu__flow = FLOW_normal; // clear 'continue' flag
stua_pushroot(a);
if (stu__f[n+3]) stu__eval(stu__f[n+3]);
STU__HANDLE_BREAK
stu__flow = FLOW_normal; // clear 'continue' flag
}
stua_poproot();
return a;
case ST_break: stu__flow = FLOW_break; stu__flow_val = stu__eval(stu__f[n+1]); break;
case ST_continue:stu__flow = FLOW_continue; break;
case ST_return: stu__flow = FLOW_return; stu__flow_val = stu__eval(stu__f[n+1]); break;
case ST__frame: return stu__f_obj;
case '[': STU__EVAL2(a,b);
if (stu__checkt(STU___dict, a))
return stu__get(stu__pd(a), b, stua_nil);
return stu__op(stu__f[n], a, b, stua_nil);
case '=': a = stu__eval(stu__f[n+2]); if (stu__flow) break;
n = stu__f[n+1];
if (stu__f[n] == ST_id) {
if (!stb_idict_update(stu__c, stu__const(stu__f)[stu__f[n+1]], a))
if (!stb_idict_update(stu__globaldict, stu__const(stu__f)[stu__f[n+1]], a))
return stua_error("Assignment to undefined variable");
} else if (stu__f[n] == '[') {
stua_pushroot(a);
b = stu__eval(stu__f[n+1]); if (stu__flow) { stua_poproot(); break; }
stua_pushroot(b);
c = stu__eval(stu__f[n+2]); stua_poproot(); stua_poproot();
if (stu__flow) break;
if (!stu__checkt(STU___dict, b)) return stua_nil;
stu__set(stu__pd(b), c, a);
} else {
return stu__op(stu__f[n], stu__eval(n), a, stua_nil);
}
return a;
case STU__defaultparm:
a = stu__eval(stu__f[n+2]);
stu__flow = FLOW_normal;
if (stb_idict_add(stu__c, stu__const(stu__f)[stu__f[n+1]], a))
stu__size_allocs += 8;
return stua_nil;
case ST_id: a = stu__get(stu__c, stu__const(stu__f)[stu__f[n+1]], STUA_NO_VALUE); // try local variable
                   return a != STUA_NO_VALUE       // else try global variable
? a : stu__get(stu__globaldict, stu__const(stu__f)[stu__f[n+1]], stua_nil);
case STU__negate:a = stu__eval(stu__f[n+1]); if (stu__flow) break;
return stu__isint(a) ? -a : stu__op(stu__f[n], a, stua_nil, stua_nil);
case '~': a = stu__eval(stu__f[n+1]); if (stu__flow) break;
return stu__isint(a) ? (~a)&~3 : stu__op(stu__f[n], a, stua_nil, stua_nil);
case '!': a = stu__eval(stu__f[n+1]); if (stu__flow) break;
a = stu__cond(a); if (stu__flow) break;
return a ? stua_true : stua_false;
case ST_eq: STU__CE(==); case ST_le: STU__C(<=); case '<': STU__C(<);
case ST_ne: STU__CE(!=); case ST_ge: STU__C(>=); case '>': STU__C(>);
case '+' : STU__FB(+); case '*': STU__F(*); case '&': STU__I(&); case ST_shl: STU__I(<<);
case '-' : STU__FB(-); case '/': STU__F(/); case '|': STU__I(|); case ST_shr: STU__I(>>);
case '%': STU__I(%); case '^': STU__I(^);
case ST_shru: STU__EVAL2(a,b);
if (stu__tag(a) == stu__int_tag && stu__tag(b) == stu__int_tag)
return stu__makeint((unsigned) stu__int(a) >> stu__int(b));
return stu__op(stu__f[n], a,b, stua_nil);
case ST_and: a = stu__eval(stu__f[n+1]); b = stu__cond(a); if (stu__flow) break;
return a ? stu__eval(stu__f[n+2]) : a;
case ST_or : a = stu__eval(stu__f[n+1]); b = stu__cond(a); if (stu__flow) break;
return a ? b : stu__eval(stu__f[n+2]);
case'(':case':': STU__EVAL2(a,b);
if (!stu__checkt(STU___function, a))
return stu__op(stu__f[n], a,b, stua_nil);
if (!stu__checkt(STU___dict, b))
return stua_nil;
if (stu__f[n] == ':')
b = stu__makeobj(STU___dict, stb_idict_copy(stu__pd(b)), stb_idict_memory_usage(stu__pd(b)), 0);
a = stu__funceval(a,b);
return a;
case '{' : {
stua_dict *d;
d = stb_idict_new_size(stu__f[n+1] > 40 ? 64 : 16);
if (d == NULL)
return stua_nil; // breakpoint fodder
c = stu__makeobj(STU___dict, d, 32, 1);
stua_pushroot(c);
a = stu__f[n+1];
for (b=0; b < a; ++b) {
stua_obj x = stua_pushroot(stu__eval(stu__f[n+2 + b*2 + 0]));
stua_obj y = stu__eval(stu__f[n+2 + b*2 + 1]);
stua_poproot();
if (stu__flow) { stua_poproot(); return stua_nil; }
stu__set(d, x, y);
}
stua_poproot();
return c;
}
default: if (stu__f[n] < 0) return stu__const(stu__f)[stu__f[n]];
assert(0); /* NOTREACHED */ // internal error!
}
return stua_nil;
}
int stb__stua_nesting;
static stua_obj stu__funceval(stua_obj fo, stua_obj co)
{
stu__func *f = stu__pf(fo);
stua_dict *context = stu__pd(co);
int i,j;
stua_obj p;
short *tf = stu__f; // save previous function
stua_dict *tc = stu__c;
if (stu__flow == FLOW_error) return stua_nil;
assert(stu__flow == FLOW_normal);
stua_pushroot(fo);
stua_pushroot(co);
stu__consider_gc(stua_nil);
while ((stb_uint) f->closure_source > 4) {
// add data from closure to context
stua_dict *e = (stua_dict *) stu__pd(f->f.closure_data);
for (i=0; i < e->limit; ++i)
if (e->table[i].k != STB_IEMPTY && e->table[i].k != STB_IDEL)
if (stb_idict_add(context, e->table[i].k, e->table[i].v))
stu__size_allocs += 8;
// use add so if it's already defined, we don't override it; that way
// explicit parameters win over applied ones, and most recent applications
// win over previous ones
f = stu__pf(f->closure_source);
}
for (j=0, i=0; i < f->num_param; ++i)
// if it doesn't already exist, add it from the numbered parameters
if (stb_idict_add(context, f->param[i], stu__get(context, stu__int(j), stua_nil)))
++j;
// @TODO: if (stu__get(context, stu__int(f->num_param+1)) != STUA_NO_VALUE) // error: too many parameters
// @TODO: ditto too few parameters
if (f->closure_source == 4)
p = f->f.func(context);
else {
stu__f = f->code, stu__c = context;
stu__f_obj = co;
++stb__stua_nesting;
if (stu__f[1])
p = stu__eval(stu__f[1]);
else
p = stua_nil;
--stb__stua_nesting;
stu__f = tf, stu__c = tc; // restore previous function
if (stu__flow == FLOW_return) {
stu__flow = FLOW_normal;
p = stu__flow_val;
stu__flow_val = stua_nil;
}
}
stua_poproot();
stua_poproot();
return p;
}
// Parser
static int stu__tok;
static stua_obj stu__tokval;
static char *stu__curbuf, *stu__bufstart;
static stb_matcher *stu__lex_matcher;
static unsigned char stu__prec[ST__max_terminals], stu__end[ST__max_terminals];
static void stu__nexttoken(void)
{
int len;
retry:
stu__tok = stb_lex(stu__lex_matcher, stu__curbuf, &len);
if (stu__tok == 0)
return;
switch(stu__tok) {
case ST_white : stu__curbuf += len; goto retry;
case T__none : stu__tok = *stu__curbuf; break;
case ST_string: stu__tokval = make_string(stu__curbuf+1, len-2); break;
case ST_id : stu__tokval = make_string(stu__curbuf, len); break;
case ST_hex : stu__tokval = stu__makeint(strtol(stu__curbuf+2,NULL,16)); stu__tok = ST_number; break;
case ST_decimal: stu__tokval = stu__makeint(strtol(stu__curbuf ,NULL,10)); stu__tok = ST_number; break;
case ST_float : stu__tokval = stua_float((float) atof(stu__curbuf)) ; stu__tok = ST_number; break;
case ST_char : stu__tokval = stu__curbuf[2] == '\\' ? stu__curbuf[3] : stu__curbuf[2];
if (stu__curbuf[3] == 't') stu__tokval = '\t';
if (stu__curbuf[3] == 'n') stu__tokval = '\n';
if (stu__curbuf[3] == 'r') stu__tokval = '\r';
stu__tokval = stu__makeint(stu__tokval);
stu__tok = ST_number;
break;
}
stu__curbuf += len;
}
static struct { int stu__tok; char *regex; } stu__lexemes[] =
{
ST_white , "([ \t\n\r]|/\\*(.|\n)*\\*/|//[^\r\n]*([\r\n]|$))+",
ST_id , "[_a-zA-Z][_a-zA-Z0-9]*",
ST_hex , "0x[0-9a-fA-F]+",
ST_decimal, "[0-9]+[0-9]*",
ST_float , "[0-9]+\\.?[0-9]*([eE][-+]?[0-9]+)?",
ST_float , "\\.[0-9]+([eE][-+]?[0-9]+)?",
ST_char , "c'(\\\\.|[^\\'])'",
ST_string , "\"(\\\\.|[^\\\"\n\r])*\"",
ST_string , "\'(\\\\.|[^\\\'\n\r])*\'",
#define stua_key4(a,b,c,d) ST_##a, #a, ST_##b, #b, ST_##c, #c, ST_##d, #d,
stua_key4(if,then,else,elseif) stua_key4(while,do,for,in)
stua_key4(func,var,let,break) stua_key4(nil,true,false,end)
stua_key4(return,continue,as,repeat) stua_key4(_frame,catch,catch,catch)
ST_shl, "<<", ST_and, "&&", ST_eq, "==", ST_ge, ">=",
ST_shr, ">>", ST_or , "||", ST_ne, "!=", ST_le, "<=",
ST_shru,">>>", ST_into, "=>",
T__none, ".",
};
typedef struct
{
stua_obj *data; // constants being compiled
short *code; // code being compiled
stua_dict *locals;
short *non_local_refs;
} stu__comp_func;
static stu__comp_func stu__pfunc;
static stu__comp_func *func_stack = NULL;
static void stu__push_func_comp(void)
{
stb_arr_push(func_stack, stu__pfunc);
stu__pfunc.data = NULL;
stu__pfunc.code = NULL;
stu__pfunc.locals = stb_idict_new_size(16);
stu__pfunc.non_local_refs = NULL;
stb_arr_push(stu__pfunc.code, 0); // number of data items
stb_arr_push(stu__pfunc.code, 1); // starting execution address
}
static void stu__pop_func_comp(void)
{
stb_arr_free(stu__pfunc.code);
stb_arr_free(stu__pfunc.data);
stb_idict_destroy(stu__pfunc.locals);
stb_arr_free(stu__pfunc.non_local_refs);
stu__pfunc = stb_arr_pop(func_stack);
}
// if an id is a reference to an outer lexical scope, this
// function returns the "name" of it, and updates the stack
// structures to make sure the names are propagated in.
static int stu__nonlocal_id(stua_obj var_obj)
{
stua_obj dummy, var = var_obj;
int i, n = stb_arr_len(func_stack), j,k;
if (stb_idict_get_flag(stu__pfunc.locals, var, &dummy)) return 0;
for (i=n-1; i > 1; --i) {
if (stb_idict_get_flag(func_stack[i].locals, var, &dummy))
break;
}
   if (i <= 1) return 0; // global scope, not an outer function's local
j = i; // need to access variable from j'th frame
for (i=0; i < stb_arr_len(stu__pfunc.non_local_refs); ++i)
if (stu__pfunc.non_local_refs[i] == j) return j-n;
stb_arr_push(stu__pfunc.non_local_refs, j-n);
// now make sure all the parents propagate it down
for (k=n-1; k > 1; --k) {
if (j-k >= 0) return j-n; // comes direct from this parent
for(i=0; i < stb_arr_len(func_stack[k].non_local_refs); ++i)
if (func_stack[k].non_local_refs[i] == j-k)
return j-n;
stb_arr_push(func_stack[k].non_local_refs, j-k);
}
assert (k != 1);
return j-n;
}
static int stu__off(void) { return stb_arr_len(stu__pfunc.code); }
static void stu__cc(int a)
{
assert(a >= -2000 && a < 5000);
stb_arr_push(stu__pfunc.code, a);
}
static int stu__cc1(int a) { stu__cc(a); return stu__off()-1; }
static int stu__cc2(int a, int b) { stu__cc(a); stu__cc(b); return stu__off()-2; }
static int stu__cc3(int a, int b, int c) {
if (a == '=') assert(c != 0);
stu__cc(a); stu__cc(b); stu__cc(c); return stu__off()-3; }
static int stu__cc4(int a, int b, int c, int d) { stu__cc(a); stu__cc(b); stu__cc(c); stu__cc(d); return stu__off()-4; }
static int stu__cdv(stua_obj p)
{
int i;
assert(p != STUA_NO_VALUE);
for (i=0; i < stb_arr_len(stu__pfunc.data); ++i)
if (stu__pfunc.data[i] == p)
break;
if (i == stb_arr_len(stu__pfunc.data))
stb_arr_push(stu__pfunc.data, p);
return ~i;
}
static int stu__cdt(void)
{
int z = stu__cdv(stu__tokval);
stu__nexttoken();
return z;
}
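// sequence two compiled expressions: 0 means "no code", so it acts as the
// identity and a STU__seq node is emitted only when both sides are real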
static int stu__seq(int a, int b)
{
return !a ? b : !b ? a : stu__cc3(STU__seq, a,b);
}
static char stu__comp_err_str[1024];
static int stu__comp_err_line;
static int stu__err(char *str, ...)
{
va_list v;
char *s = stu__bufstart;
stu__comp_err_line = 1;
while (s < stu__curbuf) {
if (s[0] == '\n' || s[0] == '\r') {
if (s[0]+s[1] == '\n' + '\r') ++s;
++stu__comp_err_line;
}
++s;
}
va_start(v, str);
vsprintf(stu__comp_err_str, str, v);
va_end(v);
return 0;
}
static int stu__accept(int p)
{
if (stu__tok != p) return 0;
stu__nexttoken();
return 1;
}
static int stu__demand(int p)
{
if (stu__accept(p)) return 1;
return stu__err("Didn't find expected stu__tok");
}
static int stu__demandv(int p, stua_obj *val)
{
if (stu__tok == p || p==0) {
*val = stu__tokval;
stu__nexttoken();
return 1;
} else
return 0;
}
static int stu__expr(int p);
int stu__nexpr(int p) { stu__nexttoken(); return stu__expr(p); }
static int stu__statements(int once, int as);
static int stu__parse_if(void) // parse both ST_if and ST_elseif
{
int b,c,a;
a = stu__nexpr(1); if (!a) return 0;
if (!stu__demand(ST_then)) return stu__err("expecting THEN");
b = stu__statements(0,0); if (!b) return 0;
if (b == 1) b = -1;
if (stu__tok == ST_elseif) {
return stu__parse_if();
} else if (stu__accept(ST_else)) {
c = stu__statements(0,0); if (!c) return 0;
if (!stu__demand(ST_end)) return stu__err("expecting END after else clause");
return stu__cc4(ST_else, a, b, c);
} else {
if (!stu__demand(ST_end)) return stu__err("expecting END in if statement");
return stu__cc3(ST_if, a, b);
}
}
int stu__varinit(int z, int in_globals)
{
int a,b;
stu__nexttoken();
while (stu__demandv(ST_id, &b)) {
if (!stb_idict_add(stu__pfunc.locals, b, 1))
if (!in_globals) return stu__err("Redefined variable %s.", stu__pw(b)->ptr);
if (stu__accept('=')) {
a = stu__expr(1); if (!a) return 0;
} else
a = stu__cdv(stua_nil);
z = stu__seq(z, stu__cc3(ST_var, stu__cdv(b), a));
if (!stu__accept(',')) break;
}
return z;
}
static int stu__compile_unary(int z, int outparm, int require_inparm)
{
int op = stu__tok, a, b;
stu__nexttoken();
if (outparm) {
if (require_inparm || (stu__tok && stu__tok != ST_end && stu__tok != ST_else && stu__tok != ST_elseif && stu__tok !=';')) {
a = stu__expr(1); if (!a) return 0;
} else
a = stu__cdv(stua_nil);
b = stu__cc2(op, a);
} else
b = stu__cc1(op);
return stu__seq(z,b);
}
static int stu__assign(void)
{
int z;
stu__accept(ST_let);
z = stu__expr(1); if (!z) return 0;
if (stu__accept('=')) {
int y,p = (z >= 0 ? stu__pfunc.code[z] : 0);
if (z < 0 || (p != ST_id && p != '[')) return stu__err("Invalid lvalue in assignment");
y = stu__assign(); if (!y) return 0;
z = stu__cc3('=', z, y);
}
return z;
}
static int stu__statements(int once, int stop_while)
{
int a,b, c, z=0;
for(;;) {
switch (stu__tok) {
case ST_if : a = stu__parse_if(); if (!a) return 0;
z = stu__seq(z, a);
break;
case ST_while : if (stop_while) return (z ? z:1);
a = stu__nexpr(1); if (!a) return 0;
if (stu__accept(ST_as)) c = stu__statements(0,0); else c = 0;
if (!stu__demand(ST_do)) return stu__err("expecting DO");
b = stu__statements(0,0); if (!b) return 0;
if (!stu__demand(ST_end)) return stu__err("expecting END");
if (b == 1) b = -1;
z = stu__seq(z, stu__cc4(ST_while, a, b, c));
break;
case ST_repeat : stu__nexttoken();
c = stu__statements(0,1); if (!c) return 0;
if (!stu__demand(ST_while)) return stu__err("expecting WHILE");
a = stu__expr(1); if (!a) return 0;
if (!stu__demand(ST_do)) return stu__err("expecting DO");
b = stu__statements(0,0); if (!b) return 0;
if (!stu__demand(ST_end)) return stu__err("expecting END");
if (b == 1) b = -1;
z = stu__seq(z, stu__cc4(ST_as, a, b, c));
break;
case ST_catch : a = stu__nexpr(1); if (!a) return 0;
z = stu__seq(z, stu__cc2(ST_catch, a));
break;
case ST_var : z = stu__varinit(z,0); break;
case ST_return : z = stu__compile_unary(z,1,1); break;
case ST_continue:z = stu__compile_unary(z,0,0); break;
case ST_break : z = stu__compile_unary(z,1,0); break;
case ST_into : if (z == 0 && !once) return stu__err("=> cannot be first statement in block");
a = stu__nexpr(99);
b = (a >= 0? stu__pfunc.code[a] : 0);
if (a < 0 || (b != ST_id && b != '[')) return stu__err("Invalid lvalue on right side of =>");
z = stu__cc3('=', a, z);
break;
default : if (stu__end[stu__tok]) return once ? 0 : (z ? z:1);
a = stu__assign(); if (!a) return 0;
stu__accept(';');
if (stu__tok && !stu__end[stu__tok]) {
if (a < 0)
return stu__err("Constant has no effect");
if (stu__pfunc.code[a] != '(' && stu__pfunc.code[a] != '=')
return stu__err("Expression has no effect");
}
z = stu__seq(z, a);
break;
}
if (!z) return 0;
stu__accept(';');
if (once && stu__tok != ST_into) return z;
}
}
static int stu__postexpr(int z, int p);
static int stu__dictdef(int end, int *count)
{
int z,n=0,i,flags=0;
short *dict=NULL;
stu__nexttoken();
while (stu__tok != end) {
if (stu__tok == ST_id) {
stua_obj id = stu__tokval;
stu__nexttoken();
if (stu__tok == '=') {
flags |= 1;
stb_arr_push(dict, stu__cdv(id));
z = stu__nexpr(1); if (!z) return 0;
} else {
z = stu__cc2(ST_id, stu__cdv(id));
z = stu__postexpr(z,1); if (!z) return 0;
flags |= 2;
stb_arr_push(dict, stu__cdv(stu__makeint(n++)));
}
} else {
z = stu__expr(1); if (!z) return 0;
flags |= 2;
stb_arr_push(dict, stu__cdv(stu__makeint(n++)));
}
if (end != ')' && flags == 3) { z=stu__err("can't mix initialized and uninitialized defs"); goto done;}
stb_arr_push(dict, z);
if (!stu__accept(',')) break;
}
if (!stu__demand(end))
return stu__err(end == ')' ? "Expecting ) at end of function call"
: "Expecting } at end of dictionary definition");
z = stu__cc2('{', stb_arr_len(dict)/2);
for (i=0; i < stb_arr_len(dict); ++i)
stu__cc(dict[i]);
if (count) *count = n;
done:
stb_arr_free(dict);
return z;
}
static int stu__comp_id(void)
{
int z,d;
d = stu__nonlocal_id(stu__tokval);
if (d == 0)
return z = stu__cc2(ST_id, stu__cdt());
// access a non-local frame by naming it with the appropriate int
assert(d < 0);
z = stu__cdv(d); // relative frame # is the 'variable' in our local frame
z = stu__cc2(ST_id, z); // now access that dictionary
   return stu__cc3('[', z, stu__cdt()); // now access the variable from that dict
}
static stua_obj stu__funcdef(stua_obj *id, stua_obj *func);
static int stu__expr(int p)
{
int z;
// unary
switch (stu__tok) {
case ST_number: z = stu__cdt(); break;
case ST_string: z = stu__cdt(); break; // @TODO - string concatenation like C
case ST_id : z = stu__comp_id(); break;
case ST__frame: z = stu__cc1(ST__frame); stu__nexttoken(); break;
case ST_func : z = stu__funcdef(NULL,NULL); break;
case ST_if : z = stu__parse_if(); break;
case ST_nil : z = stu__cdv(stua_nil); stu__nexttoken(); break;
case ST_true : z = stu__cdv(stua_true); stu__nexttoken(); break;
case ST_false : z = stu__cdv(stua_false); stu__nexttoken(); break;
case '-' : z = stu__nexpr(99); if (z) z=stu__cc2(STU__negate,z); else return z; break;
case '!' : z = stu__nexpr(99); if (z) z=stu__cc2('!',z); else return z; break;
case '~' : z = stu__nexpr(99); if (z) z=stu__cc2('~',z); else return z; break;
case '{' : z = stu__dictdef('}', NULL); break;
default : return stu__err("Unexpected token");
case '(' : stu__nexttoken(); z = stu__statements(0,0); if (!stu__demand(')')) return stu__err("Expecting )");
}
return stu__postexpr(z,p);
}
static int stu__postexpr(int z, int p)
{
int q;
// postfix
while (stu__tok == '(' || stu__tok == '[' || stu__tok == '.') {
if (stu__accept('.')) {
// MUST be followed by a plain identifier! use [] for other stuff
if (stu__tok != ST_id) return stu__err("Must follow . with plain name; try [] instead");
z = stu__cc3('[', z, stu__cdv(stu__tokval));
stu__nexttoken();
} else if (stu__accept('[')) {
while (stu__tok != ']') {
int r = stu__expr(1); if (!r) return 0;
z = stu__cc3('[', z, r);
if (!stu__accept(',')) break;
}
if (!stu__demand(']')) return stu__err("Expecting ]");
} else {
int n, p = stu__dictdef(')', &n); if (!p) return 0;
#if 0 // this is incorrect!
if (z > 0 && stu__pfunc.code[z] == ST_id) {
stua_obj q = stu__get(stu__globaldict, stu__pfunc.data[-stu__pfunc.code[z+1]-1], stua_nil);
if (stu__checkt(STU___function, q))
if ((stu__pf(q))->num_param != n)
return stu__err("Incorrect number of parameters");
}
#endif
z = stu__cc3('(', z, p);
}
}
// binop - this implementation taken from lcc
for (q=stu__prec[stu__tok]; q >= p; --q) {
while (stu__prec[stu__tok] == q) {
int o = stu__tok, y = stu__nexpr(p+1); if (!y) return 0;
z = stu__cc3(o,z,y);
}
}
return z;
}
static stua_obj stu__finish_func(stua_obj *param, int start)
{
int n, size;
stu__func *f = (stu__func *) malloc(sizeof(*f));
f->closure_source = 0;
f->num_param = stb_arr_len(param);
f->param = (int *) stb_copy(param, f->num_param * sizeof(*f->param));
size = stb_arr_storage(stu__pfunc.code) + stb_arr_storage(stu__pfunc.data) + sizeof(*f) + 8;
f->f.store = malloc(stb_arr_storage(stu__pfunc.code) + stb_arr_storage(stu__pfunc.data));
f->code = (short *) ((char *) f->f.store + stb_arr_storage(stu__pfunc.data));
memcpy(f->code, stu__pfunc.code, stb_arr_storage(stu__pfunc.code));
f->code[1] = start;
f->code[0] = stb_arr_len(stu__pfunc.data);
for (n=0; n < f->code[0]; ++n)
((stua_obj *) f->code)[-1-n] = stu__pfunc.data[n];
return stu__makeobj(STU___function, f, size, 0);
}
static int stu__funcdef(stua_obj *id, stua_obj *result)
{
int n,z=0,i,q;
stua_obj *param = NULL;
short *nonlocal;
stua_obj v,f=stua_nil;
assert(stu__tok == ST_func);
stu__nexttoken();
if (id) {
if (!stu__demandv(ST_id, id)) return stu__err("Expecting function name");
} else
stu__accept(ST_id);
if (!stu__demand('(')) return stu__err("Expecting ( for function parameter");
stu__push_func_comp();
while (stu__tok != ')') {
if (!stu__demandv(ST_id, &v)) { z=stu__err("Expecting parameter name"); goto done; }
stb_idict_add(stu__pfunc.locals, v, 1);
if (stu__tok == '=') {
n = stu__nexpr(1); if (!n) { z=0; goto done; }
z = stu__seq(z, stu__cc3(STU__defaultparm, stu__cdv(v), n));
} else
stb_arr_push(param, v);
if (!stu__accept(',')) break;
}
if (!stu__demand(')')) { z=stu__err("Expecting ) at end of parameter list"); goto done; }
n = stu__statements(0,0); if (!n) { z=0; goto done; }
if (!stu__demand(ST_end)) { z=stu__err("Expecting END at end of function"); goto done; }
if (n == 1) n = 0;
n = stu__seq(z,n);
f = stu__finish_func(param, n);
if (result) { *result = f; z=1; stu__pop_func_comp(); }
else {
nonlocal = stu__pfunc.non_local_refs;
stu__pfunc.non_local_refs = NULL;
stu__pop_func_comp();
z = stu__cdv(f);
if (nonlocal) { // build a closure with references to the needed frames
short *initcode = NULL;
for (i=0; i < stb_arr_len(nonlocal); ++i) {
int k = nonlocal[i], p;
stb_arr_push(initcode, stu__cdv(k));
if (k == -1) p = stu__cc1(ST__frame);
else { p = stu__cdv(stu__makeint(k+1)); p = stu__cc2(ST_id, p); }
stb_arr_push(initcode, p);
}
q = stu__cc2('{', stb_arr_len(nonlocal));
for (i=0; i < stb_arr_len(initcode); ++i)
stu__cc(initcode[i]);
z = stu__cc3('+', z, q);
stb_arr_free(initcode);
}
stb_arr_free(nonlocal);
}
done:
stb_arr_free(param);
if (!z) stu__pop_func_comp();
return z;
}
static int stu__compile_global_scope(void)
{
stua_obj o;
int z=0;
stu__push_func_comp();
while (stu__tok != 0) {
if (stu__tok == ST_func) {
stua_obj id, f;
if (!stu__funcdef(&id,&f))
goto error;
stu__set(stu__globaldict, id, f);
} else if (stu__tok == ST_var) {
z = stu__varinit(z,1); if (!z) goto error;
} else {
int y = stu__statements(1,0); if (!y) goto error;
z = stu__seq(z,y);
}
stu__accept(';');
}
o = stu__finish_func(NULL, z);
stu__pop_func_comp();
o = stu__funceval(o, stua_globals); // initialize stu__globaldict
if (stu__flow == FLOW_error)
printf("Error: %s\n", ((stu__wrapper *) stu__ptr(stu__flow_val))->ptr);
return 1;
error:
stu__pop_func_comp();
return 0;
}
stua_obj stu__myprint(stua_dict *context)
{
stua_obj x = stu__get(context, stua_string("x"), stua_nil);
if ((x & 1) == stu__float_tag) printf("%f", stu__getfloat(x));
else if (stu__tag(x) == stu__int_tag) printf("%d", stu__int(x));
else {
stu__wrapper *s = stu__pw(x);
if (s->type == STU___string || s->type == STU___error)
printf("%s", s->ptr);
else if (s->type == STU___dict) printf("{{dictionary}}");
else if (s->type == STU___function) printf("[[function]]");
else
printf("[[ERROR:%s]]", s->ptr);
}
return x;
}
void stua_init(void)
{
if (!stu__globaldict) {
int i;
stua_obj s;
stu__func *f;
stu__prec[ST_and] = stu__prec[ST_or] = 1;
stu__prec[ST_eq ] = stu__prec[ST_ne] = stu__prec[ST_le] =
stu__prec[ST_ge] = stu__prec['>' ] = stu__prec['<'] = 2;
stu__prec[':'] = 3;
stu__prec['&'] = stu__prec['|'] = stu__prec['^'] = 4;
stu__prec['+'] = stu__prec['-'] = 5;
stu__prec['*'] = stu__prec['/'] = stu__prec['%'] =
stu__prec[ST_shl]= stu__prec[ST_shr]= stu__prec[ST_shru]= 6;
stu__end[')'] = stu__end[ST_end] = stu__end[ST_else] = 1;
stu__end[ST_do] = stu__end[ST_elseif] = 1;
stu__float_init();
stu__lex_matcher = stb_lex_matcher();
for (i=0; i < sizeof(stu__lexemes)/sizeof(stu__lexemes[0]); ++i)
stb_lex_item(stu__lex_matcher, stu__lexemes[i].regex, stu__lexemes[i].stu__tok);
stu__globaldict = stb_idict_new_size(64);
stua_globals = stu__makeobj(STU___dict, stu__globaldict, 0,0);
stu__strings = stb_sdict_new(0);
stu__curbuf = stu__bufstart = "func _print(x) end\n"
"func print()\n var x=0 while _frame[x] != nil as x=x+1 do _print(_frame[x]) end end\n";
stu__nexttoken();
if (!stu__compile_global_scope())
printf("Compile error in line %d: %s\n", stu__comp_err_line, stu__comp_err_str);
s = stu__get(stu__globaldict, stua_string("_print"), stua_nil);
if (stu__tag(s) == stu__ptr_tag && stu__ptr(s)->type == STU___function) {
f = stu__pf(s);
free(f->f.store);
f->closure_source = 4;
f->f.func = stu__myprint;
f->code = NULL;
}
}
}
void stua_uninit(void)
{
if (stu__globaldict) {
stb_idict_remove_all(stu__globaldict);
stb_arr_setlen(stu__gc_root_stack, 0);
stua_gc(1);
stb_idict_destroy(stu__globaldict);
stb_sdict_delete(stu__strings);
stb_matcher_free(stu__lex_matcher);
stb_arr_free(stu__gc_ptrlist);
stb_arr_free(func_stack);
stb_arr_free(stu__gc_root_stack);
stu__globaldict = NULL;
}
}
void stua_run_script(char *s)
{
stua_init();
stu__curbuf = stu__bufstart = s;
stu__nexttoken();
stu__flow = FLOW_normal;
if (!stu__compile_global_scope())
printf("Compile error in line %d: %s\n", stu__comp_err_line, stu__comp_err_str);
stua_gc(1);
}
#endif // STB_DEFINE
#endif // STB_STUA
#undef STB_EXTERN
#endif // STB_INCLUDE_STB_H
/*
------------------------------------------------------------------------------
This software is available under 2 licenses -- choose whichever you prefer.
------------------------------------------------------------------------------
ALTERNATIVE A - MIT License
Copyright (c) 2017 Sean Barrett
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
ALTERNATIVE B - Public Domain (www.unlicense.org)
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain. We make this dedication for the benefit of the public at large and to
the detriment of our heirs and successors. We intend this dedication to be an
overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------
*/
| gpl-2.0 |
shehbazj/DyRe | test/shell/lvcreate-mirror.sh | 1521 | #!/bin/sh
# Copyright (C) 2010 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux prepare_vg 5 80
aux lvmconf 'allocation/maximise_cling = 0' \
'allocation/mirror_logs_require_separate_pvs = 1'
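# With the cling allocation heuristic disabled and mirror logs required on
# PVs separate from the images, the explicit PV arguments below
# deterministically control placement.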
# 2-way mirror with corelog, 2 PVs
lvcreate -aey -l2 --type mirror -m1 --mirrorlog core -n $lv1 $vg "$dev1" "$dev2"
check mirror_images_redundant $vg $lv1
# 2-way mirror with disklog, 3 PVs
# lvcreate --nosync is in 100% sync after creation (bz429342)
lvcreate -aey -l2 --type mirror -m1 --nosync -n $lv2 $vg "$dev1" "$dev2" "$dev3":0-1 2>&1 | tee out
grep "New mirror won't be synchronised." out
check lv_field $vg/$lv2 copy_percent "100.00"
check mirror_images_redundant $vg $lv2
check mirror_log_on $vg $lv2 "$dev3"
# 3-way mirror with disklog, 4 PVs
lvcreate -aey -l2 --type mirror -m2 --nosync --mirrorlog disk -n $lv3 $vg "$dev1" "$dev2" "$dev4" "$dev3":0-1
check mirror_images_redundant $vg $lv3
check mirror_log_on $vg $lv3 "$dev3"
lvremove -ff $vg
# creating 2-way mirror with disklog from 2 PVs fails
not lvcreate -aey -l2 --type mirror -m1 -n $lv1 $vg "$dev1" "$dev2"
vgremove -ff $vg
| gpl-2.0 |
nycbjr/kernel_sony_sgp3xx | arch/arm/mach-msm/board-sony_pollux-pmic.c | 20236 | /* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
* Copyright (C) 2012-2013 Sony Mobile Communications AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
#include <linux/mfd/pm8xxx/pm8921.h>
#include <linux/leds.h>
#include <linux/leds-pm8xxx.h>
#include <linux/mfd/pm8xxx/pm8xxx-adc.h>
#include <linux/gpio_event.h>
#include <linux/gpio_keys.h>
#include <asm/mach-types.h>
#include <asm/mach/mmc.h>
#include <mach/msm_bus_board.h>
#include <mach/board.h>
#include <mach/gpiomux.h>
#include <mach/restart.h>
#include "devices.h"
#include "board-8064.h"
#include "charger-sony_fusion3.h"
struct pm8xxx_gpio_init {
unsigned gpio;
struct pm_gpio config;
};
struct pm8xxx_mpp_init {
unsigned mpp;
struct pm8xxx_mpp_config_data config;
};
#define PM8921_GPIO_INIT(_gpio, _dir, _buf, _val, _pull, _vin, _out_strength, \
_func, _inv, _disable) \
{ \
.gpio = PM8921_GPIO_PM_TO_SYS(_gpio), \
.config = { \
.direction = _dir, \
.output_buffer = _buf, \
.output_value = _val, \
.pull = _pull, \
.vin_sel = _vin, \
.out_strength = _out_strength, \
.function = _func, \
.inv_int_pol = _inv, \
.disable_pin = _disable, \
} \
}
#define PM8921_MPP_INIT(_mpp, _type, _level, _control) \
{ \
.mpp = PM8921_MPP_PM_TO_SYS(_mpp), \
.config = { \
.type = PM8XXX_MPP_TYPE_##_type, \
.level = _level, \
.control = PM8XXX_MPP_##_control, \
} \
}
#define PM8921_MPP_DISABLE(_mpp) \
PM8921_MPP_INIT(_mpp, SINK, PM8XXX_MPP_CS_OUT_5MA, CS_CTRL_DISABLE)
#define PM8821_MPP_INIT(_mpp, _type, _level, _control) \
{ \
.mpp = PM8821_MPP_PM_TO_SYS(_mpp), \
.config = { \
.type = PM8XXX_MPP_TYPE_##_type, \
.level = _level, \
.control = PM8XXX_MPP_##_control, \
} \
}
#define PM8821_MPP_DISABLE(_mpp) \
PM8821_MPP_INIT(_mpp, SINK, PM8XXX_MPP_CS_OUT_5MA, CS_CTRL_DISABLE)
#define PM8921_GPIO_DISABLE(_gpio) \
PM8921_GPIO_INIT(_gpio, PM_GPIO_DIR_IN, 0, 0, 0, PM_GPIO_VIN_S4, \
0, 0, 0, 1)
#define PM8921_GPIO_OUTPUT(_gpio, _val, _strength) \
PM8921_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
PM_GPIO_PULL_NO, PM_GPIO_VIN_S4, \
PM_GPIO_STRENGTH_##_strength, \
PM_GPIO_FUNC_NORMAL, 0, 0)
#define PM8921_GPIO_OUTPUT_BUFCONF(_gpio, _val, _strength, _bufconf) \
PM8921_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT,\
PM_GPIO_OUT_BUF_##_bufconf, _val, \
PM_GPIO_PULL_NO, PM_GPIO_VIN_S4, \
PM_GPIO_STRENGTH_##_strength, \
PM_GPIO_FUNC_NORMAL, 0, 0)
#define PM8921_GPIO_INPUT(_gpio, _pull) \
PM8921_GPIO_INIT(_gpio, PM_GPIO_DIR_IN, PM_GPIO_OUT_BUF_CMOS, 0, \
_pull, PM_GPIO_VIN_S4, \
PM_GPIO_STRENGTH_NO, \
PM_GPIO_FUNC_NORMAL, 0, 0)
#define PM8921_GPIO_OUTPUT_FUNC(_gpio, _val, _func) \
PM8921_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
PM_GPIO_PULL_NO, PM_GPIO_VIN_S4, \
PM_GPIO_STRENGTH_HIGH, \
_func, 0, 0)
#define PM8921_GPIO_OUTPUT_VIN(_gpio, _val, _vin) \
PM8921_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, _val, \
PM_GPIO_PULL_NO, _vin, \
PM_GPIO_STRENGTH_HIGH, \
PM_GPIO_FUNC_NORMAL, 0, 0)
#define PM8921_GPIO_OUTPUT_BUFCONF_VPH(_gpio, _val, _strength, _bufconf) \
PM8921_GPIO_INIT(_gpio, PM_GPIO_DIR_OUT,\
PM_GPIO_OUT_BUF_##_bufconf, _val, \
PM_GPIO_PULL_NO, PM_GPIO_VIN_VPH, \
PM_GPIO_STRENGTH_##_strength, \
PM_GPIO_FUNC_NORMAL, 0, 0)
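/*
 * The wrappers above are thin conveniences over struct pm_gpio; for
 * instance, PM8921_GPIO_OUTPUT(10, 0, LOW) below describes GPIO 10 as a
 * CMOS output driven low at low drive strength from the S4 supply rail.
 */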
/* Initial PM8921 GPIO configurations */
static struct pm8xxx_gpio_init pm8921_gpios[] __initdata = {
PM8921_GPIO_INPUT(1, PM_GPIO_PULL_NO), /* SIM_DET_N */
PM8921_GPIO_INPUT(2, PM_GPIO_PULL_UP_1P5), /* ACC_COVER_OPEN */
PM8921_GPIO_DISABLE(3), /* NC */
PM8921_GPIO_DISABLE(4), /* NC */
PM8921_GPIO_OUTPUT(5, 0, LOW), /* IR_LEVEL_EN */
PM8921_GPIO_DISABLE(6), /* HWID[0] used only by S1 */
PM8921_GPIO_DISABLE(7), /* HWID[1] used only by S1 */
PM8921_GPIO_DISABLE(8), /* HWID[2] used only by S1 */
PM8921_GPIO_DISABLE(9), /* NC */
PM8921_GPIO_OUTPUT(10, 0, LOW), /* BACKLIGHT_EN */
PM8921_GPIO_DISABLE(11), /* NC */
PM8921_GPIO_DISABLE(12), /* NC */
PM8921_GPIO_DISABLE(13), /* NC */
PM8921_GPIO_OUTPUT(14, 0, LOW), /* IR_RESET_N */
PM8921_GPIO_DISABLE(15), /* NC */
PM8921_GPIO_DISABLE(16), /* NC */
PM8921_GPIO_DISABLE(17), /* NC */
PM8921_GPIO_DISABLE(18), /* NC */
	PM8921_GPIO_OUTPUT(19, 0, MED), /* Right speaker enable */
PM8921_GPIO_INPUT(20, PM_GPIO_PULL_NO), /* OTG_OVRCUR_DET_N */
PM8921_GPIO_OUTPUT(21, 0, LOW), /* NFC_DWLD_EN */
PM8921_GPIO_OUTPUT(22, 0, HIGH), /* RF_ID_EN */
PM8921_GPIO_INPUT(23, PM_GPIO_PULL_NO), /* LCD ID */
PM8921_GPIO_OUTPUT(24, 0, LOW), /* LCD_DCDC_EN */
PM8921_GPIO_OUTPUT(25, 0, LOW), /* DISP_RESET_N */
PM8921_GPIO_OUTPUT(26, 1, LOW), /* LMU_EN */
PM8921_GPIO_OUTPUT(27, 0, LOW), /* MHL_RST_N */
PM8921_GPIO_OUTPUT(28, 0, LOW), /* MCAM_RST_N */
PM8921_GPIO_INPUT(29, PM_GPIO_PULL_UP_30), /* VOLUME_UP_KEY */
PM8921_GPIO_DISABLE(30), /* NC */
PM8921_GPIO_DISABLE(31), /* NC */
PM8921_GPIO_DISABLE(32), /* NC */
/* NFC_EXT_EN */
PM8921_GPIO_INIT(33, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_OPEN_DRAIN, 0, \
PM_GPIO_PULL_NO, PM_GPIO_VIN_VPH, \
PM_GPIO_STRENGTH_LOW, \
PM_GPIO_FUNC_NORMAL, 0, 0),
PM8921_GPIO_OUTPUT(34, 1, HIGH), /* WCD9310_RESET_N */
PM8921_GPIO_DISABLE(35), /* NC */
PM8921_GPIO_DISABLE(36), /* NC */
PM8921_GPIO_DISABLE(37), /* NC */
PM8921_GPIO_INPUT(38, PM_GPIO_PULL_UP_30),/* VOLUME_DOWN_KEY */
/* GPIO_39 (SSBI_PMIC_FWD_CLK) is set by SBL */
PM8921_GPIO_DISABLE(40), /* NC */
PM8921_GPIO_DISABLE(41), /* NC */
PM8921_GPIO_OUTPUT_BUFCONF_VPH(42, 1, LOW, CMOS), /* OTG_OVP_CNTL */
PM8921_GPIO_DISABLE(43), /* NC */
PM8921_GPIO_DISABLE(44), /* NC */
};
static struct pm8xxx_gpio_init pm8921_mtp_kp_gpios[] __initdata = {
PM8921_GPIO_INPUT(3, PM_GPIO_PULL_UP_30),
PM8921_GPIO_INPUT(4, PM_GPIO_PULL_UP_30),
};
static struct pm8xxx_gpio_init pm8921_cdp_kp_gpios[] __initdata = {
PM8921_GPIO_INPUT(27, PM_GPIO_PULL_UP_30),
PM8921_GPIO_INPUT(42, PM_GPIO_PULL_UP_30),
PM8921_GPIO_INPUT(17, PM_GPIO_PULL_UP_1P5), /* SD_WP */
};
/* Initial PM8XXX MPP configurations */
static struct pm8xxx_mpp_init pm8xxx_mpps[] __initdata = {
PM8921_MPP_DISABLE(1), /* N/C */
PM8921_MPP_DISABLE(2), /* N/C */
PM8921_MPP_DISABLE(3), /* N/C */
PM8921_MPP_DISABLE(4), /* N/C */
/* MPP 5 and 6 are handled by QCT*/
/* External 5V regulator enable; used by USB_OTG switches. */
PM8921_MPP_INIT(7, D_OUTPUT, PM8921_MPP_DIG_LEVEL_S4, DOUT_CTRL_LOW),
/* BL_THERM */
PM8921_MPP_INIT(8, A_INPUT, PM8XXX_MPP_AIN_AMUX_CH5, DOUT_CTRL_LOW),
/* RF_ID */
PM8921_MPP_INIT(9, A_INPUT, PM8XXX_MPP_AIN_AMUX_CH6, DOUT_CTRL_LOW),
PM8921_MPP_DISABLE(10), /* N/C */
PM8921_MPP_DISABLE(11), /* N/C, Reserve Audio Study */
PM8921_MPP_DISABLE(12), /* N/C */
/* TMM_ANT_SW1 */
PM8821_MPP_INIT(1, D_OUTPUT, PM8821_MPP_DIG_LEVEL_1P8, DOUT_CTRL_HIGH),
/* TMM_ANT_SW2 */
PM8821_MPP_INIT(2, D_OUTPUT, PM8821_MPP_DIG_LEVEL_1P8, DOUT_CTRL_LOW),
/* TMM_ANT_SW3 */
PM8821_MPP_INIT(3, D_OUTPUT, PM8821_MPP_DIG_LEVEL_1P8, DOUT_CTRL_HIGH),
PM8821_MPP_DISABLE(4), /* N/C */
};
void __init apq8064_pm8xxx_gpio_mpp_init(void)
{
int i, rc;
for (i = 0; i < ARRAY_SIZE(pm8921_gpios); i++) {
rc = pm8xxx_gpio_config(pm8921_gpios[i].gpio,
&pm8921_gpios[i].config);
if (rc) {
pr_err("%s: pm8xxx_gpio_config: rc=%d\n", __func__, rc);
break;
}
}
if (machine_is_apq8064_cdp() || machine_is_apq8064_liquid())
for (i = 0; i < ARRAY_SIZE(pm8921_cdp_kp_gpios); i++) {
rc = pm8xxx_gpio_config(pm8921_cdp_kp_gpios[i].gpio,
&pm8921_cdp_kp_gpios[i].config);
if (rc) {
pr_err("%s: pm8xxx_gpio_config: rc=%d\n",
__func__, rc);
break;
}
}
if (machine_is_apq8064_mtp())
for (i = 0; i < ARRAY_SIZE(pm8921_mtp_kp_gpios); i++) {
rc = pm8xxx_gpio_config(pm8921_mtp_kp_gpios[i].gpio,
&pm8921_mtp_kp_gpios[i].config);
if (rc) {
pr_err("%s: pm8xxx_gpio_config: rc=%d\n",
__func__, rc);
break;
}
}
for (i = 0; i < ARRAY_SIZE(pm8xxx_mpps); i++) {
rc = pm8xxx_mpp_config(pm8xxx_mpps[i].mpp,
&pm8xxx_mpps[i].config);
if (rc) {
pr_err("%s: pm8xxx_mpp_config: rc=%d\n", __func__, rc);
break;
}
}
}
static struct pm8xxx_pwrkey_platform_data apq8064_pm8921_pwrkey_pdata = {
.pull_up = 1,
.kpd_trigger_delay_us = 15625,
.wakeup = 1,
};
static struct pm8xxx_misc_platform_data apq8064_pm8921_misc_pdata = {
.priority = 0,
};
#define PM8921_LC_LED_MAX_CURRENT 4 /* I = 4mA */
#define PM8921_LC_LED_LOW_CURRENT 1 /* I = 1mA */
#define PM8XXX_LED_PWM_PERIOD 1000
#define PM8XXX_LED_PWM_DUTY_MS 20
/**
 * PM8XXX_PWM_CHANNEL_NONE should be used when the LED is not to be
 * driven by the PWM feature.
 */
#define PM8XXX_PWM_CHANNEL_NONE -1
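/*
 * Illustrative sketch (not part of this board file): an LED config
 * entry that is not PWM-driven would set .pwm_channel to
 * PM8XXX_PWM_CHANNEL_NONE instead of a real channel number; the
 * mode value shown is an assumption:
 *
 *	.mode        = PM8XXX_LED_MODE_MANUAL,
 *	.pwm_channel = PM8XXX_PWM_CHANNEL_NONE,
 */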
static struct led_info pm8921_led_info[] = {
[0] = {
.name = "led:red",
.default_trigger = "ac-online",
},
};
static struct led_platform_data pm8921_led_core_pdata = {
.num_leds = ARRAY_SIZE(pm8921_led_info),
.leds = pm8921_led_info,
};
static int pm8921_led0_pwm_duty_pcts[56] = {
1, 4, 8, 12, 16, 20, 24, 28, 32, 36,
40, 44, 46, 52, 56, 60, 64, 68, 72, 76,
80, 84, 88, 92, 96, 100, 100, 100, 98, 95,
92, 88, 84, 82, 78, 74, 70, 66, 62, 58,
58, 54, 50, 48, 42, 38, 34, 30, 26, 22,
14, 10, 6, 4, 1
};
/*
* Note: There is a bug in LPG module that results in incorrect
* behavior of pattern when LUT index 0 is used. So effectively
* there are 63 usable LUT entries.
*/
static struct pm8xxx_pwm_duty_cycles pm8921_led0_pwm_duty_cycles = {
.duty_pcts = (int *)&pm8921_led0_pwm_duty_pcts,
.num_duty_pcts = ARRAY_SIZE(pm8921_led0_pwm_duty_pcts),
.duty_ms = PM8XXX_LED_PWM_DUTY_MS,
.start_idx = 1,
};
static struct pm8xxx_led_config pm8921_led_configs[] = {
[0] = {
.id = PM8XXX_ID_LED_0,
.mode = PM8XXX_LED_MODE_PWM2,
.max_current = PM8921_LC_LED_MAX_CURRENT,
.pwm_channel = 5,
.pwm_period_us = PM8XXX_LED_PWM_PERIOD,
.pwm_duty_cycles = &pm8921_led0_pwm_duty_cycles,
},
};
static struct pm8xxx_led_platform_data apq8064_pm8921_leds_pdata = {
.led_core = &pm8921_led_core_pdata,
.configs = pm8921_led_configs,
.num_configs = ARRAY_SIZE(pm8921_led_configs),
};
static struct pm8xxx_adc_amux apq8064_pm8921_adc_channels_data[] = {
{"vcoin", CHANNEL_VCOIN, CHAN_PATH_SCALING2, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"vbat", CHANNEL_VBAT, CHAN_PATH_SCALING2, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"dcin", CHANNEL_DCIN, CHAN_PATH_SCALING4, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"ichg", CHANNEL_ICHG, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"vph_pwr", CHANNEL_VPH_PWR, CHAN_PATH_SCALING2, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"ibat", CHANNEL_IBAT, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"batt_therm", CHANNEL_BATT_THERM, CHAN_PATH_SCALING1, AMUX_RSV2,
ADC_DECIMATION_TYPE2, ADC_SCALE_BATT_THERM},
{"batt_id", CHANNEL_BATT_ID, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"usbin", CHANNEL_USBIN, CHAN_PATH_SCALING3, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"pmic_therm", CHANNEL_DIE_TEMP, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_PMIC_THERM},
{"625mv", CHANNEL_625MV, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"125v", CHANNEL_125V, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"chg_temp", CHANNEL_CHG_TEMP, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"pba_therm", CHANNEL_MUXOFF, CHAN_PATH_SCALING1, AMUX_RSV0,
ADC_DECIMATION_TYPE2, ADC_SCALE_PBA_THERM},
{"bl_therm", ADC_MPP_1_AMUX5, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_BL_THERM},
{"rf_id", ADC_MPP_1_AMUX6, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT},
{"apq_therm", ADC_MPP_1_AMUX3, CHAN_PATH_SCALING1, AMUX_RSV1,
ADC_DECIMATION_TYPE2, ADC_SCALE_PA_THERM},
};
static struct pm8xxx_adc_properties apq8064_pm8921_adc_data = {
	.adc_vdd_reference = 1800, /* ADC reference voltage in millivolts */
.bitresolution = 15,
.bipolar = 0,
};
static struct pm8xxx_adc_platform_data apq8064_pm8921_adc_pdata = {
.adc_channel = apq8064_pm8921_adc_channels_data,
.adc_num_board_channel = ARRAY_SIZE(apq8064_pm8921_adc_channels_data),
.adc_prop = &apq8064_pm8921_adc_data,
.adc_mpp_base = PM8921_MPP_PM_TO_SYS(1),
};
static struct pm8xxx_mpp_platform_data
apq8064_pm8921_mpp_pdata __devinitdata = {
.mpp_base = PM8921_MPP_PM_TO_SYS(1),
};
static struct pm8xxx_gpio_platform_data
apq8064_pm8921_gpio_pdata __devinitdata = {
.gpio_base = PM8921_GPIO_PM_TO_SYS(1),
};
static struct pm8xxx_irq_platform_data
apq8064_pm8921_irq_pdata __devinitdata = {
.irq_base = PM8921_IRQ_BASE,
.devirq = MSM_GPIO_TO_INT(74),
.irq_trigger_flag = IRQF_TRIGGER_LOW,
.dev_id = 0,
};
static struct pm8xxx_rtc_platform_data
apq8064_pm8921_rtc_pdata = {
#ifdef CONFIG_RTC_SEMC_ETS
.rtc_write_enable = true,
#else
.rtc_write_enable = false,
#endif
.rtc_alarm_powerup = false,
};
static int apq8064_pm8921_therm_mitigation[] = {
1525,
825,
475,
325,
};
#define MAX_VOLTAGE_MV 4200
#define V_CUTOFF_MV 3200
#define CHG_TERM_MA 200
static struct pm8921_charger_platform_data
apq8064_pm8921_chg_pdata __devinitdata = {
.ttrkl_time = 64,
.update_time = 30000,
#ifdef CONFIG_PM8921_SONY_BMS_CHARGER
.update_time_at_low_bat = 1000,
.alarm_low_mv = V_CUTOFF_MV,
.alarm_high_mv = V_CUTOFF_MV + 100,
#endif
.max_voltage = MAX_VOLTAGE_MV,
.min_voltage = V_CUTOFF_MV,
.resume_voltage_delta = 100,
.resume_charge_percent = 95,
.term_current = CHG_TERM_MA,
.cool_temp = 10,
.warm_temp = 45,
.hysteresis_temp = 3,
.temp_check_period = 1,
.safe_current_ma = 1525,
.max_bat_chg_current = 1525,
.cool_bat_chg_current = 1525,
.warm_bat_chg_current = 325,
.cool_bat_voltage = 4200,
.warm_bat_voltage = 4000,
.ibat_calib_enable = 1,
.thermal_mitigation = apq8064_pm8921_therm_mitigation,
.thermal_levels = ARRAY_SIZE(apq8064_pm8921_therm_mitigation),
.rconn_mohm = 18,
.btc_override = 1,
.btc_override_cold_degc = 5,
.btc_override_hot_degc = 55,
.btc_delay_ms = 10000,
.btc_panic_if_cant_stop_chg = 1,
.stop_chg_upon_expiry = 1,
.repeat_safety_time = 1,
.safety_time = 512,
.soc_scaling = 1,
};
static struct pm8xxx_ccadc_platform_data
apq8064_pm8xxx_ccadc_pdata = {
.r_sense_uohm = 10000,
.calib_delay_ms = 600000,
};
static struct pm8921_bms_platform_data
apq8064_pm8921_bms_pdata __devinitdata = {
#ifdef CONFIG_PM8921_SONY_BMS_CHARGER
.battery_data = &pm8921_battery_data,
#else
.battery_type = BATT_OEM,
#endif
.r_sense_uohm = 10000,
.v_cutoff = V_CUTOFF_MV,
.i_test = 1000,
.max_voltage_uv = MAX_VOLTAGE_MV * 1000,
.rconn_mohm = 30,
#ifndef CONFIG_PM8921_SONY_BMS_CHARGER
.alarm_low_mv = V_CUTOFF_MV,
.alarm_high_mv = V_CUTOFF_MV + 100,
#endif
.shutdown_soc_valid_limit = 20,
.adjust_soc_low_threshold = 25,
.chg_term_ua = CHG_TERM_MA * 1000,
.enable_fcc_learning = 1,
.normal_voltage_calc_ms = 20000,
.low_voltage_calc_ms = 1000,
#ifndef CONFIG_PM8921_SONY_BMS_CHARGER
.low_voltage_detect = 1,
.vbatt_cutoff_retries = 5,
.high_ocv_correction_limit_uv = 50,
.low_ocv_correction_limit_uv = 100,
.hold_soc_est = 3,
#endif
};
static struct pm8921_platform_data
apq8064_pm8921_platform_data __devinitdata = {
.regulator_pdatas = msm8064_pm8921_regulator_pdata,
.irq_pdata = &apq8064_pm8921_irq_pdata,
.gpio_pdata = &apq8064_pm8921_gpio_pdata,
.mpp_pdata = &apq8064_pm8921_mpp_pdata,
.rtc_pdata = &apq8064_pm8921_rtc_pdata,
.pwrkey_pdata = &apq8064_pm8921_pwrkey_pdata,
.misc_pdata = &apq8064_pm8921_misc_pdata,
.leds_pdata = &apq8064_pm8921_leds_pdata,
.adc_pdata = &apq8064_pm8921_adc_pdata,
.charger_pdata = &apq8064_pm8921_chg_pdata,
.bms_pdata = &apq8064_pm8921_bms_pdata,
.ccadc_pdata = &apq8064_pm8xxx_ccadc_pdata,
};
static struct pm8xxx_irq_platform_data
apq8064_pm8821_irq_pdata __devinitdata = {
.irq_base = PM8821_IRQ_BASE,
.devirq = PM8821_SEC_IRQ_N,
.irq_trigger_flag = IRQF_TRIGGER_HIGH,
.dev_id = 1,
};
static struct pm8xxx_mpp_platform_data
apq8064_pm8821_mpp_pdata __devinitdata = {
.mpp_base = PM8821_MPP_PM_TO_SYS(1),
};
static struct pm8821_platform_data
apq8064_pm8821_platform_data __devinitdata = {
.irq_pdata = &apq8064_pm8821_irq_pdata,
.mpp_pdata = &apq8064_pm8821_mpp_pdata,
};
static struct msm_ssbi_platform_data apq8064_ssbi_pm8921_pdata __devinitdata = {
.controller_type = MSM_SBI_CTRL_PMIC_ARBITER,
.slave = {
.name = "pm8921-core",
.platform_data = &apq8064_pm8921_platform_data,
},
};
static struct msm_ssbi_platform_data apq8064_ssbi_pm8821_pdata __devinitdata = {
.controller_type = MSM_SBI_CTRL_PMIC_ARBITER,
.slave = {
.name = "pm8821-core",
.platform_data = &apq8064_pm8821_platform_data,
},
};
void __init apq8064_init_pmic(void)
{
pmic_reset_irq = PM8921_IRQ_BASE + PM8921_RESOUT_IRQ;
apq8064_device_ssbi_pmic1.dev.platform_data =
&apq8064_ssbi_pm8921_pdata;
apq8064_device_ssbi_pmic2.dev.platform_data =
&apq8064_ssbi_pm8821_pdata;
apq8064_pm8921_platform_data.num_regulators =
msm8064_pm8921_regulator_pdata_len;
if (machine_is_apq8064_rumi3()) {
apq8064_pm8921_irq_pdata.devirq = 0;
apq8064_pm8821_irq_pdata.devirq = 0;
}
}
static struct gpio_keys_button fusion3_keys[] = {
{
.code = KEY_VOLUMEUP,
.gpio = PM8921_GPIO_PM_TO_SYS(29),
.desc = "volume_up_key",
.active_low = 1,
.type = EV_KEY,
.wakeup = 1,
.debounce_interval = 15,
},
{
.code = KEY_VOLUMEDOWN,
.gpio = PM8921_GPIO_PM_TO_SYS(38),
.desc = "volume_down_key",
.active_low = 1,
.type = EV_KEY,
.wakeup = 1,
.debounce_interval = 15,
},
};
static struct gpio_keys_platform_data fusion3_keys_data = {
.buttons = fusion3_keys,
.nbuttons = ARRAY_SIZE(fusion3_keys),
};
static struct platform_device fusion3_kp_pdev = {
.name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &fusion3_keys_data,
},
};
#define GPIO_SW_SIM_DETECTION 1
static struct gpio_event_direct_entry gpio_sw_gpio_map[] = {
{PM8921_GPIO_PM_TO_SYS(GPIO_SW_SIM_DETECTION), SW_JACK_PHYSICAL_INSERT},
};
static struct gpio_event_input_info gpio_sw_gpio_info = {
.info.func = gpio_event_input_func,
.info.no_suspend = true,
.flags = GPIOEDF_ACTIVE_HIGH,
.type = EV_SW,
.keymap = gpio_sw_gpio_map,
.keymap_size = ARRAY_SIZE(gpio_sw_gpio_map),
.debounce_time.tv64 = 100 * NSEC_PER_MSEC,
};
static struct gpio_event_info *pmic_keypad_info[] = {
&gpio_sw_gpio_info.info,
};
struct gpio_event_platform_data pmic_keypad_data = {
.name = "sim-detection",
.info = pmic_keypad_info,
.info_count = ARRAY_SIZE(pmic_keypad_info),
};
static struct platform_device pmic_keypad_device = {
.name = GPIO_EVENT_DEV_NAME,
.id = 1,
.dev = {.platform_data = &pmic_keypad_data},
};
static int __init input_devices_init(void)
{
platform_device_register(&fusion3_kp_pdev);
platform_device_register(&pmic_keypad_device);
return 0;
}
static void __exit input_devices_exit(void)
{
}
module_init(input_devices_init);
module_exit(input_devices_exit);
| gpl-2.0 |
aldencolerain/mc2kernel | toolchain/share/doc/arm-marvell-linux-gnueabi/html/libc/Other-Special.html | 4839 | <html lang="en">
<head>
<title>Other Special - The GNU C Library</title>
<meta http-equiv="Content-Type" content="text/html">
<meta name="description" content="The GNU C Library">
<meta name="generator" content="makeinfo 4.13">
<link title="Top" rel="start" href="index.html#Top">
<link rel="up" href="Special-Characters.html#Special-Characters" title="Special Characters">
<link rel="prev" href="Start_002fStop-Characters.html#Start_002fStop-Characters" title="Start/Stop Characters">
<link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
<!--
This file documents the GNU C library.
This is Edition 0.13, last updated 2011-07-19,
of `The GNU C Library Reference Manual', for version
2.14 (EGLIBC).
Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2001, 2002,
2003, 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.3 or
any later version published by the Free Software Foundation; with the
Invariant Sections being ``Free Software Needs Free Documentation''
and ``GNU Lesser General Public License'', the Front-Cover texts being
``A GNU Manual'', and with the Back-Cover Texts as in (a) below. A
copy of the license is included in the section entitled "GNU Free
Documentation License".
(a) The FSF's Back-Cover Text is: ``You have the freedom to
copy and modify this GNU manual. Buying copies from the FSF
supports it in developing GNU and promoting software freedom.''-->
<meta http-equiv="Content-Style-Type" content="text/css">
<style type="text/css"><!--
pre.display { font-family:inherit }
pre.format { font-family:inherit }
pre.smalldisplay { font-family:inherit; font-size:smaller }
pre.smallformat { font-family:inherit; font-size:smaller }
pre.smallexample { font-size:smaller }
pre.smalllisp { font-size:smaller }
span.sc { font-variant:small-caps }
span.roman { font-family:serif; font-weight:normal; }
span.sansserif { font-family:sans-serif; font-weight:normal; }
--></style>
</head>
<body>
<div class="node">
<a name="Other-Special"></a>
<p>
Previous: <a rel="previous" accesskey="p" href="Start_002fStop-Characters.html#Start_002fStop-Characters">Start/Stop Characters</a>,
Up: <a rel="up" accesskey="u" href="Special-Characters.html#Special-Characters">Special Characters</a>
<hr>
</div>
<h5 class="subsubsection">17.4.9.4 Other Special Characters</h5>
<p>These special characters exist only in BSD systems and the GNU system.
<!-- termios.h -->
<!-- BSD -->
<div class="defun">
— Macro: int <b>VLNEXT</b><var><a name="index-VLNEXT-1985"></a></var><br>
<blockquote><p><a name="index-LNEXT-character-1986"></a>This is the subscript for the LNEXT character in the special control
character array. <var>termios</var><code>.c_cc[VLNEXT]</code> holds the character
itself.
<p>The LNEXT character is recognized only when <code>IEXTEN</code> is set, but in
both canonical and noncanonical mode. It disables any special
significance of the next character the user types. Even if the
character would normally perform some editing function or generate a
signal, it is read as a plain character. This is the analogue of the
<kbd>C-q</kbd> command in Emacs. “LNEXT” stands for “literal next.”
<p>The LNEXT character is usually <kbd>C-v</kbd>.
</p></blockquote></div>
<!-- termios.h -->
<!-- BSD -->
<div class="defun">
— Macro: int <b>VDISCARD</b><var><a name="index-VDISCARD-1987"></a></var><br>
<blockquote><p><a name="index-DISCARD-character-1988"></a>This is the subscript for the DISCARD character in the special control
character array. <var>termios</var><code>.c_cc[VDISCARD]</code> holds the character
itself.
<p>The DISCARD character is recognized only when <code>IEXTEN</code> is set, but
in both canonical and noncanonical mode. Its effect is to toggle the
discard-output flag. When this flag is set, all program output is
discarded. Setting the flag also discards all output currently in the
output buffer. Typing any other character resets the flag.
</p></blockquote></div>
<!-- termios.h -->
<!-- BSD -->
<div class="defun">
— Macro: int <b>VSTATUS</b><var><a name="index-VSTATUS-1989"></a></var><br>
<blockquote><p><a name="index-STATUS-character-1990"></a>This is the subscript for the STATUS character in the special control
character array. <var>termios</var><code>.c_cc[VSTATUS]</code> holds the character
itself.
<p>The STATUS character's effect is to print out a status message about how
the current process is running.
<p>The STATUS character is recognized only in canonical mode, and only if
<code>NOKERNINFO</code> is not set.
</p></blockquote></div>
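<p>As a brief illustration (a sketch that is not part of the manual's own
examples), a program on a BSD or GNU system might inspect one of these
slots with <code>tcgetattr</code>; the example assumes <code>VLNEXT</code>
is defined and omits error handling.
<pre class="smallexample">     #include &lt;termios.h&gt;
     #include &lt;unistd.h&gt;
     #include &lt;stdio.h&gt;

     int
     main (void)
     {
       struct termios t;

       if (tcgetattr (STDIN_FILENO, &amp;t) == 0)
         printf ("LNEXT is character code %d\n", (int) t.c_cc[VLNEXT]);
       return 0;
     }
</pre>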
</body></html>
| gpl-2.0 |
bgat/linux-udoo | backports/drivers/media/platform/marvell-ccic/mcam-core.h | 11628 | /*
* Marvell camera core structures.
*
* Copyright 2011 Jonathan Corbet [email protected]
*/
#ifndef _MCAM_CORE_H
#define _MCAM_CORE_H
#include <linux/list.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-core.h>
/*
* Create our own symbols for the supported buffer modes, but, for now,
* base them entirely on which videobuf2 options have been selected.
*/
#if IS_ENABLED(CONFIG_BACKPORT_VIDEOBUF2_VMALLOC)
#define MCAM_MODE_VMALLOC 1
#endif
#if IS_ENABLED(CONFIG_BACKPORT_VIDEOBUF2_DMA_CONTIG)
#define MCAM_MODE_DMA_CONTIG 1
#endif
#if IS_ENABLED(CONFIG_BACKPORT_VIDEOBUF2_DMA_SG)
#define MCAM_MODE_DMA_SG 1
#endif
#if !defined(MCAM_MODE_VMALLOC) && !defined(MCAM_MODE_DMA_CONTIG) && \
!defined(MCAM_MODE_DMA_SG)
#error One of the videobuf buffer modes must be selected in the config
#endif
enum mcam_state {
S_NOTREADY, /* Not yet initialized */
S_IDLE, /* Just hanging around */
S_FLAKED, /* Some sort of problem */
S_STREAMING, /* Streaming data */
S_BUFWAIT /* streaming requested but no buffers yet */
};
#define MAX_DMA_BUFS 3
/*
* Different platforms work best with different buffer modes, so we
* let the platform pick.
*/
enum mcam_buffer_mode {
B_vmalloc = 0,
B_DMA_contig = 1,
B_DMA_sg = 2
};
enum mcam_chip_id {
MCAM_CAFE,
MCAM_ARMADA610,
};
/*
* Is a given buffer mode supported by the current kernel configuration?
*/
static inline int mcam_buffer_mode_supported(enum mcam_buffer_mode mode)
{
switch (mode) {
#ifdef MCAM_MODE_VMALLOC
case B_vmalloc:
#endif
#ifdef MCAM_MODE_DMA_CONTIG
case B_DMA_contig:
#endif
#ifdef MCAM_MODE_DMA_SG
case B_DMA_sg:
#endif
return 1;
default:
return 0;
}
}
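/*
 * Illustrative sketch (not from the original driver): platform code
 * could use the helper above to fall back to vmalloc mode when its
 * preferred buffer mode is not compiled in, e.g.:
 *
 *	cam->buffer_mode = B_DMA_contig;
 *	if (!mcam_buffer_mode_supported(cam->buffer_mode))
 *		cam->buffer_mode = B_vmalloc;
 */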
/*
* Basic frame states
*/
struct mcam_frame_state {
unsigned int frames;
unsigned int singles;
unsigned int delivered;
};
#define NR_MCAM_CLK 3
/*
* A description of one of our devices.
* Locking: controlled by s_mutex. Certain fields, however, require
* the dev_lock spinlock; they are marked as such by comments.
* dev_lock is also required for access to device registers.
*/
struct mcam_camera {
/*
* These fields should be set by the platform code prior to
* calling mcam_register().
*/
struct i2c_adapter *i2c_adapter;
unsigned char __iomem *regs;
unsigned regs_size; /* size in bytes of the register space */
spinlock_t dev_lock;
struct device *dev; /* For messages, dma alloc */
enum mcam_chip_id chip_id;
short int clock_speed; /* Sensor clock speed, default 30 */
	short int use_smbus; /* SMBus or straight I2C? */
enum mcam_buffer_mode buffer_mode;
int mclk_min; /* The minimal value of mclk */
int mclk_src; /* which clock source the mclk derives from */
int mclk_div; /* Clock Divider Value for MCLK */
int ccic_id;
enum v4l2_mbus_type bus_type;
/* MIPI support */
/* The dphy config value, allocated in board file
* dphy[0]: DPHY3
* dphy[1]: DPHY5
* dphy[2]: DPHY6
*/
int *dphy;
	bool mipi_enabled; /* whether MIPI is already enabled */
int lane; /* lane number */
/* clock tree support */
struct clk *clk[NR_MCAM_CLK];
/*
* Callbacks from the core to the platform code.
*/
int (*plat_power_up) (struct mcam_camera *cam);
void (*plat_power_down) (struct mcam_camera *cam);
void (*calc_dphy) (struct mcam_camera *cam);
void (*ctlr_reset) (struct mcam_camera *cam);
/*
* Everything below here is private to the mcam core and
* should not be touched by the platform code.
*/
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
enum mcam_state state;
unsigned long flags; /* Buffer status, mainly (dev_lock) */
int users; /* How many open FDs */
struct mcam_frame_state frame_state; /* Frame state counter */
/*
* Subsystem structures.
*/
struct video_device vdev;
struct v4l2_subdev *sensor;
unsigned short sensor_addr;
/* Videobuf2 stuff */
struct vb2_queue vb_queue;
struct list_head buffers; /* Available frames */
unsigned int nbufs; /* How many are alloc'd */
int next_buf; /* Next to consume (dev_lock) */
/* DMA buffers - vmalloc mode */
#ifdef MCAM_MODE_VMALLOC
unsigned int dma_buf_size; /* allocated size */
void *dma_bufs[MAX_DMA_BUFS]; /* Internal buffer addresses */
dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
struct tasklet_struct s_tasklet;
#endif
unsigned int sequence; /* Frame sequence number */
unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual bufs */
/* DMA buffers - DMA modes */
struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
struct vb2_alloc_ctx *vb_alloc_ctx;
struct vb2_alloc_ctx *vb_alloc_ctx_sg;
/* Mode-specific ops, set at open time */
void (*dma_setup)(struct mcam_camera *cam);
void (*frame_complete)(struct mcam_camera *cam, int frame);
/* Current operating parameters */
struct v4l2_pix_format pix_format;
u32 mbus_code;
/* Locks */
struct mutex s_mutex; /* Access to this structure */
};
/*
* Register I/O functions. These are here because the platform code
* may legitimately need to mess with the register space.
*/
/*
* Device register I/O
*/
static inline void mcam_reg_write(struct mcam_camera *cam, unsigned int reg,
unsigned int val)
{
iowrite32(val, cam->regs + reg);
}
static inline unsigned int mcam_reg_read(struct mcam_camera *cam,
unsigned int reg)
{
return ioread32(cam->regs + reg);
}
static inline void mcam_reg_write_mask(struct mcam_camera *cam, unsigned int reg,
unsigned int val, unsigned int mask)
{
unsigned int v = mcam_reg_read(cam, reg);
v = (v & ~mask) | (val & mask);
mcam_reg_write(cam, reg, v);
}
static inline void mcam_reg_clear_bit(struct mcam_camera *cam,
unsigned int reg, unsigned int val)
{
mcam_reg_write_mask(cam, reg, 0, val);
}
static inline void mcam_reg_set_bit(struct mcam_camera *cam,
unsigned int reg, unsigned int val)
{
mcam_reg_write_mask(cam, reg, val, val);
}
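/*
 * Usage sketch (illustrative only): per the locking notes on struct
 * mcam_camera above, dev_lock must be held for register access.
 * REG_CTRL0 and C0_ENABLE are defined below; the surrounding code is
 * hypothetical.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&cam->dev_lock, flags);
 *	mcam_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
 *	spin_unlock_irqrestore(&cam->dev_lock, flags);
 */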
/*
* Functions for use by platform code.
*/
int mccic_register(struct mcam_camera *cam);
int mccic_irq(struct mcam_camera *cam, unsigned int irqs);
void mccic_shutdown(struct mcam_camera *cam);
#ifdef CONFIG_PM
void mccic_suspend(struct mcam_camera *cam);
int mccic_resume(struct mcam_camera *cam);
#endif
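/*
 * Illustrative platform glue (sketch only; everything outside this
 * header is hypothetical): an interrupt handler would typically read
 * the IRQ status register under dev_lock and hand the bits to
 * mccic_irq().
 *
 *	static irqreturn_t my_ccic_irq(int irq, void *data)
 *	{
 *		struct mcam_camera *cam = data;
 *		unsigned int irqs;
 *
 *		spin_lock(&cam->dev_lock);
 *		irqs = mcam_reg_read(cam, REG_IRQSTAT);
 *		spin_unlock(&cam->dev_lock);
 *		return mccic_irq(cam, irqs) ? IRQ_HANDLED : IRQ_NONE;
 *	}
 */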
/*
* Register definitions for the m88alp01 camera interface. Offsets in bytes
* as given in the spec.
*/
#define REG_Y0BAR 0x00
#define REG_Y1BAR 0x04
#define REG_Y2BAR 0x08
#define REG_U0BAR 0x0c
#define REG_U1BAR 0x10
#define REG_U2BAR 0x14
#define REG_V0BAR 0x18
#define REG_V1BAR 0x1C
#define REG_V2BAR 0x20
/*
* register definitions for MIPI support
*/
#define REG_CSI2_CTRL0 0x100
#define CSI2_C0_MIPI_EN (0x1 << 0)
#define CSI2_C0_ACT_LANE(n) ((n-1) << 1)
#define REG_CSI2_DPHY3 0x12c
#define REG_CSI2_DPHY5 0x134
#define REG_CSI2_DPHY6 0x138
/* ... */
#define REG_IMGPITCH 0x24 /* Image pitch register */
#define IMGP_YP_SHFT 2 /* Y pitch params */
#define IMGP_YP_MASK 0x00003ffc /* Y pitch field */
#define IMGP_UVP_SHFT 18 /* UV pitch (planar) */
#define IMGP_UVP_MASK 0x3ffc0000
#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
#define IRQ_EOF0 0x00000001 /* End of frame 0 */
#define IRQ_EOF1 0x00000002 /* End of frame 1 */
#define IRQ_EOF2 0x00000004 /* End of frame 2 */
#define IRQ_SOF0 0x00000008 /* Start of frame 0 */
#define IRQ_SOF1 0x00000010 /* Start of frame 1 */
#define IRQ_SOF2 0x00000020 /* Start of frame 2 */
#define IRQ_OVERFLOW 0x00000040 /* FIFO overflow */
#define IRQ_TWSIW 0x00010000 /* TWSI (smbus) write */
#define IRQ_TWSIR 0x00020000 /* TWSI read */
#define IRQ_TWSIE 0x00040000 /* TWSI error */
#define TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
#define FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
#define ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
#define REG_IRQSTAT 0x30 /* IRQ status / clear */
#define REG_IMGSIZE 0x34 /* Image size */
#define IMGSZ_V_MASK 0x1fff0000
#define IMGSZ_V_SHIFT 16
#define IMGSZ_H_MASK 0x00003fff
#define	REG_IMGOFFSET	0x38	/* Image offset */
#define REG_CTRL0 0x3c /* Control 0 */
#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
/* Mask for all the format bits */
#define C0_DF_MASK 0x00fffffc /* Bits 2-23 */
/* RGB ordering */
#define C0_RGB4_RGBX 0x00000000
#define C0_RGB4_XRGB 0x00000004
#define C0_RGB4_BGRX 0x00000008
#define C0_RGB4_XBGR 0x0000000c
#define C0_RGB5_RGGB 0x00000000
#define C0_RGB5_GRBG 0x00000004
#define C0_RGB5_GBRG 0x00000008
#define C0_RGB5_BGGR 0x0000000c
/* Spec has two fields for DIN and DOUT, but they must match, so
combine them here. */
#define C0_DF_YUV 0x00000000 /* Data is YUV */
#define C0_DF_RGB 0x000000a0 /* ... RGB */
#define C0_DF_BAYER 0x00000140 /* ... Bayer */
/* 8-8-8 must be missing from the below - ask */
#define C0_RGBF_565 0x00000000
#define C0_RGBF_444 0x00000800
#define C0_RGB_BGR 0x00001000 /* Blue comes first */
#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed */
#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar */
/* Think that 420 packed must be 111 - ask */
#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
#define C0_YUVE_NOSWAP 0x00000000 /* no bytes swapping */
#define C0_YUVE_SWAP13 0x00010000 /* swap byte 1 and 3 */
#define C0_YUVE_SWAP24 0x00020000 /* swap byte 2 and 4 */
#define C0_YUVE_SWAP1324 0x00030000 /* swap bytes 1&3 and 2&4 */
/* Bayer bits 18,19 if needed */
#define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
#define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */
#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
/* SIFMODE */
#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
#define C0_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
/* Bits below C1_444ALPHA are not present in Cafe */
#define REG_CTRL1 0x40 /* Control 1 */
#define C1_CLKGATE 0x00000001 /* Sensor clock gate */
#define C1_DESC_ENA 0x00000100 /* DMA descriptor enable */
#define C1_DESC_3WORD 0x00000200 /* Three-word descriptors used */
#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
#define C1_ALPHA_SHFT 20
#define C1_DMAB32 0x00000000 /* 32-byte DMA burst */
#define C1_DMAB16 0x02000000 /* 16-byte DMA burst */
#define C1_DMAB64 0x04000000 /* 64-byte DMA burst */
#define C1_DMAB_MASK 0x06000000
#define C1_TWOBUFS 0x08000000 /* Use only two DMA buffers */
#define C1_PWRDWN 0x10000000 /* Power down */
#define REG_CLKCTRL 0x88 /* Clock control */
#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
/* This appears to be a Cafe-only register */
#define REG_UBAR 0xc4 /* Upper base address register */
/* Armada 610 DMA descriptor registers */
#define REG_DMA_DESC_Y 0x200
#define REG_DMA_DESC_U 0x204
#define REG_DMA_DESC_V 0x208
#define REG_DESC_LEN_Y 0x20c /* Lengths are in bytes */
#define REG_DESC_LEN_U 0x210
#define REG_DESC_LEN_V 0x214
/*
* Useful stuff that probably belongs somewhere global.
*/
#define VGA_WIDTH 640
#define VGA_HEIGHT 480
#endif /* _MCAM_CORE_H */
| gpl-2.0 |
paericksen/broadworks-ocip | src/BroadworksOCIP/api/Rel_17_sp4_1_197_OCISchemaAS/OCISchemaDataTypes/ExtendedFileResourceSelection.php | 703 | <?php
/**
* This file is part of http://github.com/LukeBeer/BroadworksOCIP
*
* (c) 2013-2015 Luke Berezynskyj <[email protected]>
*/
namespace BroadworksOCIP\api\Rel_17_sp4_1_197_OCISchemaAS\OCISchemaDataTypes;
use BroadworksOCIP\Builder\Types\SimpleType;
use BroadworksOCIP\Builder\Restrictions\Enumeration;
/**
* Choices for extended file resource usage.
*/
class ExtendedFileResourceSelection extends SimpleType
{
public $elementName = "ExtendedFileResourceSelection";
public function __construct($value) {
$this->setElementValue($value);
$this->addRestriction(new Enumeration([
'File',
'URL',
'Default'
]));
}
}
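
/*
 * Illustrative usage (not part of the library source): the constructor
 * stores the given value and restricts it to the enumeration above.
 *
 *     $selection = new ExtendedFileResourceSelection('URL');
 */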
| gpl-2.0 |
ev3dev/u-boot | board/freescale/common/arm_sleep.c | 2754 | // SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2014 Freescale Semiconductor, Inc.
*/
#include <common.h>
#include <asm/io.h>
#ifndef CONFIG_ARMV7_NONSEC
#error " Deep sleep needs non-secure mode support. "
#else
#include <asm/secure.h>
#endif
#include <asm/armv7.h>
#if defined(CONFIG_ARCH_LS1021A)
#include <asm/arch/immap_ls102xa.h>
#endif
#include "sleep.h"
#ifdef CONFIG_U_QE
#include <fsl_qe.h>
#endif
DECLARE_GLOBAL_DATA_PTR;
void __weak board_mem_sleep_setup(void)
{
}
void __weak board_sleep_prepare(void)
{
}
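/*
 * board_mem_sleep_setup() and board_sleep_prepare() above are weak
 * stubs so that board files can override them.  A hypothetical board
 * override (sketch only, not part of this file) might look like:
 *
 *	void board_sleep_prepare(void)
 *	{
 *		// e.g. restore board-specific pinmux or FPGA state
 *	}
 */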
bool is_warm_boot(void)
{
struct ccsr_gur __iomem *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
if (in_be32(&gur->crstsr) & DCFG_CCSR_CRSTSR_WDRFR)
return 1;
return 0;
}
void fsl_dp_disable_console(void)
{
gd->flags |= GD_FLG_SILENT | GD_FLG_DISABLE_CONSOLE;
}
/*
 * When waking up from deep sleep, the first 128 bytes of DDR are
 * used for DDR training, which corrupts the data held there.
 * This function restores that data.
 */
static void dp_ddr_restore(void)
{
u64 *src, *dst;
int i;
struct ccsr_scfg __iomem *scfg = (void *)CONFIG_SYS_FSL_SCFG_ADDR;
	/* get the address of ddr data from SPARECR3 */
src = (u64 *)in_le32(&scfg->sparecr[2]);
dst = (u64 *)CONFIG_SYS_SDRAM_BASE;
for (i = 0; i < DDR_BUFF_LEN / 8; i++)
*dst++ = *src++;
}
#if defined(CONFIG_ARMV7_PSCI) && defined(CONFIG_ARCH_LS1021A)
void ls1_psci_resume_fixup(void)
{
u32 tmp;
struct ccsr_scfg __iomem *scfg = (void *)CONFIG_SYS_FSL_SCFG_ADDR;
#ifdef QIXIS_BASE
void *qixis_base = (void *)QIXIS_BASE;
/* Pull on PCIe RST# */
out_8(qixis_base + QIXIS_RST_FORCE_3, 0);
/* disable deep sleep signals in FPGA */
tmp = in_8(qixis_base + QIXIS_PWR_CTL2);
tmp &= ~QIXIS_PWR_CTL2_PCTL;
out_8(qixis_base + QIXIS_PWR_CTL2, tmp);
#endif
/* Disable wakeup interrupt during deep sleep */
out_be32(&scfg->pmcintecr, 0);
/* Clear PMC interrupt status */
out_be32(&scfg->pmcintsr, 0xffffffff);
/* Disable Warm Device Reset */
tmp = in_be32(&scfg->dpslpcr);
tmp &= ~SCFG_DPSLPCR_WDRR_EN;
out_be32(&scfg->dpslpcr, tmp);
}
#endif
static void dp_resume_prepare(void)
{
dp_ddr_restore();
board_sleep_prepare();
armv7_init_nonsec();
#ifdef CONFIG_U_QE
u_qe_resume();
#endif
#if defined(CONFIG_ARMV7_PSCI) && defined(CONFIG_ARCH_LS1021A)
ls1_psci_resume_fixup();
#endif
}
int fsl_dp_resume(void)
{
u32 start_addr;
void (*kernel_resume)(void);
struct ccsr_scfg __iomem *scfg = (void *)CONFIG_SYS_FSL_SCFG_ADDR;
if (!is_warm_boot())
return 0;
dp_resume_prepare();
/* Get the entry address and jump to kernel */
start_addr = in_le32(&scfg->sparecr[3]);
debug("Entry address is 0x%08x\n", start_addr);
kernel_resume = (void (*)(void))start_addr;
secure_ram_addr(_do_nonsec_entry)(kernel_resume, 0, 0, 0);
return 0;
}
| gpl-2.0 |
steilerDev/WhatToStudy | Source/src/de/steilerdev/whatToStudy/Exception/WhatToStudyException.java | 4019 | /**
* Copyright (C) 2015 Frank Steiler <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package de.steilerdev.whatToStudy.Exception;
/**
 * This class is an application-specific exception, thrown and handled whenever an application-specific error occurs.
 * In general, other exceptions are wrapped inside this exception, so the program only needs to handle this single exception type.
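 * <p>
 * A typical (illustrative) use is to wrap a lower-level exception; the
 * {@code parseCSV} helper shown here is hypothetical:
 * <pre>{@code
 * try {
 *     parseCSV(fileName);
 * } catch (IOException e) {
 *     throw new WhatToStudyException("Unable to parse " + fileName, e);
 * }
 * }</pre>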
*/
public class WhatToStudyException extends Exception
{
/**
* Constructs a new exception with {@code null} as its detail message. The cause is not initialized, and may
* subsequently be initialized by a call to {@link #initCause}.
*/
public WhatToStudyException()
{
super();
}
/**
* Constructs a new exception with the specified detail message. The cause is not initialized, and may subsequently
* be initialized by a call to {@link #initCause}.
*
* @param message the detail message. The detail message is saved for later retrieval by the {@link #getMessage()}
* method.
*/
public WhatToStudyException(String message)
{
super(message);
}
/**
* Constructs a new exception with the specified detail message and cause. <p>Note that the detail message
* associated with {@code cause} is <i>not</i> automatically incorporated in this exception's detail message.
*
* @param message the detail message (which is saved for later retrieval by the {@link #getMessage()} method).
* @param cause the cause (which is saved for later retrieval by the {@link #getCause()} method). (A
* <tt>null</tt> value is permitted, and indicates that the cause is nonexistent or unknown.)
* @since 1.4
*/
public WhatToStudyException(String message, Throwable cause)
{
super(message, cause);
}
/**
* Constructs a new exception with the specified cause and a detail message of <tt>(cause==null ? null :
* cause.toString())</tt> (which typically contains the class and detail message of <tt>cause</tt>). This
* constructor is useful for exceptions that are little more than wrappers for other throwables (for example, {@link
* java.security.PrivilegedActionException}).
*
* @param cause the cause (which is saved for later retrieval by the {@link #getCause()} method). (A <tt>null</tt>
* value is permitted, and indicates that the cause is nonexistent or unknown.)
* @since 1.4
*/
public WhatToStudyException(Throwable cause)
{
super(cause);
}
/**
* Constructs a new exception with the specified detail message, cause, suppression enabled or disabled, and
* writable stack trace enabled or disabled.
*
* @param message the detail message.
* @param cause the cause. (A {@code null} value is permitted, and indicates that the cause is
* nonexistent or unknown.)
* @param enableSuppression whether or not suppression is enabled or disabled
* @param writableStackTrace whether or not the stack trace should be writable
* @since 1.7
*/
protected WhatToStudyException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace)
{
super(message, cause, enableSuppression, writableStackTrace);
}
}
| gpl-2.0 |
venkatarajasekhar/wzgraphicsmods | tools/qwzm/wzmutils.c | 16210 | /*
This file is part of Warzone 2100.
Copyright (C) 2007-2009 Warzone Resurrection Project
Warzone 2100 is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Warzone 2100 is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Warzone 2100; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "wzmutils.h"
#include <png.h>
#if (defined(WIN64) || defined(_WIN64) || defined(__WIN64__) || defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__))
# define strcasecmp _stricmp
# define strncasecmp _strnicmp
# define inline __inline
# define alloca _alloca
# define fileno _fileno
#endif
void prepareModel(MODEL *psModel)
{
if (!psModel || !psModel->pixmap || psModel->pixmap->w <= 0 || psModel->pixmap->h <= 0 || !psModel->pixmap->pixels)
{
printf("Bad model passed to prepareModel!\n");
exit(EXIT_FAILURE);
}
glGenTextures(1, &psModel->texture);
glBindTexture(GL_TEXTURE_2D, psModel->texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, psModel->pixmap->w, psModel->pixmap->h, 0, GL_RGBA,
GL_UNSIGNED_BYTE, psModel->pixmap->pixels);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
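/*
 * Typical call sequence (illustrative sketch built from this file's
 * API; the file names are made up):
 *
 *	MODEL *psModel = readModel("tank.wzm", now);
 *	psModel->pixmap = readPixmap("texpages/tank.png");
 *	prepareModel(psModel);		// requires a current GL context
 *	drawModel(psModel, now, -1);	// a negative mesh draws all meshes
 *	freeModel(psModel);
 */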
// Linearly interpolate between vec1 and vec2: 'fraction' indicates how far
// we have moved toward vec2 (0.0 yields vec1, 1.0 yields vec2). The result
// is written into the 'result' vector.
static void interpolateVectors(Vector3f vec1, Vector3f vec2, Vector3f *result, double fraction)
{
result->x = vec1.x * (1.0 - fraction) + vec2.x * fraction;
result->y = vec1.y * (1.0 - fraction) + vec2.y * fraction;
result->z = vec1.z * (1.0 - fraction) + vec2.z * fraction;
}
static inline void drawMesh(MODEL *psModel, int now, int mesh)
{
MESH *psMesh = &psModel->mesh[mesh];
assert(psMesh);
if (psMesh->frameArray)
{
FRAME *psFrame = &psMesh->frameArray[psMesh->currentFrame];
FRAME *nextFrame = psFrame;
double fraction = 1.0f / (psFrame->timeSlice * 1000) * (now - psMesh->lastChange); // until next frame
Vector3f vec;
glPushMatrix(); // save matrix state
assert(psMesh->currentFrame < psMesh->frames);
if (psMesh->currentFrame == psMesh->frames - 1)
{
nextFrame = &psMesh->frameArray[0]; // wrap around
}
else
{
nextFrame = &psMesh->frameArray[psMesh->currentFrame + 1];
}
// Try to avoid crap drivers from taking down the entire system
assert(finitef(psFrame->translation.x) && finitef(psFrame->translation.y) && finitef(psFrame->translation.z));
assert(psFrame->rotation.x >= -360.0f && psFrame->rotation.y >= -360.0f && psFrame->rotation.z >= -360.0f);
assert(psFrame->rotation.x <= 360.0f && psFrame->rotation.y <= 360.0f && psFrame->rotation.z <= 360.0f);
// Translate
interpolateVectors(psFrame->translation, nextFrame->translation, &vec, fraction);
glTranslatef(vec.x, vec.z, vec.y); // z and y flipped
// Rotate
interpolateVectors(psFrame->rotation, nextFrame->rotation, &vec, fraction);
glRotatef(vec.x, 1, 0, 0);
glRotatef(vec.z, 0, 1, 0); // z and y flipped again...
glRotatef(vec.y, 0, 0, 1);
// Morph
if (!psMesh->teamColours)
{
psMesh->currentTextureArray = psFrame->textureArray;
}
}
glTexCoordPointer(2, GL_FLOAT, 0, psMesh->textureArray[psMesh->currentTextureArray]);
glVertexPointer(3, GL_FLOAT, 0, psMesh->vertexArray);
glDrawElements(GL_TRIANGLES, psMesh->faces * 3, GL_UNSIGNED_INT, psMesh->indexArray);
if (psMesh->frameArray)
{
glPopMatrix(); // restore position for next mesh
}
}
void drawModel(MODEL *psModel, int now, int selectedMesh)
{
int i;
assert(psModel && psModel->mesh);
// Update animation frames
for (i = 0; i < psModel->meshes; i++)
{
MESH *psMesh = &psModel->mesh[i];
FRAME *psFrame;
if (!psMesh->frameArray)
{
continue;
}
psFrame = &psMesh->frameArray[psMesh->currentFrame];
assert(psMesh->currentFrame < psMesh->frames && psMesh->currentFrame >= 0);
if (psFrame->timeSlice != 0 && psFrame->timeSlice * 1000 + psMesh->lastChange < now)
{
psMesh->lastChange = now;
psMesh->currentFrame++;
if (psMesh->currentFrame >= psMesh->frames)
{
psMesh->currentFrame = 0; // loop
}
}
}
// Draw model
glColor3f(1.0f, 1.0f, 1.0f);
if (selectedMesh >= 0 && selectedMesh < psModel->meshes)
{
drawMesh(psModel, now, selectedMesh);
}
else
{
for (i = 0; i < psModel->meshes; i++)
{
drawMesh(psModel, now, i);
}
}
}
MODEL *createModel(int meshes, int now)
{
MODEL *psModel = malloc(sizeof(MODEL));
int i;
psModel->meshes = meshes;
psModel->mesh = malloc(sizeof(MESH) * meshes);
psModel->texPath[0] = '\0';
psModel->pixmap = NULL;
for (i = 0; i < meshes; i++)
{
MESH *psMesh = &psModel->mesh[i];
int j;
psMesh->faces = 0;
psMesh->frames = 0;
psMesh->vertices = 0;
psMesh->textureArrays = 0;
psMesh->connectors = 0;
psMesh->teamColours = false;
psMesh->vertexArray = NULL;
psMesh->indexArray = NULL;
psMesh->connectorArray = NULL;
for (j = 0; j < MAX_TEXARRAYS; j++)
{
psMesh->textureArray[j] = NULL;
}
psMesh->frameArray = NULL;
psMesh->currentFrame = 0;
psMesh->lastChange = now;
psMesh->currentTextureArray = 0;
}
return psModel;
}
PIXMAP *readPixmap(const char *filename)
{
PIXMAP *gfx;
png_structp pngp;
png_infop infop;
png_uint_32 width, height;
png_int_32 y, stride;
int bit_depth, color_type, interlace_type;
FILE *fp;
png_bytep *row_pointers;
const unsigned int sig_length = 8;
unsigned char header[8];
unsigned char *image_data;
unsigned int result;
if (PNG_LIBPNG_VER_MAJOR != 1 || PNG_LIBPNG_VER_MINOR < 2)
{
printf("libpng 1.2.6 or higher required!\n");
exit(EXIT_FAILURE);
}
if (!(fp = fopen(filename, "rb")))
{
printf("%s won't open!\n", filename);
exit(EXIT_FAILURE);
}
result = fread(header, 1, sig_length, fp);
if (result != sig_length)
{
printf("Bad file %s\n", filename);
exit(EXIT_FAILURE);
}
if (png_sig_cmp(header, 0, sig_length))
{
printf("%s is not a PNG file!\n", filename);
exit(EXIT_FAILURE);
}
if (!(pngp = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL)))
{
printf("Failed creating PNG struct reading %s.\n", filename);
exit(EXIT_FAILURE);
}
if (!(infop = png_create_info_struct(pngp)))
{
printf("Failed creating PNG info struct reading %s.\n", filename);
exit(EXIT_FAILURE);
}
if (setjmp(pngp->jmpbuf))
{
printf("Failed while reading PNG file: %s\n", filename);
exit(EXIT_FAILURE);
}
png_init_io(pngp, fp);
png_set_sig_bytes(pngp, sig_length);
png_read_info(pngp, infop);
/* Transformations to ensure we end up with 32bpp, 4 channel RGBA */
png_set_strip_16(pngp);
png_set_gray_to_rgb(pngp);
png_set_packing(pngp);
png_set_palette_to_rgb(pngp);
png_set_tRNS_to_alpha(pngp);
png_set_filler(pngp, 0xFF, PNG_FILLER_AFTER);
png_set_gray_1_2_4_to_8(pngp);
png_read_update_info(pngp, infop);
png_get_IHDR(pngp, infop, &width, &height, &bit_depth, &color_type, &interlace_type, NULL, NULL);
stride = png_get_rowbytes(pngp, infop);
row_pointers = malloc(sizeof(png_bytep) * height);
image_data = malloc(height * width * 4);
for (y = 0; y < (int)height; y++)
{
row_pointers[y] = image_data + (y * width * 4);
}
png_read_image(pngp, row_pointers);
png_read_end(pngp, infop);
fclose(fp);
gfx = malloc(sizeof(*gfx));
gfx->w = width;
gfx->h = height;
gfx->pixels = (char *)image_data;
png_destroy_read_struct(&pngp, &infop, NULL);
free(row_pointers);
return gfx;
}
int saveModel(const char *filename, MODEL *psModel)
{
FILE *fp = fopen(filename, "w");
int mesh;
if (!fp)
{
fprintf(stderr, "Cannot open \"%s\" for writing: %s", filename, strerror(errno));
return -1;
}
fprintf(fp, "WZM %d\n", 1);
fprintf(fp, "TEXTURE %s\n", psModel->texPath);
fprintf(fp, "MESHES %d", psModel->meshes);
for (mesh = 0; mesh < psModel->meshes; mesh++)
{
MESH *psMesh = &psModel->mesh[mesh];
int j;
fprintf(fp, "\nMESH %d\n", mesh);
fprintf(fp, "TEAMCOLOURS %d\n", psMesh->teamColours);
fprintf(fp, "VERTICES %d\n", psMesh->vertices);
fprintf(fp, "FACES %d\n", psMesh->faces);
fprintf(fp, "VERTEXARRAY\n");
for (j = 0; j < psMesh->vertices; j++)
{
GLfloat *v = &psMesh->vertexArray[j * 3];
fprintf(fp, "\t%g %g %g\n", v[0], v[1], v[2]);
}
fprintf(fp, "TEXTUREARRAYS %d", psMesh->textureArrays);
for (j = 0; j < psMesh->textureArrays; j++)
{
int k;
fprintf(fp, "\nTEXTUREARRAY %d", j);
for (k = 0; k < psMesh->vertices; k++)
{
GLfloat *v = &psMesh->textureArray[j][k * 2];
fprintf(fp, "\n\t%g %g", v[0], v[1]);
}
}
fprintf(fp, "\nINDEXARRAY");
for (j = 0; j < psMesh->faces; j++)
{
GLuint *v = &psMesh->indexArray[j * 3];
fprintf(fp, "\n\t%u %u %u", v[0], v[1], v[2]);
}
fprintf(fp, "\nFRAMES %d", psMesh->frames);
// Read animation frames
for (j = 0; j < psMesh->frames; j++)
{
FRAME *psFrame = &psMesh->frameArray[j];
fprintf(fp, "\n\t%g %d %g %g %g %g %g %g", psFrame->timeSlice, psFrame->textureArray,
psFrame->translation.x, psFrame->translation.y, psFrame->translation.z,
psFrame->rotation.x, psFrame->rotation.y, psFrame->rotation.z);
}
fprintf(fp, "\nCONNECTORS %d", psMesh->connectors);
for (j = 0; j < psMesh->connectors; j++)
{
CONNECTOR *conn = &psMesh->connectorArray[j];
fprintf(fp, "\n\t%g %g %g 0", conn->pos.x, conn->pos.y, conn->pos.z);
}
}
fclose(fp);
return 0;
}
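/*
 * For reference, a minimal WZM version 1 file in the exact layout that
 * saveModel() emits and readModel() parses (the values are made up):
 *
 *	WZM 1
 *	TEXTURE page-7.png
 *	MESHES 1
 *	MESH 0
 *	TEAMCOLOURS 0
 *	VERTICES 3
 *	FACES 1
 *	VERTEXARRAY
 *		0 0 0
 *		1 0 0
 *		0 1 0
 *	TEXTUREARRAYS 1
 *	TEXTUREARRAY 0
 *		0 0
 *		1 0
 *		0 1
 *	INDEXARRAY
 *		0 1 2
 *	FRAMES 0
 *	CONNECTORS 0
 */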
MODEL *readModel(const char *filename, int now)
{
FILE *fp = fopen(filename, "r");
int num, x, meshes, mesh, version;
char s[200];
MODEL *psModel;
if (!fp)
{
fprintf(stderr, "Cannot open \"%s\" for reading: %s", filename, strerror(errno));
return NULL;
}
num = fscanf(fp, "WZM %d\n", &version);
if (num != 1)
{
fprintf(stderr, "Bad WZM file or wrong version: %s\n", filename);
fclose(fp);
return NULL;
}
if (version != 1 && version != 2)
{
fprintf(stderr, "Bad WZM version %d in %s\n", version, filename);
fclose(fp);
return NULL;
}
num = fscanf(fp, "TEXTURE %s\n", s);
if (num != 1)
{
fprintf(stderr, "Bad TEXTURE directive in %s\n", filename);
fclose(fp);
return NULL;
}
num = fscanf(fp, "MESHES %d", &meshes);
if (num != 1)
{
fprintf(stderr, "Bad MESHES directive in %s\n", filename);
fclose(fp);
return NULL;
}
psModel = createModel(meshes, now);
strcpy(psModel->texPath, s);
for (mesh = 0; mesh < meshes; mesh++)
{
MESH *psMesh = &psModel->mesh[mesh];
int j;
num = fscanf(fp, "\nMESH %s\n", s);
if (num != 1)
{
fprintf(stderr, "Bad MESH directive in %s, was \"%s\".\n", filename, s);
fclose(fp);
freeModel(psModel);
return NULL;
}
num = fscanf(fp, "TEAMCOLOURS %d\n", &x);
if (num != 1 || x > 1 || x < 0)
{
fprintf(stderr, "Bad TEAMCOLOURS directive in %s, mesh %d.\n", filename, mesh);
fclose(fp);
freeModel(psModel);
return NULL;
}
psMesh->teamColours = x;
num = fscanf(fp, "VERTICES %d\n", &x);
if (num != 1 || x < 0)
{
fprintf(stderr, "Bad VERTICES directive in %s, mesh %d.\n", filename, mesh);
fclose(fp);
freeModel(psModel);
return NULL;
}
psMesh->vertices = x;
psMesh->vertexArray = malloc(sizeof(GLfloat) * x * 3);
num = fscanf(fp, "FACES %d\n", &x);
if (num != 1)
{
fprintf(stderr, "Bad VERTICES directive in %s, mesh %d.\n", filename, mesh);
fclose(fp);
freeModel(psModel);
return NULL;
}
psMesh->faces = x;
psMesh->indexArray = malloc(sizeof(GLuint) * x * 3);
num = fscanf(fp, "VERTEXARRAY");
if (num == EOF)
{
fprintf(stderr, "No VERTEXARRAY directive in %s, mesh %d.\n", filename, mesh);
fclose(fp);
freeModel(psModel);
return NULL;
}
for (j = 0; j < psMesh->vertices; j++)
{
GLfloat *v = &psMesh->vertexArray[j * 3];
num = fscanf(fp, "%f %f %f\n", &v[0], &v[1], &v[2]);
if (num != 3)
{
fprintf(stderr, "Bad VERTEXARRAY entry mesh %d, number %d\n", mesh, j);
fclose(fp);
freeModel(psModel);
return NULL;
}
}
num = fscanf(fp, "TEXTUREARRAYS %d", &x);
if (num != 1 || x < 0)
{
fprintf(stderr, "Bad TEXTUREARRAYS directive in %s, mesh %d.\n", filename, mesh);
fclose(fp);
freeModel(psModel);
return NULL;
}
psMesh->textureArrays = x;
for (j = 0; j < psMesh->textureArrays; j++)
{
int k;
num = fscanf(fp, "\nTEXTUREARRAY %d", &x);
if (num != 1 || x < 0 || x != j)
{
fprintf(stderr, "Bad TEXTUREARRAY directive in %s, mesh %d, array %d.\n", filename, mesh, j);
fclose(fp);
freeModel(psModel);
return NULL;
}
psMesh->textureArray[j] = malloc(sizeof(GLfloat) * psMesh->vertices * 2);
for (k = 0; k < psMesh->vertices; k++)
{
GLfloat *v = &psMesh->textureArray[j][k * 2];
num = fscanf(fp, "\n%f %f", &v[0], &v[1]);
if (num != 2)
{
fprintf(stderr, "Bad TEXTUREARRAY entry mesh %d, array %d, number %d\n", mesh, j, k);
fclose(fp);
freeModel(psModel);
return NULL;
}
}
}
num = fscanf(fp, "\nINDEXARRAY");
if (num == EOF)
{
fprintf(stderr, "No INDEXARRAY directive in %s, mesh %d.\n", filename, mesh);
fclose(fp);
freeModel(psModel);
return NULL;
}
for (j = 0; j < psMesh->faces; j++)
{
GLuint *v = &psMesh->indexArray[j * 3];
num = fscanf(fp, "\n%u %u %u", &v[0], &v[1], &v[2]);
if (num != 3)
{
fprintf(stderr, "Bad INDEXARRAY entry in mesh %d, number %d\n", mesh, j);
fclose(fp);
freeModel(psModel);
return NULL;
}
}
// Read animation frames
num = fscanf(fp, "\nFRAMES %d", &psMesh->frames);
if (num != 1 || psMesh->frames < 0)
{
fprintf(stderr, "Bad FRAMES directive in mesh %d\n", mesh);
fclose(fp);
freeModel(psModel);
return NULL;
}
if (psMesh->frames)
{
psMesh->frameArray = malloc(sizeof(FRAME) * psMesh->frames);
}
for (j = 0; j < psMesh->frames; j++)
{
FRAME *psFrame = &psMesh->frameArray[j];
num = fscanf(fp, "\n%f %d %f %f %f %f %f %f", &psFrame->timeSlice, &psFrame->textureArray,
&psFrame->translation.x, &psFrame->translation.y, &psFrame->translation.z,
&psFrame->rotation.x, &psFrame->rotation.y, &psFrame->rotation.z);
if (num != 8)
{
fprintf(stderr, "Bad FRAMES entry in mesh %d, number %d\n", mesh, j);
fclose(fp);
freeModel(psModel);
return NULL;
}
}
// Read connectors
num = fscanf(fp, "\nCONNECTORS %d", &psMesh->connectors);
if (num != 1 || psMesh->connectors < 0)
{
fprintf(stderr, "Bad CONNECTORS directive in mesh %d\n", mesh);
fclose(fp);
freeModel(psModel);
return NULL;
}
if (psMesh->connectors)
{
psMesh->connectorArray = malloc(sizeof(CONNECTOR) * psMesh->connectors);
}
for (j = 0; j < psMesh->connectors; j++)
{
CONNECTOR *conn = &psMesh->connectorArray[j];
int angle, angler1, angler2;
if (version == 1)
{
num = fscanf(fp, "\n%f %f %f %d", &conn->pos.x, &conn->pos.y, &conn->pos.z, &conn->type);
}
else if (version == 2)
{
num = fscanf(fp, "\n%s %f %f %f %d %d %d", s, &conn->pos.x, &conn->pos.y, &conn->pos.z, &angle, &angler1, &angler2);
conn->type = 0; // TODO
}
			if ((version == 1 && num != 4) || (version == 2 && num != 7))
{
fprintf(stderr, "Bad CONNECTORS entry in mesh %d, number %d\n", mesh, j);
fclose(fp);
freeModel(psModel);
return NULL;
}
}
}
return psModel;
}
void freeModel(MODEL *psModel)
{
int i;
for (i = 0; i < psModel->meshes; i++)
{
MESH *psMesh = &psModel->mesh[i];
int j;
free(psMesh->vertexArray);
free(psMesh->indexArray);
for (j = 0; j < MAX_TEXARRAYS; j++)
{
free(psMesh->textureArray[j]);
}
		free(psMesh->frameArray);
		free(psMesh->connectorArray);
}
free(psModel->pixmap);
free(psModel->mesh);
free(psModel);
}
| gpl-2.0 |
adamwespiser/balapiller | SimpleIDE/Learn/Simple Libraries/Remote/libsirc/html/sirc_8h_source.html | 17142 | <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<title>sirc (SONY Infrared Remote Control) library: sirc.h Source File</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td style="padding-left: 0.5em;">
<div id="projectname">sirc (SONY Infrared Remote Control) library
 <span id="projectnumber">v0.86</span>
</div>
<div id="projectbrief">Decodes SONY IR remote signals</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.1.2 -->
<div id="navrow1" class="tabs">
<ul class="tablist">
<li><a href="index.html"><span>Main Page</span></a></li>
<li class="current"><a href="files.html"><span>Files</span></a></li>
</ul>
</div>
<div id="navrow2" class="tabs2">
<ul class="tablist">
<li><a href="files.html"><span>File List</span></a></li>
<li><a href="globals.html"><span>Globals</span></a></li>
</ul>
</div>
</div><!-- top -->
<div class="header">
<div class="headertitle">
<div class="title">sirc.h</div> </div>
</div><!--header-->
<div class="contents">
<a href="sirc_8h.html">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span> </div>
<div class="line"><a name="l00017"></a><span class="lineno"> 17</span> <span class="preprocessor">#ifndef SONY_REMOTE_H</span></div>
<div class="line"><a name="l00018"></a><span class="lineno"> 18</span> <span class="preprocessor"></span><span class="preprocessor">#define SONY_REMOTE_H</span></div>
<div class="line"><a name="l00019"></a><span class="lineno"> 19</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00020"></a><span class="lineno"> 20</span> <span class="preprocessor">#if defined(__cplusplus)</span></div>
<div class="line"><a name="l00021"></a><span class="lineno"> 21</span> <span class="preprocessor"></span><span class="keyword">extern</span> <span class="stringliteral">"C"</span> {</div>
<div class="line"><a name="l00022"></a><span class="lineno"> 22</span> <span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00023"></a><span class="lineno"> 23</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00024"></a><span class="lineno"> 24</span> </div>
<div class="line"><a name="l00028"></a><span class="lineno"> 28</span> <span class="preprocessor">#ifndef ENTER</span></div>
<div class="line"><a name="l00029"></a><span class="lineno"><a class="code" href="sirc_8h.html#af4bced5cf8ed55746d4b5d34f9a0fe39"> 29</a></span> <span class="preprocessor"></span><span class="preprocessor">#define ENTER 11</span></div>
<div class="line"><a name="l00030"></a><span class="lineno"> 30</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00031"></a><span class="lineno"> 31</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00035"></a><span class="lineno"> 35</span> <span class="preprocessor">#ifndef CH_UP</span></div>
<div class="line"><a name="l00036"></a><span class="lineno"><a class="code" href="sirc_8h.html#adb10e817f63e0c48ef941f4403a15ec4"> 36</a></span> <span class="preprocessor"></span><span class="preprocessor">#define CH_UP 16</span></div>
<div class="line"><a name="l00037"></a><span class="lineno"> 37</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00038"></a><span class="lineno"> 38</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00042"></a><span class="lineno"> 42</span> <span class="preprocessor">#ifndef CH_DN</span></div>
<div class="line"><a name="l00043"></a><span class="lineno"><a class="code" href="sirc_8h.html#a17edeab5aa7ad2135492de1dd62ec3c8"> 43</a></span> <span class="preprocessor"></span><span class="preprocessor">#define CH_DN 17</span></div>
<div class="line"><a name="l00044"></a><span class="lineno"> 44</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00045"></a><span class="lineno"> 45</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00049"></a><span class="lineno"> 49</span> <span class="preprocessor">#ifndef VOL_UP</span></div>
<div class="line"><a name="l00050"></a><span class="lineno"><a class="code" href="sirc_8h.html#ab5ae3648183faf01244f975f6d4e8a59"> 50</a></span> <span class="preprocessor"></span><span class="preprocessor">#define VOL_UP 18</span></div>
<div class="line"><a name="l00051"></a><span class="lineno"> 51</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00052"></a><span class="lineno"> 52</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00056"></a><span class="lineno"> 56</span> <span class="preprocessor">#ifndef VOL_DN</span></div>
<div class="line"><a name="l00057"></a><span class="lineno"><a class="code" href="sirc_8h.html#a9ac68e13ad2f7d2adb550dd909bf2956"> 57</a></span> <span class="preprocessor"></span><span class="preprocessor">#define VOL_DN 19</span></div>
<div class="line"><a name="l00058"></a><span class="lineno"> 58</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00059"></a><span class="lineno"> 59</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00063"></a><span class="lineno"> 63</span> <span class="preprocessor">#ifndef MUTE</span></div>
<div class="line"><a name="l00064"></a><span class="lineno"><a class="code" href="sirc_8h.html#a1f61941701e0251593e01aec567e6ceb"> 64</a></span> <span class="preprocessor"></span><span class="preprocessor">#define MUTE 20</span></div>
<div class="line"><a name="l00065"></a><span class="lineno"> 65</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00066"></a><span class="lineno"> 66</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00070"></a><span class="lineno"> 70</span> <span class="preprocessor">#ifndef PWR</span></div>
<div class="line"><a name="l00071"></a><span class="lineno"><a class="code" href="sirc_8h.html#a04651c526497822a859942b928e57f8e"> 71</a></span> <span class="preprocessor"></span><span class="preprocessor">#define PWR 21</span></div>
<div class="line"><a name="l00072"></a><span class="lineno"> 72</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00073"></a><span class="lineno"> 73</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00077"></a><span class="lineno"> 77</span> <span class="preprocessor">#ifndef PREV_CH</span></div>
<div class="line"><a name="l00078"></a><span class="lineno"><a class="code" href="sirc_8h.html#a6b1fd1eb0ed6d2482f5b1005da01cdbe"> 78</a></span> <span class="preprocessor"></span><span class="preprocessor">#define PREV_CH 59</span></div>
<div class="line"><a name="l00079"></a><span class="lineno"> 79</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00080"></a><span class="lineno"> 80</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00085"></a><span class="lineno"> 85</span> <span class="preprocessor">#ifndef ARROW_UP</span></div>
<div class="line"><a name="l00086"></a><span class="lineno"><a class="code" href="sirc_8h.html#a016e68eaa6a8185455fc7a372055e623"> 86</a></span> <span class="preprocessor"></span><span class="preprocessor">#define ARROW_UP 53</span></div>
<div class="line"><a name="l00087"></a><span class="lineno"> 87</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00088"></a><span class="lineno"> 88</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00093"></a><span class="lineno"> 93</span> <span class="preprocessor">#ifndef ARROW_DN</span></div>
<div class="line"><a name="l00094"></a><span class="lineno"><a class="code" href="sirc_8h.html#a4991c25bfd003e68807cc21827246468"> 94</a></span> <span class="preprocessor"></span><span class="preprocessor">#define ARROW_DN 54</span></div>
<div class="line"><a name="l00095"></a><span class="lineno"> 95</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00096"></a><span class="lineno"> 96</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00101"></a><span class="lineno"> 101</span> <span class="preprocessor">#ifndef ARROW_L</span></div>
<div class="line"><a name="l00102"></a><span class="lineno"><a class="code" href="sirc_8h.html#a1b08407b9550a6b1b47bbde93a876c31"> 102</a></span> <span class="preprocessor"></span><span class="preprocessor">#define ARROW_L 52</span></div>
<div class="line"><a name="l00103"></a><span class="lineno"> 103</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00104"></a><span class="lineno"> 104</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00109"></a><span class="lineno"> 109</span> <span class="preprocessor">#ifndef ARROW_R</span></div>
<div class="line"><a name="l00110"></a><span class="lineno"><a class="code" href="sirc_8h.html#a283ef6bc20b33bb873147de077097b50"> 110</a></span> <span class="preprocessor"></span><span class="preprocessor">#define ARROW_R 51</span></div>
<div class="line"><a name="l00111"></a><span class="lineno"> 111</span> <span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00112"></a><span class="lineno"> 112</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00113"></a><span class="lineno"> 113</span> <span class="preprocessor">#include "simpletools.h"</span> <span class="comment">// Include simple tools</span></div>
<div class="line"><a name="l00114"></a><span class="lineno"> 114</span> </div>
<div class="line"><a name="l00120"></a><span class="lineno"> 120</span> <span class="keywordtype">void</span> sirc_setTimeout(<span class="keywordtype">int</span> ms);</div>
<div class="line"><a name="l00121"></a><span class="lineno"> 121</span> </div>
<div class="line"><a name="l00131"></a><span class="lineno"> 131</span> <span class="keywordtype">int</span> sirc_button(<span class="keywordtype">int</span> pin);</div>
<div class="line"><a name="l00132"></a><span class="lineno"> 132</span> </div>
<div class="line"><a name="l00141"></a><span class="lineno"> 141</span> <span class="keywordtype">int</span> sirc_device(<span class="keywordtype">int</span> pin);</div>
<div class="line"><a name="l00142"></a><span class="lineno"> 142</span> </div>
<div class="line"><a name="l00152"></a><span class="lineno"> 152</span> <span class="keywordtype">int</span> sirc_code(<span class="keywordtype">int</span> pin, <span class="keywordtype">int</span> bits);</div>
<div class="line"><a name="l00153"></a><span class="lineno"> 153</span> </div>
<div class="line"><a name="l00154"></a><span class="lineno"> 154</span> <span class="preprocessor">#if defined(__cplusplus)</span></div>
<div class="line"><a name="l00155"></a><span class="lineno"> 155</span> <span class="preprocessor"></span>}</div>
<div class="line"><a name="l00156"></a><span class="lineno"> 156</span> <span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00157"></a><span class="lineno"> 157</span> <span class="preprocessor"></span><span class="comment">/* __cplusplus */</span> </div>
<div class="line"><a name="l00158"></a><span class="lineno"> 158</span> <span class="preprocessor">#endif</span></div>
<div class="line"><a name="l00159"></a><span class="lineno"> 159</span> <span class="preprocessor"></span><span class="comment">/* SONY_REMOTE_H */</span> </div>
<div class="line"><a name="l00160"></a><span class="lineno"> 160</span> </div>
<div class="line"><a name="l00161"></a><span class="lineno"> 161</span> <span class="comment">/*</span></div>
<div class="line"><a name="l00162"></a><span class="lineno"> 162</span> <span class="comment"> * TERMS OF USE: MIT License</span></div>
<div class="line"><a name="l00163"></a><span class="lineno"> 163</span> <span class="comment"> *</span></div>
<div class="line"><a name="l00164"></a><span class="lineno"> 164</span> <span class="comment"> * Permission is hereby granted, free of charge, to any person obtaining a</span></div>
<div class="line"><a name="l00165"></a><span class="lineno"> 165</span> <span class="comment"> * copy of this software and associated documentation files (the "Software"),</span></div>
<div class="line"><a name="l00166"></a><span class="lineno"> 166</span> <span class="comment"> * to deal in the Software without restriction, including without limitation</span></div>
<div class="line"><a name="l00167"></a><span class="lineno"> 167</span> <span class="comment"> * the rights to use, copy, modify, merge, publish, distribute, sublicense,</span></div>
<div class="line"><a name="l00168"></a><span class="lineno"> 168</span> <span class="comment"> * and/or sell copies of the Software, and to permit persons to whom the</span></div>
<div class="line"><a name="l00169"></a><span class="lineno"> 169</span> <span class="comment"> * Software is furnished to do so, subject to the following conditions:</span></div>
<div class="line"><a name="l00170"></a><span class="lineno"> 170</span> <span class="comment"> *</span></div>
<div class="line"><a name="l00171"></a><span class="lineno"> 171</span> <span class="comment"> * The above copyright notice and this permission notice shall be included in</span></div>
<div class="line"><a name="l00172"></a><span class="lineno"> 172</span> <span class="comment"> * all copies or substantial portions of the Software.</span></div>
<div class="line"><a name="l00173"></a><span class="lineno"> 173</span> <span class="comment"> *</span></div>
<div class="line"><a name="l00174"></a><span class="lineno"> 174</span> <span class="comment"> * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR</span></div>
<div class="line"><a name="l00175"></a><span class="lineno"> 175</span> <span class="comment"> * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,</span></div>
<div class="line"><a name="l00176"></a><span class="lineno"> 176</span> <span class="comment"> * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL</span></div>
<div class="line"><a name="l00177"></a><span class="lineno"> 177</span> <span class="comment"> * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER</span></div>
<div class="line"><a name="l00178"></a><span class="lineno"> 178</span> <span class="comment"> * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING</span></div>
<div class="line"><a name="l00179"></a><span class="lineno"> 179</span> <span class="comment"> * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER</span></div>
<div class="line"><a name="l00180"></a><span class="lineno"> 180</span> <span class="comment"> * DEALINGS IN THE SOFTWARE.</span></div>
<div class="line"><a name="l00181"></a><span class="lineno"> 181</span> <span class="comment"> */</span></div>
<div class="line"><a name="l00182"></a><span class="lineno"> 182</span> </div>
| gpl-2.0 |
TECLIB/glpi | inc/ruledictionnarymonitormodel.class.php | 2216 | <?php
/*
* @version $Id$
-------------------------------------------------------------------------
GLPI - Gestionnaire Libre de Parc Informatique
Copyright (C) 2015-2016 Teclib'.
http://glpi-project.org
based on GLPI - Gestionnaire Libre de Parc Informatique
Copyright (C) 2003-2014 by the INDEPNET Development Team.
-------------------------------------------------------------------------
LICENSE
This file is part of GLPI.
GLPI is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
GLPI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GLPI. If not, see <http://www.gnu.org/licenses/>.
--------------------------------------------------------------------------
*/
/** @file
* @brief
*/
class RuleDictionnaryMonitorModel extends RuleDictionnaryDropdown {
/**
* Constructor
**/
function __construct() {
parent::__construct('RuleDictionnaryMonitorModel');
}
/**
* @see Rule::getCriterias()
**/
function getCriterias() {
static $criterias = array();
// Build the criteria list only once per request (static memoization).
if (count($criterias)) {
return $criterias;
}
$criterias['name']['field'] = 'name';
$criterias['name']['name'] = __('Model');
$criterias['name']['table'] = 'glpi_monitormodels';
$criterias['manufacturer']['field'] = 'name';
$criterias['manufacturer']['name'] = __('Manufacturer');
$criterias['manufacturer']['table'] = 'glpi_manufacturers';
return $criterias;
}
/**
* @see Rule::getActions()
**/
function getActions() {
$actions = array();
$actions['name']['name'] = __('Model');
$actions['name']['force_actions'] = array('append_regex_result', 'assign', 'regex_result');
return $actions;
}
}
| gpl-2.0 |
pruiz/asterisk | codecs/codec_g722.c | 6105 | /*
* Asterisk -- An open source telephony toolkit.
*
* Copyright (C) 1999 - 2008, Digium, Inc.
*
* Matthew Fredrickson <[email protected]>
* Russell Bryant <[email protected]>
*
* Special thanks to Steve Underwood for the implementation
* and for doing the 8khz<->g.722 direct translation code.
*
* See http://www.asterisk.org for more information about
* the Asterisk project. Please do not directly contact
* any of the maintainers of this project for assistance;
* the project provides a web site, mailing lists and IRC
* channels for your use.
*
* This program is free software, distributed under the terms of
* the GNU General Public License Version 2. See the LICENSE file
* at the top of the source tree.
*/
/*! \file
*
* \brief codec_g722.c - translate between signed linear and ITU G.722-64kbps
*
* \author Matthew Fredrickson <[email protected]>
* \author Russell Bryant <[email protected]>
*
* \arg http://soft-switch.org/downloads/non-gpl-bits.tgz
* \arg http://lists.digium.com/pipermail/asterisk-dev/2006-September/022866.html
*
* \ingroup codecs
*/
/*** MODULEINFO
<support_level>core</support_level>
***/
#include "asterisk.h"
ASTERISK_FILE_VERSION(__FILE__, "$Revision$")
#include "asterisk/linkedlists.h"
#include "asterisk/module.h"
#include "asterisk/config.h"
#include "asterisk/translate.h"
#include "asterisk/utils.h"
#define BUFFER_SAMPLES 8096 /* size for the translation buffers */
#define BUF_SHIFT 5
#include "g722/g722.h"
/* Sample frame data */
#include "asterisk/slin.h"
#include "ex_g722.h"
struct g722_encoder_pvt {
g722_encode_state_t g722;
};
struct g722_decoder_pvt {
g722_decode_state_t g722;
};
/*! \brief init a new instance of g722_encoder_pvt. */
static int lintog722_new(struct ast_trans_pvt *pvt)
{
struct g722_encoder_pvt *tmp = pvt->pvt;
g722_encode_init(&tmp->g722, 64000, G722_SAMPLE_RATE_8000);
return 0;
}
static int lin16tog722_new(struct ast_trans_pvt *pvt)
{
struct g722_encoder_pvt *tmp = pvt->pvt;
g722_encode_init(&tmp->g722, 64000, 0);
return 0;
}
/*! \brief init a new instance of g722_encoder_pvt. */
static int g722tolin_new(struct ast_trans_pvt *pvt)
{
struct g722_decoder_pvt *tmp = pvt->pvt;
g722_decode_init(&tmp->g722, 64000, G722_SAMPLE_RATE_8000);
return 0;
}
static int g722tolin16_new(struct ast_trans_pvt *pvt)
{
struct g722_decoder_pvt *tmp = pvt->pvt;
g722_decode_init(&tmp->g722, 64000, 0);
return 0;
}
static int g722tolin_framein(struct ast_trans_pvt *pvt, struct ast_frame *f)
{
struct g722_decoder_pvt *tmp = pvt->pvt;
int out_samples;
int in_samples;
/* f->samples counts 16 kHz audio samples, but the last argument of
g722_decode() is the number of encoded bytes; G.722 packs two samples
per byte, so we pass samples / 2. */
in_samples = f->samples / 2;
out_samples = g722_decode(&tmp->g722, &pvt->outbuf.i16[pvt->samples],
(uint8_t *) f->data.ptr, in_samples);
pvt->samples += out_samples;
pvt->datalen += (out_samples * sizeof(int16_t));
return 0;
}
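/* Worked numbers for the conversion above (illustrative): a 20 ms G.722
frame carries 160 encoded bytes and represents 320 samples at 16 kHz,
so in_samples = 320 / 2 = 160 bytes in, and g722_decode() writes 320
int16_t samples (640 bytes) to the output buffer. */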
static int lintog722_framein(struct ast_trans_pvt *pvt, struct ast_frame *f)
{
struct g722_encoder_pvt *tmp = pvt->pvt;
int outlen;
outlen = g722_encode(&tmp->g722, (&pvt->outbuf.ui8[pvt->datalen]),
(int16_t *) f->data.ptr, f->samples);
pvt->samples += outlen * 2;
pvt->datalen += outlen;
return 0;
}
static struct ast_translator g722tolin = {
.name = "g722tolin",
.src_codec = {
.name = "g722",
.type = AST_MEDIA_TYPE_AUDIO,
.sample_rate = 16000,
},
.dst_codec = {
.name = "slin",
.type = AST_MEDIA_TYPE_AUDIO,
.sample_rate = 8000,
},
.format = "slin",
.newpvt = g722tolin_new, /* same for both directions */
.framein = g722tolin_framein,
.sample = g722_sample,
.desc_size = sizeof(struct g722_decoder_pvt),
.buffer_samples = BUFFER_SAMPLES / sizeof(int16_t),
.buf_size = BUFFER_SAMPLES,
};
static struct ast_translator lintog722 = {
.name = "lintog722",
.src_codec = {
.name = "slin",
.type = AST_MEDIA_TYPE_AUDIO,
.sample_rate = 8000,
},
.dst_codec = {
.name = "g722",
.type = AST_MEDIA_TYPE_AUDIO,
.sample_rate = 16000,
},
.format = "g722",
.newpvt = lintog722_new, /* same for both directions */
.framein = lintog722_framein,
.sample = slin8_sample,
.desc_size = sizeof(struct g722_encoder_pvt),
.buffer_samples = BUFFER_SAMPLES * 2,
.buf_size = BUFFER_SAMPLES,
};
static struct ast_translator g722tolin16 = {
.name = "g722tolin16",
.src_codec = {
.name = "g722",
.type = AST_MEDIA_TYPE_AUDIO,
.sample_rate = 16000,
},
.dst_codec = {
.name = "slin",
.type = AST_MEDIA_TYPE_AUDIO,
.sample_rate = 16000,
},
.format = "slin16",
.newpvt = g722tolin16_new, /* same for both directions */
.framein = g722tolin_framein,
.sample = g722_sample,
.desc_size = sizeof(struct g722_decoder_pvt),
.buffer_samples = BUFFER_SAMPLES / sizeof(int16_t),
.buf_size = BUFFER_SAMPLES,
};
static struct ast_translator lin16tog722 = {
.name = "lin16tog722",
.src_codec = {
.name = "slin",
.type = AST_MEDIA_TYPE_AUDIO,
.sample_rate = 16000,
},
.dst_codec = {
.name = "g722",
.type = AST_MEDIA_TYPE_AUDIO,
.sample_rate = 16000,
},
.format = "g722",
.newpvt = lin16tog722_new, /* same for both directions */
.framein = lintog722_framein,
.sample = slin16_sample,
.desc_size = sizeof(struct g722_encoder_pvt),
.buffer_samples = BUFFER_SAMPLES * 2,
.buf_size = BUFFER_SAMPLES,
};
static int unload_module(void)
{
int res = 0;
res |= ast_unregister_translator(&g722tolin);
res |= ast_unregister_translator(&lintog722);
res |= ast_unregister_translator(&g722tolin16);
res |= ast_unregister_translator(&lin16tog722);
return res;
}
static int load_module(void)
{
int res = 0;
res |= ast_register_translator(&g722tolin);
res |= ast_register_translator(&lintog722);
res |= ast_register_translator(&g722tolin16);
res |= ast_register_translator(&lin16tog722);
if (res) {
unload_module();
return AST_MODULE_LOAD_FAILURE;
}
return AST_MODULE_LOAD_SUCCESS;
}
AST_MODULE_INFO(ASTERISK_GPL_KEY, AST_MODFLAG_DEFAULT, "ITU G.722-64kbps G722 Transcoder",
.support_level = AST_MODULE_SUPPORT_CORE,
.load = load_module,
.unload = unload_module,
);
| gpl-2.0 |
unfinite/Alphasquare | src/application/models/employee_model.php | 2577 | <?php
if ( ! defined('BASEPATH')) exit('No direct script access allowed');
/**
* Employee Model
* Check if user is staff, perform basic moderation actions
* @package Models
*/
class Employee_model extends CI_Model {
/**
* Allow the logged-in user access if the 'employee' session variable is set
*
* @return bool
*
*/
public function allow_access() {
// check sess vars
$staff = $this->php_session->get('employee');
if ($staff == 0) {
return false;
} else {
return true;
}
}
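// Illustrative usage sketch (hypothetical controller code, not part of this
// model): a staff-only CodeIgniter controller could gate its constructor with
//
//   if (!$this->employee_model->allow_access()) {
//     redirect('/');
//   }
//
// The redirect target '/' is an assumption for illustration only.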
/**
* Check if user is staff
*
* @param string $username The user's username
* @return bool If the user is staff or not
*
*/
public function is_staff($username) {
// query db to check if user is employee
$this->db->select('employee')
->from('users')
->where('username', $username);
$result = $this->db->get()->row_array();
// return true for staff, false otherwise (also when no row was found)
switch($result['employee']) {
case 1:
return true;
case 0:
default:
return false;
}
}
/**
* Get all users
*
* @return array Each user's data
*
*/
public function get_users() {
// query db to list usernames and stuff
$this->db->select('name, username, official, employee, avatar, points, email, id')
->from('users');
return $this->db->get()->result_array();
}
/**
* Delete a user (refuses to delete staff accounts)
*
* @param int $id The user's ID
* @return bool Whether the user was deleted
*
*/
public function delete($id) {
// query db
if (is_numeric($id)) {
$this->db->select('employee')
->from('users')
->where('id', $id);
$arr = $this->db->get()->row_array();
$status = $arr['employee'];
if($status == 1) {
return false;
} else {
$this->db->delete('users', array('id'=>$id));
return true;
}
}
return false;
}
/**
* Force-delete a user, regardless of staff status
*
* @param int $id The user's ID
* @return bool Whether the delete query ran
*/
public function delete_f($id) {
if (is_numeric($id)) {
return $this->db->delete('users', array('id'=>$id));
} else {
return false;
}
}
/**
* Delete a post (debate)
*
* @param int $id The post's ID
* @return bool Whether the delete query ran
*/
public function delete_post($id) {
return $this->db->delete('debates', array('id'=>$id));
}
/**
* Mark a user as official
*
* @param int $id The user's ID
*/
public function official($id) {
if (is_numeric($id)) {
$this->db->where('id', $id);
$st = 1;
$this->db->update('users', array('official'=>$st));
}
}
/**
* Mark a user as staff
*
* @param int $id The user's ID
*/
public function staff($id) {
if (is_numeric($id)) {
$this->db->where('id', $id);
$st = 1;
$this->db->update('users', array('employee'=>$st));
}
}
}
?> | gpl-2.0 |
bharcode/MachineLearning | CustomMahout/core/src/main/java/org/apache/mahout/cf/taste/hadoop/item/AggregateAndRecommendReducer.java | 8512 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mahout.cf.taste.hadoop.item;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.mahout.cf.taste.hadoop.MutableRecommendedItem;
import org.apache.mahout.cf.taste.hadoop.RecommendedItemsWritable;
import org.apache.mahout.cf.taste.hadoop.TasteHadoopUtils;
import org.apache.mahout.cf.taste.hadoop.TopItemsQueue;
import org.apache.mahout.cf.taste.impl.common.FastIDSet;
import org.apache.mahout.cf.taste.recommender.RecommendedItem;
import org.apache.mahout.common.HadoopUtil;
import org.apache.mahout.common.iterator.FileLineIterable;
import org.apache.mahout.math.RandomAccessSparseVector;
import org.apache.mahout.math.VarLongWritable;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.Vector.Element;
import org.apache.mahout.math.function.Functions;
import org.apache.mahout.math.map.OpenIntLongHashMap;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
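/*
 * Worked example of the prediction formula documented below (illustrative
 * numbers, not from the codebase): suppose user u has rated two items
 * similar to candidate item i, with rating(u,n1) = 4.0, rating(u,n2) = 2.0
 * and similarity(i,n1) = 0.8, similarity(i,n2) = 0.4.  Then
 *
 *   Prediction(u,i) = (0.8 * 4.0 + 0.4 * 2.0) / (|0.8| + |0.4|)
 *                   = 4.0 / 1.2
 *                   ~ 3.33
 *
 * i.e. a similarity-weighted average of u's known ratings.
 */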
/**
* <p>Computes prediction values for each user.</p>
*
* <pre>
* u = a user
* i = an item not yet rated by u
* N = all items similar to i (where similarity is usually computed by pairwise comparison of the item-vectors
* of the user-item matrix)
*
* Prediction(u,i) = sum(all n from N: similarity(i,n) * rating(u,n)) / sum(all n from N: abs(similarity(i,n)))
* </pre>
*/
public final class AggregateAndRecommendReducer extends
Reducer<VarLongWritable,PrefAndSimilarityColumnWritable,VarLongWritable,RecommendedItemsWritable> {
private static final Logger log = LoggerFactory.getLogger(AggregateAndRecommendReducer.class);
static final String ITEMID_INDEX_PATH = "itemIDIndexPath";
static final String NUM_RECOMMENDATIONS = "numRecommendations";
static final int DEFAULT_NUM_RECOMMENDATIONS = 10;
static final String ITEMS_FILE = "itemsFile";
private boolean booleanData;
private int recommendationsPerUser;
private FastIDSet itemsToRecommendFor;
private OpenIntLongHashMap indexItemIDMap;
private final RecommendedItemsWritable recommendedItems = new RecommendedItemsWritable();
private static final float BOOLEAN_PREF_VALUE = 1.0f;
@Override
protected void setup(Context context) throws IOException {
Configuration conf = context.getConfiguration();
recommendationsPerUser = conf.getInt(NUM_RECOMMENDATIONS, DEFAULT_NUM_RECOMMENDATIONS);
booleanData = conf.getBoolean(RecommenderJob.BOOLEAN_DATA, false);
indexItemIDMap = TasteHadoopUtils.readIDIndexMap(conf.get(ITEMID_INDEX_PATH), conf);
String itemFilePathString = conf.get(ITEMS_FILE);
if (itemFilePathString != null) {
itemsToRecommendFor = new FastIDSet();
for (String line : new FileLineIterable(HadoopUtil.openStream(new Path(itemFilePathString), conf))) {
try {
itemsToRecommendFor.add(Long.parseLong(line));
} catch (NumberFormatException nfe) {
log.warn("itemsFile line ignored: {}", line);
}
}
}
}
@Override
protected void reduce(VarLongWritable userID,
Iterable<PrefAndSimilarityColumnWritable> values,
Context context) throws IOException, InterruptedException {
if (booleanData) {
reduceBooleanData(userID, values, context);
} else {
reduceNonBooleanData(userID, values, context);
}
}
private void reduceBooleanData(VarLongWritable userID,
Iterable<PrefAndSimilarityColumnWritable> values,
Context context) throws IOException, InterruptedException {
/* having boolean data, each estimated preference can only be 1,
* however we can't use this to rank the recommended items,
* so we use the sum of similarities for that. */
Iterator<PrefAndSimilarityColumnWritable> columns = values.iterator();
Vector predictions = columns.next().getSimilarityColumn();
while (columns.hasNext()) {
predictions.assign(columns.next().getSimilarityColumn(), Functions.PLUS);
}
writeRecommendedItems(userID, predictions, context);
}
private void reduceNonBooleanData(VarLongWritable userID,
Iterable<PrefAndSimilarityColumnWritable> values,
Context context) throws IOException, InterruptedException {
/* each entry here is the sum in the numerator of the prediction formula */
Vector numerators = null;
/* each entry here is the sum in the denominator of the prediction formula */
Vector denominators = null;
/* each entry here is the number of similar items used in the prediction formula */
Vector numberOfSimilarItemsUsed = new RandomAccessSparseVector(Integer.MAX_VALUE, 100);
for (PrefAndSimilarityColumnWritable prefAndSimilarityColumn : values) {
Vector simColumn = prefAndSimilarityColumn.getSimilarityColumn();
float prefValue = prefAndSimilarityColumn.getPrefValue();
/* count the number of items used for each prediction */
for (Element e : simColumn.nonZeroes()) {
int itemIDIndex = e.index();
numberOfSimilarItemsUsed.setQuick(itemIDIndex, numberOfSimilarItemsUsed.getQuick(itemIDIndex) + 1);
}
if (denominators == null) {
denominators = simColumn.clone();
} else {
denominators.assign(simColumn, Functions.PLUS_ABS);
}
if (numerators == null) {
numerators = simColumn.clone();
if (prefValue != BOOLEAN_PREF_VALUE) {
numerators.assign(Functions.MULT, prefValue);
}
} else {
if (prefValue != BOOLEAN_PREF_VALUE) {
simColumn.assign(Functions.MULT, prefValue);
}
numerators.assign(simColumn, Functions.PLUS);
}
}
if (numerators == null) {
return;
}
Vector recommendationVector = new RandomAccessSparseVector(Integer.MAX_VALUE, 100);
for (Element element : numerators.nonZeroes()) {
int itemIDIndex = element.index();
/* preference estimations must be based on at least 2 datapoints */
if (numberOfSimilarItemsUsed.getQuick(itemIDIndex) > 1) {
/* compute normalized prediction */
double prediction = element.get() / denominators.getQuick(itemIDIndex);
recommendationVector.setQuick(itemIDIndex, prediction);
}
}
writeRecommendedItems(userID, recommendationVector, context);
}
/**
* find the top entries in recommendationVector, map them to the real itemIDs and write back the result
*/
private void writeRecommendedItems(VarLongWritable userID, Vector recommendationVector, Context context)
throws IOException, InterruptedException {
TopItemsQueue topKItems = new TopItemsQueue(recommendationsPerUser);
for (Element element : recommendationVector.nonZeroes()) {
int index = element.index();
long itemID;
if (indexItemIDMap != null && !indexItemIDMap.isEmpty()) {
itemID = indexItemIDMap.get(index);
} else { //we don't have any mappings, so just use the original
itemID = index;
}
if (itemsToRecommendFor == null || itemsToRecommendFor.contains(itemID)) {
float value = (float) element.get();
if (!Float.isNaN(value)) {
MutableRecommendedItem topItem = topKItems.top();
if (value > topItem.getValue()) {
topItem.set(itemID, value);
topKItems.updateTop();
}
}
}
}
List<RecommendedItem> topItems = topKItems.getTopItems();
if (!topItems.isEmpty()) {
recommendedItems.set(topItems);
context.write(userID, recommendedItems);
}
}
}
| gpl-2.0 |
drbyte/zc-v1-series | includes/templates/template_default/templates/tpl_order_status_default.php | 6773 | <?php
/**
* Page Template
*
* Loaded automatically by index.php?main_page=order_status.<br />
* Displays information related to a single specific order
*
* @package templateSystem
* @copyright Copyright 2003-2015 Zen Cart Development Team
* @copyright Portions Copyright 2003 osCommerce
* @copyright Portions copyright COWOA authors see https://www.zen-cart.com/downloads.php?do=file&id=1115
* @license http://www.zen-cart.com/license/2_0.txt GNU Public License V2.0
* @version $Id: New in V1.6.0 $
*/
?> <!-- TPL_ORDER_STATUS_DEFAULT.PHP -->
<div class="centerColumn" id="accountHistInfo">
<h1 id="orderHistoryHeading"><?php echo HEADING_TITLE; ?></h1>
<br />
<?php
if (isset($_POST['action']) && $_POST['action'] == "process" && ($errorInvalidID || $errorInvalidEmail || $errorNoMatch)) {
?>
<div class="messageStackWarning larger">
<?php
if($errorInvalidID) echo ERROR_INVALID_ORDER;
if($errorInvalidEmail) echo ERROR_INVALID_EMAIL;
if($errorNoMatch) echo ERROR_NO_MATCH;
?>
</div>
<?php } ?>
<?php if (isset($order)) { ?>
<table border="0" width="100%" cellspacing="0" cellpadding="0" summary="Itemized listing of previous order, includes number ordered, items and prices">
<h2 id="orderHistoryDetailedOrder"><?php echo SUB_HEADING_TITLE . ORDER_HEADING_DIVIDER . sprintf(HEADING_ORDER_NUMBER, $_POST['order_id']); ?></h2>
<div class="forward"><?php echo HEADING_ORDER_DATE . ' ' . zen_date_long($order->info['date_purchased']); ?></div>
<br class="clearBoth" />
<tr class="tableHeading">
<th scope="col" id="myAccountQuantity"><?php echo HEADING_QUANTITY; ?></th>
<th scope="col" id="myAccountProducts"><?php echo HEADING_PRODUCTS; ?></th>
<?php
if (sizeof($order->info['tax_groups']) > 1) {
?>
<th scope="col" id="myAccountTax"><?php echo HEADING_TAX; ?></th>
<?php
}
?>
<th scope="col" id="myAccountTotal"><?php echo HEADING_TOTAL; ?></th>
</tr>
<?php
for ($i=0, $n=sizeof($order->products); $i<$n; $i++) {
?>
<tr>
<td class="accountQuantityDisplay"><?php echo $order->products[$i]['qty'] . QUANTITY_SUFFIX; ?></td>
<td class="accountProductDisplay"><?php echo $order->products[$i]['name'];
if ( (isset($order->products[$i]['attributes'])) && (sizeof($order->products[$i]['attributes']) > 0) ) {
echo '<ul class="orderAttribsList">';
for ($j=0, $n2=sizeof($order->products[$i]['attributes']); $j<$n2; $j++) {
echo '<li>' . $order->products[$i]['attributes'][$j]['option'] . TEXT_OPTION_DIVIDER . nl2br($order->products[$i]['attributes'][$j]['value']) . '</li>';
}
echo '</ul>';
}
?>
</td>
<?php
if (sizeof($order->info['tax_groups']) > 1) {
?>
<td class="accountTaxDisplay"><?php echo zen_display_tax_value($order->products[$i]['tax']) . '%' ?></td>
<?php
}
?>
<td class="accountTotalDisplay"><?php echo $currencies->format(zen_add_tax($order->products[$i]['final_price'], $order->products[$i]['tax']) * $order->products[$i]['qty'], true, $order->info['currency'], $order->info['currency_value']) . ($order->products[$i]['onetime_charges'] != 0 ? '<br />' . $currencies->format(zen_add_tax($order->products[$i]['onetime_charges'], $order->products[$i]['tax']), true, $order->info['currency'], $order->info['currency_value']) : '') ?></td>
</tr>
<?php
}
?>
</table>
<hr />
<div id="orderTotals">
<?php
for ($i=0, $n=sizeof($order->totals); $i<$n; $i++) {
?>
<div class="amount larger forward"><?php echo $order->totals[$i]['text'] ?></div>
<div class="lineTitle larger forward"><?php echo $order->totals[$i]['title'] ?></div>
<br class="clearBoth" />
<?php
}
?>
</div>
<?php
/**
* Used to display any downloads associated with the customer's account
*/
if (DOWNLOAD_ENABLED == 'true') require($template->get_template_dir('tpl_modules_downloads.php',DIR_WS_TEMPLATE, $current_page_base,'templates'). '/tpl_modules_downloads.php');
?>
<?php
/**
* Used to loop through and display order status information
*/
if (sizeof($statusArray)) {
?>
<table border="0" width="100%" cellspacing="0" cellpadding="0" id="myAccountOrdersStatus" summary="Table contains the date, order status and any comments regarding the order">
<caption><h2 id="orderHistoryStatus"><?php echo HEADING_ORDER_HISTORY; ?></h2></caption>
<tr class="tableHeading">
<th scope="col" id="myAccountStatusDate"><?php echo TABLE_HEADING_STATUS_DATE; ?></th>
<th scope="col" id="myAccountStatus"><?php echo TABLE_HEADING_STATUS_ORDER_STATUS; ?></th>
<th scope="col" id="myAccountStatusComments"><?php echo TABLE_HEADING_STATUS_COMMENTS; ?></th>
</tr>
<?php
foreach ($statusArray as $statuses) {
?>
<tr>
<td><?php echo zen_date_short($statuses['date_added']); ?></td>
<td><?php echo $statuses['orders_status_name']; ?></td>
<td><?php echo (empty($statuses['comments']) ? ' ' : nl2br(zen_output_string_protected($statuses['comments']))); ?></td>
</tr>
<?php
}
?>
</table>
<?php } ?>
<hr />
<div id="myAccountShipInfo" class="floatingBox back">
<?php
if (zen_not_null($order->info['shipping_method'])) {
?>
<h4><?php echo HEADING_SHIPPING_METHOD; ?></h4>
<div><?php echo $order->info['shipping_method']; ?></div>
<?php } else { // temporary: just remove these 4 lines ?>
<div>WARNING: Missing Shipping Information</div>
<?php
}
?>
</div>
<div id="myAccountPaymentInfo" class="floatingBox forward">
<h4><?php echo HEADING_PAYMENT_METHOD; ?></h4>
<div><?php echo $order->info['payment_method']; ?></div>
</div>
<br class="clearBoth" />
<br />
<?php } ?>
<?php
echo zen_draw_form('order_status', zen_href_link(FILENAME_ORDER_STATUS, '', 'SSL'), 'post') . zen_draw_hidden_field('action', 'process');
?>
<fieldset>
<legend><?php echo HEADING_TITLE; ?></legend>
<?php echo TEXT_LOOKUP_INSTRUCTIONS; ?>
<br /><br />
<label class="inputLabel"><?php echo ENTRY_ORDER_NUMBER; ?></label>
<?php echo zen_draw_input_field('order_id', (isset($_GET['order_id']) ? (int)$_GET['order_id'] : ''), 'size="10" id="order_id" required autofocus', 'number'); ?>
<br />
<br />
<label class="inputLabel"><?php echo ENTRY_EMAIL; ?></label>
<?php echo zen_draw_input_field('query_email_address', '', 'size="35" id="query_email_address" required', 'email'); ?>
<br />
<div class="buttonRow forward"><?php echo zen_image_submit(BUTTON_IMAGE_CONTINUE, BUTTON_CONTINUE_ALT); ?></div>
</fieldset>
</form>
</div>
| gpl-2.0 |
project-zerus/mariadb | storage/xtradb/lock/lock0lock.c | 168540 | /*****************************************************************************
Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*****************************************************************************/
/**************************************************//**
@file lock/lock0lock.c
The transaction lock system
Created 5/7/1996 Heikki Tuuri
*******************************************************/
#define LOCK_MODULE_IMPLEMENTATION
#include "lock0lock.h"
#include "lock0priv.h"
#ifdef UNIV_NONINL
#include "lock0lock.ic"
#include "lock0priv.ic"
#endif
#include "ha_prototypes.h"
#include "usr0sess.h"
#include "trx0purge.h"
#include "dict0mem.h"
#include "trx0sys.h"
#include "btr0btr.h"
/* Restricts the length of search we will do in the waits-for
graph of transactions */
#define LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK 1000000
/* Restricts the recursion depth of the search we will do in the waits-for
graph of transactions */
#define LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK 200
/* When releasing transaction locks, this specifies how often we release
the kernel mutex for a moment to give also others access to it */
#define LOCK_RELEASE_KERNEL_INTERVAL 1000
/* Safety margin when creating a new record lock: this many extra records
can be inserted to the page without need to create a lock with a bigger
bitmap */
#define LOCK_PAGE_BITMAP_MARGIN 64
/* An explicit record lock affects both the record and the gap before it.
An implicit x-lock does not affect the gap, it only locks the index
record from read or update.
If a transaction has modified or inserted an index record, then
it owns an implicit x-lock on the record. On a secondary index record,
a transaction has an implicit x-lock also if it has modified the
clustered index record, the max trx id of the page where the secondary
index record resides is >= trx id of the transaction (or database recovery
is running), and there are no explicit non-gap lock requests on the
secondary index record.
This complicated definition for a secondary index comes from the
implementation: we want to be able to determine if a secondary index
record has an implicit x-lock, just by looking at the present clustered
index record, not at the historical versions of the record. The
complicated definition can be explained to the user as follows: there is
nondeterminism in the access path when a query is answered: we may,
or may not, access the clustered index record and thus may, or may not,
bump into an x-lock set there.
Different transaction can have conflicting locks set on the gap at the
same time. The locks on the gap are purely inhibitive: an insert cannot
be made, or a select cursor may have to wait if a different transaction
has a conflicting lock on the gap. An x-lock on the gap does not give
the right to insert into the gap.
An explicit lock can be placed on a user record or the supremum record of
a page. The locks on the supremum record are always thought to be of the gap
type, though the gap bit is not set. When we perform an update of a record
where the size of the record changes, we may temporarily store its explicit
locks on the infimum record of the page, though the infimum otherwise never
carries locks.
A waiting record lock can also be of the gap type. A waiting lock request
can be granted when there is no conflicting mode lock request by another
transaction ahead of it in the explicit lock queue.
In version 4.0.5 we added yet another explicit lock type: LOCK_REC_NOT_GAP.
It only locks the record it is placed on, not the gap before the record.
This lock type is necessary to emulate an Oracle-like READ COMMITTED isolation
level.
-------------------------------------------------------------------------
RULE 1: If there is an implicit x-lock on a record, and there are non-gap
-------
lock requests waiting in the queue, then the transaction holding the implicit
x-lock also has an explicit non-gap record x-lock. Therefore, as locks are
released, we can grant locks to waiting lock requests purely by looking at
the explicit lock requests in the queue.
RULE 3: Different transactions cannot have conflicting granted non-gap locks
-------
on a record at the same time. However, they can have conflicting granted gap
locks.
RULE 4: If there is a waiting lock request in a queue, no lock request,
-------
gap or not, can be inserted ahead of it in the queue. In record deletes
and page splits new gap type locks can be created by the database manager
for a transaction, and without rule 4, the waits-for graph of transactions
might become cyclic without the database noticing it, as the deadlock check
is only performed when a transaction itself requests a lock!
-------------------------------------------------------------------------
An insert is allowed to a gap if there are no explicit lock requests by
other transactions on the next record. It does not matter if these lock
requests are granted or waiting, gap bit set or not, with the exception
that a gap type request set by another transaction to wait for
its turn to do an insert is ignored. On the other hand, an
implicit x-lock by another transaction does not prevent an insert, which
allows for more concurrency when using an Oracle-style sequence number
generator for the primary key with many transactions doing inserts
concurrently.
A modify of a record is allowed if the transaction has an x-lock on the
record, or if other transactions do not have any non-gap lock requests on the
record.
A read of a single user record with a cursor is allowed if the transaction
has a non-gap explicit, or an implicit lock on the record, or if the other
transactions have no x-lock requests on the record. At a page supremum a
read is always allowed.
In summary, an implicit lock is seen as a granted x-lock only on the
record, not on the gap. An explicit lock with no gap bit set is a lock
both on the record and the gap. If the gap bit is set, the lock is only
on the gap. Different transaction cannot own conflicting locks on the
record at the same time, but they may own conflicting locks on the gap.
Granted locks on a record give an access right to the record, but gap type
locks just inhibit operations.
NOTE: Finding out if some transaction has an implicit x-lock on a secondary
index record can be cumbersome. We may have to look at previous versions of
the corresponding clustered index record to find out if a delete marked
secondary index record was delete marked by an active transaction, not by
a committed one.
FACT A: If a transaction has inserted a row, it can delete it any time
without need to wait for locks.
PROOF: The transaction has an implicit x-lock on every index record inserted
for the row, and can thus modify each record without the need to wait. Q.E.D.
FACT B: If a transaction has read some result set with a cursor, it can read
it again, and retrieves the same result set, if it has not modified the
result set in the meantime. Hence, there is no phantom problem. If the
biggest record, in the alphabetical order, touched by the cursor is removed,
a lock wait may occur, otherwise not.
PROOF: When a read cursor proceeds, it sets an s-lock on each user record
it passes, and a gap type s-lock on each page supremum. The cursor must
wait until it has these locks granted. Then no other transaction can
have a granted x-lock on any of the user records, and therefore cannot
modify the user records. Neither can any other transaction insert into
the gaps which were passed over by the cursor. Page splits and merges,
and removal of obsolete versions of records do not affect this, because
when a user record or a page supremum is removed, the next record inherits
its locks as gap type locks, and therefore blocks inserts to the same gap.
Also, if a page supremum is inserted, it inherits its locks from the successor
record. When the cursor is positioned again at the start of the result set,
the records it will touch on its course are either records it touched
during the last pass or new inserted page supremums. It can immediately
access all these records, and when it arrives at the biggest record, it
notices that the result set is complete. If the biggest record was removed,
lock wait can occur because the next record only inherits a gap type lock,
and a wait may be needed. Q.E.D. */
/* If an index record should be changed or a new inserted, we must check
the lock on the record or the next. When a read cursor starts reading,
we will set a record level s-lock on each record it passes, except on the
initial record on which the cursor is positioned before we start to fetch
records. Our index tree search has the convention that the B-tree
cursor is positioned BEFORE the first possibly matching record in
the search. Optimizations are possible here: if the record is searched
on an equality condition to a unique key, we could actually set a special
lock on the record, a lock which would not prevent any insert before
this record. In the next key locking an x-lock set on a record also
prevents inserts just before that record.
There are special infimum and supremum records on each page.
A supremum record can be locked by a read cursor. This record cannot be
updated, but the lock prevents insertion of a user record at the end of
the page.
Next key locks will prevent the phantom problem where new rows
could appear to SELECT result sets after the select operation has been
performed. Prevention of phantoms ensures the serializability of
transactions.
What should we check if an insert of a new record is wanted?
Only the lock on the next record on the same page, because also the
supremum record can carry a lock. An s-lock prevents insertion, but
what about an x-lock? If it was set by a searched update, then there
is implicitly an s-lock, too, and the insert should be prevented.
What if our transaction owns an x-lock to the next record, but there is
a waiting s-lock request on the next record? If this s-lock was placed
by a read cursor moving in the ascending order in the index, we cannot
do the insert immediately, because when we finally commit our transaction,
the read cursor should see also the new inserted record. So we should
move the read cursor backward from the next record for it to pass over
the new inserted record. This move backward may be too cumbersome to
implement. If we in this situation just enqueue a second x-lock request
for our transaction on the next record, then the deadlock mechanism
notices a deadlock between our transaction and the s-lock request
transaction. This seems to be an ok solution.
We could have the convention that granted explicit record locks,
lock the corresponding records from changing, and also lock the gaps
before them from inserting. A waiting explicit lock request locks the gap
before from inserting. Implicit record x-locks, which we derive from the
transaction id in the clustered index record, only lock the record itself
from modification, not the gap before it from inserting.
How should we store update locks? If the search is done by a unique
key, we could just modify the record trx id. Otherwise, we could put a record
x-lock on the record. If the update changes ordering fields of the
clustered index record, the inserted new record needs no record lock in
lock table, the trx id is enough. The same holds for a secondary index
record. Searched delete is similar to update.
PROBLEM:
What about waiting lock requests? If a transaction is waiting to make an
update to a record which another modified, how does the other transaction
know to send the end-lock-wait signal to the waiting transaction? If we have
the convention that a transaction may wait for just one lock at a time, how
do we preserve it if lock wait ends?
PROBLEM:
Checking the trx id label of a secondary index record. In the case of a
modification, not an insert, is this necessary? A secondary index record
is modified only by setting or resetting its deleted flag. A secondary index
record contains fields to uniquely determine the corresponding clustered
index record. A secondary index record is therefore only modified if we
also modify the clustered index record, and the trx id checking is done
on the clustered index record, before we come to modify the secondary index
record. So, in the case of delete marking or unmarking a secondary index
record, we do not have to care about trx ids, only the locks in the lock
table must be checked. In the case of a select from a secondary index, the
trx id is relevant, and in this case we may have to search the clustered
index record.
PROBLEM: How to update record locks when page is split or merged, or
--------------------------------------------------------------------
a record is deleted or updated?
If the size of fields in a record changes, we perform the update by
a delete followed by an insert. How can we retain the locks set or
waiting on the record? Because a record lock is indexed in the bitmap
by the heap number of the record, when we remove the record from the
record list, it is possible still to keep the lock bits. If the page
is reorganized, we could make a table of old and new heap numbers,
and permute the bitmaps in the locks accordingly. We can add to the
table a row telling where the updated record ended. If the update does
not require a reorganization of the page, we can simply move the lock
bits for the updated record to the position determined by its new heap
number (we may have to allocate a new lock, if we run out of the bitmap
in the old one).
A more complicated case is the one where the reinsertion of the
updated record is done pessimistically, because the structure of the
tree may change.
PROBLEM: If a supremum record is removed in a page merge, or a record
---------------------------------------------------------------------
removed in a purge, what to do to the waiting lock requests? In a split to
the right, we just move the lock requests to the new supremum. If a record
is removed, we could move the waiting lock request to its inheritor, the
next record in the index. But, the next record may already have lock
requests on its own queue. A new deadlock check should be made then. Maybe
it is easier just to release the waiting transactions. They can then enqueue
new lock requests on appropriate records.
PROBLEM: When a record is inserted, what locks should it inherit from the
-------------------------------------------------------------------------
upper neighbor? An insert of a new supremum record in a page split is
always possible, but an insert of a new user record requires that the upper
neighbor does not have any lock requests by other transactions, granted or
waiting, in its lock queue. Solution: We can copy the locks as gap type
locks, so that also the waiting locks are transformed to granted gap type
locks on the inserted record. */
/* LOCK COMPATIBILITY MATRIX
* IS IX S X AI
* IS + + + - +
* IX + + - - +
* S + - + - -
* X - - - - -
* AI + + - - -
*
* Note that for rows, InnoDB only acquires S or X locks.
* For tables, InnoDB normally acquires IS or IX locks.
* S or X table locks are only acquired for LOCK TABLES.
* Auto-increment (AI) locks are needed because of
* statement-level MySQL binlog.
* See also lock_mode_compatible().
*/
#define LK(a,b) (1 << ((a) * LOCK_NUM + (b)))
#define LKS(a,b) LK(a,b) | LK(b,a)
/* Define the lock compatibility matrix in a ulint. The first line below
defines the diagonal entries. The following lines define the compatibility
for LOCK_IX, LOCK_S, and LOCK_AUTO_INC using LKS(), since the matrix
is symmetric. */
#define LOCK_MODE_COMPATIBILITY 0 \
| LK(LOCK_IS, LOCK_IS) | LK(LOCK_IX, LOCK_IX) | LK(LOCK_S, LOCK_S) \
| LKS(LOCK_IX, LOCK_IS) | LKS(LOCK_IS, LOCK_AUTO_INC) \
| LKS(LOCK_S, LOCK_IS) \
| LKS(LOCK_AUTO_INC, LOCK_IS) | LKS(LOCK_AUTO_INC, LOCK_IX)
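/* Illustrative sketch (not part of the original file): how a packed
matrix such as LOCK_MODE_COMPATIBILITY is consulted. The bit for the
pair (mode1, mode2) sits at position mode1 * LOCK_NUM + mode2, mirroring
LK() above; the helper name below is hypothetical, for illustration
only. */
UNIV_INLINE
ibool
lock_mode_compatible_sketch(
/*========================*/
	enum lock_mode	mode1,	/*!< in: requested lock mode */
	enum lock_mode	mode2)	/*!< in: granted lock mode */
{
	/* E.g. (LOCK_IX, LOCK_IS) reads a bit set by LKS(LOCK_IX, LOCK_IS)
	above and yields nonzero; (LOCK_S, LOCK_X) reads an unset bit: 0. */
	return((LOCK_MODE_COMPATIBILITY >> (mode1 * LOCK_NUM + mode2)) & 1);
}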
/* STRONGER-OR-EQUAL RELATION (mode1=row, mode2=column)
* IS IX S X AI
* IS + - - - -
* IX + + - - -
* S + - + - -
* X + + + + +
* AI - - - - +
* See lock_mode_stronger_or_eq().
*/
/* Define the stronger-or-equal lock relation in a ulint. This relation
contains all pairs LK(mode1, mode2) where mode1 is stronger than or
equal to mode2. */
#define LOCK_MODE_STRONGER_OR_EQ 0 \
| LK(LOCK_IS, LOCK_IS) \
| LK(LOCK_IX, LOCK_IS) | LK(LOCK_IX, LOCK_IX) \
| LK(LOCK_S, LOCK_IS) | LK(LOCK_S, LOCK_S) \
| LK(LOCK_AUTO_INC, LOCK_AUTO_INC) \
| LK(LOCK_X, LOCK_IS) | LK(LOCK_X, LOCK_IX) | LK(LOCK_X, LOCK_S) \
| LK(LOCK_X, LOCK_AUTO_INC) | LK(LOCK_X, LOCK_X)
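/* Illustrative sketch (not part of the original file): the
stronger-or-equal relation is read the same way; the helper name is
hypothetical. E.g. (LOCK_X, LOCK_S) yields nonzero because an X lock
covers an S lock, while (LOCK_IS, LOCK_IX) yields 0. */
UNIV_INLINE
ibool
lock_mode_stronger_or_eq_sketch(
/*============================*/
	enum lock_mode	mode1,	/*!< in: candidate stronger mode */
	enum lock_mode	mode2)	/*!< in: candidate weaker mode */
{
	return((LOCK_MODE_STRONGER_OR_EQ >> (mode1 * LOCK_NUM + mode2)) & 1);
}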
#ifdef UNIV_DEBUG
UNIV_INTERN ibool lock_print_waits = FALSE;
/*********************************************************************//**
Validates the lock system.
@return TRUE if ok */
static
ibool
lock_validate(void);
/*===============*/
/*********************************************************************//**
Validates the record lock queues on a page.
@return TRUE if ok */
static
ibool
lock_rec_validate_page(
/*===================*/
const buf_block_t* block) /*!< in: buffer block */
__attribute__((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
/* The lock system */
UNIV_INTERN lock_sys_t* lock_sys = NULL;
/* We store info on the latest deadlock error to this buffer. InnoDB
Monitor will then fetch it and print */
UNIV_INTERN ibool lock_deadlock_found = FALSE;
UNIV_INTERN FILE* lock_latest_err_file;
/* Flags for recursive deadlock search */
#define LOCK_VICTIM_IS_START 1
#define LOCK_VICTIM_IS_OTHER 2
#define LOCK_EXCEED_MAX_DEPTH 3
/********************************************************************//**
Checks if a lock request results in a deadlock.
@return TRUE if a deadlock was detected and we chose trx as a victim;
FALSE if no deadlock, or there was a deadlock, but we chose other
transaction(s) as victim(s) */
static
ibool
lock_deadlock_occurs(
/*=================*/
lock_t* lock, /*!< in: lock the transaction is requesting */
trx_t* trx); /*!< in: transaction */
/********************************************************************//**
Looks recursively for a deadlock.
@return 0 if no deadlock found, LOCK_VICTIM_IS_START if there was a
deadlock and we chose 'start' as the victim, LOCK_VICTIM_IS_OTHER if a
deadlock was found and we chose some other trx as a victim: we must do
the search again in this last case because there may be another
deadlock!
LOCK_EXCEED_MAX_DEPTH if the lock search exceeds max steps or max depth. */
static
ulint
lock_deadlock_recursive(
/*====================*/
trx_t* start, /*!< in: recursion starting point */
trx_t* trx, /*!< in: a transaction waiting for a lock */
lock_t* wait_lock, /*!< in: lock that is waiting to be granted */
ulint* cost, /*!< in/out: number of calculation steps thus
far: if this exceeds LOCK_MAX_N_STEPS_...
we return LOCK_EXCEED_MAX_DEPTH */
ulint depth); /*!< in: recursion depth: if this exceeds
LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK, we
return LOCK_EXCEED_MAX_DEPTH */
/*********************************************************************//**
Gets the nth bit of a record lock.
@return TRUE if the bit is set; also returns FALSE if i == ULINT_UNDEFINED */
UNIV_INLINE
ibool
lock_rec_get_nth_bit(
/*=================*/
const lock_t* lock, /*!< in: record lock */
ulint i) /*!< in: index of the bit */
{
ulint byte_index;
ulint bit_index;
ut_ad(lock);
ut_ad(lock_get_type_low(lock) == LOCK_REC);
if (i >= lock->un_member.rec_lock.n_bits) {
return(FALSE);
}
byte_index = i / 8;
bit_index = i % 8;
return(1 & ((const byte*) &lock[1])[byte_index] >> bit_index);
}
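/* Worked example for the bit addressing above (illustrative): for heap
number i = 11, byte_index = 11 / 8 = 1 and bit_index = 11 % 8 = 3, i.e.
the lock bit is bit 3 of the second byte of the bitmap that is stored
immediately after the lock_t struct itself (hence the &lock[1]). */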
/*************************************************************************/
#define lock_mutex_enter_kernel() mutex_enter(&kernel_mutex)
#define lock_mutex_exit_kernel() mutex_exit(&kernel_mutex)
/*********************************************************************//**
Checks that a transaction id is sensible, i.e., not in the future.
@return TRUE if ok */
UNIV_INTERN
ibool
lock_check_trx_id_sanity(
/*=====================*/
trx_id_t trx_id, /*!< in: trx id */
const rec_t* rec, /*!< in: user record */
dict_index_t* index, /*!< in: index */
const ulint* offsets, /*!< in: rec_get_offsets(rec, index) */
ibool has_kernel_mutex)/*!< in: TRUE if the caller owns the
kernel mutex */
{
ibool is_ok = TRUE;
ut_ad(rec_offs_validate(rec, index, offsets));
if (!has_kernel_mutex) {
mutex_enter(&kernel_mutex);
}
/* A sanity check: the trx_id in rec must be smaller than the global
trx id counter */
if (UNIV_UNLIKELY(trx_id >= trx_sys->max_trx_id)) {
ut_print_timestamp(stderr);
fputs(" InnoDB: Error: transaction id associated"
" with record\n",
stderr);
rec_print_new(stderr, rec, offsets);
fputs("InnoDB: in ", stderr);
dict_index_name_print(stderr, NULL, index);
fprintf(stderr, "\n"
"InnoDB: is " TRX_ID_FMT " which is higher than the"
" global trx id counter " TRX_ID_FMT "!\n"
"InnoDB: The table is corrupt. You have to do"
" dump + drop + reimport.\n",
(ullint) trx_id, (ullint) trx_sys->max_trx_id);
is_ok = FALSE;
}
if (!has_kernel_mutex) {
mutex_exit(&kernel_mutex);
}
return(is_ok);
}
/*********************************************************************//**
Checks that a record is seen in a consistent read.
@return TRUE if sees, or FALSE if an earlier version of the record
should be retrieved */
UNIV_INTERN
ibool
lock_clust_rec_cons_read_sees(
/*==========================*/
const rec_t* rec, /*!< in: user record which should be read or
passed over by a read cursor */
dict_index_t* index, /*!< in: clustered index */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
read_view_t* view) /*!< in: consistent read view */
{
trx_id_t trx_id;
ut_ad(dict_index_is_clust(index));
ut_ad(page_rec_is_user_rec(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
/* NOTE that we call this function while holding the search
system latch. To obey the latching order we must NOT reserve the
kernel mutex here! */
trx_id = row_get_rec_trx_id(rec, index, offsets);
return(read_view_sees_trx_id(view, trx_id));
}
/*********************************************************************//**
Checks that a non-clustered index record is seen in a consistent read.
NOTE that a non-clustered index page contains so little information on
its modifications that also in the case FALSE, the present version of
rec may be the right, but we must check this from the clustered index
record.
@return TRUE if certainly sees, or FALSE if an earlier version of the
clustered index record might be needed */
UNIV_INTERN
ulint
lock_sec_rec_cons_read_sees(
/*========================*/
const rec_t* rec, /*!< in: user record which
should be read or passed over
by a read cursor */
const read_view_t* view) /*!< in: consistent read view */
{
trx_id_t max_trx_id;
ut_ad(page_rec_is_user_rec(rec));
/* NOTE that we might call this function while holding the search
system latch. To obey the latching order we must NOT reserve the
kernel mutex here! */
if (recv_recovery_is_on()) {
return(FALSE);
}
max_trx_id = page_get_max_trx_id(page_align(rec));
ut_ad(max_trx_id);
return(max_trx_id < view->up_limit_id);
}
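/* The check above is conservative: up_limit_id is the low-water mark of
the read view, below which every transaction id is visible to the view.
If even the largest trx id that modified the page is below that mark,
every record version on the page must be visible, and no clustered
index lookup is needed. */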
/*********************************************************************//**
Creates the lock system at database start. */
UNIV_INTERN
void
lock_sys_create(
/*============*/
ulint n_cells) /*!< in: number of slots in lock hash table */
{
lock_sys = mem_alloc(sizeof(lock_sys_t));
lock_sys->rec_hash = hash_create(n_cells);
lock_sys->rec_num = 0;
/* hash_create_mutexes(lock_sys->rec_hash, 2, SYNC_REC_LOCK); */
lock_latest_err_file = os_file_create_tmpfile();
ut_a(lock_latest_err_file);
}
/*********************************************************************//**
Closes the lock system at database shutdown. */
UNIV_INTERN
void
lock_sys_close(void)
/*================*/
{
if (lock_latest_err_file != NULL) {
fclose(lock_latest_err_file);
lock_latest_err_file = NULL;
}
hash_table_free(lock_sys->rec_hash);
mem_free(lock_sys);
lock_sys = NULL;
}
/*********************************************************************//**
Gets the size of a lock struct.
@return size in bytes */
UNIV_INTERN
ulint
lock_get_size(void)
/*===============*/
{
return((ulint)sizeof(lock_t));
}
/*********************************************************************//**
Gets the mode of a lock.
@return mode */
UNIV_INLINE
enum lock_mode
lock_get_mode(
/*==========*/
const lock_t* lock) /*!< in: lock */
{
ut_ad(lock);
return(lock->type_mode & LOCK_MODE_MASK);
}
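/* For illustration: type_mode packs the lock mode, the lock type and
flag bits into a single ulint. A minimal decomposition sketch, using
the mask macros that appear throughout this file (not compiled): */
#if 0 /* illustrative sketch only */
enum lock_mode mode = lock->type_mode & LOCK_MODE_MASK; /* e.g. LOCK_X */
ulint type = lock_get_type_low(lock); /* LOCK_REC or LOCK_TABLE */
ibool waiting = (lock->type_mode & LOCK_WAIT) != 0;
#endif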
/*********************************************************************//**
Gets the wait flag of a lock.
@return TRUE if waiting */
UNIV_INLINE
ibool
lock_get_wait(
/*==========*/
const lock_t* lock) /*!< in: lock */
{
ut_ad(lock);
if (UNIV_UNLIKELY(lock->type_mode & LOCK_WAIT)) {
return(TRUE);
}
return(FALSE);
}
/*********************************************************************//**
Gets the source table of an ALTER TABLE transaction. The table must be
covered by an IX or IS table lock.
@return the source table of transaction, if it is covered by an IX or
IS table lock; dest if there is no source table, and NULL if the
transaction is locking more than two tables or an inconsistency is
found */
UNIV_INTERN
dict_table_t*
lock_get_src_table(
/*===============*/
trx_t* trx, /*!< in: transaction */
dict_table_t* dest, /*!< in: destination of ALTER TABLE */
enum lock_mode* mode) /*!< out: lock mode of the source table */
{
dict_table_t* src;
lock_t* lock;
src = NULL;
*mode = LOCK_NONE;
for (lock = UT_LIST_GET_FIRST(trx->trx_locks);
lock;
lock = UT_LIST_GET_NEXT(trx_locks, lock)) {
lock_table_t* tab_lock;
enum lock_mode lock_mode;
if (!(lock_get_type_low(lock) & LOCK_TABLE)) {
/* We are only interested in table locks. */
continue;
}
tab_lock = &lock->un_member.tab_lock;
if (dest == tab_lock->table) {
/* We are not interested in the destination table. */
continue;
} else if (!src) {
/* This presumably is the source table. */
src = tab_lock->table;
if (UT_LIST_GET_LEN(src->locks) != 1
|| UT_LIST_GET_FIRST(src->locks) != lock) {
/* We only support the case when
there is only one lock on this table. */
return(NULL);
}
} else if (src != tab_lock->table) {
/* The transaction is locking more than
two tables (src and dest): abort */
return(NULL);
}
/* Check that the source table is locked by
LOCK_IX or LOCK_IS. */
lock_mode = lock_get_mode(lock);
if (lock_mode == LOCK_IX || lock_mode == LOCK_IS) {
if (*mode != LOCK_NONE && *mode != lock_mode) {
/* There are multiple locks on src. */
return(NULL);
}
*mode = lock_mode;
}
}
if (!src) {
/* No source table lock found: flag the situation to caller */
src = dest;
}
return(src);
}
/*********************************************************************//**
Determine if the given table is exclusively "owned" by the given
transaction, i.e., transaction holds LOCK_IX and possibly LOCK_AUTO_INC
on the table.
@return TRUE if table is only locked by trx, with LOCK_IX, and
possibly LOCK_AUTO_INC */
UNIV_INTERN
ibool
lock_is_table_exclusive(
/*====================*/
dict_table_t* table, /*!< in: table */
trx_t* trx) /*!< in: transaction */
{
const lock_t* lock;
ibool ok = FALSE;
ut_ad(table);
ut_ad(trx);
lock_mutex_enter_kernel();
for (lock = UT_LIST_GET_FIRST(table->locks);
lock;
lock = UT_LIST_GET_NEXT(locks, &lock->un_member.tab_lock)) {
if (lock->trx != trx) {
/* A lock on the table is held
by some other transaction. */
goto not_ok;
}
if (!(lock_get_type_low(lock) & LOCK_TABLE)) {
/* We are interested in table locks only. */
continue;
}
switch (lock_get_mode(lock)) {
case LOCK_IX:
ok = TRUE;
break;
case LOCK_AUTO_INC:
/* It is allowed for trx to hold an
auto_increment lock. */
break;
default:
not_ok:
/* Other table locks than LOCK_IX are not allowed. */
ok = FALSE;
goto func_exit;
}
}
func_exit:
lock_mutex_exit_kernel();
return(ok);
}
/*********************************************************************//**
Sets the wait flag of a lock and the back pointer in trx to lock. */
UNIV_INLINE
void
lock_set_lock_and_trx_wait(
/*=======================*/
lock_t* lock, /*!< in: lock */
trx_t* trx) /*!< in: trx */
{
ut_ad(lock);
ut_ad(trx->wait_lock == NULL);
trx->wait_lock = lock;
lock->type_mode |= LOCK_WAIT;
}
/**********************************************************************//**
The back pointer to a waiting lock request in the transaction is set to NULL
and the wait bit in lock type_mode is reset. */
UNIV_INLINE
void
lock_reset_lock_and_trx_wait(
/*=========================*/
lock_t* lock) /*!< in: record lock */
{
ut_ad(lock_get_wait(lock));
/* Reset the back pointer in trx to this waiting lock request */
if (!(lock->type_mode & LOCK_CONV_BY_OTHER)) {
ut_ad((lock->trx)->wait_lock == lock);
(lock->trx)->wait_lock = NULL;
} else {
ut_ad(lock_get_type_low(lock) == LOCK_REC);
}
lock->type_mode &= ~LOCK_WAIT;
}
/*********************************************************************//**
Gets the gap flag of a record lock.
@return TRUE if gap flag set */
UNIV_INLINE
ibool
lock_rec_get_gap(
/*=============*/
const lock_t* lock) /*!< in: record lock */
{
ut_ad(lock);
ut_ad(lock_get_type_low(lock) == LOCK_REC);
if (lock->type_mode & LOCK_GAP) {
return(TRUE);
}
return(FALSE);
}
/*********************************************************************//**
Gets the LOCK_REC_NOT_GAP flag of a record lock.
@return TRUE if LOCK_REC_NOT_GAP flag set */
UNIV_INLINE
ibool
lock_rec_get_rec_not_gap(
/*=====================*/
const lock_t* lock) /*!< in: record lock */
{
ut_ad(lock);
ut_ad(lock_get_type_low(lock) == LOCK_REC);
if (lock->type_mode & LOCK_REC_NOT_GAP) {
return(TRUE);
}
return(FALSE);
}
/*********************************************************************//**
Gets the waiting insert flag of a record lock.
@return TRUE if LOCK_INSERT_INTENTION flag set */
UNIV_INLINE
ibool
lock_rec_get_insert_intention(
/*==========================*/
const lock_t* lock) /*!< in: record lock */
{
ut_ad(lock);
ut_ad(lock_get_type_low(lock) == LOCK_REC);
if (lock->type_mode & LOCK_INSERT_INTENTION) {
return(TRUE);
}
return(FALSE);
}
/*********************************************************************//**
Calculates if lock mode 1 is stronger or equal to lock mode 2.
@return nonzero if mode1 stronger or equal to mode2 */
UNIV_INLINE
ulint
lock_mode_stronger_or_eq(
/*=====================*/
enum lock_mode mode1, /*!< in: lock mode */
enum lock_mode mode2) /*!< in: lock mode */
{
ut_ad(mode1 == LOCK_X || mode1 == LOCK_S || mode1 == LOCK_IX
|| mode1 == LOCK_IS || mode1 == LOCK_AUTO_INC);
ut_ad(mode2 == LOCK_X || mode2 == LOCK_S || mode2 == LOCK_IX
|| mode2 == LOCK_IS || mode2 == LOCK_AUTO_INC);
return((LOCK_MODE_STRONGER_OR_EQ) & LK(mode1, mode2));
}
/*********************************************************************//**
Calculates if lock mode 1 is compatible with lock mode 2.
@return nonzero if mode1 compatible with mode2 */
UNIV_INLINE
ulint
lock_mode_compatible(
/*=================*/
enum lock_mode mode1, /*!< in: lock mode */
enum lock_mode mode2) /*!< in: lock mode */
{
ut_ad(mode1 == LOCK_X || mode1 == LOCK_S || mode1 == LOCK_IX
|| mode1 == LOCK_IS || mode1 == LOCK_AUTO_INC);
ut_ad(mode2 == LOCK_X || mode2 == LOCK_S || mode2 == LOCK_IX
|| mode2 == LOCK_IS || mode2 == LOCK_AUTO_INC);
return((LOCK_MODE_COMPATIBILITY) & LK(mode1, mode2));
}
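/* A minimal usage sketch of the two predicates above, based on the
standard lock compatibility matrix (intention modes are mutually
compatible; X conflicts with everything; X covers S). Not compiled: */
#if 0 /* illustrative sketch only */
ut_a(lock_mode_compatible(LOCK_IS, LOCK_IX));
ut_a(!lock_mode_compatible(LOCK_S, LOCK_X));
ut_a(lock_mode_stronger_or_eq(LOCK_X, LOCK_S));
#endif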
/*********************************************************************//**
Checks if a lock request for a new lock has to wait for request lock2.
@return TRUE if new lock has to wait for lock2 to be removed */
UNIV_INLINE
ibool
lock_rec_has_to_wait(
/*=================*/
const trx_t* trx, /*!< in: trx of new lock */
ulint type_mode,/*!< in: precise mode of the new lock
to set: LOCK_S or LOCK_X, possibly
ORed to LOCK_GAP or LOCK_REC_NOT_GAP,
LOCK_INSERT_INTENTION */
const lock_t* lock2, /*!< in: another record lock; NOTE that
it is assumed that this has a lock bit
set on the same record as in the new
lock we are setting */
ibool lock_is_on_supremum) /*!< in: TRUE if we are setting the
lock on the 'supremum' record of an
index page: we know then that the lock
request is really for a 'gap' type lock */
{
ut_ad(trx && lock2);
ut_ad(lock_get_type_low(lock2) == LOCK_REC);
if (trx != lock2->trx
&& !lock_mode_compatible(LOCK_MODE_MASK & type_mode,
lock_get_mode(lock2))) {
/* We have somewhat complex rules when gap type record locks
cause waits */
if ((lock_is_on_supremum || (type_mode & LOCK_GAP))
&& !(type_mode & LOCK_INSERT_INTENTION)) {
/* Gap type locks without LOCK_INSERT_INTENTION flag
do not need to wait for anything. This is because
different users can have conflicting lock types
on gaps. */
return(FALSE);
}
if (!(type_mode & LOCK_INSERT_INTENTION)
&& lock_rec_get_gap(lock2)) {
/* A record lock (LOCK_ORDINARY or LOCK_REC_NOT_GAP)
does not need to wait for a gap type lock */
return(FALSE);
}
if ((type_mode & LOCK_GAP)
&& lock_rec_get_rec_not_gap(lock2)) {
/* Lock on gap does not need to wait for
a LOCK_REC_NOT_GAP type lock */
return(FALSE);
}
if (lock_rec_get_insert_intention(lock2)) {
/* No lock request needs to wait for an insert
intention lock to be removed. This is ok since our
rules allow conflicting locks on gaps. This eliminates
a spurious deadlock caused by a next-key lock waiting
for an insert intention lock; when the insert
intention lock was granted, the insert deadlocked on
the waiting next-key lock.
Also, insert intention locks do not disturb each
other. */
return(FALSE);
}
return(TRUE);
}
return(FALSE);
}
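/* In summary, for incompatible modes held by different transactions,
the gap rules above reduce to: a gap (or supremum) request only waits
if it carries LOCK_INSERT_INTENTION; a non-insert-intention request
never waits for a pure gap lock; a gap request never waits for a
LOCK_REC_NOT_GAP lock; and no request waits for an insert intention
lock. */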
/*********************************************************************//**
Checks if a lock request lock1 has to wait for request lock2.
@return TRUE if lock1 has to wait for lock2 to be removed */
UNIV_INTERN
ibool
lock_has_to_wait(
/*=============*/
const lock_t* lock1, /*!< in: waiting lock */
const lock_t* lock2) /*!< in: another lock; NOTE that it is
assumed that this has a lock bit set
on the same record as in lock1 if the
locks are record locks */
{
ut_ad(lock1 && lock2);
if (lock1->trx != lock2->trx
&& !lock_mode_compatible(lock_get_mode(lock1),
lock_get_mode(lock2))) {
if (lock_get_type_low(lock1) == LOCK_REC) {
ut_ad(lock_get_type_low(lock2) == LOCK_REC);
/* If this lock request is for a supremum record
then the second bit on the lock bitmap is set */
return(lock_rec_has_to_wait(lock1->trx,
lock1->type_mode, lock2,
lock_rec_get_nth_bit(
lock1, 1)));
}
return(TRUE);
}
return(FALSE);
}
/*============== RECORD LOCK BASIC FUNCTIONS ============================*/
/*********************************************************************//**
Gets the number of bits in a record lock bitmap.
@return number of bits */
UNIV_INLINE
ulint
lock_rec_get_n_bits(
/*================*/
const lock_t* lock) /*!< in: record lock */
{
return(lock->un_member.rec_lock.n_bits);
}
/**********************************************************************//**
Sets the nth bit of a record lock to TRUE. */
UNIV_INLINE
void
lock_rec_set_nth_bit(
/*=================*/
lock_t* lock, /*!< in: record lock */
ulint i) /*!< in: index of the bit */
{
ulint byte_index;
ulint bit_index;
ut_ad(lock);
ut_ad(lock_get_type_low(lock) == LOCK_REC);
ut_ad(i < lock->un_member.rec_lock.n_bits);
byte_index = i / 8;
bit_index = i % 8;
((byte*) &lock[1])[byte_index] |= 1 << bit_index;
}
/**********************************************************************//**
Looks for a set bit in a record lock bitmap. Returns ULINT_UNDEFINED
if none found.
@return bit index == heap number of the record, or ULINT_UNDEFINED if
none found */
UNIV_INTERN
ulint
lock_rec_find_set_bit(
/*==================*/
const lock_t* lock) /*!< in: record lock with at least one bit set */
{
ulint i;
for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
if (lock_rec_get_nth_bit(lock, i)) {
return(i);
}
}
return(ULINT_UNDEFINED);
}
/**********************************************************************//**
Resets the nth bit of a record lock. */
UNIV_INLINE
void
lock_rec_reset_nth_bit(
/*===================*/
lock_t* lock, /*!< in: record lock */
ulint i) /*!< in: index of the bit to reset; the bit
must be set when this function is called */
{
ulint byte_index;
ulint bit_index;
ut_ad(lock);
ut_ad(lock_get_type_low(lock) == LOCK_REC);
ut_ad(i < lock->un_member.rec_lock.n_bits);
byte_index = i / 8;
bit_index = i % 8;
((byte*) &lock[1])[byte_index] &= ~(1 << bit_index);
}
/*********************************************************************//**
Gets the first or next record lock on a page.
@return next lock, NULL if none exists */
UNIV_INLINE
const lock_t*
lock_rec_get_next_on_page_const(
/*============================*/
const lock_t* lock) /*!< in: a record lock */
{
ulint space;
ulint page_no;
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_type_low(lock) == LOCK_REC);
space = lock->un_member.rec_lock.space;
page_no = lock->un_member.rec_lock.page_no;
for (;;) {
lock = HASH_GET_NEXT(hash, lock);
if (!lock) {
break;
}
if ((lock->un_member.rec_lock.space == space)
&& (lock->un_member.rec_lock.page_no == page_no)) {
break;
}
}
return(lock);
}
/*********************************************************************//**
Gets the first or next record lock on a page.
@return next lock, NULL if none exists */
UNIV_INLINE
lock_t*
lock_rec_get_next_on_page(
/*======================*/
lock_t* lock) /*!< in: a record lock */
{
return((lock_t*) lock_rec_get_next_on_page_const(lock));
}
/*********************************************************************//**
Gets the first record lock on a page, where the page is identified by its
file address.
@return first lock, NULL if none exists */
UNIV_INLINE
lock_t*
lock_rec_get_first_on_page_addr(
/*============================*/
ulint space, /*!< in: space */
ulint page_no)/*!< in: page number */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
lock = HASH_GET_FIRST(lock_sys->rec_hash,
lock_rec_hash(space, page_no));
while (lock) {
if ((lock->un_member.rec_lock.space == space)
&& (lock->un_member.rec_lock.page_no == page_no)) {
break;
}
lock = HASH_GET_NEXT(hash, lock);
}
return(lock);
}
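/* A minimal usage sketch of the lookup above; the caller must hold
kernel_mutex, as asserted. Not compiled: */
#if 0 /* illustrative sketch only */
lock_t* lock;
for (lock = lock_rec_get_first_on_page_addr(space, page_no);
lock;
lock = lock_rec_get_next_on_page(lock)) {
	/* iterate over all record locks set on this page */
}
#endif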
/*********************************************************************//**
Returns TRUE if there are explicit record locks on a page.
@return TRUE if there are explicit record locks on the page */
UNIV_INTERN
ibool
lock_rec_expl_exist_on_page(
/*========================*/
ulint space, /*!< in: space id */
ulint page_no)/*!< in: page number */
{
ibool ret;
mutex_enter(&kernel_mutex);
if (lock_rec_get_first_on_page_addr(space, page_no)) {
ret = TRUE;
} else {
ret = FALSE;
}
mutex_exit(&kernel_mutex);
return(ret);
}
/*********************************************************************//**
Gets the first record lock on a page, where the page is identified by a
pointer to it.
@return first lock, NULL if none exists */
UNIV_INLINE
lock_t*
lock_rec_get_first_on_page(
/*=======================*/
const buf_block_t* block) /*!< in: buffer block */
{
ulint hash;
lock_t* lock;
ulint space = buf_block_get_space(block);
ulint page_no = buf_block_get_page_no(block);
ut_ad(mutex_own(&kernel_mutex));
hash = buf_block_get_lock_hash_val(block);
lock = HASH_GET_FIRST(lock_sys->rec_hash, hash);
while (lock) {
if ((lock->un_member.rec_lock.space == space)
&& (lock->un_member.rec_lock.page_no == page_no)) {
break;
}
lock = HASH_GET_NEXT(hash, lock);
}
return(lock);
}
/*********************************************************************//**
Gets the next explicit lock request on a record.
@return next lock, NULL if none exists or if heap_no == ULINT_UNDEFINED */
UNIV_INLINE
lock_t*
lock_rec_get_next(
/*==============*/
ulint heap_no,/*!< in: heap number of the record */
lock_t* lock) /*!< in: lock */
{
ut_ad(mutex_own(&kernel_mutex));
do {
ut_ad(lock_get_type_low(lock) == LOCK_REC);
lock = lock_rec_get_next_on_page(lock);
} while (lock && !lock_rec_get_nth_bit(lock, heap_no));
return(lock);
}
/*********************************************************************//**
Gets the first explicit lock request on a record.
@return first lock, NULL if none exists */
UNIV_INLINE
lock_t*
lock_rec_get_first(
/*===============*/
const buf_block_t* block, /*!< in: block containing the record */
ulint heap_no)/*!< in: heap number of the record */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
for (lock = lock_rec_get_first_on_page(block); lock;
lock = lock_rec_get_next_on_page(lock)) {
if (lock_rec_get_nth_bit(lock, heap_no)) {
break;
}
}
return(lock);
}
/*********************************************************************//**
Resets the record lock bitmap to zero. NOTE: does not touch the wait_lock
pointer in the transaction! This function is used in lock object creation
and resetting. */
static
void
lock_rec_bitmap_reset(
/*==================*/
lock_t* lock) /*!< in: record lock */
{
ulint n_bytes;
ut_ad(lock_get_type_low(lock) == LOCK_REC);
/* Reset to zero the bitmap which resides immediately after the lock
struct */
n_bytes = lock_rec_get_n_bits(lock) / 8;
ut_ad((lock_rec_get_n_bits(lock) % 8) == 0);
memset(&lock[1], 0, n_bytes);
}
/*********************************************************************//**
Copies a record lock to heap.
@return copy of lock */
static
lock_t*
lock_rec_copy(
/*==========*/
const lock_t* lock, /*!< in: record lock */
mem_heap_t* heap) /*!< in: memory heap */
{
ulint size;
ut_ad(lock_get_type_low(lock) == LOCK_REC);
size = sizeof(lock_t) + lock_rec_get_n_bits(lock) / 8;
return(mem_heap_dup(heap, lock, size));
}
/*********************************************************************//**
Gets the previous record lock set on a record.
@return previous lock on the same record, NULL if none exists */
UNIV_INTERN
const lock_t*
lock_rec_get_prev(
/*==============*/
const lock_t* in_lock,/*!< in: record lock */
ulint heap_no)/*!< in: heap number of the record */
{
lock_t* lock;
ulint space;
ulint page_no;
lock_t* found_lock = NULL;
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_type_low(in_lock) == LOCK_REC);
space = in_lock->un_member.rec_lock.space;
page_no = in_lock->un_member.rec_lock.page_no;
lock = lock_rec_get_first_on_page_addr(space, page_no);
for (;;) {
ut_ad(lock);
if (lock == in_lock) {
return(found_lock);
}
if (lock_rec_get_nth_bit(lock, heap_no)) {
found_lock = lock;
}
lock = lock_rec_get_next_on_page(lock);
}
}
/*============= FUNCTIONS FOR ANALYZING TABLE LOCK QUEUE ================*/
/*********************************************************************//**
Checks if a transaction has the specified table lock, or stronger.
@return lock or NULL */
UNIV_INLINE
lock_t*
lock_table_has(
/*===========*/
trx_t* trx, /*!< in: transaction */
dict_table_t* table, /*!< in: table */
enum lock_mode mode) /*!< in: lock mode */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
/* Look for equal or stronger locks the same trx already has on the
table */
lock = UT_LIST_GET_LAST(table->locks);
while (lock != NULL) {
if (lock->trx == trx
&& lock_mode_stronger_or_eq(lock_get_mode(lock), mode)) {
/* The same trx already has locked the table in
a mode stronger or equal to the mode given */
ut_ad(!lock_get_wait(lock));
return(lock);
}
lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock);
}
return(NULL);
}
/*============= FUNCTIONS FOR ANALYZING RECORD LOCK QUEUE ================*/
/*********************************************************************//**
Checks if a transaction has a GRANTED explicit lock on rec stronger or equal
to precise_mode.
@return lock or NULL */
UNIV_INLINE
lock_t*
lock_rec_has_expl(
/*==============*/
ulint precise_mode,/*!< in: LOCK_S or LOCK_X
possibly ORed to LOCK_GAP or
LOCK_REC_NOT_GAP, for a
supremum record we always regard
this as a gap type request */
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
trx_t* trx) /*!< in: transaction */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S
|| (precise_mode & LOCK_MODE_MASK) == LOCK_X);
ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));
lock = lock_rec_get_first(block, heap_no);
while (lock) {
if (lock->trx == trx
&& !lock_is_wait_not_by_other(lock->type_mode)
&& lock_mode_stronger_or_eq(lock_get_mode(lock),
precise_mode & LOCK_MODE_MASK)
&& (!lock_rec_get_rec_not_gap(lock)
|| (precise_mode & LOCK_REC_NOT_GAP)
|| heap_no == PAGE_HEAP_NO_SUPREMUM)
&& (!lock_rec_get_gap(lock)
|| (precise_mode & LOCK_GAP)
|| heap_no == PAGE_HEAP_NO_SUPREMUM)
&& (!lock_rec_get_insert_intention(lock))) {
return(lock);
}
lock = lock_rec_get_next(heap_no, lock);
}
return(NULL);
}
#ifdef UNIV_DEBUG
/*********************************************************************//**
Checks if some other transaction has a lock request in the queue.
@return lock or NULL */
static
lock_t*
lock_rec_other_has_expl_req(
/*========================*/
enum lock_mode mode, /*!< in: LOCK_S or LOCK_X */
ulint gap, /*!< in: LOCK_GAP if also gap
locks are taken into account,
or 0 if not */
ulint wait, /*!< in: LOCK_WAIT if also
waiting locks are taken into
account, or 0 if not */
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
const trx_t* trx) /*!< in: transaction, or NULL if
requests by all transactions
are taken into account */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
ut_ad(mode == LOCK_X || mode == LOCK_S);
ut_ad(gap == 0 || gap == LOCK_GAP);
ut_ad(wait == 0 || wait == LOCK_WAIT);
lock = lock_rec_get_first(block, heap_no);
while (lock) {
if (lock->trx != trx
&& (gap
|| !(lock_rec_get_gap(lock)
|| heap_no == PAGE_HEAP_NO_SUPREMUM))
&& (wait || !lock_get_wait(lock))
&& lock_mode_stronger_or_eq(lock_get_mode(lock), mode)) {
return(lock);
}
lock = lock_rec_get_next(heap_no, lock);
}
return(NULL);
}
#endif /* UNIV_DEBUG */
/*********************************************************************//**
Checks if some other transaction has a conflicting explicit lock request
in the queue, so that we have to wait.
@return lock or NULL */
static
lock_t*
lock_rec_other_has_conflicting(
/*===========================*/
enum lock_mode mode, /*!< in: LOCK_S or LOCK_X,
possibly ORed to LOCK_GAP or
LOCK_REC_NOT_GAP,
LOCK_INSERT_INTENTION */
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
trx_t* trx) /*!< in: our transaction */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
lock = lock_rec_get_first(block, heap_no);
if (UNIV_LIKELY_NULL(lock)) {
if (UNIV_UNLIKELY(heap_no == PAGE_HEAP_NO_SUPREMUM)) {
do {
if (lock_rec_has_to_wait(trx, mode, lock,
TRUE)) {
return(lock);
}
lock = lock_rec_get_next(heap_no, lock);
} while (lock);
} else {
do {
if (lock_rec_has_to_wait(trx, mode, lock,
FALSE)) {
return(lock);
}
lock = lock_rec_get_next(heap_no, lock);
} while (lock);
}
}
return(NULL);
}
/*********************************************************************//**
Looks for a suitable type record lock struct by the same trx on the same page.
This can be used to save space when a new record lock should be set on a page:
no new struct is needed if a suitable old one is found.
@return lock or NULL */
UNIV_INLINE
lock_t*
lock_rec_find_similar_on_page(
/*==========================*/
ulint type_mode, /*!< in: lock type_mode field */
ulint heap_no, /*!< in: heap number of the record */
lock_t* lock, /*!< in: lock_rec_get_first_on_page() */
const trx_t* trx) /*!< in: transaction */
{
ut_ad(mutex_own(&kernel_mutex));
while (lock != NULL) {
if (lock->trx == trx
&& lock->type_mode == type_mode
&& lock_rec_get_n_bits(lock) > heap_no) {
return(lock);
}
lock = lock_rec_get_next_on_page(lock);
}
return(NULL);
}
/*********************************************************************//**
Checks if some transaction has an implicit x-lock on a record in a secondary
index.
@return transaction which has the x-lock, or NULL */
static
trx_t*
lock_sec_rec_some_has_impl_off_kernel(
/*==================================*/
const rec_t* rec, /*!< in: user record */
dict_index_t* index, /*!< in: secondary index */
const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */
{
const page_t* page = page_align(rec);
ut_ad(mutex_own(&kernel_mutex));
ut_ad(!dict_index_is_clust(index));
ut_ad(page_rec_is_user_rec(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
/* Some transaction may have an implicit x-lock on the record only
if the max trx id for the page >= min trx id for the trx list, or
database recovery is running. We do not write the changes of a page
max trx id to the log, and therefore during recovery, this value
for a page may be incorrect. */
if (page_get_max_trx_id(page) < trx_list_get_min_trx_id()
&& !recv_recovery_is_on()) {
return(NULL);
}
/* Ok, in this case it is possible that some transaction has an
implicit x-lock. We have to look in the clustered index. */
if (!lock_check_trx_id_sanity(page_get_max_trx_id(page),
rec, index, offsets, TRUE)) {
buf_page_print(page, 0, 0);
/* The page is corrupt: try to avoid a crash by returning
NULL */
return(NULL);
}
return(row_vers_impl_x_locked_off_kernel(rec, index, offsets));
}
/*********************************************************************//**
Returns the approximate number of record locks (bits set in the bitmap) for
this transaction. Since delete-marked records may be removed, the
record count will not be precise. */
UNIV_INTERN
ulint
lock_number_of_rows_locked(
/*=======================*/
const trx_t* trx) /*!< in: transaction */
{
lock_t* lock;
ulint n_records = 0;
ulint n_bits;
ulint n_bit;
lock = UT_LIST_GET_FIRST(trx->trx_locks);
while (lock) {
if (lock_get_type_low(lock) == LOCK_REC) {
n_bits = lock_rec_get_n_bits(lock);
for (n_bit = 0; n_bit < n_bits; n_bit++) {
if (lock_rec_get_nth_bit(lock, n_bit)) {
n_records++;
}
}
}
lock = UT_LIST_GET_NEXT(trx_locks, lock);
}
return (n_records);
}
/*============== RECORD LOCK CREATION AND QUEUE MANAGEMENT =============*/
/*********************************************************************//**
Creates a new record lock and inserts it to the lock queue. Does NOT check
for deadlocks or lock compatibility!
@return created lock */
static
lock_t*
lock_rec_create(
/*============*/
ulint type_mode,/*!< in: lock mode and wait
flag, type is ignored and
replaced by LOCK_REC */
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
dict_index_t* index, /*!< in: index of record */
trx_t* trx) /*!< in: transaction */
{
lock_t* lock;
ulint page_no;
ulint space;
ulint n_bits;
ulint n_bytes;
const page_t* page;
ut_ad(mutex_own(&kernel_mutex));
space = buf_block_get_space(block);
page_no = buf_block_get_page_no(block);
page = block->frame;
btr_assert_not_corrupted(block, index);
/* If rec is the supremum record, then we reset the gap and
LOCK_REC_NOT_GAP bits, as all locks on the supremum are
automatically of the gap type */
if (UNIV_UNLIKELY(heap_no == PAGE_HEAP_NO_SUPREMUM)) {
ut_ad(!(type_mode & LOCK_REC_NOT_GAP));
type_mode = type_mode & ~(LOCK_GAP | LOCK_REC_NOT_GAP);
}
/* Make lock bitmap bigger by a safety margin */
n_bits = page_dir_get_n_heap(page) + LOCK_PAGE_BITMAP_MARGIN;
n_bytes = 1 + n_bits / 8;
lock = mem_heap_alloc(trx->lock_heap, sizeof(lock_t) + n_bytes);
UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock);
lock->trx = trx;
lock->type_mode = (type_mode & ~LOCK_TYPE_MASK) | LOCK_REC;
lock->index = index;
lock->un_member.rec_lock.space = space;
lock->un_member.rec_lock.page_no = page_no;
lock->un_member.rec_lock.n_bits = n_bytes * 8;
/* Reset to zero the bitmap which resides immediately after the
lock struct */
lock_rec_bitmap_reset(lock);
/* Set the bit corresponding to rec */
lock_rec_set_nth_bit(lock, heap_no);
HASH_INSERT(lock_t, hash, lock_sys->rec_hash,
lock_rec_fold(space, page_no), lock);
lock_sys->rec_num++;
if (lock_is_wait_not_by_other(type_mode)) {
lock_set_lock_and_trx_wait(lock, trx);
}
return(lock);
}
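/* Worked example of the bitmap sizing above, assuming
LOCK_PAGE_BITMAP_MARGIN is 64: for a page with 100 heap entries,
n_bits = 164, n_bytes = 1 + 164 / 8 = 21, and the stored
rec_lock.n_bits becomes 21 * 8 = 168, so the bitmap always covers a
safety margin of heap numbers beyond the current page directory. */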
/*********************************************************************//**
Enqueues a waiting request for a lock which cannot be granted immediately.
Checks for deadlocks.
@return DB_LOCK_WAIT, DB_DEADLOCK, DB_QUE_THR_SUSPENDED, or
DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that
there was a deadlock, but another transaction was chosen as a victim,
and we got the lock immediately: no need to wait then */
static
enum db_err
lock_rec_enqueue_waiting(
/*=====================*/
ulint type_mode,/*!< in: lock mode this
transaction is requesting:
LOCK_S or LOCK_X, possibly
ORed with LOCK_GAP or
LOCK_REC_NOT_GAP, ORed with
LOCK_INSERT_INTENTION if this
waiting lock request is set
when performing an insert of
an index record */
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
lock_t* lock, /*!< in: lock object; NULL if a new
one should be created. */
dict_index_t* index, /*!< in: index of record */
que_thr_t* thr) /*!< in: query thread */
{
trx_t* trx;
ulint sec;
ulint ms;
ut_ad(mutex_own(&kernel_mutex));
/* Test if there already is some other reason to suspend the thread:
we do not enqueue a lock request if the query thread should be
stopped anyway */
if (UNIV_UNLIKELY(que_thr_stop(thr))) {
ut_error;
return(DB_QUE_THR_SUSPENDED);
}
trx = thr_get_trx(thr);
switch (trx_get_dict_operation(trx)) {
case TRX_DICT_OP_NONE:
break;
case TRX_DICT_OP_TABLE:
case TRX_DICT_OP_INDEX:
ut_print_timestamp(stderr);
fputs(" InnoDB: Error: a record lock wait happens"
" in a dictionary operation!\n"
"InnoDB: ", stderr);
dict_index_name_print(stderr, trx, index);
fputs(".\n"
"InnoDB: Submit a detailed bug report"
" to http://bugs.mysql.com\n",
stderr);
ut_ad(0);
}
if (lock == NULL) {
/* Enqueue the lock request that will wait to be granted */
lock = lock_rec_create(type_mode | LOCK_WAIT,
block, heap_no, index, trx);
} else {
ut_ad(lock->type_mode & LOCK_WAIT);
ut_ad(lock->type_mode & LOCK_CONV_BY_OTHER);
lock->type_mode &= ~LOCK_CONV_BY_OTHER;
lock_set_lock_and_trx_wait(lock, trx);
}
/* Check if a deadlock occurs: if yes, remove the lock request and
return an error code */
if (UNIV_UNLIKELY(lock_deadlock_occurs(lock, trx))) {
lock_reset_lock_and_trx_wait(lock);
lock_rec_reset_nth_bit(lock, heap_no);
return(DB_DEADLOCK);
}
/* If there was a deadlock but we chose another transaction as a
victim, it is possible that we already have the lock now granted! */
if (trx->wait_lock == NULL) {
return(DB_SUCCESS_LOCKED_REC);
}
trx->que_state = TRX_QUE_LOCK_WAIT;
trx->was_chosen_as_deadlock_victim = FALSE;
trx->wait_started = time(NULL);
if (UNIV_UNLIKELY(trx->take_stats)) {
ut_usectime(&sec, &ms);
trx->lock_que_wait_ustarted = (ib_uint64_t)sec * 1000000 + ms;
}
ut_a(que_thr_stop(thr));
#ifdef UNIV_DEBUG
if (lock_print_waits) {
fprintf(stderr, "Lock wait for trx " TRX_ID_FMT " in index ",
(ullint) trx->id);
ut_print_name(stderr, trx, FALSE, index->name);
}
#endif /* UNIV_DEBUG */
return(DB_LOCK_WAIT);
}
/*********************************************************************//**
Adds a record lock request in the record queue. The request is normally
added as the last in the queue, but if there are no waiting lock requests
on the record, and the request to be added is not a waiting request, we
can reuse a suitable record lock object already existing on the same page,
just setting the appropriate bit in its bitmap. This is a low-level function
which does NOT check for deadlocks or lock compatibility!
@return lock where the bit was set */
static
lock_t*
lock_rec_add_to_queue(
/*==================*/
ulint type_mode,/*!< in: lock mode, wait, gap
etc. flags; type is ignored
and replaced by LOCK_REC */
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
dict_index_t* index, /*!< in: index of record */
trx_t* trx) /*!< in: transaction */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_DEBUG
switch (type_mode & LOCK_MODE_MASK) {
case LOCK_X:
case LOCK_S:
break;
default:
ut_error;
}
if (!(type_mode & (LOCK_WAIT | LOCK_GAP))) {
enum lock_mode mode = (type_mode & LOCK_MODE_MASK) == LOCK_S
? LOCK_X
: LOCK_S;
lock_t* other_lock
= lock_rec_other_has_expl_req(mode, 0, LOCK_WAIT,
block, heap_no, trx);
ut_a(!other_lock);
}
#endif /* UNIV_DEBUG */
type_mode |= LOCK_REC;
/* If rec is the supremum record, then we can reset the gap bit, as
all locks on the supremum are automatically of the gap type, and we
try to avoid unnecessary memory consumption of a new record lock
struct for a gap type lock */
if (UNIV_UNLIKELY(heap_no == PAGE_HEAP_NO_SUPREMUM)) {
ut_ad(!(type_mode & LOCK_REC_NOT_GAP));
/* There should never be LOCK_REC_NOT_GAP on a supremum
record, but let us play safe */
type_mode = type_mode & ~(LOCK_GAP | LOCK_REC_NOT_GAP);
}
/* Look for a waiting lock request on the same record or on a gap */
lock = lock_rec_get_first_on_page(block);
while (lock != NULL) {
if (lock_get_wait(lock)
&& (lock_rec_get_nth_bit(lock, heap_no))) {
goto somebody_waits;
}
lock = lock_rec_get_next_on_page(lock);
}
if (UNIV_LIKELY(!(type_mode & LOCK_WAIT))) {
/* Look for a similar record lock on the same page:
if one is found and there are no waiting lock requests,
we can just set the bit */
lock = lock_rec_find_similar_on_page(
type_mode, heap_no,
lock_rec_get_first_on_page(block), trx);
if (lock) {
lock_rec_set_nth_bit(lock, heap_no);
return(lock);
}
}
somebody_waits:
return(lock_rec_create(type_mode, block, heap_no, index, trx));
}
/** Record locking request status */
enum lock_rec_req_status {
/** Failed to acquire a lock */
LOCK_REC_FAIL,
/** Succeeded in acquiring a lock (implicit or already acquired) */
LOCK_REC_SUCCESS,
/** Explicitly created a new lock */
LOCK_REC_SUCCESS_CREATED
};
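/* These statuses map onto error codes in lock_rec_lock() below:
LOCK_REC_SUCCESS becomes DB_SUCCESS, LOCK_REC_SUCCESS_CREATED becomes
DB_SUCCESS_LOCKED_REC, and LOCK_REC_FAIL falls back to the slow
routine. */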
/*********************************************************************//**
This is a fast routine for locking a record in the most common cases:
there are no explicit locks on the page, or there is just one lock, owned
by this transaction, and of the right type_mode. This is a low-level function
which does NOT look at implicit locks! Checks lock compatibility within
explicit locks. This function sets a normal next-key lock, or in the case of
a page supremum record, a gap type lock.
@return whether the locking succeeded */
UNIV_INLINE
enum lock_rec_req_status
lock_rec_lock_fast(
/*===============*/
ibool impl, /*!< in: if TRUE, no lock is set
if no wait is necessary: we
assume that the caller will
set an implicit lock */
ulint mode, /*!< in: lock mode: LOCK_X or
LOCK_S possibly ORed to either
LOCK_GAP or LOCK_REC_NOT_GAP */
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of record */
dict_index_t* index, /*!< in: index of record */
que_thr_t* thr) /*!< in: query thread */
{
lock_t* lock;
trx_t* trx;
ut_ad(mutex_own(&kernel_mutex));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
ut_ad((LOCK_MODE_MASK & mode) == LOCK_S
|| (LOCK_MODE_MASK & mode) == LOCK_X);
ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
|| mode - (LOCK_MODE_MASK & mode) == 0
|| mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP);
DBUG_EXECUTE_IF("innodb_report_deadlock", return(LOCK_REC_FAIL););
lock = lock_rec_get_first_on_page(block);
trx = thr_get_trx(thr);
if (lock == NULL) {
if (!impl) {
lock_rec_create(mode, block, heap_no, index, trx);
}
return(LOCK_REC_SUCCESS_CREATED);
}
if (lock_rec_get_next_on_page(lock)) {
return(LOCK_REC_FAIL);
}
if (lock->trx != trx
|| lock->type_mode != (mode | LOCK_REC)
|| lock_rec_get_n_bits(lock) <= heap_no) {
return(LOCK_REC_FAIL);
}
if (!impl) {
/* If the nth bit of the record lock is already set then we
do not set a new lock bit, otherwise we set it */
if (!lock_rec_get_nth_bit(lock, heap_no)) {
lock_rec_set_nth_bit(lock, heap_no);
return(LOCK_REC_SUCCESS_CREATED);
}
}
return(LOCK_REC_SUCCESS);
}
/*********************************************************************//**
This is the general, and slower, routine for locking a record. This is a
low-level function which does NOT look at implicit locks! Checks lock
compatibility within explicit locks. This function sets a normal next-key
lock, or in the case of a page supremum record, a gap type lock.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
static
enum db_err
lock_rec_lock_slow(
/*===============*/
ibool impl, /*!< in: if TRUE, no lock is set
if no wait is necessary: we
assume that the caller will
set an implicit lock */
ulint mode, /*!< in: lock mode: LOCK_X or
LOCK_S possibly ORed to either
LOCK_GAP or LOCK_REC_NOT_GAP */
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of record */
dict_index_t* index, /*!< in: index of record */
que_thr_t* thr) /*!< in: query thread */
{
trx_t* trx;
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
ut_ad((LOCK_MODE_MASK & mode) == LOCK_S
|| (LOCK_MODE_MASK & mode) == LOCK_X);
ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
|| mode - (LOCK_MODE_MASK & mode) == 0
|| mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP);
trx = thr_get_trx(thr);
DBUG_EXECUTE_IF("innodb_report_deadlock", return(DB_DEADLOCK););
lock = lock_rec_has_expl(mode, block, heap_no, trx);
if (lock) {
if (lock->type_mode & LOCK_CONV_BY_OTHER) {
/* This granted lock or waiting lock request was
created by another transaction, not by the
transaction (trx) itself. So, the transaction (trx)
must treat it correctly, according to whether it
has been granted or not. */
if (lock->type_mode & LOCK_WAIT) {
/* This lock request has not been granted yet.
We must wait until it is granted. */
goto enqueue_waiting;
} else {
/* This lock request was already granted.
We just clear the conversion flag. */
lock->type_mode &= ~LOCK_CONV_BY_OTHER;
}
}
/* The trx already has a strong enough lock on rec: do
nothing */
} else if (lock_rec_other_has_conflicting(mode, block, heap_no, trx)) {
/* If another transaction has a non-gap conflicting request in
the queue, as this transaction does not have a lock strong
enough already granted on the record, we have to wait. */
ut_ad(lock == NULL);
enqueue_waiting:
return(lock_rec_enqueue_waiting(mode, block, heap_no,
lock, index, thr));
} else if (!impl) {
/* Set the requested lock on the record */
lock_rec_add_to_queue(LOCK_REC | mode, block,
heap_no, index, trx);
return(DB_SUCCESS_LOCKED_REC);
}
return(DB_SUCCESS);
}
/*********************************************************************//**
Tries to lock the specified record in the mode requested. If not immediately
possible, enqueues a waiting lock request. This is a low-level function
which does NOT look at implicit locks! Checks lock compatibility within
explicit locks. This function sets a normal next-key lock, or in the case
of a page supremum record, a gap type lock.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
static
enum db_err
lock_rec_lock(
/*==========*/
ibool impl, /*!< in: if TRUE, no lock is set
if no wait is necessary: we
assume that the caller will
set an implicit lock */
ulint mode, /*!< in: lock mode: LOCK_X or
LOCK_S possibly ORed to either
LOCK_GAP or LOCK_REC_NOT_GAP */
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of record */
dict_index_t* index, /*!< in: index of record */
que_thr_t* thr) /*!< in: query thread */
{
ut_ad(mutex_own(&kernel_mutex));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
ut_ad((LOCK_MODE_MASK & mode) != LOCK_X
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
ut_ad((LOCK_MODE_MASK & mode) == LOCK_S
|| (LOCK_MODE_MASK & mode) == LOCK_X);
ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
|| mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP
|| mode - (LOCK_MODE_MASK & mode) == 0);
/* We try a simplified and faster subroutine for the most
common cases */
switch (lock_rec_lock_fast(impl, mode, block, heap_no, index, thr)) {
case LOCK_REC_SUCCESS:
return(DB_SUCCESS);
case LOCK_REC_SUCCESS_CREATED:
return(DB_SUCCESS_LOCKED_REC);
case LOCK_REC_FAIL:
return(lock_rec_lock_slow(impl, mode, block,
heap_no, index, thr));
}
ut_error;
return(DB_ERROR);
}
/*********************************************************************//**
Checks if a waiting record lock request still has to wait in a queue.
@return TRUE if still has to wait */
static
ibool
lock_rec_has_to_wait_in_queue(
/*==========================*/
lock_t* wait_lock) /*!< in: waiting record lock */
{
lock_t* lock;
ulint space;
ulint page_no;
ulint heap_no;
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_wait(wait_lock));
ut_ad(lock_get_type_low(wait_lock) == LOCK_REC);
space = wait_lock->un_member.rec_lock.space;
page_no = wait_lock->un_member.rec_lock.page_no;
heap_no = lock_rec_find_set_bit(wait_lock);
lock = lock_rec_get_first_on_page_addr(space, page_no);
while (lock != wait_lock) {
if (lock_rec_get_nth_bit(lock, heap_no)
&& lock_has_to_wait(wait_lock, lock)) {
return(TRUE);
}
lock = lock_rec_get_next_on_page(lock);
}
return(FALSE);
}
/*************************************************************//**
Grants a lock to a waiting lock request and releases the waiting
transaction. */
static
void
lock_grant(
/*=======*/
lock_t* lock) /*!< in/out: waiting lock request */
{
ut_ad(mutex_own(&kernel_mutex));
lock_reset_lock_and_trx_wait(lock);
if (lock_get_mode(lock) == LOCK_AUTO_INC) {
trx_t* trx = lock->trx;
dict_table_t* table = lock->un_member.tab_lock.table;
if (table->autoinc_trx == trx) {
fprintf(stderr,
"InnoDB: Error: trx already had"
" an AUTO-INC lock!\n");
} else {
table->autoinc_trx = trx;
ib_vector_push(trx->autoinc_locks, lock);
}
}
#ifdef UNIV_DEBUG
if (lock_print_waits) {
fprintf(stderr, "Lock wait for trx " TRX_ID_FMT " ends\n",
(ullint) lock->trx->id);
}
#endif /* UNIV_DEBUG */
/* If we are resolving a deadlock by choosing another transaction
as a victim, then our original transaction may not be in the
TRX_QUE_LOCK_WAIT state, and there is no need to end the lock wait
for it */
if (!(lock->type_mode & LOCK_CONV_BY_OTHER)
&& lock->trx->que_state == TRX_QUE_LOCK_WAIT) {
trx_end_lock_wait(lock->trx);
}
}
/*************************************************************//**
Cancels a waiting record lock request and releases the waiting transaction
that requested it. NOTE: does NOT check if waiting lock requests behind this
one can now be granted! */
static
void
lock_rec_cancel(
/*============*/
lock_t* lock) /*!< in: waiting record lock request */
{
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_type_low(lock) == LOCK_REC);
ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
/* Reset the bit (there can be only one set bit) in the lock bitmap */
lock_rec_reset_nth_bit(lock, lock_rec_find_set_bit(lock));
/* Reset the wait flag and the back pointer to lock in trx */
lock_reset_lock_and_trx_wait(lock);
/* The following function releases the trx from lock wait */
trx_end_lock_wait(lock->trx);
}
/*************************************************************//**
Removes a record lock request, waiting or granted, from the queue and
grants locks to other transactions in the queue if they now are entitled
to a lock. NOTE: all record locks contained in in_lock are removed. */
static
void
lock_rec_dequeue_from_page(
/*=======================*/
lock_t* in_lock)/*!< in: record lock object: all record locks which
are contained in this lock object are removed;
transactions waiting behind will get their lock
requests granted, if they are now qualified to it */
{
ulint space;
ulint page_no;
lock_t* lock;
trx_t* trx;
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_type_low(in_lock) == LOCK_REC);
trx = in_lock->trx;
space = in_lock->un_member.rec_lock.space;
page_no = in_lock->un_member.rec_lock.page_no;
HASH_DELETE(lock_t, hash, lock_sys->rec_hash,
lock_rec_fold(space, page_no), in_lock);
lock_sys->rec_num--;
UT_LIST_REMOVE(trx_locks, trx->trx_locks, in_lock);
/* Check if waiting locks in the queue can now be granted: grant
locks if there are no conflicting locks ahead. */
lock = lock_rec_get_first_on_page_addr(space, page_no);
while (lock != NULL) {
if (lock_get_wait(lock)
&& !lock_rec_has_to_wait_in_queue(lock)) {
/* Grant the lock */
lock_grant(lock);
}
lock = lock_rec_get_next_on_page(lock);
}
}
/*************************************************************//**
Removes a record lock request, waiting or granted, from the queue. */
static
void
lock_rec_discard(
/*=============*/
lock_t* in_lock)/*!< in: record lock object: all record locks which
are contained in this lock object are removed */
{
ulint space;
ulint page_no;
trx_t* trx;
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_type_low(in_lock) == LOCK_REC);
trx = in_lock->trx;
space = in_lock->un_member.rec_lock.space;
page_no = in_lock->un_member.rec_lock.page_no;
HASH_DELETE(lock_t, hash, lock_sys->rec_hash,
lock_rec_fold(space, page_no), in_lock);
lock_sys->rec_num--;
UT_LIST_REMOVE(trx_locks, trx->trx_locks, in_lock);
}
/*************************************************************//**
Removes record lock objects set on an index page which is discarded. This
function does not move locks, or check for waiting locks, therefore the
lock bitmaps must already be reset when this function is called. */
static
void
lock_rec_free_all_from_discard_page(
/*================================*/
const buf_block_t* block) /*!< in: page to be discarded */
{
ulint space;
ulint page_no;
lock_t* lock;
lock_t* next_lock;
ut_ad(mutex_own(&kernel_mutex));
space = buf_block_get_space(block);
page_no = buf_block_get_page_no(block);
lock = lock_rec_get_first_on_page_addr(space, page_no);
while (lock != NULL) {
ut_ad(lock_rec_find_set_bit(lock) == ULINT_UNDEFINED);
ut_ad(!lock_get_wait(lock));
next_lock = lock_rec_get_next_on_page(lock);
lock_rec_discard(lock);
lock = next_lock;
}
}
/*============= RECORD LOCK MOVING AND INHERITING ===================*/
/*************************************************************//**
Resets the lock bits for a single record. Releases transactions waiting for
lock requests here. */
static
void
lock_rec_reset_and_release_wait(
/*============================*/
const buf_block_t* block, /*!< in: buffer block containing
the record */
ulint heap_no)/*!< in: heap number of record */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
lock = lock_rec_get_first(block, heap_no);
while (lock != NULL) {
if (lock_is_wait_not_by_other(lock->type_mode)) {
lock_rec_cancel(lock);
} else if (lock_get_wait(lock)) {
/* just reset LOCK_WAIT */
lock_rec_reset_nth_bit(lock, heap_no);
lock_reset_lock_and_trx_wait(lock);
} else {
lock_rec_reset_nth_bit(lock, heap_no);
}
lock = lock_rec_get_next(heap_no, lock);
}
}
/*************************************************************//**
Makes a record inherit the locks (except LOCK_INSERT_INTENTION type)
of another record as gap type locks, but does not reset the lock bits of
the other record. Also waiting lock requests on rec are inherited as
GRANTED gap locks. */
static
void
lock_rec_inherit_to_gap(
/*====================*/
const buf_block_t* heir_block, /*!< in: block containing the
record which inherits */
const buf_block_t* block, /*!< in: block containing the
record from which inherited;
does NOT reset the locks on
this record */
ulint heir_heap_no, /*!< in: heap_no of the
inheriting record */
ulint heap_no) /*!< in: heap_no of the
donating record */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
lock = lock_rec_get_first(block, heap_no);
/* If srv_locks_unsafe_for_binlog is TRUE or session is using
READ COMMITTED isolation level, we do not want locks set
by an UPDATE or a DELETE to be inherited as gap type locks. But we
DO want S-locks set by a consistency constraint to be inherited also
then. */
while (lock != NULL) {
if (!lock_rec_get_insert_intention(lock)
&& !((srv_locks_unsafe_for_binlog
|| lock->trx->isolation_level
<= TRX_ISO_READ_COMMITTED)
&& lock_get_mode(lock) == LOCK_X)) {
lock_rec_add_to_queue(LOCK_REC | LOCK_GAP
| lock_get_mode(lock),
heir_block, heir_heap_no,
lock->index, lock->trx);
}
lock = lock_rec_get_next(heap_no, lock);
}
}
/*************************************************************//**
Makes a record inherit the gap locks (except LOCK_INSERT_INTENTION type)
of another record as gap type locks, but does not reset the lock bits of the
other record. Also waiting lock requests are inherited as GRANTED gap locks. */
static
void
lock_rec_inherit_to_gap_if_gap_lock(
/*================================*/
const buf_block_t* block, /*!< in: buffer block */
ulint heir_heap_no, /*!< in: heap_no of
record which inherits */
ulint heap_no) /*!< in: heap_no of record
from which inherited;
does NOT reset the locks
on this record */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
lock = lock_rec_get_first(block, heap_no);
while (lock != NULL) {
if (!lock_rec_get_insert_intention(lock)
&& (heap_no == PAGE_HEAP_NO_SUPREMUM
|| !lock_rec_get_rec_not_gap(lock))) {
lock_rec_add_to_queue(LOCK_REC | LOCK_GAP
| lock_get_mode(lock),
block, heir_heap_no,
lock->index, lock->trx);
}
lock = lock_rec_get_next(heap_no, lock);
}
}
/*************************************************************//**
Moves the locks of a record to another record and resets the lock bits of
the donating record. */
static
void
lock_rec_move(
/*==========*/
const buf_block_t* receiver, /*!< in: buffer block containing
the receiving record */
const buf_block_t* donator, /*!< in: buffer block containing
the donating record */
ulint receiver_heap_no,/*!< in: heap_no of the record
which gets the locks; there
must be no lock requests
on it! */
ulint donator_heap_no)/*!< in: heap_no of the record
which gives the locks */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
lock = lock_rec_get_first(donator, donator_heap_no);
ut_ad(lock_rec_get_first(receiver, receiver_heap_no) == NULL);
while (lock != NULL) {
const ulint type_mode = lock->type_mode;
lock_rec_reset_nth_bit(lock, donator_heap_no);
if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
lock_reset_lock_and_trx_wait(lock);
}
/* Note that we FIRST reset the bit, and then set the lock:
the function works also if donator == receiver */
lock_rec_add_to_queue(type_mode, receiver, receiver_heap_no,
lock->index, lock->trx);
lock = lock_rec_get_next(donator_heap_no, lock);
}
ut_ad(lock_rec_get_first(donator, donator_heap_no) == NULL);
}
/*************************************************************//**
Updates the lock table when we have reorganized a page. NOTE: we copy
also the locks set on the infimum of the page; the infimum may carry
locks if an update of a record is occurring on the page, and its locks
were temporarily stored on the infimum. */
UNIV_INTERN
void
lock_move_reorganize_page(
/*======================*/
const buf_block_t* block, /*!< in: old index page, now
reorganized */
const buf_block_t* oblock) /*!< in: copy of the old, not
reorganized page */
{
lock_t* lock;
UT_LIST_BASE_NODE_T(lock_t) old_locks;
mem_heap_t* heap = NULL;
ulint comp;
lock_mutex_enter_kernel();
lock = lock_rec_get_first_on_page(block);
if (lock == NULL) {
lock_mutex_exit_kernel();
return;
}
heap = mem_heap_create(256);
/* Copy first all the locks on the page to heap and reset the
bitmaps in the original locks; chain the copies of the locks
using the trx_locks field in them. */
UT_LIST_INIT(old_locks);
do {
/* Make a copy of the lock */
lock_t* old_lock = lock_rec_copy(lock, heap);
UT_LIST_ADD_LAST(trx_locks, old_locks, old_lock);
/* Reset bitmap of lock */
lock_rec_bitmap_reset(lock);
if (lock_get_wait(lock)) {
lock_reset_lock_and_trx_wait(lock);
}
lock = lock_rec_get_next_on_page(lock);
} while (lock != NULL);
comp = page_is_comp(block->frame);
ut_ad(comp == page_is_comp(oblock->frame));
for (lock = UT_LIST_GET_FIRST(old_locks); lock;
lock = UT_LIST_GET_NEXT(trx_locks, lock)) {
/* NOTE: we copy also the locks set on the infimum and
supremum of the page; the infimum may carry locks if an
update of a record is occurring on the page, and its locks
were temporarily stored on the infimum */
page_cur_t cur1;
page_cur_t cur2;
page_cur_set_before_first(block, &cur1);
page_cur_set_before_first(oblock, &cur2);
/* Set locks according to old locks */
for (;;) {
ulint old_heap_no;
ulint new_heap_no;
ut_ad(comp || !memcmp(page_cur_get_rec(&cur1),
page_cur_get_rec(&cur2),
rec_get_data_size_old(
page_cur_get_rec(
&cur2))));
if (UNIV_LIKELY(comp)) {
old_heap_no = rec_get_heap_no_new(
page_cur_get_rec(&cur2));
new_heap_no = rec_get_heap_no_new(
page_cur_get_rec(&cur1));
} else {
old_heap_no = rec_get_heap_no_old(
page_cur_get_rec(&cur2));
new_heap_no = rec_get_heap_no_old(
page_cur_get_rec(&cur1));
}
if (lock_rec_get_nth_bit(lock, old_heap_no)) {
/* Clear the bit in old_lock. */
ut_d(lock_rec_reset_nth_bit(lock,
old_heap_no));
/* NOTE that the old lock bitmap could be too
small for the new heap number! */
lock_rec_add_to_queue(lock->type_mode, block,
new_heap_no,
lock->index, lock->trx);
/* if (new_heap_no == PAGE_HEAP_NO_SUPREMUM
&& lock_get_wait(lock)) {
fprintf(stderr,
"---\n--\n!!!Lock reorg: supr type %lu\n",
lock->type_mode);
} */
}
if (UNIV_UNLIKELY
(new_heap_no == PAGE_HEAP_NO_SUPREMUM)) {
ut_ad(old_heap_no == PAGE_HEAP_NO_SUPREMUM);
break;
}
page_cur_move_to_next(&cur1);
page_cur_move_to_next(&cur2);
}
#ifdef UNIV_DEBUG
{
ulint i = lock_rec_find_set_bit(lock);
/* Check that all locks were moved. */
if (UNIV_UNLIKELY(i != ULINT_UNDEFINED)) {
fprintf(stderr,
"lock_move_reorganize_page():"
" %lu not moved in %p\n",
(ulong) i, (void*) lock);
ut_error;
}
}
#endif /* UNIV_DEBUG */
}
lock_mutex_exit_kernel();
mem_heap_free(heap);
#ifdef UNIV_DEBUG_LOCK_VALIDATE
ut_ad(lock_rec_validate_page(block));
#endif
}
/*************************************************************//**
Moves the explicit locks on user records to another page if a record
list end is moved to another page. */
UNIV_INTERN
void
lock_move_rec_list_end(
/*===================*/
const buf_block_t* new_block, /*!< in: index page to move to */
const buf_block_t* block, /*!< in: index page */
const rec_t* rec) /*!< in: record on page: this
is the first record moved */
{
lock_t* lock;
const ulint comp = page_rec_is_comp(rec);
lock_mutex_enter_kernel();
/* Note: when we move locks from record to record, waiting locks
and possible granted gap type locks behind them are enqueued in
the original order, because new elements are inserted to a hash
table to the end of the hash chain, and lock_rec_add_to_queue
does not reuse locks if there are waiters in the queue. */
for (lock = lock_rec_get_first_on_page(block); lock;
lock = lock_rec_get_next_on_page(lock)) {
page_cur_t cur1;
page_cur_t cur2;
const ulint type_mode = lock->type_mode;
page_cur_position(rec, block, &cur1);
if (page_cur_is_before_first(&cur1)) {
page_cur_move_to_next(&cur1);
}
page_cur_set_before_first(new_block, &cur2);
page_cur_move_to_next(&cur2);
/* Copy lock requests on user records to new page and
reset the lock bits on the old */
while (!page_cur_is_after_last(&cur1)) {
ulint heap_no;
if (comp) {
heap_no = rec_get_heap_no_new(
page_cur_get_rec(&cur1));
} else {
heap_no = rec_get_heap_no_old(
page_cur_get_rec(&cur1));
ut_ad(!memcmp(page_cur_get_rec(&cur1),
page_cur_get_rec(&cur2),
rec_get_data_size_old(
page_cur_get_rec(&cur2))));
}
if (lock_rec_get_nth_bit(lock, heap_no)) {
lock_rec_reset_nth_bit(lock, heap_no);
if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
lock_reset_lock_and_trx_wait(lock);
}
if (comp) {
heap_no = rec_get_heap_no_new(
page_cur_get_rec(&cur2));
} else {
heap_no = rec_get_heap_no_old(
page_cur_get_rec(&cur2));
}
lock_rec_add_to_queue(type_mode,
new_block, heap_no,
lock->index, lock->trx);
}
page_cur_move_to_next(&cur1);
page_cur_move_to_next(&cur2);
}
}
lock_mutex_exit_kernel();
#ifdef UNIV_DEBUG_LOCK_VALIDATE
ut_ad(lock_rec_validate_page(block));
ut_ad(lock_rec_validate_page(new_block));
#endif
}
/*************************************************************//**
Moves the explicit locks on user records to another page if a record
list start is moved to another page. */
UNIV_INTERN
void
lock_move_rec_list_start(
/*=====================*/
const buf_block_t* new_block, /*!< in: index page to move to */
const buf_block_t* block, /*!< in: index page */
const rec_t* rec, /*!< in: record on page:
this is the first
record NOT copied */
const rec_t* old_end) /*!< in: old
previous-to-last
record on new_page
before the records
were copied */
{
lock_t* lock;
const ulint comp = page_rec_is_comp(rec);
ut_ad(block->frame == page_align(rec));
ut_ad(new_block->frame == page_align(old_end));
lock_mutex_enter_kernel();
for (lock = lock_rec_get_first_on_page(block); lock;
lock = lock_rec_get_next_on_page(lock)) {
page_cur_t cur1;
page_cur_t cur2;
const ulint type_mode = lock->type_mode;
page_cur_set_before_first(block, &cur1);
page_cur_move_to_next(&cur1);
page_cur_position(old_end, new_block, &cur2);
page_cur_move_to_next(&cur2);
/* Copy lock requests on user records to new page and
reset the lock bits on the old */
while (page_cur_get_rec(&cur1) != rec) {
ulint heap_no;
if (comp) {
heap_no = rec_get_heap_no_new(
page_cur_get_rec(&cur1));
} else {
heap_no = rec_get_heap_no_old(
page_cur_get_rec(&cur1));
ut_ad(!memcmp(page_cur_get_rec(&cur1),
page_cur_get_rec(&cur2),
rec_get_data_size_old(
page_cur_get_rec(
&cur2))));
}
if (lock_rec_get_nth_bit(lock, heap_no)) {
lock_rec_reset_nth_bit(lock, heap_no);
if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
lock_reset_lock_and_trx_wait(lock);
}
if (comp) {
heap_no = rec_get_heap_no_new(
page_cur_get_rec(&cur2));
} else {
heap_no = rec_get_heap_no_old(
page_cur_get_rec(&cur2));
}
lock_rec_add_to_queue(type_mode,
new_block, heap_no,
lock->index, lock->trx);
}
page_cur_move_to_next(&cur1);
page_cur_move_to_next(&cur2);
}
#ifdef UNIV_DEBUG
if (page_rec_is_supremum(rec)) {
ulint i;
for (i = PAGE_HEAP_NO_USER_LOW;
i < lock_rec_get_n_bits(lock); i++) {
if (UNIV_UNLIKELY
(lock_rec_get_nth_bit(lock, i))) {
fprintf(stderr,
"lock_move_rec_list_start():"
" %lu not moved in %p\n",
(ulong) i, (void*) lock);
ut_error;
}
}
}
#endif /* UNIV_DEBUG */
}
lock_mutex_exit_kernel();
#ifdef UNIV_DEBUG_LOCK_VALIDATE
ut_ad(lock_rec_validate_page(block));
#endif
}
/*************************************************************//**
Updates the lock table when a page is split to the right. */
UNIV_INTERN
void
lock_update_split_right(
/*====================*/
const buf_block_t* right_block, /*!< in: right page */
const buf_block_t* left_block) /*!< in: left page */
{
ulint heap_no = lock_get_min_heap_no(right_block);
lock_mutex_enter_kernel();
/* Move the locks on the supremum of the left page to the supremum
of the right page */
lock_rec_move(right_block, left_block,
PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
/* Inherit the locks to the supremum of left page from the successor
of the infimum on right page */
lock_rec_inherit_to_gap(left_block, right_block,
PAGE_HEAP_NO_SUPREMUM, heap_no);
lock_mutex_exit_kernel();
}
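/* Editor's sketch -- illustrative only.  A right split that moves r4
and r5 to a fresh right page; heap_no above is the heap number of r4,
the successor of the infimum on the right page:
	before: left  = inf, r1, r2, r3, r4, r5, sup
	after:  left  = inf, r1, r2, r3, sup'   <- sup' inherits the
	                                           gap locks of r4
	        right = inf, r4, r5, sup        <- sup gets the old
	                                           locks of left sup
Moving the supremum locks keeps next-key protection attached to the
end of the record list; the inheritance keeps the gap before r4
protected across the new page boundary. */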
/*************************************************************//**
Updates the lock table when a page is merged to the right. */
UNIV_INTERN
void
lock_update_merge_right(
/*====================*/
const buf_block_t* right_block, /*!< in: right page to
which merged */
const rec_t* orig_succ, /*!< in: original
successor of infimum
on the right page
before merge */
const buf_block_t* left_block) /*!< in: merged index
page which will be
discarded */
{
lock_mutex_enter_kernel();
/* Inherit the locks from the supremum of the left page to the
original successor of infimum on the right page, to which the left
page was merged */
lock_rec_inherit_to_gap(right_block, left_block,
page_rec_get_heap_no(orig_succ),
PAGE_HEAP_NO_SUPREMUM);
/* Reset the locks on the supremum of the left page, releasing
waiting transactions */
lock_rec_reset_and_release_wait(left_block,
PAGE_HEAP_NO_SUPREMUM);
lock_rec_free_all_from_discard_page(left_block);
lock_mutex_exit_kernel();
}
/*************************************************************//**
Updates the lock table when the root page is copied to another in
btr_root_raise_and_insert. Note that we leave lock structs on the
root page, even though they do not make sense on other than leaf
pages: the reason is that in a pessimistic update the infimum record
of the root page will act as a dummy carrier of the locks of the record
to be updated. */
UNIV_INTERN
void
lock_update_root_raise(
/*===================*/
const buf_block_t* block, /*!< in: index page to which copied */
const buf_block_t* root) /*!< in: root page */
{
lock_mutex_enter_kernel();
/* Move the locks on the supremum of the root to the supremum
of block */
lock_rec_move(block, root,
PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
lock_mutex_exit_kernel();
}
/*************************************************************//**
Updates the lock table when a page is copied to another and the original page
is removed from the chain of leaf pages, except if page is the root! */
UNIV_INTERN
void
lock_update_copy_and_discard(
/*=========================*/
const buf_block_t* new_block, /*!< in: index page to
which copied */
const buf_block_t* block) /*!< in: index page;
NOT the root! */
{
lock_mutex_enter_kernel();
/* Move the locks on the supremum of the old page to the supremum
of new_page */
lock_rec_move(new_block, block,
PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
lock_rec_free_all_from_discard_page(block);
lock_mutex_exit_kernel();
}
/*************************************************************//**
Updates the lock table when a page is split to the left. */
UNIV_INTERN
void
lock_update_split_left(
/*===================*/
const buf_block_t* right_block, /*!< in: right page */
const buf_block_t* left_block) /*!< in: left page */
{
ulint heap_no = lock_get_min_heap_no(right_block);
lock_mutex_enter_kernel();
/* Inherit the locks to the supremum of the left page from the
successor of the infimum on the right page */
lock_rec_inherit_to_gap(left_block, right_block,
PAGE_HEAP_NO_SUPREMUM, heap_no);
lock_mutex_exit_kernel();
}
/*************************************************************//**
Updates the lock table when a page is merged to the left. */
UNIV_INTERN
void
lock_update_merge_left(
/*===================*/
const buf_block_t* left_block, /*!< in: left page to
which merged */
const rec_t* orig_pred, /*!< in: original predecessor
of supremum on the left page
before merge */
const buf_block_t* right_block) /*!< in: merged index page
which will be discarded */
{
const rec_t* left_next_rec;
ut_ad(left_block->frame == page_align(orig_pred));
lock_mutex_enter_kernel();
left_next_rec = page_rec_get_next_const(orig_pred);
if (!page_rec_is_supremum(left_next_rec)) {
/* Inherit the locks on the supremum of the left page to the
first record which was moved from the right page */
lock_rec_inherit_to_gap(left_block, left_block,
page_rec_get_heap_no(left_next_rec),
PAGE_HEAP_NO_SUPREMUM);
/* Reset the locks on the supremum of the left page,
releasing waiting transactions */
lock_rec_reset_and_release_wait(left_block,
PAGE_HEAP_NO_SUPREMUM);
}
/* Move the locks from the supremum of right page to the supremum
of the left page */
lock_rec_move(left_block, right_block,
PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
lock_rec_free_all_from_discard_page(right_block);
lock_mutex_exit_kernel();
}
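/* Editor's sketch -- illustrative only.  Merging a right page into
the left one, where orig_pred is r3, the old last user record on the
left page:
	before: left  = inf, r1, r2, r3, sup(L)
	        right = inf, r4, r5, sup(R)
	after:  left  = inf, r1, r2, r3, r4, r5, sup
Gap locks recorded on sup(L) covered the gap after r3; after the
merge that is the gap before r4, so r4 inherits them.  The locks on
sup(R) move to the merged page's supremum, and the right page's lock
structs are then discarded. */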
/*************************************************************//**
Resets the original locks on heir and replaces them with gap type locks
inherited from rec. */
UNIV_INTERN
void
lock_rec_reset_and_inherit_gap_locks(
/*=================================*/
const buf_block_t* heir_block, /*!< in: block containing the
record which inherits */
const buf_block_t* block, /*!< in: block containing the
record from which inherited;
does NOT reset the locks on
this record */
ulint heir_heap_no, /*!< in: heap_no of the
inheriting record */
ulint heap_no) /*!< in: heap_no of the
donating record */
{
mutex_enter(&kernel_mutex);
lock_rec_reset_and_release_wait(heir_block, heir_heap_no);
lock_rec_inherit_to_gap(heir_block, block, heir_heap_no, heap_no);
mutex_exit(&kernel_mutex);
}
/*************************************************************//**
Updates the lock table when a page is discarded. */
UNIV_INTERN
void
lock_update_discard(
/*================*/
const buf_block_t* heir_block, /*!< in: index page
which will inherit the locks */
ulint heir_heap_no, /*!< in: heap_no of the record
which will inherit the locks */
const buf_block_t* block) /*!< in: index page
which will be discarded */
{
const page_t* page = block->frame;
const rec_t* rec;
ulint heap_no;
lock_mutex_enter_kernel();
if (!lock_rec_get_first_on_page(block)) {
/* No locks exist on page, nothing to do */
lock_mutex_exit_kernel();
return;
}
/* Inherit all the locks on the page to the record and reset all
the locks on the page */
if (page_is_comp(page)) {
rec = page + PAGE_NEW_INFIMUM;
do {
heap_no = rec_get_heap_no_new(rec);
lock_rec_inherit_to_gap(heir_block, block,
heir_heap_no, heap_no);
lock_rec_reset_and_release_wait(block, heap_no);
rec = page + rec_get_next_offs(rec, TRUE);
} while (heap_no != PAGE_HEAP_NO_SUPREMUM);
} else {
rec = page + PAGE_OLD_INFIMUM;
do {
heap_no = rec_get_heap_no_old(rec);
lock_rec_inherit_to_gap(heir_block, block,
heir_heap_no, heap_no);
lock_rec_reset_and_release_wait(block, heap_no);
rec = page + rec_get_next_offs(rec, FALSE);
} while (heap_no != PAGE_HEAP_NO_SUPREMUM);
}
lock_rec_free_all_from_discard_page(block);
lock_mutex_exit_kernel();
}
/*************************************************************//**
Updates the lock table when a new user record is inserted. */
UNIV_INTERN
void
lock_update_insert(
/*===============*/
const buf_block_t* block, /*!< in: buffer block containing rec */
const rec_t* rec) /*!< in: the inserted record */
{
ulint receiver_heap_no;
ulint donator_heap_no;
ut_ad(block->frame == page_align(rec));
/* Inherit the gap-locking locks for rec, in gap mode, from the next
record */
if (page_rec_is_comp(rec)) {
receiver_heap_no = rec_get_heap_no_new(rec);
donator_heap_no = rec_get_heap_no_new(
page_rec_get_next_low(rec, TRUE));
} else {
receiver_heap_no = rec_get_heap_no_old(rec);
donator_heap_no = rec_get_heap_no_old(
page_rec_get_next_low(rec, FALSE));
}
lock_mutex_enter_kernel();
lock_rec_inherit_to_gap_if_gap_lock(block,
receiver_heap_no, donator_heap_no);
lock_mutex_exit_kernel();
}
/*************************************************************//**
Updates the lock table when a record is removed. */
UNIV_INTERN
void
lock_update_delete(
/*===============*/
const buf_block_t* block, /*!< in: buffer block containing rec */
const rec_t* rec) /*!< in: the record to be removed */
{
const page_t* page = block->frame;
ulint heap_no;
ulint next_heap_no;
ut_ad(page == page_align(rec));
if (page_is_comp(page)) {
heap_no = rec_get_heap_no_new(rec);
next_heap_no = rec_get_heap_no_new(page
+ rec_get_next_offs(rec,
TRUE));
} else {
heap_no = rec_get_heap_no_old(rec);
next_heap_no = rec_get_heap_no_old(page
+ rec_get_next_offs(rec,
FALSE));
}
lock_mutex_enter_kernel();
/* Let the next record inherit the locks from rec, in gap mode */
lock_rec_inherit_to_gap(block, block, next_heap_no, heap_no);
/* Reset the lock bits on rec and release waiting transactions */
lock_rec_reset_and_release_wait(block, heap_no);
lock_mutex_exit_kernel();
}
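/* Editor's sketch -- illustrative only.  Deleting r2 from the list
inf, r1, r2, r3, sup merges the gap before r2 into the gap before r3:
any gap or next-key protection that transactions held on r2 is
inherited by r3 as gap type locks, after which the lock bits of r2
are cleared and its waiters are released. */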
/*********************************************************************//**
Stores on the page infimum record the explicit locks of another record.
This function is used to store the lock state of a record when it is
updated and the size of the record changes in the update. The record
is moved in such an update, perhaps to another page. The infimum record
acts as a dummy carrier record, taking care of lock releases while the
actual record is being moved. */
UNIV_INTERN
void
lock_rec_store_on_page_infimum(
/*===========================*/
const buf_block_t* block, /*!< in: buffer block containing rec */
const rec_t* rec) /*!< in: record whose lock state
is stored on the infimum
record of the same page; lock
bits are reset on the
record */
{
ulint heap_no = page_rec_get_heap_no(rec);
ut_ad(block->frame == page_align(rec));
lock_mutex_enter_kernel();
lock_rec_move(block, block, PAGE_HEAP_NO_INFIMUM, heap_no);
lock_mutex_exit_kernel();
}
/*********************************************************************//**
Restores the state of explicit lock requests on a single record, where the
state was stored on the infimum of the page. */
UNIV_INTERN
void
lock_rec_restore_from_page_infimum(
/*===============================*/
const buf_block_t* block, /*!< in: buffer block containing rec */
const rec_t* rec, /*!< in: record whose lock state
is restored */
const buf_block_t* donator)/*!< in: page (rec is not
necessarily on this page)
whose infimum stored the lock
state; lock bits are reset on
the infimum */
{
ulint heap_no = page_rec_get_heap_no(rec);
lock_mutex_enter_kernel();
lock_rec_move(block, donator, heap_no, PAGE_HEAP_NO_INFIMUM);
lock_mutex_exit_kernel();
}
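/* Editor's sketch of the assumed usage pattern (not shown in this
file): the store/restore pair brackets a size-changing update that
moves the record, e.g.
	lock_rec_store_on_page_infimum(block, rec);
	... delete rec and reinsert the updated version,
	    possibly on another page ...
	lock_rec_restore_from_page_infimum(new_block, new_rec, block);
While the record is in flight, its explicit locks live on the infimum
of the old page, the dummy carrier described above. */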
/*=========== DEADLOCK CHECKING ======================================*/
/*********************************************************************//**
rewind(3) the file used for storing the latest detected deadlock and
print a heading message to stderr if printing of all deadlocks to stderr
is enabled. */
UNIV_INLINE
void
lock_deadlock_start_print()
/*=======================*/
{
rewind(lock_latest_err_file);
ut_print_timestamp(lock_latest_err_file);
if (srv_print_all_deadlocks) {
fprintf(stderr, "InnoDB: transactions deadlock detected, "
"dumping detailed information.\n");
ut_print_timestamp(stderr);
}
}
/*********************************************************************//**
Print a message to the deadlock file and possibly to stderr. */
UNIV_INLINE
void
lock_deadlock_fputs(
/*================*/
const char* msg) /*!< in: message to print */
{
fputs(msg, lock_latest_err_file);
if (srv_print_all_deadlocks) {
fputs(msg, stderr);
}
}
/*********************************************************************//**
Print transaction data to the deadlock file and possibly to stderr. */
UNIV_INLINE
void
lock_deadlock_trx_print(
/*====================*/
trx_t* trx, /*!< in: transaction */
ulint max_query_len) /*!< in: max query length to print, or 0 to
use the default max length */
{
trx_print(lock_latest_err_file, trx, max_query_len);
if (srv_print_all_deadlocks) {
trx_print(stderr, trx, max_query_len);
}
}
/*********************************************************************//**
Print lock data to the deadlock file and possibly to stderr. */
UNIV_INLINE
void
lock_deadlock_lock_print(
/*=====================*/
const lock_t* lock) /*!< in: record or table type lock */
{
if (lock_get_type_low(lock) == LOCK_REC) {
lock_rec_print(lock_latest_err_file, lock);
if (srv_print_all_deadlocks) {
lock_rec_print(stderr, lock);
}
} else {
lock_table_print(lock_latest_err_file, lock);
if (srv_print_all_deadlocks) {
lock_table_print(stderr, lock);
}
}
}
/********************************************************************//**
Checks if a lock request results in a deadlock.
@return TRUE if a deadlock was detected and we chose trx as a victim;
FALSE if there was no deadlock, or if there was a deadlock but we
chose other transaction(s) as victim(s) */
static
ibool
lock_deadlock_occurs(
/*=================*/
lock_t* lock, /*!< in: lock the transaction is requesting */
trx_t* trx) /*!< in: transaction */
{
trx_t* mark_trx;
ulint ret;
ulint cost = 0;
ut_ad(trx);
ut_ad(lock);
ut_ad(mutex_own(&kernel_mutex));
retry:
/* We check that adding this trx to the waits-for graph
does not produce a cycle. First mark all active transactions
with 0: */
mark_trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
while (mark_trx) {
mark_trx->deadlock_mark = 0;
mark_trx = UT_LIST_GET_NEXT(trx_list, mark_trx);
}
ret = lock_deadlock_recursive(trx, trx, lock, &cost, 0);
switch (ret) {
case LOCK_VICTIM_IS_OTHER:
/* We chose some other trx as a victim: retry if there still
is a deadlock */
goto retry;
case LOCK_EXCEED_MAX_DEPTH:
		/* If the lock search exceeds the max steps
or the max depth, the current trx will be
the victim. Print its information. */
lock_deadlock_start_print();
lock_deadlock_fputs(
"TOO DEEP OR LONG SEARCH IN THE LOCK TABLE"
" WAITS-FOR GRAPH, WE WILL ROLL BACK"
" FOLLOWING TRANSACTION \n\n"
"*** TRANSACTION:\n");
lock_deadlock_trx_print(trx, 3000);
lock_deadlock_fputs(
"*** WAITING FOR THIS LOCK TO BE GRANTED:\n");
lock_deadlock_lock_print(lock);
break;
case LOCK_VICTIM_IS_START:
srv_n_lock_deadlock_count++;
lock_deadlock_fputs("*** WE ROLL BACK TRANSACTION (2)\n");
break;
default:
		/* No deadlock detected */
return(FALSE);
}
lock_deadlock_found = TRUE;
return(TRUE);
}
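/* Editor's sketch -- illustrative only.  Suppose the requested lock
makes T1 wait for T2, while T2 already waits for T3 and T3 waits for
T1 ("->" meaning waits-for).  The recursive search started at T1
follows T1 -> T2 -> T3 -> T1 and arrives back at its starting point,
which is how the cycle is reported.  The deadlock_mark reset above
guarantees that each retry starts from a clean waits-for graph. */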
/********************************************************************//**
Looks recursively for a deadlock.
@return 0 if no deadlock found, LOCK_VICTIM_IS_START if there was a
deadlock and we chose 'start' as the victim, LOCK_VICTIM_IS_OTHER if a
deadlock was found and we chose some other trx as a victim: we must do
the search again in this last case because there may be another
deadlock!
LOCK_EXCEED_MAX_DEPTH if the lock search exceeds max steps or max depth. */
static
ulint
lock_deadlock_recursive(
/*====================*/
trx_t* start, /*!< in: recursion starting point */
trx_t* trx, /*!< in: a transaction waiting for a lock */
lock_t* wait_lock, /*!< in: lock that is waiting to be granted */
ulint* cost, /*!< in/out: number of calculation steps thus
far: if this exceeds LOCK_MAX_N_STEPS_...
we return LOCK_EXCEED_MAX_DEPTH */
ulint depth) /*!< in: recursion depth: if this exceeds
LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK, we
return LOCK_EXCEED_MAX_DEPTH */
{
ulint ret;
lock_t* lock;
trx_t* lock_trx;
ulint heap_no = ULINT_UNDEFINED;
ut_a(trx);
ut_a(start);
ut_a(wait_lock);
ut_ad(mutex_own(&kernel_mutex));
if (trx->deadlock_mark == 1) {
/* We have already exhaustively searched the subtree starting
from this trx */
return(0);
}
*cost = *cost + 1;
if (lock_get_type_low(wait_lock) == LOCK_REC) {
ulint space;
ulint page_no;
heap_no = lock_rec_find_set_bit(wait_lock);
ut_a(heap_no != ULINT_UNDEFINED);
space = wait_lock->un_member.rec_lock.space;
page_no = wait_lock->un_member.rec_lock.page_no;
lock = lock_rec_get_first_on_page_addr(space, page_no);
/* Position the iterator on the first matching record lock. */
while (lock != NULL
&& lock != wait_lock
&& !lock_rec_get_nth_bit(lock, heap_no)) {
lock = lock_rec_get_next_on_page(lock);
}
if (lock == wait_lock) {
lock = NULL;
}
ut_ad(lock == NULL || lock_rec_get_nth_bit(lock, heap_no));
} else {
lock = wait_lock;
}
/* Look at the locks ahead of wait_lock in the lock queue */
for (;;) {
/* Get previous table lock. */
if (heap_no == ULINT_UNDEFINED) {
lock = UT_LIST_GET_PREV(
un_member.tab_lock.locks, lock);
}
if (lock == NULL) {
/* We can mark this subtree as searched */
trx->deadlock_mark = 1;
			return(0);
}
if (lock_has_to_wait(wait_lock, lock)) {
ibool too_far
= depth > LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK
|| *cost > LOCK_MAX_N_STEPS_IN_DEADLOCK_CHECK;
lock_trx = lock->trx;
if (lock_trx == start) {
				/* We came back to the recursion starting
				point: a deadlock has been detected */
lock_deadlock_start_print();
lock_deadlock_fputs("\n*** (1) TRANSACTION:\n");
lock_deadlock_trx_print(wait_lock->trx, 3000);
lock_deadlock_fputs(
"*** (1) WAITING FOR THIS LOCK"
" TO BE GRANTED:\n");
lock_deadlock_lock_print(wait_lock);
lock_deadlock_fputs("*** (2) TRANSACTION:\n");
lock_deadlock_trx_print(lock->trx, 3000);
lock_deadlock_fputs(
"*** (2) HOLDS THE LOCK(S):\n");
lock_deadlock_lock_print(lock);
lock_deadlock_fputs(
"*** (2) WAITING FOR THIS LOCK"
" TO BE GRANTED:\n");
lock_deadlock_lock_print(start->wait_lock);
#ifdef UNIV_DEBUG
if (lock_print_waits) {
fputs("Deadlock detected\n",
stderr);
}
#endif /* UNIV_DEBUG */
if (trx_weight_ge(wait_lock->trx, start)) {
/* Our recursion starting point
transaction is 'smaller', let us
choose 'start' as the victim and roll
back it */
return(LOCK_VICTIM_IS_START);
}
lock_deadlock_found = TRUE;
/* Let us choose the transaction of wait_lock
as a victim to try to avoid deadlocking our
recursion starting point transaction */
lock_deadlock_fputs(
"*** WE ROLL BACK TRANSACTION (1)\n");
wait_lock->trx->was_chosen_as_deadlock_victim
= TRUE;
lock_cancel_waiting_and_release(wait_lock);
				/* Since trx and wait_lock are no longer
				in the waits-for graph, we can return
				LOCK_VICTIM_IS_OTHER; note that our
				selective algorithm can choose
several transactions as victims, but still
we may end up rolling back also the recursion
starting point transaction! */
return(LOCK_VICTIM_IS_OTHER);
}
if (too_far) {
#ifdef UNIV_DEBUG
if (lock_print_waits) {
fputs("Deadlock search exceeds"
" max steps or depth.\n",
stderr);
}
#endif /* UNIV_DEBUG */
/* The information about transaction/lock
to be rolled back is available in the top
level. Do not print anything here. */
return(LOCK_EXCEED_MAX_DEPTH);
}
if (lock_trx->que_state == TRX_QUE_LOCK_WAIT) {
/* Another trx ahead has requested lock in an
incompatible mode, and is itself waiting for
a lock */
ret = lock_deadlock_recursive(
start, lock_trx,
lock_trx->wait_lock, cost, depth + 1);
if (ret != 0) {
return(ret);
}
}
}
/* Get the next record lock to check. */
if (heap_no != ULINT_UNDEFINED) {
ut_a(lock != NULL);
do {
lock = lock_rec_get_next_on_page(lock);
} while (lock != NULL
&& lock != wait_lock
&& !lock_rec_get_nth_bit(lock, heap_no));
if (lock == wait_lock) {
lock = NULL;
}
}
}/* end of the 'for (;;)'-loop */
}
/*========================= TABLE LOCKS ==============================*/
/*********************************************************************//**
Creates a table lock object and adds it as the last in the lock queue
of the table. Does NOT check for deadlocks or lock compatibility.
@return own: new lock object */
UNIV_INLINE
lock_t*
lock_table_create(
/*==============*/
dict_table_t* table, /*!< in: database table in dictionary cache */
ulint type_mode,/*!< in: lock mode possibly ORed with
LOCK_WAIT */
trx_t* trx) /*!< in: trx */
{
lock_t* lock;
ut_ad(table && trx);
ut_ad(mutex_own(&kernel_mutex));
ut_ad(!(type_mode & LOCK_CONV_BY_OTHER));
if ((type_mode & LOCK_MODE_MASK) == LOCK_AUTO_INC) {
++table->n_waiting_or_granted_auto_inc_locks;
}
	/* For AUTOINC locking we reuse the lock instance only if
	there is no wait involved; otherwise we allocate the waiting
	lock from the transaction lock heap. */
if (type_mode == LOCK_AUTO_INC) {
lock = table->autoinc_lock;
table->autoinc_trx = trx;
ib_vector_push(trx->autoinc_locks, lock);
} else {
lock = mem_heap_alloc(trx->lock_heap, sizeof(lock_t));
}
UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock);
lock->type_mode = type_mode | LOCK_TABLE;
lock->trx = trx;
lock->un_member.tab_lock.table = table;
UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock);
if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) {
lock_set_lock_and_trx_wait(lock, trx);
}
return(lock);
}
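/* Editor's note, inferred from the code above (an assumption, not a
statement from the original authors): type_mode packs the lock mode
in the bits selected by LOCK_MODE_MASK and flags in higher bits, so
a waiting table IX lock ends up represented as
(LOCK_IX | LOCK_TABLE | LOCK_WAIT). */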
/*************************************************************//**
Pops autoinc lock requests from the transaction's autoinc_locks
vector. Also pops off any gaps (NULL entries) that have accumulated
at the back of the vector. */
UNIV_INLINE
void
lock_table_pop_autoinc_locks(
/*=========================*/
trx_t* trx) /*!< in/out: transaction that owns the AUTOINC locks */
{
ut_ad(mutex_own(&kernel_mutex));
ut_ad(!ib_vector_is_empty(trx->autoinc_locks));
/* Skip any gaps, gaps are NULL lock entries in the
trx->autoinc_locks vector. */
do {
ib_vector_pop(trx->autoinc_locks);
if (ib_vector_is_empty(trx->autoinc_locks)) {
return;
}
} while (ib_vector_get_last(trx->autoinc_locks) == NULL);
}
/*************************************************************//**
Removes an autoinc lock request from the transaction's autoinc_locks. */
UNIV_INLINE
void
lock_table_remove_autoinc_lock(
/*===========================*/
lock_t* lock, /*!< in: table lock */
trx_t* trx) /*!< in/out: transaction that owns the lock */
{
lock_t* autoinc_lock;
lint i = ib_vector_size(trx->autoinc_locks) - 1;
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_mode(lock) == LOCK_AUTO_INC);
ut_ad(lock_get_type_low(lock) & LOCK_TABLE);
ut_ad(!ib_vector_is_empty(trx->autoinc_locks));
/* With stored functions and procedures the user may drop
a table within the same "statement". This special case has
to be handled by deleting only those AUTOINC locks that were
held by the table being dropped. */
autoinc_lock = ib_vector_get(trx->autoinc_locks, i);
/* This is the default fast case. */
if (autoinc_lock == lock) {
lock_table_pop_autoinc_locks(trx);
} else {
/* The last element should never be NULL */
ut_a(autoinc_lock != NULL);
/* Handle freeing the locks from within the stack. */
while (--i >= 0) {
autoinc_lock = ib_vector_get(trx->autoinc_locks, i);
if (UNIV_LIKELY(autoinc_lock == lock)) {
ib_vector_set(trx->autoinc_locks, i, NULL);
return;
}
}
/* Must find the autoinc lock. */
ut_error;
}
}
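/* Editor's sketch -- illustrative only.  Removing the middle lock
from the vector [L1, L2, L3] NULLs its slot, giving [L1, NULL, L3].
When L3 is later removed from the back, lock_table_pop_autoinc_locks()
pops L3 and then discards the NULL gap as well, leaving [L1]. */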
/*************************************************************//**
Removes a table lock request from the queue and the trx list of locks;
this is a low-level function which does NOT check if waiting requests
can now be granted. */
UNIV_INLINE
void
lock_table_remove_low(
/*==================*/
lock_t* lock) /*!< in: table lock */
{
trx_t* trx;
dict_table_t* table;
ut_ad(mutex_own(&kernel_mutex));
trx = lock->trx;
table = lock->un_member.tab_lock.table;
/* Remove the table from the transaction's AUTOINC vector, if
	the lock that is being released is an AUTOINC lock. */
if (lock_get_mode(lock) == LOCK_AUTO_INC) {
/* The table's AUTOINC lock can get transferred to
another transaction before we get here. */
if (table->autoinc_trx == trx) {
table->autoinc_trx = NULL;
}
/* The locks must be freed in the reverse order from
the one in which they were acquired. This is to avoid
traversing the AUTOINC lock vector unnecessarily.
We only store locks that were granted in the
trx->autoinc_locks vector (see lock_table_create()
and lock_grant()). Therefore it can be empty and we
need to check for that. */
if (!lock_get_wait(lock)
&& !ib_vector_is_empty(trx->autoinc_locks)) {
lock_table_remove_autoinc_lock(lock, trx);
}
ut_a(table->n_waiting_or_granted_auto_inc_locks > 0);
--table->n_waiting_or_granted_auto_inc_locks;
}
UT_LIST_REMOVE(trx_locks, trx->trx_locks, lock);
UT_LIST_REMOVE(un_member.tab_lock.locks, table->locks, lock);
}
/*********************************************************************//**
Enqueues a waiting request for a table lock which cannot be granted
immediately. Checks for deadlocks.
@return DB_LOCK_WAIT, DB_DEADLOCK, DB_QUE_THR_SUSPENDED, or
DB_SUCCESS; DB_SUCCESS means that there was a deadlock, but another
transaction was chosen as a victim, and we got the lock immediately:
no need to wait then */
static
ulint
lock_table_enqueue_waiting(
/*=======================*/
ulint mode, /*!< in: lock mode this transaction is
requesting */
dict_table_t* table, /*!< in: table */
que_thr_t* thr) /*!< in: query thread */
{
lock_t* lock;
trx_t* trx;
ulint sec;
ulint ms;
ut_ad(mutex_own(&kernel_mutex));
/* Test if there already is some other reason to suspend thread:
we do not enqueue a lock request if the query thread should be
stopped anyway */
if (que_thr_stop(thr)) {
ut_error;
return(DB_QUE_THR_SUSPENDED);
}
trx = thr_get_trx(thr);
switch (trx_get_dict_operation(trx)) {
case TRX_DICT_OP_NONE:
break;
case TRX_DICT_OP_TABLE:
case TRX_DICT_OP_INDEX:
ut_print_timestamp(stderr);
fputs(" InnoDB: Error: a table lock wait happens"
" in a dictionary operation!\n"
"InnoDB: Table name ", stderr);
ut_print_name(stderr, trx, TRUE, table->name);
fputs(".\n"
"InnoDB: Submit a detailed bug report"
" to http://bugs.mysql.com\n",
stderr);
ut_ad(0);
}
/* Enqueue the lock request that will wait to be granted */
lock = lock_table_create(table, mode | LOCK_WAIT, trx);
/* Check if a deadlock occurs: if yes, remove the lock request and
return an error code */
if (lock_deadlock_occurs(lock, trx)) {
/* The order here is important, we don't want to
lose the state of the lock before calling remove. */
lock_table_remove_low(lock);
lock_reset_lock_and_trx_wait(lock);
return(DB_DEADLOCK);
}
if (trx->wait_lock == NULL) {
/* Deadlock resolution chose another transaction as a victim,
and we accidentally got our lock granted! */
return(DB_SUCCESS);
}
if (UNIV_UNLIKELY(trx->take_stats)) {
ut_usectime(&sec, &ms);
trx->lock_que_wait_ustarted = (ib_uint64_t)sec * 1000000 + ms;
}
trx->que_state = TRX_QUE_LOCK_WAIT;
trx->was_chosen_as_deadlock_victim = FALSE;
trx->wait_started = time(NULL);
ut_a(que_thr_stop(thr));
return(DB_LOCK_WAIT);
}
/*********************************************************************//**
Checks if other transactions have an incompatible mode lock request in
the lock queue.
@return lock or NULL */
UNIV_INLINE
const lock_t*
lock_table_other_has_incompatible(
/*==============================*/
const trx_t* trx, /*!< in: transaction, or NULL if all
transactions should be included */
ulint wait, /*!< in: LOCK_WAIT if also
waiting locks are taken into
account, or 0 if not */
const dict_table_t* table, /*!< in: table */
enum lock_mode mode) /*!< in: lock mode */
{
const lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
lock = UT_LIST_GET_LAST(table->locks);
while (lock != NULL) {
if ((lock->trx != trx)
&& (!lock_mode_compatible(lock_get_mode(lock), mode))
&& (wait || !(lock_get_wait(lock)))) {
return(lock);
}
lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock);
}
return(NULL);
}
/*********************************************************************//**
Locks the specified database table in the mode given. If the lock cannot
be granted immediately, the query thread is put to wait.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
ulint
lock_table(
/*=======*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG bit is set,
does nothing */
dict_table_t* table, /*!< in: database table in dictionary cache */
enum lock_mode mode, /*!< in: lock mode */
que_thr_t* thr) /*!< in: query thread */
{
trx_t* trx;
ulint err;
ut_ad(table && thr);
if (flags & BTR_NO_LOCKING_FLAG) {
return(DB_SUCCESS);
}
ut_a(flags == 0);
trx = thr_get_trx(thr);
if (UNIV_UNLIKELY(trx->fake_changes && mode == LOCK_IX)) {
mode = LOCK_IS;
}
lock_mutex_enter_kernel();
/* Look for stronger locks the same trx already has on the table */
if (lock_table_has(trx, table, mode)) {
lock_mutex_exit_kernel();
return(DB_SUCCESS);
}
/* We have to check if the new lock is compatible with any locks
other transactions have in the table lock queue. */
if (lock_table_other_has_incompatible(trx, LOCK_WAIT, table, mode)) {
/* Another trx has a request on the table in an incompatible
mode: this trx may have to wait */
err = lock_table_enqueue_waiting(mode | flags, table, thr);
lock_mutex_exit_kernel();
return(err);
}
lock_table_create(table, mode | flags, trx);
ut_a(!flags || mode == LOCK_S || mode == LOCK_X);
lock_mutex_exit_kernel();
return(DB_SUCCESS);
}
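/* Editor's sketch of an assumed caller pattern (not shown in this
file): row operations take an intention lock on the table before
requesting record locks on its pages, e.g.
	err = lock_table(0, index->table, LOCK_IX, thr);
	if (err != DB_SUCCESS) {
		... DB_LOCK_WAIT: suspend the query thread;
		    DB_DEADLOCK: roll back the transaction ...
	}
*/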
/*********************************************************************//**
Checks if a waiting table lock request still has to wait in a queue.
@return TRUE if still has to wait */
static
ibool
lock_table_has_to_wait_in_queue(
/*============================*/
const lock_t* wait_lock) /*!< in: waiting table lock */
{
const dict_table_t* table;
const lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
ut_ad(lock_get_wait(wait_lock));
table = wait_lock->un_member.tab_lock.table;
lock = UT_LIST_GET_FIRST(table->locks);
while (lock != wait_lock) {
if (lock_has_to_wait(wait_lock, lock)) {
return(TRUE);
}
lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock);
}
return(FALSE);
}
/*************************************************************//**
Removes a table lock request, waiting or granted, from the queue and grants
locks to other transactions in the queue, if they now are entitled to a
lock. */
static
void
lock_table_dequeue(
/*===============*/
lock_t* in_lock)/*!< in: table lock object; transactions waiting
behind will get their lock requests granted, if
they are now qualified to it */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
ut_a(lock_get_type_low(in_lock) == LOCK_TABLE);
lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, in_lock);
lock_table_remove_low(in_lock);
/* Check if waiting locks in the queue can now be granted: grant
locks if there are no conflicting locks ahead. */
while (lock != NULL) {
if (lock_get_wait(lock)
&& !lock_table_has_to_wait_in_queue(lock)) {
/* Grant the lock */
lock_grant(lock);
}
lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock);
}
}
/*=========================== LOCK RELEASE ==============================*/
/*************************************************************//**
Removes a granted record lock of a transaction from the queue and grants
locks to other transactions waiting in the queue if they now are entitled
to a lock. */
UNIV_INTERN
void
lock_rec_unlock(
/*============*/
trx_t* trx, /*!< in: transaction that has
set a record lock */
const buf_block_t* block, /*!< in: buffer block containing rec */
const rec_t* rec, /*!< in: record */
enum lock_mode lock_mode)/*!< in: LOCK_S or LOCK_X */
{
lock_t* first_lock;
lock_t* lock;
ulint heap_no;
ut_ad(trx && rec);
ut_ad(block->frame == page_align(rec));
ut_ad(trx->state == TRX_ACTIVE);
heap_no = page_rec_get_heap_no(rec);
mutex_enter(&kernel_mutex);
first_lock = lock_rec_get_first(block, heap_no);
	/* Find a lock with the same lock_mode held by the transaction
	on the record. */
for (lock = first_lock; lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
if (lock->trx == trx && lock_get_mode(lock) == lock_mode) {
ut_a(!lock_get_wait(lock));
lock_rec_reset_nth_bit(lock, heap_no);
goto released;
}
}
mutex_exit(&kernel_mutex);
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: unlock row could not"
" find a %lu mode lock on the record\n",
(ulong) lock_mode);
return;
released:
/* Check if we can now grant waiting lock requests */
for (lock = first_lock; lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
if (lock_get_wait(lock)
&& !lock_rec_has_to_wait_in_queue(lock)) {
/* Grant the lock */
lock_grant(lock);
}
}
mutex_exit(&kernel_mutex);
}
/*********************************************************************//**
Releases transaction locks, and releases possible other transactions waiting
because of these locks. */
UNIV_INTERN
void
lock_release_off_kernel(
/*====================*/
trx_t* trx) /*!< in: transaction */
{
dict_table_t* table;
ulint count;
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
lock = UT_LIST_GET_LAST(trx->trx_locks);
count = 0;
while (lock != NULL) {
count++;
if (lock_get_type_low(lock) == LOCK_REC) {
lock_rec_dequeue_from_page(lock);
} else {
ut_ad(lock_get_type_low(lock) & LOCK_TABLE);
if (lock_get_mode(lock) != LOCK_IS
&& trx->undo_no != 0) {
/* The trx may have modified the table. We
block the use of the MySQL query cache for
all currently active transactions. */
table = lock->un_member.tab_lock.table;
table->query_cache_inv_trx_id
= trx_sys->max_trx_id;
}
lock_table_dequeue(lock);
}
if (count == LOCK_RELEASE_KERNEL_INTERVAL) {
/* Release the kernel mutex for a while, so that we
do not monopolize it */
lock_mutex_exit_kernel();
lock_mutex_enter_kernel();
count = 0;
}
lock = UT_LIST_GET_LAST(trx->trx_locks);
}
ut_a(ib_vector_size(trx->autoinc_locks) == 0);
mem_heap_empty(trx->lock_heap);
}
/*********************************************************************//**
Cancels a waiting lock request and releases possible other transactions
waiting behind it. */
UNIV_INTERN
void
lock_cancel_waiting_and_release(
/*============================*/
lock_t* lock) /*!< in: waiting lock request */
{
ut_ad(mutex_own(&kernel_mutex));
ut_ad(!(lock->type_mode & LOCK_CONV_BY_OTHER));
if (lock_get_type_low(lock) == LOCK_REC) {
lock_rec_dequeue_from_page(lock);
} else {
ut_ad(lock_get_type_low(lock) & LOCK_TABLE);
if (lock->trx->autoinc_locks != NULL) {
			/* Release the transaction's AUTOINC locks. */
lock_release_autoinc_locks(lock->trx);
}
lock_table_dequeue(lock);
}
/* Reset the wait flag and the back pointer to lock in trx */
lock_reset_lock_and_trx_wait(lock);
/* The following function releases the trx from lock wait */
trx_end_lock_wait(lock->trx);
}
/* True if a lock mode is S or X */
#define IS_LOCK_S_OR_X(lock) \
(lock_get_mode(lock) == LOCK_S \
|| lock_get_mode(lock) == LOCK_X)
/*********************************************************************//**
Removes locks of a transaction on a table to be dropped.
If remove_also_table_sx_locks is TRUE then table-level S and X locks are
also removed in addition to other table-level and record-level locks.
No lock that is going to be removed is allowed to be a wait lock. */
static
void
lock_remove_all_on_table_for_trx(
/*=============================*/
dict_table_t* table, /*!< in: table to be dropped */
trx_t* trx, /*!< in: a transaction */
ibool remove_also_table_sx_locks)/*!< in: also removes
table S and X locks */
{
lock_t* lock;
lock_t* prev_lock;
ut_ad(mutex_own(&kernel_mutex));
lock = UT_LIST_GET_LAST(trx->trx_locks);
while (lock != NULL) {
prev_lock = UT_LIST_GET_PREV(trx_locks, lock);
if (lock_get_type_low(lock) == LOCK_REC
&& lock->index->table == table) {
ut_a(!lock_get_wait(lock));
lock_rec_discard(lock);
} else if (lock_get_type_low(lock) & LOCK_TABLE
&& lock->un_member.tab_lock.table == table
&& (remove_also_table_sx_locks
|| !IS_LOCK_S_OR_X(lock))) {
ut_a(!lock_get_wait(lock));
lock_table_remove_low(lock);
}
lock = prev_lock;
}
}
/*********************************************************************//**
Removes locks on a table to be dropped or truncated.
If remove_also_table_sx_locks is TRUE then table-level S and X locks are
also removed in addition to other table-level and record-level locks.
No lock that is going to be removed is allowed to be a wait lock. */
UNIV_INTERN
void
lock_remove_all_on_table(
/*=====================*/
dict_table_t* table, /*!< in: table to be dropped
or truncated */
ibool remove_also_table_sx_locks)/*!< in: also removes
table S and X locks */
{
lock_t* lock;
lock_t* prev_lock;
mutex_enter(&kernel_mutex);
lock = UT_LIST_GET_FIRST(table->locks);
while (lock != NULL) {
prev_lock = UT_LIST_GET_PREV(un_member.tab_lock.locks,
lock);
/* If we should remove all locks (remove_also_table_sx_locks
		is TRUE), or if the lock is not a table-level S or X lock,
		then check that we are not going to remove a wait lock. */
if (remove_also_table_sx_locks
|| !(lock_get_type(lock) == LOCK_TABLE
&& IS_LOCK_S_OR_X(lock))) {
ut_a(!lock_get_wait(lock));
}
lock_remove_all_on_table_for_trx(table, lock->trx,
remove_also_table_sx_locks);
if (prev_lock == NULL) {
if (lock == UT_LIST_GET_FIRST(table->locks)) {
/* lock was not removed, pick its successor */
lock = UT_LIST_GET_NEXT(
un_member.tab_lock.locks, lock);
} else {
/* lock was removed, pick the first one */
lock = UT_LIST_GET_FIRST(table->locks);
}
} else if (UT_LIST_GET_NEXT(un_member.tab_lock.locks,
prev_lock) != lock) {
/* If lock was removed by
lock_remove_all_on_table_for_trx() then pick the
successor of prev_lock ... */
lock = UT_LIST_GET_NEXT(
un_member.tab_lock.locks, prev_lock);
} else {
/* ... otherwise pick the successor of lock. */
lock = UT_LIST_GET_NEXT(
un_member.tab_lock.locks, lock);
}
}
mutex_exit(&kernel_mutex);
}
/*===================== VALIDATION AND DEBUGGING ====================*/
/*********************************************************************//**
Prints info of a table lock. */
UNIV_INTERN
void
lock_table_print(
/*=============*/
FILE* file, /*!< in: file where to print */
const lock_t* lock) /*!< in: table type lock */
{
ut_ad(mutex_own(&kernel_mutex));
ut_a(lock_get_type_low(lock) == LOCK_TABLE);
fputs("TABLE LOCK table ", file);
ut_print_name(file, lock->trx, TRUE,
lock->un_member.tab_lock.table->name);
fprintf(file, " trx id " TRX_ID_FMT, (ullint) lock->trx->id);
if (lock_get_mode(lock) == LOCK_S) {
fputs(" lock mode S", file);
} else if (lock_get_mode(lock) == LOCK_X) {
fputs(" lock mode X", file);
} else if (lock_get_mode(lock) == LOCK_IS) {
fputs(" lock mode IS", file);
} else if (lock_get_mode(lock) == LOCK_IX) {
fputs(" lock mode IX", file);
} else if (lock_get_mode(lock) == LOCK_AUTO_INC) {
fputs(" lock mode AUTO-INC", file);
} else {
fprintf(file, " unknown lock mode %lu",
(ulong) lock_get_mode(lock));
}
if (lock_get_wait(lock)) {
fputs(" waiting", file);
}
putc('\n', file);
}
/*********************************************************************//**
Prints info of a record lock. */
UNIV_INTERN
void
lock_rec_print(
/*===========*/
FILE* file, /*!< in: file where to print */
const lock_t* lock) /*!< in: record type lock */
{
const buf_block_t* block;
ulint space;
ulint page_no;
ulint i;
mtr_t mtr;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
rec_offs_init(offsets_);
ut_ad(mutex_own(&kernel_mutex));
ut_a(lock_get_type_low(lock) == LOCK_REC);
space = lock->un_member.rec_lock.space;
page_no = lock->un_member.rec_lock.page_no;
fprintf(file, "RECORD LOCKS space id %lu page no %lu n bits %lu ",
(ulong) space, (ulong) page_no,
(ulong) lock_rec_get_n_bits(lock));
dict_index_name_print(file, lock->trx, lock->index);
fprintf(file, " trx id " TRX_ID_FMT, (ullint) lock->trx->id);
if (lock_get_mode(lock) == LOCK_S) {
fputs(" lock mode S", file);
} else if (lock_get_mode(lock) == LOCK_X) {
fputs(" lock_mode X", file);
} else {
ut_error;
}
if (lock_rec_get_gap(lock)) {
fputs(" locks gap before rec", file);
}
if (lock_rec_get_rec_not_gap(lock)) {
fputs(" locks rec but not gap", file);
}
if (lock_rec_get_insert_intention(lock)) {
fputs(" insert intention", file);
}
if (lock_get_wait(lock)) {
fputs(" waiting", file);
}
mtr_start(&mtr);
putc('\n', file);
	if (srv_show_verbose_locks) {
block = buf_page_try_get(space, page_no, &mtr);
for (i = 0; i < lock_rec_get_n_bits(lock); ++i) {
if (!lock_rec_get_nth_bit(lock, i)) {
continue;
}
fprintf(file, "Record lock, heap no %lu", (ulong) i);
if (block) {
const rec_t* rec;
rec = page_find_rec_with_heap_no(
buf_block_get_frame(block), i);
offsets = rec_get_offsets(
rec, lock->index, offsets,
ULINT_UNDEFINED, &heap);
putc(' ', file);
rec_print_new(file, rec, offsets);
}
putc('\n', file);
}
}
mtr_commit(&mtr);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
}
#ifdef UNIV_DEBUG
/* Print the number of lock structs from lock_print_info_summary() only
in non-production builds for performance reasons, see
http://bugs.mysql.com/36942 */
#define PRINT_NUM_OF_LOCK_STRUCTS
#endif /* UNIV_DEBUG */
#ifdef PRINT_NUM_OF_LOCK_STRUCTS
/*********************************************************************//**
Calculates the number of record lock structs in the record lock hash table.
@return number of record locks */
static
ulint
lock_get_n_rec_locks(void)
/*======================*/
{
lock_t* lock;
ulint n_locks = 0;
ulint i;
ut_ad(mutex_own(&kernel_mutex));
for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) {
lock = HASH_GET_FIRST(lock_sys->rec_hash, i);
while (lock) {
n_locks++;
lock = HASH_GET_NEXT(hash, lock);
}
}
return(n_locks);
}
#endif /* PRINT_NUM_OF_LOCK_STRUCTS */
/*********************************************************************//**
Prints info of locks for all transactions.
@return FALSE if the kernel mutex could not be obtained, in which
case the function exits without printing any lock info */
UNIV_INTERN
ibool
lock_print_info_summary(
/*====================*/
FILE* file, /*!< in: file where to print */
	ibool	nowait)	/*!< in: TRUE if we should not wait for
			the kernel mutex, but give up immediately
			if it cannot be obtained */
{
	/* If nowait is FALSE, wait on the kernel mutex; otherwise
	return immediately if we fail to obtain the mutex. */
if (!nowait) {
lock_mutex_enter_kernel();
} else if (mutex_enter_nowait(&kernel_mutex)) {
fputs("FAIL TO OBTAIN KERNEL MUTEX, "
"SKIP LOCK INFO PRINTING\n", file);
return(FALSE);
}
if (lock_deadlock_found) {
fputs("------------------------\n"
"LATEST DETECTED DEADLOCK\n"
"------------------------\n", file);
ut_copy_file(file, lock_latest_err_file);
}
fputs("------------\n"
"TRANSACTIONS\n"
"------------\n", file);
fprintf(file, "Trx id counter " TRX_ID_FMT "\n",
(ullint) trx_sys->max_trx_id);
fprintf(file,
"Purge done for trx's n:o < " TRX_ID_FMT
" undo n:o < " TRX_ID_FMT "\n",
(ullint) purge_sys->purge_trx_no,
(ullint) purge_sys->purge_undo_no);
fprintf(file,
"History list length %lu\n",
(ulong) trx_sys->rseg_history_len);
#ifdef PRINT_NUM_OF_LOCK_STRUCTS
fprintf(file,
"Total number of lock structs in row lock hash table %lu\n",
(ulong) lock_get_n_rec_locks());
#endif /* PRINT_NUM_OF_LOCK_STRUCTS */
return(TRUE);
}
/*********************************************************************//**
Prints info of locks for each transaction. */
UNIV_INTERN
void
lock_print_info_all_transactions(
/*=============================*/
FILE* file) /*!< in: file where to print */
{
lock_t* lock;
ibool load_page_first = TRUE;
ulint nth_trx = 0;
ulint nth_lock = 0;
ulint i;
mtr_t mtr;
trx_t* trx;
fprintf(file, "LIST OF TRANSACTIONS FOR EACH SESSION:\n");
/* First print info on non-active transactions */
trx = UT_LIST_GET_FIRST(trx_sys->mysql_trx_list);
while (trx) {
if (trx->state == TRX_NOT_STARTED) {
fputs("---", file);
trx_print(file, trx, 600);
}
trx = UT_LIST_GET_NEXT(mysql_trx_list, trx);
}
loop:
trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
i = 0;
	/* Since we temporarily release the kernel mutex when
	reading a database page below, the variable trx may be
	obsolete by now, and we must loop through the trx list to
	find what is probably the same trx, or some other trx. */
while (trx && (i < nth_trx)) {
trx = UT_LIST_GET_NEXT(trx_list, trx);
i++;
}
if (trx == NULL) {
lock_mutex_exit_kernel();
ut_ad(lock_validate());
return;
}
if (nth_lock == 0) {
fputs("---", file);
trx_print(file, trx, 600);
if (trx->read_view) {
fprintf(file,
"Trx read view will not see trx with"
" id >= " TRX_ID_FMT
", sees < " TRX_ID_FMT "\n",
(ullint) trx->read_view->low_limit_id,
(ullint) trx->read_view->up_limit_id);
}
if (trx->que_state == TRX_QUE_LOCK_WAIT) {
fprintf(file,
"------- TRX HAS BEEN WAITING %lu SEC"
" FOR THIS LOCK TO BE GRANTED:\n",
(ulong) difftime(time(NULL),
trx->wait_started));
if (lock_get_type_low(trx->wait_lock) == LOCK_REC) {
lock_rec_print(file, trx->wait_lock);
} else {
lock_table_print(file, trx->wait_lock);
}
fputs("------------------\n", file);
}
}
if (!srv_print_innodb_lock_monitor && !srv_show_locks_held) {
nth_trx++;
goto loop;
}
i = 0;
	/* See the note about the trx loop above for why we loop here:
	lock may be an obsolete pointer now. */
lock = UT_LIST_GET_FIRST(trx->trx_locks);
while (lock && (i < nth_lock)) {
lock = UT_LIST_GET_NEXT(trx_locks, lock);
i++;
}
if (lock == NULL) {
nth_trx++;
nth_lock = 0;
goto loop;
}
if (lock_get_type_low(lock) == LOCK_REC) {
if (load_page_first) {
ulint space = lock->un_member.rec_lock.space;
			ulint zip_size = fil_space_get_zip_size(space);
ulint page_no = lock->un_member.rec_lock.page_no;
if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
/* It is a single table tablespace and
the .ibd file is missing (TRUNCATE
TABLE probably stole the locks): just
print the lock without attempting to
load the page in the buffer pool. */
fprintf(file, "RECORD LOCKS on"
" non-existing space %lu\n",
(ulong) space);
goto print_rec;
}
lock_mutex_exit_kernel();
if (srv_show_verbose_locks) {
mtr_start(&mtr);
buf_page_get_gen(space, zip_size, page_no,
RW_NO_LATCH, NULL,
BUF_GET_POSSIBLY_FREED,
__FILE__, __LINE__, &mtr);
mtr_commit(&mtr);
}
load_page_first = FALSE;
lock_mutex_enter_kernel();
goto loop;
}
print_rec:
lock_rec_print(file, lock);
} else {
ut_ad(lock_get_type_low(lock) & LOCK_TABLE);
lock_table_print(file, lock);
}
load_page_first = TRUE;
nth_lock++;
if (nth_lock >= srv_show_locks_held) {
fputs("TOO MANY LOCKS PRINTED FOR THIS TRX:"
" SUPPRESSING FURTHER PRINTS\n",
file);
nth_trx++;
nth_lock = 0;
goto loop;
}
goto loop;
}
#ifdef UNIV_DEBUG
/*********************************************************************//**
Validates the lock queue on a table.
@return TRUE if ok */
static
ibool
lock_table_queue_validate(
/*======================*/
const dict_table_t* table) /*!< in: table */
{
const lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
lock = UT_LIST_GET_FIRST(table->locks);
while (lock) {
ut_a(((lock->trx)->state == TRX_ACTIVE)
|| ((lock->trx)->state == TRX_PREPARED)
|| ((lock->trx)->state == TRX_COMMITTED_IN_MEMORY));
if (!lock_get_wait(lock)) {
ut_a(!lock_table_other_has_incompatible(
lock->trx, 0, table,
lock_get_mode(lock)));
} else {
ut_a(lock_table_has_to_wait_in_queue(lock));
}
lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock);
}
return(TRUE);
}
/*********************************************************************//**
Validates the lock queue on a single record.
@return TRUE if ok */
static
ibool
lock_rec_queue_validate(
/*====================*/
const buf_block_t* block, /*!< in: buffer block containing rec */
const rec_t* rec, /*!< in: record to look at */
const dict_index_t* index, /*!< in: index, or NULL if not known */
const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */
{
trx_t* impl_trx;
lock_t* lock;
ulint heap_no;
ut_a(rec);
ut_a(block->frame == page_align(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!page_rec_is_comp(rec) == !rec_offs_comp(offsets));
heap_no = page_rec_get_heap_no(rec);
lock_mutex_enter_kernel();
if (!page_rec_is_user_rec(rec)) {
lock = lock_rec_get_first(block, heap_no);
while (lock) {
switch(lock->trx->state) {
case TRX_ACTIVE:
case TRX_PREPARED:
case TRX_COMMITTED_IN_MEMORY:
break;
default:
ut_error;
}
ut_a(trx_in_trx_list(lock->trx));
if (lock_get_wait(lock)) {
ut_a(lock_rec_has_to_wait_in_queue(lock));
}
if (index) {
ut_a(lock->index == index);
}
lock = lock_rec_get_next(heap_no, lock);
}
lock_mutex_exit_kernel();
return(TRUE);
}
	if (!index) {
		/* The index is not known: skip the checks below. */
	} else if (dict_index_is_clust(index)) {
impl_trx = lock_clust_rec_some_has_impl(rec, index, offsets);
if (impl_trx
&& lock_rec_other_has_expl_req(LOCK_S, 0, LOCK_WAIT,
block, heap_no, impl_trx)) {
ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, impl_trx));
}
#if 0
} else {
/* The kernel mutex may get released temporarily in the
next function call: we have to release lock table mutex
to obey the latching order */
/* If this thread is holding the file space latch
(fil_space_t::latch), the following check WILL break
latching order and may cause a deadlock of threads. */
/* NOTE: This is a bogus check that would fail in the
following case: Our transaction is updating a
row. After it has updated the clustered index record,
it goes to a secondary index record and finds someone
else holding an explicit S- or X-lock on that
secondary index record, presumably from a locking
read. Our transaction cannot update the secondary
index immediately, but places a waiting X-lock request
on the secondary index record. There is nothing
illegal in this. The assertion is simply too strong. */
/* From the locking point of view, each secondary
index is a separate table. A lock that is held on
secondary index rec does not give any rights to modify
or read the clustered index rec. Therefore, we can
think of the sec index as a separate 'table' from the
clust index 'table'. Conversely, a transaction that
has acquired a lock on and modified a clustered index
record may need to wait for a lock on the
corresponding record in a secondary index. */
impl_trx = lock_sec_rec_some_has_impl_off_kernel(
rec, index, offsets);
if (impl_trx
&& lock_rec_other_has_expl_req(LOCK_S, 0, LOCK_WAIT,
block, heap_no, impl_trx)) {
ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, impl_trx));
}
#endif
}
lock = lock_rec_get_first(block, heap_no);
while (lock) {
ut_a(lock->trx->state == TRX_ACTIVE
|| lock->trx->state == TRX_PREPARED
|| lock->trx->state == TRX_COMMITTED_IN_MEMORY);
ut_a(trx_in_trx_list(lock->trx));
if (index) {
ut_a(lock->index == index);
}
if (!lock_rec_get_gap(lock) && !lock_get_wait(lock)) {
enum lock_mode mode;
if (lock_get_mode(lock) == LOCK_S) {
mode = LOCK_X;
} else {
mode = LOCK_S;
}
ut_a(!lock_rec_other_has_expl_req(
mode, 0, 0, block, heap_no, lock->trx));
} else if (lock_get_wait(lock) && !lock_rec_get_gap(lock)) {
ut_a(lock_rec_has_to_wait_in_queue(lock));
}
lock = lock_rec_get_next(heap_no, lock);
}
lock_mutex_exit_kernel();
return(TRUE);
}
/*********************************************************************//**
Validates the record lock queues on a page.
@return TRUE if ok */
static
ibool
lock_rec_validate_page(
/*===================*/
const buf_block_t* block) /*!< in: buffer block */
{
const lock_t* lock;
const rec_t* rec;
dict_index_t* index;
ulint nth_lock = 0;
ulint nth_bit = 0;
ulint i;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
rec_offs_init(offsets_);
ut_ad(!mutex_own(&kernel_mutex));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
lock_mutex_enter_kernel();
loop:
lock = lock_rec_get_first_on_page_addr(buf_block_get_space(block),
buf_block_get_page_no(block));
if (!lock) {
goto function_exit;
}
#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
ut_a(!block->page.file_page_was_freed);
#endif
for (i = 0; i < nth_lock; i++) {
lock = lock_rec_get_next_on_page_const(lock);
if (!lock) {
goto function_exit;
}
}
ut_a(trx_in_trx_list(lock->trx));
ut_a(lock->trx->state == TRX_ACTIVE
|| lock->trx->state == TRX_PREPARED
|| lock->trx->state == TRX_COMMITTED_IN_MEMORY);
# ifdef UNIV_SYNC_DEBUG
/* Only validate the record queues when this thread is not
holding a space->latch. Deadlocks are possible due to
latching order violation when UNIV_DEBUG is defined while
UNIV_SYNC_DEBUG is not. */
if (!sync_thread_levels_contains(SYNC_FSP))
# endif /* UNIV_SYNC_DEBUG */
for (i = nth_bit; i < lock_rec_get_n_bits(lock); i++) {
if (i == 1 || lock_rec_get_nth_bit(lock, i)) {
index = lock->index;
rec = page_find_rec_with_heap_no(block->frame, i);
ut_a(rec);
offsets = rec_get_offsets(rec, lock->index, offsets,
ULINT_UNDEFINED, &heap);
#if 0
fprintf(stderr,
"Validating %u %u\n",
block->page.space, block->page.offset);
#endif
lock_mutex_exit_kernel();
/* If this thread is holding the file space
latch (fil_space_t::latch), the following
check WILL break the latching order and may
cause a deadlock of threads. */
lock_rec_queue_validate(block, rec, index,
offsets);
lock_mutex_enter_kernel();
nth_bit = i + 1;
goto loop;
}
}
nth_bit = 0;
nth_lock++;
goto loop;
function_exit:
lock_mutex_exit_kernel();
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
return(TRUE);
}
/*********************************************************************//**
Validate record locks up to a limit.
@return lock at limit or NULL if no more locks in the hash bucket */
static __attribute__((nonnull, warn_unused_result))
const lock_t*
lock_rec_validate(
/*==============*/
ulint start, /*!< in: lock_sys->rec_hash
bucket */
ib_uint64_t* limit) /*!< in/out: upper limit of
(space, page_no) */
{
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
for (lock = HASH_GET_FIRST(lock_sys->rec_hash, start);
lock != NULL;
lock = HASH_GET_NEXT(hash, lock)) {
ib_uint64_t current;
ut_a(trx_in_trx_list(lock->trx));
ut_a(lock_get_type(lock) == LOCK_REC);
current = ut_ull_create(
lock->un_member.rec_lock.space,
lock->un_member.rec_lock.page_no);
if (current > *limit) {
*limit = current + 1;
return(lock);
}
}
return(NULL);
}
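/* Editor's note (assumption about ut_ull_create(), which is defined
elsewhere): ut_ull_create(space, page_no) builds the 64-bit key
((ib_uint64_t) space << 32 | page_no), so *limit orders the scan by
(space, page_no) and lets lock_validate() resume the bucket scan
after it has released and re-acquired the kernel mutex. */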
/*********************************************************************//**
Validate a record lock's block */
static
void
lock_rec_block_validate(
/*====================*/
ulint space,
ulint page_no)
{
/* The lock and the block that it is referring to may be freed at
this point. We pass BUF_GET_POSSIBLY_FREED to skip a debug check.
If the lock exists in lock_rec_validate_page() we assert
!block->page.file_page_was_freed. */
mtr_t mtr;
buf_block_t* block;
/* Make sure that the tablespace is not deleted while we are
trying to access the page. */
if (!fil_inc_pending_ops(space)) {
mtr_start(&mtr);
block = buf_page_get_gen(
space, fil_space_get_zip_size(space),
page_no, RW_X_LATCH, NULL,
BUF_GET_POSSIBLY_FREED,
__FILE__, __LINE__, &mtr);
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
ut_ad(lock_rec_validate_page(block));
mtr_commit(&mtr);
fil_decr_pending_ops(space);
}
}
/*********************************************************************//**
Validates the lock system.
@return TRUE if ok */
static
ibool
lock_validate(void)
/*===============*/
{
const lock_t* lock;
const trx_t* trx;
ulint i;
lock_mutex_enter_kernel();
trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
while (trx) {
lock = UT_LIST_GET_FIRST(trx->trx_locks);
while (lock) {
if (lock_get_type_low(lock) & LOCK_TABLE) {
lock_table_queue_validate(
lock->un_member.tab_lock.table);
}
lock = UT_LIST_GET_NEXT(trx_locks, lock);
}
trx = UT_LIST_GET_NEXT(trx_list, trx);
}
	/* Iterate over all the record locks and validate the locks. We
	do not want to hog the kernel mutex, so we release it during
	the validation check. */
for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) {
const lock_t* lock;
ib_uint64_t limit = 0;
while ((lock = lock_rec_validate(i, &limit)) != NULL) {
ulint space = lock->un_member.rec_lock.space;
ulint page_no = lock->un_member.rec_lock.page_no;
lock_mutex_exit_kernel();
lock_rec_block_validate(space, page_no);
lock_mutex_enter_kernel();
}
}
lock_mutex_exit_kernel();
return(TRUE);
}
#endif /* UNIV_DEBUG */
/*============ RECORD LOCK CHECKS FOR ROW OPERATIONS ====================*/
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate insert of
a record. If they do, first tests if the query thread should anyway
be suspended for some reason; if not, then puts the transaction and
the query thread to the lock wait state and inserts a waiting request
for a gap x-lock to the lock queue.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
ulint
lock_rec_insert_check_and_lock(
/*===========================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG bit is
set, does nothing */
const rec_t* rec, /*!< in: record after which to insert */
buf_block_t* block, /*!< in/out: buffer block of rec */
dict_index_t* index, /*!< in: index */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr, /*!< in/out: mini-transaction */
ibool* inherit)/*!< out: set to TRUE if the new
inserted record maybe should inherit
LOCK_GAP type locks from the successor
record */
{
const rec_t* next_rec;
trx_t* trx;
lock_t* lock;
ulint err;
ulint next_rec_heap_no;
ut_ad(block->frame == page_align(rec));
if (flags & BTR_NO_LOCKING_FLAG) {
return(DB_SUCCESS);
}
trx = thr_get_trx(thr);
if (UNIV_UNLIKELY(trx->fake_changes)) {
return(DB_SUCCESS);
}
next_rec = page_rec_get_next_const(rec);
next_rec_heap_no = page_rec_get_heap_no(next_rec);
lock_mutex_enter_kernel();
/* When inserting a record into an index, the table must be at
least IX-locked or we must be building an index, in which case
the table must be at least S-locked. */
ut_ad(lock_table_has(trx, index->table, LOCK_IX)
|| (*index->name == TEMP_INDEX_PREFIX
&& lock_table_has(trx, index->table, LOCK_S)));
lock = lock_rec_get_first(block, next_rec_heap_no);
if (UNIV_LIKELY(lock == NULL)) {
/* We optimize CPU time usage in the simplest case */
lock_mutex_exit_kernel();
if (!dict_index_is_clust(index)) {
/* Update the page max trx id field */
page_update_max_trx_id(block,
buf_block_get_page_zip(block),
trx->id, mtr);
}
*inherit = FALSE;
return(DB_SUCCESS);
}
*inherit = TRUE;
	/* If another transaction has an explicit lock request which locks
	the gap, waiting or granted, on the successor, the insert has to wait.
	An exception is the case where the lock by another transaction
	is a gap type lock which it placed to wait for its turn to insert. We
	do not consider that kind of lock conflicting with our insert. This
	eliminates an unnecessary deadlock which resulted when 2 transactions
	had to wait for their inserts. Both had waiting gap type lock requests
	on the successor, which produced an unnecessary deadlock. */
if (lock_rec_other_has_conflicting(
LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION,
block, next_rec_heap_no, trx)) {
/* Note that we may get DB_SUCCESS also here! */
err = lock_rec_enqueue_waiting(LOCK_X | LOCK_GAP
| LOCK_INSERT_INTENTION,
block, next_rec_heap_no,
NULL, index, thr);
} else {
err = DB_SUCCESS;
}
lock_mutex_exit_kernel();
switch (err) {
case DB_SUCCESS_LOCKED_REC:
err = DB_SUCCESS;
/* fall through */
case DB_SUCCESS:
if (dict_index_is_clust(index)) {
break;
}
/* Update the page max trx id field */
page_update_max_trx_id(block,
buf_block_get_page_zip(block),
trx->id, mtr);
}
#ifdef UNIV_DEBUG
{
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
const ulint* offsets;
rec_offs_init(offsets_);
offsets = rec_get_offsets(next_rec, index, offsets_,
ULINT_UNDEFINED, &heap);
ut_ad(lock_rec_queue_validate(block,
next_rec, index, offsets));
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
}
#endif /* UNIV_DEBUG */
return(err);
}
/*********************************************************************//**
If a transaction has an implicit x-lock on a record, but no explicit x-lock
set on the record, sets one for it. NOTE that in the case of a secondary
index, the kernel mutex may get temporarily released. */
static
void
lock_rec_convert_impl_to_expl(
/*==========================*/
const buf_block_t* block, /*!< in: buffer block of rec */
const rec_t* rec, /*!< in: user record on page */
dict_index_t* index, /*!< in: index of record */
const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */
{
trx_t* impl_trx;
ut_ad(mutex_own(&kernel_mutex));
ut_ad(page_rec_is_user_rec(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!page_rec_is_comp(rec) == !rec_offs_comp(offsets));
if (dict_index_is_clust(index)) {
impl_trx = lock_clust_rec_some_has_impl(rec, index, offsets);
} else {
impl_trx = lock_sec_rec_some_has_impl_off_kernel(
rec, index, offsets);
}
if (impl_trx) {
ulint heap_no = page_rec_get_heap_no(rec);
/* If the transaction has no explicit x-lock set on the
record, set one for it */
if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block,
heap_no, impl_trx)) {
ulint type_mode = (LOCK_REC | LOCK_X
| LOCK_REC_NOT_GAP);
			/* If the delete-marked record was already locked by
			another transaction, reserve a waiting lock for
			impl_trx, because the lock cannot be granted at
			this moment. */
if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))
&& lock_rec_other_has_conflicting(
LOCK_X | LOCK_REC_NOT_GAP, block,
heap_no, impl_trx)) {
type_mode |= (LOCK_WAIT | LOCK_CONV_BY_OTHER);
}
lock_rec_add_to_queue(
type_mode, block, heap_no, index, impl_trx);
}
}
}
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate modify (update,
delete mark, or delete unmark) of a clustered index record. If they do,
first tests if the query thread should anyway be suspended for some
reason; if not, then puts the transaction and the query thread to the
lock wait state and inserts a waiting request for a record x-lock to the
lock queue.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
ulint
lock_clust_rec_modify_check_and_lock(
/*=================================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
bit is set, does nothing */
const buf_block_t* block, /*!< in: buffer block of rec */
const rec_t* rec, /*!< in: record which should be
modified */
dict_index_t* index, /*!< in: clustered index */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
que_thr_t* thr) /*!< in: query thread */
{
ulint err;
ulint heap_no;
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(dict_index_is_clust(index));
ut_ad(block->frame == page_align(rec));
if (flags & BTR_NO_LOCKING_FLAG) {
return(DB_SUCCESS);
}
if (UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) {
return(DB_SUCCESS);
}
heap_no = rec_offs_comp(offsets)
? rec_get_heap_no_new(rec)
: rec_get_heap_no_old(rec);
lock_mutex_enter_kernel();
ut_ad(lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
/* If a transaction has no explicit x-lock set on the record, set one
for it */
lock_rec_convert_impl_to_expl(block, rec, index, offsets);
err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, index, thr);
lock_mutex_exit_kernel();
ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
if (UNIV_UNLIKELY(err == DB_SUCCESS_LOCKED_REC)) {
err = DB_SUCCESS;
}
return(err);
}
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate modify (delete
mark or delete unmark) of a secondary index record.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
ulint
lock_sec_rec_modify_check_and_lock(
/*===============================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
bit is set, does nothing */
buf_block_t* block, /*!< in/out: buffer block of rec */
const rec_t* rec, /*!< in: record which should be
modified; NOTE: as this is a secondary
index, we always have to modify the
clustered index record first: see the
comment below */
dict_index_t* index, /*!< in: secondary index */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint err;
ulint heap_no;
ut_ad(!dict_index_is_clust(index));
ut_ad(block->frame == page_align(rec));
if (flags & BTR_NO_LOCKING_FLAG) {
return(DB_SUCCESS);
}
if (UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) {
return(DB_SUCCESS);
}
heap_no = page_rec_get_heap_no(rec);
/* Another transaction cannot have an implicit lock on the record,
because when we come here, we already have modified the clustered
index record, and this would not have been possible if another active
transaction had modified this secondary index record. */
lock_mutex_enter_kernel();
ut_ad(lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, index, thr);
lock_mutex_exit_kernel();
#ifdef UNIV_DEBUG
{
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
const ulint* offsets;
rec_offs_init(offsets_);
offsets = rec_get_offsets(rec, index, offsets_,
ULINT_UNDEFINED, &heap);
ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
}
#endif /* UNIV_DEBUG */
if (err == DB_SUCCESS || err == DB_SUCCESS_LOCKED_REC) {
/* Update the page max trx id field */
/* It might not be necessary to do this if
err == DB_SUCCESS (no new lock created),
but it should not cost too much performance. */
page_update_max_trx_id(block,
buf_block_get_page_zip(block),
thr_get_trx(thr)->id, mtr);
err = DB_SUCCESS;
}
return(err);
}
/*********************************************************************//**
Like lock_clust_rec_read_check_and_lock(), but reads a
secondary index record.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
enum db_err
lock_sec_rec_read_check_and_lock(
/*=============================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
bit is set, does nothing */
const buf_block_t* block, /*!< in: buffer block of rec */
const rec_t* rec, /*!< in: user record or page
supremum record which should
be read or passed over by a
read cursor */
dict_index_t* index, /*!< in: secondary index */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
enum lock_mode mode, /*!< in: mode of the lock which
the read cursor should set on
records: LOCK_S or LOCK_X; the
latter is possible in
SELECT FOR UPDATE */
ulint gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP */
que_thr_t* thr) /*!< in: query thread */
{
enum db_err err;
ulint heap_no;
ut_ad(!dict_index_is_clust(index));
ut_ad(block->frame == page_align(rec));
ut_ad(page_rec_is_user_rec(rec) || page_rec_is_supremum(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(mode == LOCK_X || mode == LOCK_S);
if (flags & BTR_NO_LOCKING_FLAG) {
return(DB_SUCCESS);
}
if (UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) {
if (!srv_fake_changes_locks) {
return(DB_SUCCESS);
}
if (mode == LOCK_X) {
mode = LOCK_S;
}
}
heap_no = page_rec_get_heap_no(rec);
lock_mutex_enter_kernel();
ut_ad(mode != LOCK_X
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
ut_ad(mode != LOCK_S
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
/* Some transaction may have an implicit x-lock on the record only
if the max trx id for the page >= min trx id for the trx list or a
database recovery is running. */
if ((page_get_max_trx_id(block->frame) >= trx_list_get_min_trx_id()
|| recv_recovery_is_on())
&& !page_rec_is_supremum(rec)) {
lock_rec_convert_impl_to_expl(block, rec, index, offsets);
}
err = lock_rec_lock(FALSE, mode | gap_mode,
block, heap_no, index, thr);
lock_mutex_exit_kernel();
ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
return(err);
}
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate read, or passing
over by a read cursor, of a clustered index record. If they do, first tests
if the query thread should anyway be suspended for some reason; if not, then
puts the transaction and the query thread to the lock wait state and inserts a
waiting request for a record lock to the lock queue. Sets the requested mode
lock on the record.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
enum db_err
lock_clust_rec_read_check_and_lock(
/*===============================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
bit is set, does nothing */
const buf_block_t* block, /*!< in: buffer block of rec */
const rec_t* rec, /*!< in: user record or page
supremum record which should
be read or passed over by a
read cursor */
dict_index_t* index, /*!< in: clustered index */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
enum lock_mode mode, /*!< in: mode of the lock which
the read cursor should set on
records: LOCK_S or LOCK_X; the
latter is possible in
SELECT FOR UPDATE */
ulint gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP */
que_thr_t* thr) /*!< in: query thread */
{
enum db_err err;
ulint heap_no;
ut_ad(dict_index_is_clust(index));
ut_ad(block->frame == page_align(rec));
ut_ad(page_rec_is_user_rec(rec) || page_rec_is_supremum(rec));
ut_ad(gap_mode == LOCK_ORDINARY || gap_mode == LOCK_GAP
|| gap_mode == LOCK_REC_NOT_GAP);
ut_ad(rec_offs_validate(rec, index, offsets));
if (flags & BTR_NO_LOCKING_FLAG) {
return(DB_SUCCESS);
}
if (UNIV_UNLIKELY(thr_get_trx(thr)->fake_changes)) {
if (!srv_fake_changes_locks) {
return(DB_SUCCESS);
}
if (mode == LOCK_X) {
mode = LOCK_S;
}
}
heap_no = page_rec_get_heap_no(rec);
lock_mutex_enter_kernel();
ut_ad(mode != LOCK_X
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IX));
ut_ad(mode != LOCK_S
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
if (UNIV_LIKELY(heap_no != PAGE_HEAP_NO_SUPREMUM)) {
lock_rec_convert_impl_to_expl(block, rec, index, offsets);
}
err = lock_rec_lock(FALSE, mode | gap_mode,
block, heap_no, index, thr);
lock_mutex_exit_kernel();
ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
return(err);
}
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate read, or passing
over by a read cursor, of a clustered index record. If they do, first tests
if the query thread should anyway be suspended for some reason; if not, then
puts the transaction and the query thread to the lock wait state and inserts a
waiting request for a record lock to the lock queue. Sets the requested mode
lock on the record. This is an alternative version of
lock_clust_rec_read_check_and_lock() that does not require the parameter
"offsets".
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
ulint
lock_clust_rec_read_check_and_lock_alt(
/*===================================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
bit is set, does nothing */
const buf_block_t* block, /*!< in: buffer block of rec */
const rec_t* rec, /*!< in: user record or page
supremum record which should
be read or passed over by a
read cursor */
dict_index_t* index, /*!< in: clustered index */
enum lock_mode mode, /*!< in: mode of the lock which
the read cursor should set on
records: LOCK_S or LOCK_X; the
latter is possible in
SELECT FOR UPDATE */
ulint gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP */
que_thr_t* thr) /*!< in: query thread */
{
mem_heap_t* tmp_heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
ulint err;
rec_offs_init(offsets_);
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &tmp_heap);
err = lock_clust_rec_read_check_and_lock(flags, block, rec, index,
offsets, mode, gap_mode, thr);
if (tmp_heap) {
mem_heap_free(tmp_heap);
}
if (UNIV_UNLIKELY(err == DB_SUCCESS_LOCKED_REC)) {
err = DB_SUCCESS;
}
return(err);
}
/*******************************************************************//**
Release the last lock from the transaction's autoinc locks. */
UNIV_INLINE
void
lock_release_autoinc_last_lock(
/*===========================*/
ib_vector_t* autoinc_locks) /*!< in/out: vector of AUTOINC locks */
{
ulint last;
lock_t* lock;
ut_ad(mutex_own(&kernel_mutex));
ut_a(!ib_vector_is_empty(autoinc_locks));
	/* The lock to be released must be the last lock acquired. */
last = ib_vector_size(autoinc_locks) - 1;
lock = ib_vector_get(autoinc_locks, last);
/* Should have only AUTOINC locks in the vector. */
ut_a(lock_get_mode(lock) == LOCK_AUTO_INC);
ut_a(lock_get_type(lock) == LOCK_TABLE);
ut_a(lock->un_member.tab_lock.table != NULL);
/* This will remove the lock from the trx autoinc_locks too. */
lock_table_dequeue(lock);
}
/*******************************************************************//**
Check if a transaction holds any autoinc locks.
@return TRUE if the transaction holds any AUTOINC locks. */
UNIV_INTERN
ibool
lock_trx_holds_autoinc_locks(
/*=========================*/
const trx_t* trx) /*!< in: transaction */
{
ut_a(trx->autoinc_locks != NULL);
return(!ib_vector_is_empty(trx->autoinc_locks));
}
/*******************************************************************//**
Release all the transaction's autoinc locks. */
UNIV_INTERN
void
lock_release_autoinc_locks(
/*=======================*/
trx_t* trx) /*!< in/out: transaction */
{
ut_ad(mutex_own(&kernel_mutex));
ut_a(trx->autoinc_locks != NULL);
/* We release the locks in the reverse order. This is to
avoid searching the vector for the element to delete at
the lower level. See (lock_table_remove_low()) for details. */
while (!ib_vector_is_empty(trx->autoinc_locks)) {
/* lock_table_remove_low() will also remove the lock from
the transaction's autoinc_locks vector. */
lock_release_autoinc_last_lock(trx->autoinc_locks);
}
/* Should release all locks. */
ut_a(ib_vector_is_empty(trx->autoinc_locks));
}
/*******************************************************************//**
Gets the type of a lock. Non-inline version for using outside of the
lock module.
@return LOCK_TABLE or LOCK_REC */
UNIV_INTERN
ulint
lock_get_type(
/*==========*/
const lock_t* lock) /*!< in: lock */
{
return(lock_get_type_low(lock));
}
/*******************************************************************//**
Gets the id of the transaction owning a lock.
@return transaction id */
UNIV_INTERN
trx_id_t
lock_get_trx_id(
/*============*/
const lock_t* lock) /*!< in: lock */
{
return(lock->trx->id);
}
/*******************************************************************//**
Gets the mode of a lock in a human readable string.
The string should not be free()'d or modified.
@return lock mode */
UNIV_INTERN
const char*
lock_get_mode_str(
/*==============*/
const lock_t* lock) /*!< in: lock */
{
ibool is_gap_lock;
is_gap_lock = lock_get_type_low(lock) == LOCK_REC
&& lock_rec_get_gap(lock);
switch (lock_get_mode(lock)) {
case LOCK_S:
if (is_gap_lock) {
return("S,GAP");
} else {
return("S");
}
case LOCK_X:
if (is_gap_lock) {
return("X,GAP");
} else {
return("X");
}
case LOCK_IS:
if (is_gap_lock) {
return("IS,GAP");
} else {
return("IS");
}
case LOCK_IX:
if (is_gap_lock) {
return("IX,GAP");
} else {
return("IX");
}
case LOCK_AUTO_INC:
return("AUTO_INC");
default:
return("UNKNOWN");
}
}
/*******************************************************************//**
Gets the type of a lock in a human readable string.
The string should not be free()'d or modified.
@return lock type */
UNIV_INTERN
const char*
lock_get_type_str(
/*==============*/
const lock_t* lock) /*!< in: lock */
{
switch (lock_get_type_low(lock)) {
case LOCK_REC:
return("RECORD");
case LOCK_TABLE:
return("TABLE");
default:
return("UNKNOWN");
}
}
/*******************************************************************//**
Gets the table on which the lock is.
@return table */
UNIV_INLINE
dict_table_t*
lock_get_table(
/*===========*/
const lock_t* lock) /*!< in: lock */
{
switch (lock_get_type_low(lock)) {
case LOCK_REC:
return(lock->index->table);
case LOCK_TABLE:
return(lock->un_member.tab_lock.table);
default:
ut_error;
return(NULL);
}
}
/*******************************************************************//**
Gets the id of the table on which the lock is.
@return id of the table */
UNIV_INTERN
table_id_t
lock_get_table_id(
/*==============*/
const lock_t* lock) /*!< in: lock */
{
dict_table_t* table;
table = lock_get_table(lock);
return(table->id);
}
/*******************************************************************//**
Gets the name of the table on which the lock is.
The string should not be free()'d or modified.
@return name of the table */
UNIV_INTERN
const char*
lock_get_table_name(
/*================*/
const lock_t* lock) /*!< in: lock */
{
dict_table_t* table;
table = lock_get_table(lock);
return(table->name);
}
/*******************************************************************//**
For a record lock, gets the index on which the lock is.
@return index */
UNIV_INTERN
const dict_index_t*
lock_rec_get_index(
/*===============*/
const lock_t* lock) /*!< in: lock */
{
ut_a(lock_get_type_low(lock) == LOCK_REC);
return(lock->index);
}
/*******************************************************************//**
For a record lock, gets the name of the index on which the lock is.
The string should not be free()'d or modified.
@return name of the index */
UNIV_INTERN
const char*
lock_rec_get_index_name(
/*====================*/
const lock_t* lock) /*!< in: lock */
{
ut_a(lock_get_type_low(lock) == LOCK_REC);
return(lock->index->name);
}
/*******************************************************************//**
For a record lock, gets the tablespace number on which the lock is.
@return tablespace number */
UNIV_INTERN
ulint
lock_rec_get_space_id(
/*==================*/
const lock_t* lock) /*!< in: lock */
{
ut_a(lock_get_type_low(lock) == LOCK_REC);
return(lock->un_member.rec_lock.space);
}
/*******************************************************************//**
For a record lock, gets the page number on which the lock is.
@return page number */
UNIV_INTERN
ulint
lock_rec_get_page_no(
/*=================*/
const lock_t* lock) /*!< in: lock */
{
ut_a(lock_get_type_low(lock) == LOCK_REC);
return(lock->un_member.rec_lock.page_no);
}
| gpl-2.0 |
davidedg/check_mk_plugins | sms_skebby/readme.md | 971 | # SMS Skebby Notifications #
----------------------------
Check_MK plugin for SMS Notifications via Skebby.it
## Installation ##
As the site user, download the latest MKP, then install the package:
$ cmk -vP install sms_skebby-<version>.mkp
## WATO Configuration ##
In WATO, under Users, create or modify the user for whom you want to enable SMS notifications, then choose Properties.
Set the PAGER ADDRESS to the user's mobile number, without a leading '+' or '00'.
Example: 393331234567
-> Italy (+39), TIM (333), Number (1234567)
You can also specify multiple addresses, separated by commas.
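Example (illustrative numbers): 393331234567,393471234567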
Then enable a Flexible Custom Notification using `SMS using Skebby.it via https` and use these parameters:
Parameter | Value
------------- | -------------
Username | Your Skebby.it username
Password | Your Skebby.it password
SMS Type (optional) | One of: *send_sms, send_sms_basic, `send_sms_classic`, send_sms_classic_report, test_send_sms_classic, test_send_sms_classic_report, test_send_sms_basic*
| gpl-2.0 |
teamfx/openjfx-9-dev-rt | modules/javafx.graphics/src/main/native-glass/win/ViewContainer.cpp | 58952 | /*
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "common.h"
#include <UIAutomation.h>
#include "GlassApplication.h"
#include "ViewContainer.h"
#include "GlassView.h"
#include "KeyTable.h"
#include "Utils.h"
#include "GlassDnD.h"
#include "GlassInputTextInfo.h"
#include "ManipulationEvents.h"
#include "BaseWnd.h"
#include "com_sun_glass_events_ViewEvent.h"
#include "com_sun_glass_events_KeyEvent.h"
#include "com_sun_glass_events_MouseEvent.h"
#include "com_sun_glass_events_DndEvent.h"
#include "com_sun_glass_events_TouchEvent.h"
static UINT LangToCodePage(LANGID idLang)
{
WCHAR strCodePage[8];
// use the LANGID to create a LCID
LCID idLocale = MAKELCID(idLang, SORT_DEFAULT);
// get the ANSI code page associated with this locale
if (::GetLocaleInfo(idLocale, LOCALE_IDEFAULTANSICODEPAGE,
strCodePage, sizeof(strCodePage) / sizeof(WCHAR)) > 0)
{
return _wtoi(strCodePage);
} else {
return ::GetACP();
}
}
namespace {
bool IsTouchEvent()
{
// Read this link if you wonder why we need to hard code the mask and signature:
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms703320(v=vs.85).aspx
//"The lower 8 bits returned from GetMessageExtraInfo are variable.
// Of those bits, 7 (the lower 7, masked by 0x7F) are used to represent the cursor ID,
// zero for the mouse or a variable value for the pen ID.
// Additionally, in Windows Vista, the eighth bit, masked by 0x80, is used to
// differentiate touch input from pen input (0 = pen, 1 = touch)."
UINT SIGNATURE = 0xFF515780;
UINT MASK = 0xFFFFFF80;
UINT v = (UINT) GetMessageExtraInfo();
return ((v & MASK) == SIGNATURE);
}
} // namespace
ViewContainer::ViewContainer() :
m_view(NULL),
m_bTrackingMouse(FALSE),
m_manipProc(NULL),
m_inertiaProc(NULL),
m_manipEventSink(NULL),
m_gestureSupportCls(NULL),
m_lastMouseMovePosition(-1),
m_mouseButtonDownCounter(0),
m_deadKeyWParam(0)
{
m_kbLayout = ::GetKeyboardLayout(0);
m_idLang = LOWORD(m_kbLayout);
m_codePage = LangToCodePage(m_idLang);
m_lastTouchInputCount = 0;
}
jobject ViewContainer::GetView()
{
return GetGlassView() != NULL ? GetGlassView()->GetView() : NULL;
}
void ViewContainer::InitDropTarget(HWND hwnd)
{
if (!hwnd) {
return;
}
m_spDropTarget =
std::auto_ptr<IDropTarget>(new GlassDropTarget(this, hwnd));
}
void ViewContainer::ReleaseDropTarget()
{
m_spDropTarget = std::auto_ptr<IDropTarget>();
}
void ViewContainer::InitManipProcessor(HWND hwnd)
{
if (IS_WIN7) {
::RegisterTouchWindow(hwnd, TWF_WANTPALM);
HRESULT hr = ::CoCreateInstance(CLSID_ManipulationProcessor,
NULL,
CLSCTX_INPROC_SERVER,
IID_IUnknown,
(VOID**)(&m_manipProc)
);
if (SUCCEEDED(hr)) {
::CoCreateInstance(CLSID_InertiaProcessor,
NULL,
CLSCTX_INPROC_SERVER,
IID_IUnknown,
(VOID**)(&m_inertiaProc)
);
m_manipEventSink =
new ManipulationEventSinkWithInertia(m_manipProc, m_inertiaProc, this, hwnd);
}
const DWORD_PTR dwHwndTabletProperty =
TABLET_DISABLE_PENTAPFEEDBACK |
TABLET_DISABLE_PENBARRELFEEDBACK |
TABLET_DISABLE_FLICKS;
::SetProp(hwnd, MICROSOFT_TABLETPENSERVICE_PROPERTY, reinterpret_cast<HANDLE>(dwHwndTabletProperty));
if (!m_gestureSupportCls) {
JNIEnv *env = GetEnv();
const jclass cls = GlassApplication::ClassForName(env,
"com.sun.glass.ui.win.WinGestureSupport");
m_gestureSupportCls = (jclass)env->NewGlobalRef(cls);
env->DeleteLocalRef(cls);
ASSERT(m_gestureSupportCls);
}
}
}
void ViewContainer::ReleaseManipProcessor()
{
if (IS_WIN7) {
if (m_manipProc) {
m_manipProc->Release();
m_manipProc = NULL;
}
if (m_inertiaProc) {
m_inertiaProc->Release();
m_inertiaProc = NULL;
}
if (m_manipEventSink) {
m_manipEventSink->Release();
m_manipEventSink = NULL;
}
}
if (m_gestureSupportCls) {
JNIEnv *env = GetEnv();
env->DeleteGlobalRef(m_gestureSupportCls);
m_gestureSupportCls = 0;
}
}
void ViewContainer::HandleViewInputLangChange(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
if (!GetGlassView()) {
return;
}
m_kbLayout = reinterpret_cast<HKL>(lParam);
m_idLang = LOWORD(m_kbLayout);
m_codePage = LangToCodePage(m_idLang);
m_deadKeyWParam = 0;
}
void ViewContainer::NotifyViewMoved(HWND hwnd)
{
if (!hwnd || !GetGlassView()) {
return;
}
JNIEnv* env = GetEnv();
env->CallVoidMethod(GetView(), javaIDs.View.notifyView,
com_sun_glass_events_ViewEvent_MOVE);
CheckAndClearException(env);
}
void ViewContainer::NotifyViewSize(HWND hwnd)
{
if (!hwnd || !GetGlassView()) {
return;
}
RECT r;
if (::GetClientRect(hwnd, &r)) {
JNIEnv* env = GetEnv();
env->CallVoidMethod(GetView(), javaIDs.View.notifyResize,
r.right-r.left, r.bottom - r.top);
CheckAndClearException(env);
}
}
void ViewContainer::HandleViewPaintEvent(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
if (!GetGlassView()) {
return;
}
RECT r;
if (!::GetUpdateRect(hwnd, &r, FALSE)) {
return;
}
JNIEnv* env = GetEnv();
env->CallVoidMethod(GetView(), javaIDs.View.notifyRepaint,
r.left, r.top, r.right-r.left, r.bottom-r.top);
CheckAndClearException(env);
}
LRESULT ViewContainer::HandleViewGetAccessible(HWND hwnd, WPARAM wParam, LPARAM lParam)
{
if (!GetGlassView()) {
return NULL;
}
/* WM_GETOBJECT is sent to request different object types,
* always test the type to avoid unnecessary work.
*/
LRESULT lr = NULL;
if (static_cast<long>(lParam) == static_cast<long>(UiaRootObjectId)) {
/* The client is requesting UI Automation. */
JNIEnv* env = GetEnv();
if (!env) return NULL;
jlong pProvider = env->CallLongMethod(GetView(), javaIDs.View.getAccessible);
CheckAndClearException(env);
/* It is possible WM_GETOBJECT is sent before the toolkit is ready to
* create the accessible object (getAccessible returns NULL).
* On Windows 7, calling UiaReturnRawElementProvider() with a NULL provider
* returns an invalid LRESULT which stops further WM_GETOBJECT to be sent,
* effectively disabling accessibility for the window.
*/
if (pProvider) {
lr = UiaReturnRawElementProvider(hwnd, wParam, lParam, reinterpret_cast<IRawElementProviderSimple*>(pProvider));
}
} else if (static_cast<long>(lParam) == static_cast<long>(OBJID_CLIENT)) {
/* By default JAWS does not send WM_GETOBJECT with UiaRootObjectId till
* a focus event is raised by UiaRaiseAutomationEvent().
* In some systems (i.e. touch monitors), OBJID_CLIENT are sent when
* no screen reader is active. Test for SPI_GETSCREENREADER and
* UiaClientsAreListening() to avoid initializing accessibility
* unnecessarily.
*/
UINT screenReader = 0;
::SystemParametersInfo(SPI_GETSCREENREADER, 0, &screenReader, 0);
if (screenReader && UiaClientsAreListening()) {
JNIEnv* env = GetEnv();
if (env) {
/* Calling getAccessible() initializes accessibility which
* eventually raises the focus events required to indicate to
* JAWS to use UIA for this window.
*
* Note: do not return the accessible object for OBJID_CLIENT,
* that would create an UIA-MSAA bridge. That problem with the
* bridge is that it does not respect
* ProviderOptions_UseComThreading.
*/
env->CallLongMethod(GetView(), javaIDs.View.getAccessible);
CheckAndClearException(env);
}
}
}
return lr;
}
void ViewContainer::HandleViewSizeEvent(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
if (wParam == SIZE_MINIMIZED) {
return;
}
NotifyViewSize(hwnd);
}
void ViewContainer::HandleViewMenuEvent(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
if (!GetGlassView()) {
return;
}
if ((HWND)wParam != hwnd) {
return;
}
jboolean isKeyboardTrigger = lParam == (LPARAM)-1;
if (isKeyboardTrigger) {
lParam = ::GetMessagePos ();
}
POINT pt;
int absX = pt.x = GET_X_LPARAM(lParam);
int absY = pt.y = GET_Y_LPARAM(lParam);
::ScreenToClient (hwnd, &pt);
if (!isKeyboardTrigger) {
RECT rect;
::GetClientRect(hwnd, &rect);
if (!::PtInRect(&rect, pt)) {
return;
}
}
// unmirror the x coordinate
LONG style = ::GetWindowLong(hwnd, GWL_EXSTYLE);
if (style & WS_EX_LAYOUTRTL) {
RECT rect = {0};
::GetClientRect(hwnd, &rect);
pt.x = max(0, rect.right - rect.left) - pt.x;
}
JNIEnv* env = GetEnv();
env->CallVoidMethod(GetView(), javaIDs.View.notifyMenu, pt.x, pt.y, absX, absY, isKeyboardTrigger);
CheckAndClearException(env);
}
void ViewContainer::HandleViewKeyEvent(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
if (!GetGlassView()) {
return;
}
static const BYTE KEY_STATE_DOWN = 0x80;
UINT wKey = static_cast<UINT>(wParam);
UINT flags = HIWORD(lParam);
jint jKeyCode = WindowsKeyToJavaKey(wKey);
if (flags & (1 << 8)) {
        // this is an extended key (KF_EXTENDED bit set; e.g. Right ALT == AltGr)
switch (jKeyCode) {
case com_sun_glass_events_KeyEvent_VK_ALT:
jKeyCode = com_sun_glass_events_KeyEvent_VK_ALT_GRAPH;
break;
}
}
BYTE kbState[256];
if (!::GetKeyboardState(kbState)) {
return;
}
jint jModifiers = GetModifiers();
if (jModifiers & com_sun_glass_events_KeyEvent_MODIFIER_CONTROL) {
kbState[VK_CONTROL] &= ~KEY_STATE_DOWN;
}
WORD mbChar;
UINT scancode = ::MapVirtualKeyEx(wKey, 0, m_kbLayout);
    // Clear modifier state so the key maps to its unmodified Unicode char
kbState[VK_CONTROL] &= ~0x80;
kbState[VK_SHIFT] &= ~0x80;
kbState[VK_MENU] &= ~0x80;
int converted = ::ToAsciiEx(wKey, scancode, kbState,
&mbChar, 0, m_kbLayout);
wchar_t wChar[4] = {0};
int unicodeConverted = ::ToUnicodeEx(wKey, scancode, kbState,
wChar, 4, 0, m_kbLayout);
// Some virtual codes require special handling
switch (wKey) {
case 0x00BA:// VK_OEM_1
case 0x00BB:// VK_OEM_PLUS
case 0x00BC:// VK_OEM_COMMA
case 0x00BD:// VK_OEM_MINUS
case 0x00BE:// VK_OEM_PERIOD
case 0x00BF:// VK_OEM_2
case 0x00C0:// VK_OEM_3
case 0x00DB:// VK_OEM_4
case 0x00DC:// VK_OEM_5
case 0x00DD:// VK_OEM_6
case 0x00DE:// VK_OEM_7
case 0x00DF:// VK_OEM_8
case 0x00E2:// VK_OEM_102
if (unicodeConverted < 0) {
// Dead key
switch (wChar[0]) {
case L'`': jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_GRAVE; break;
case L'\'': jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_ACUTE; break;
case 0x00B4: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_ACUTE; break;
case L'^': jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_CIRCUMFLEX; break;
case L'~': jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_TILDE; break;
case 0x02DC: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_TILDE; break;
case 0x00AF: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_MACRON; break;
case 0x02D8: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_BREVE; break;
case 0x02D9: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_ABOVEDOT; break;
case L'"': jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_DIAERESIS; break;
case 0x00A8: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_DIAERESIS; break;
case 0x02DA: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_ABOVERING; break;
case 0x02DD: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_DOUBLEACUTE; break;
case 0x02C7: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_CARON; break; // aka hacek
case L',': jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_CEDILLA; break;
case 0x00B8: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_CEDILLA; break;
case 0x02DB: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_OGONEK; break;
case 0x037A: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_IOTA; break; // ASCII ???
case 0x309B: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_VOICED_SOUND; break;
case 0x309C: jKeyCode = com_sun_glass_events_KeyEvent_VK_DEAD_SEMIVOICED_SOUND; break;
default: jKeyCode = com_sun_glass_events_KeyEvent_VK_UNDEFINED; break;
};
} else if (unicodeConverted == 1) {
switch (wChar[0]) {
case L'!': jKeyCode = com_sun_glass_events_KeyEvent_VK_EXCLAMATION; break;
case L'"': jKeyCode = com_sun_glass_events_KeyEvent_VK_DOUBLE_QUOTE; break;
case L'#': jKeyCode = com_sun_glass_events_KeyEvent_VK_NUMBER_SIGN; break;
case L'$': jKeyCode = com_sun_glass_events_KeyEvent_VK_DOLLAR; break;
case L'&': jKeyCode = com_sun_glass_events_KeyEvent_VK_AMPERSAND; break;
case L'\'': jKeyCode = com_sun_glass_events_KeyEvent_VK_QUOTE; break;
case L'(': jKeyCode = com_sun_glass_events_KeyEvent_VK_LEFT_PARENTHESIS; break;
case L')': jKeyCode = com_sun_glass_events_KeyEvent_VK_RIGHT_PARENTHESIS; break;
case L'*': jKeyCode = com_sun_glass_events_KeyEvent_VK_ASTERISK; break;
case L'+': jKeyCode = com_sun_glass_events_KeyEvent_VK_PLUS; break;
case L',': jKeyCode = com_sun_glass_events_KeyEvent_VK_COMMA; break;
case L'-': jKeyCode = com_sun_glass_events_KeyEvent_VK_MINUS; break;
case L'.': jKeyCode = com_sun_glass_events_KeyEvent_VK_PERIOD; break;
case L'/': jKeyCode = com_sun_glass_events_KeyEvent_VK_SLASH; break;
case L':': jKeyCode = com_sun_glass_events_KeyEvent_VK_COLON; break;
case L';': jKeyCode = com_sun_glass_events_KeyEvent_VK_SEMICOLON; break;
case L'<': jKeyCode = com_sun_glass_events_KeyEvent_VK_LESS; break;
case L'=': jKeyCode = com_sun_glass_events_KeyEvent_VK_EQUALS; break;
case L'>': jKeyCode = com_sun_glass_events_KeyEvent_VK_GREATER; break;
case L'@': jKeyCode = com_sun_glass_events_KeyEvent_VK_AT; break;
case L'[': jKeyCode = com_sun_glass_events_KeyEvent_VK_OPEN_BRACKET; break;
case L'\\': jKeyCode = com_sun_glass_events_KeyEvent_VK_BACK_SLASH; break;
case L']': jKeyCode = com_sun_glass_events_KeyEvent_VK_CLOSE_BRACKET; break;
case L'^': jKeyCode = com_sun_glass_events_KeyEvent_VK_CIRCUMFLEX; break;
case L'_': jKeyCode = com_sun_glass_events_KeyEvent_VK_UNDERSCORE; break;
case L'`': jKeyCode = com_sun_glass_events_KeyEvent_VK_BACK_QUOTE; break;
case L'{': jKeyCode = com_sun_glass_events_KeyEvent_VK_BRACELEFT; break;
case L'}': jKeyCode = com_sun_glass_events_KeyEvent_VK_BRACERIGHT; break;
case 0x00A1: jKeyCode = com_sun_glass_events_KeyEvent_VK_INV_EXCLAMATION; break;
case 0x20A0: jKeyCode = com_sun_glass_events_KeyEvent_VK_EURO_SIGN; break;
default: jKeyCode = com_sun_glass_events_KeyEvent_VK_UNDEFINED; break;
}
} else if (unicodeConverted == 0 || unicodeConverted > 1) {
jKeyCode = com_sun_glass_events_KeyEvent_VK_UNDEFINED;
}
break;
};
int keyCharCount = 0;
jchar keyChars[4];
const bool isAutoRepeat = (msg == WM_KEYDOWN || msg == WM_SYSKEYDOWN)
&& (lParam & (1 << 30));
if (converted < 0) {
// Dead key
return;
} else if (converted == 0) {
// No translation available
keyCharCount = 0;
// This includes SHIFT, CONTROL, ALT, etc.
// RT-17062: suppress auto-repeated events for modifier keys
if (isAutoRepeat) {
switch (jKeyCode) {
case com_sun_glass_events_KeyEvent_VK_SHIFT:
case com_sun_glass_events_KeyEvent_VK_CONTROL:
case com_sun_glass_events_KeyEvent_VK_ALT:
case com_sun_glass_events_KeyEvent_VK_ALT_GRAPH:
case com_sun_glass_events_KeyEvent_VK_WINDOWS:
return;
}
}
} else {
// Handle some special cases
if ((wKey == VK_BACK) ||
(wKey == VK_ESCAPE))
{
keyCharCount = 0;
} else {
keyCharCount = ::MultiByteToWideChar(m_codePage, MB_PRECOMPOSED,
(LPCSTR)&mbChar, 2, (LPWSTR)keyChars,
4 * sizeof(jchar)) - 1;
if (keyCharCount <= 0) {
return;
}
}
}
JNIEnv* env = GetEnv();
jcharArray jKeyChars = env->NewCharArray(keyCharCount);
if (jKeyChars) {
if (keyCharCount) {
env->SetCharArrayRegion(jKeyChars, 0, keyCharCount, keyChars);
CheckAndClearException(env);
}
if (jKeyCode == com_sun_glass_events_KeyEvent_VK_PRINTSCREEN &&
(msg == WM_KEYUP || msg == WM_SYSKEYUP))
{
// MS Windows doesn't send WM_KEYDOWN for the PrintScreen key,
// so we synthesize one
env->CallVoidMethod(GetView(), javaIDs.View.notifyKey,
com_sun_glass_events_KeyEvent_PRESS,
jKeyCode, jKeyChars, jModifiers);
CheckAndClearException(env);
}
if (GetGlassView()) {
env->CallVoidMethod(GetView(), javaIDs.View.notifyKey,
(msg == WM_KEYDOWN || msg == WM_SYSKEYDOWN) ?
com_sun_glass_events_KeyEvent_PRESS : com_sun_glass_events_KeyEvent_RELEASE,
jKeyCode, jKeyChars, jModifiers);
CheckAndClearException(env);
}
// MS Windows doesn't send WM_CHAR for the Delete key,
// so we synthesize one
if (jKeyCode == com_sun_glass_events_KeyEvent_VK_DELETE &&
(msg == WM_KEYDOWN || msg == WM_SYSKEYDOWN) &&
GetGlassView())
{
// 0x7F == U+007F - a Unicode character for DELETE
SendViewTypedEvent(1, (jchar)0x7F);
}
env->DeleteLocalRef(jKeyChars);
}
}
void ViewContainer::SendViewTypedEvent(int repCount, jchar wChar)
{
if (!GetGlassView()) {
return;
}
JNIEnv* env = GetEnv();
jcharArray jKeyChars = env->NewCharArray(repCount);
if (jKeyChars) {
jchar* nKeyChars = env->GetCharArrayElements(jKeyChars, NULL);
if (nKeyChars) {
for (int i = 0; i < repCount; i++) {
nKeyChars[i] = wChar;
}
env->ReleaseCharArrayElements(jKeyChars, nKeyChars, 0);
env->CallVoidMethod(GetView(), javaIDs.View.notifyKey,
com_sun_glass_events_KeyEvent_TYPED,
com_sun_glass_events_KeyEvent_VK_UNDEFINED, jKeyChars,
GetModifiers());
CheckAndClearException(env);
}
env->DeleteLocalRef(jKeyChars);
}
}
void ViewContainer::HandleViewDeadKeyEvent(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
if (!GetGlassView()) {
return;
}
if (!m_deadKeyWParam) {
// HandleViewKeyEvent() calls ::ToAsciiEx and ::ToUnicodeEx which clear
// the dead key status from the keyboard layout. We store the current dead
// key here to use it when processing WM_CHAR in order to get the
// actual character typed.
m_deadKeyWParam = wParam;
} else {
// There already was another dead key pressed previously. Clear it
// and send two separate TYPED events instead to emulate native behavior.
SendViewTypedEvent(1, (jchar)m_deadKeyWParam);
SendViewTypedEvent(1, (jchar)wParam);
m_deadKeyWParam = 0;
}
// Since we handle dead keys ourselves, reset the keyboard dead key status (if any)
static BYTE kbState[256];
::GetKeyboardState(kbState);
WORD ignored;
::ToAsciiEx(VK_SPACE, ::MapVirtualKey(VK_SPACE, 0),
kbState, &ignored, 0, m_kbLayout);
}
void ViewContainer::HandleViewTypedEvent(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
if (!GetGlassView()) {
return;
}
int repCount = LOWORD(lParam);
jchar wChar;
if (!m_deadKeyWParam) {
wChar = (jchar)wParam;
} else {
// The character is composed together with the dead key, which
// may be translated into one or more combining characters.
const size_t COMP_SIZE = 5;
wchar_t comp[COMP_SIZE] = { (wchar_t)wParam };
// Some dead keys need additional translation:
// http://www.fileformat.info/info/unicode/block/combining_diacritical_marks/images.htm
// Also see awt_Component.cpp for the original dead keys table
if (LOBYTE(m_idLang) == LANG_GREEK) {
switch (m_deadKeyWParam) {
case L']': comp[1] = 0x300; break; // varia
case L';': comp[1] = 0x301; break; // oxia (wrong? generates tonos, not oxia)
case L'-': comp[1] = 0x304; break; // macron
case L'_': comp[1] = 0x306; break; // vrachy
case L':': comp[1] = 0x308; break; // dialytika
case L'"': comp[1] = 0x314; break; // dasia
case 0x0384: comp[1] = 0x341; break; // tonos
case L'[': comp[1] = 0x342; break; // perispomeni
case L'\'': comp[1] = 0x343; break; // psili
case L'~': comp[1] = 0x344; break; // dialytika oxia
case L'{': comp[1] = 0x345; break; // ypogegrammeni
case L'`': comp[1] = 0x308; comp[2] = 0x300; break; // dialytika varia
case L'\\': comp[1] = 0x313; comp[2] = 0x300; break; // psili varia
case L'/': comp[1] = 0x313; comp[2] = 0x301; break; // psili oxia
case L'=': comp[1] = 0x313; comp[2] = 0x342; break; // psili perispomeni
case L'|': comp[1] = 0x314; comp[2] = 0x300; break; // dasia varia
case L'?': comp[1] = 0x314; comp[2] = 0x301; break; // dasia oxia
case L'+': comp[1] = 0x314; comp[2] = 0x342; break; // dasia perispomeni
// AltGr dead chars don't work. Maybe kbd isn't reset properly?
// case 0x1fc1: comp[1] = 0x308; comp[2] = 0x342; break; // dialytika perispomeni
// case 0x1fde: comp[1] = 0x314; comp[2] = 0x301; comp[3] = 0x345; break; // dasia oxia ypogegrammeni
default: comp[1] = static_cast<wchar_t>(m_deadKeyWParam); break;
}
} else if (HIWORD(m_kbLayout) == 0xF0B1 && LOBYTE(m_idLang) == LANG_LATVIAN) {
// The Latvian (Standard) keyboard, available in Win 8.1 and later.
switch (m_deadKeyWParam) {
case L'\'':
case L'"':
// Note: " is Shift-' and automatically capitalizes the typed
// character in native Win 8.1 apps. We don't do this, so the user
// needs to keep the Shift key down. This is probably the common use
// case anyway.
switch (wParam) {
case L'A': case L'a':
case L'E': case L'e':
case L'I': case L'i':
case L'U': case L'u':
comp[1] = 0x304; break; // macron
case L'C': case L'c':
case L'S': case L's':
case L'Z': case L'z':
comp[1] = 0x30c; break; // caron
case L'G': case L'g':
case L'K': case L'k':
case L'L': case L'l':
case L'N': case L'n':
comp[1] = 0x327; break; // cedilla
default:
comp[1] = static_cast<wchar_t>(m_deadKeyWParam); break;
} break;
default: comp[1] = static_cast<wchar_t>(m_deadKeyWParam); break;
}
} else {
switch (m_deadKeyWParam) {
case L'`': comp[1] = 0x300; break;
case L'\'': comp[1] = 0x301; break;
case 0x00B4: comp[1] = 0x301; break;
case L'^': comp[1] = 0x302; break;
case L'~': comp[1] = 0x303; break;
case 0x02DC: comp[1] = 0x303; break;
case 0x00AF: comp[1] = 0x304; break;
case 0x02D8: comp[1] = 0x306; break;
case 0x02D9: comp[1] = 0x307; break;
case L'"': comp[1] = 0x308; break;
case 0x00A8: comp[1] = 0x308; break;
case 0x00B0: comp[1] = 0x30A; break;
case 0x02DA: comp[1] = 0x30A; break;
case 0x02DD: comp[1] = 0x30B; break;
case 0x02C7: comp[1] = 0x30C; break;
case L',': comp[1] = 0x327; break;
case 0x00B8: comp[1] = 0x327; break;
case 0x02DB: comp[1] = 0x328; break;
default: comp[1] = static_cast<wchar_t>(m_deadKeyWParam); break;
}
}
int compSize = 3;
for (int i = 1; i < COMP_SIZE; i++) {
if (comp[i] == L'\0') {
compSize = i + 1;
break;
}
}
wchar_t out[3];
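        // ::FoldString with MAP_PRECOMPOSED merges the base character and the
        // combining marks into a single precomposed code point, when one exists.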
int res = ::FoldString(MAP_PRECOMPOSED, (LPWSTR)comp, compSize, (LPWSTR)out, 3);
if (res > 0) {
wChar = (jchar)out[0];
if (res == 3) {
// The character cannot be accented, so we send a TYPED event
// for the dead key itself first.
SendViewTypedEvent(1, (jchar)m_deadKeyWParam);
}
} else {
// Folding failed. Use the untranslated original character then
wChar = (jchar)wParam;
}
// Clear the dead key
m_deadKeyWParam = 0;
}
SendViewTypedEvent(repCount, wChar);
}
BOOL ViewContainer::HandleViewMouseEvent(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
if (!GetGlassView()) {
return FALSE;
}
int type = 0;
int button = com_sun_glass_events_MouseEvent_BUTTON_NONE;
POINT pt; // client coords
jdouble wheelRotation = 0.0;
if (msg == WM_MOUSELEAVE) {
type = com_sun_glass_events_MouseEvent_EXIT;
// get the coords (the message does not contain them)
lParam = ::GetMessagePos();
pt.x = GET_X_LPARAM(lParam);
pt.y = GET_Y_LPARAM(lParam);
// this is screen coords, convert to client
::ScreenToClient(hwnd, &pt);
// Windows has finished tracking mouse pointer already
m_bTrackingMouse = FALSE;
m_lastMouseMovePosition = -1;
} else {
// for all other messages lParam contains cursor coords
pt.x = GET_X_LPARAM(lParam);
pt.y = GET_Y_LPARAM(lParam);
switch (msg) {
case WM_MOUSEMOVE:
if (lParam == m_lastMouseMovePosition) {
                // Avoid sending synthetic MOVE/DRAG events if the
                // pointer hasn't actually moved.
                // Just consume the message.
return TRUE;
} else {
m_lastMouseMovePosition = lParam;
}
// See RT-11305 regarding the GetCapture() check
if ((wParam & (MK_LBUTTON | MK_RBUTTON | MK_MBUTTON)) != 0 && ::GetCapture() == hwnd) {
type = com_sun_glass_events_MouseEvent_DRAG;
} else {
type = com_sun_glass_events_MouseEvent_MOVE;
}
// Due to RT-11305 we should report the pressed button for both
// MOVE and DRAG. This also enables one to filter out these
// events in client code in case they're undesired.
if (wParam & MK_RBUTTON) {
button = com_sun_glass_events_MouseEvent_BUTTON_RIGHT;
} else if (wParam & MK_LBUTTON) {
button = com_sun_glass_events_MouseEvent_BUTTON_LEFT;
} else if (wParam & MK_MBUTTON) {
button = com_sun_glass_events_MouseEvent_BUTTON_OTHER;
}
break;
case WM_LBUTTONDOWN:
type = com_sun_glass_events_MouseEvent_DOWN;
button = com_sun_glass_events_MouseEvent_BUTTON_LEFT;
break;
case WM_LBUTTONUP:
type = com_sun_glass_events_MouseEvent_UP;
button = com_sun_glass_events_MouseEvent_BUTTON_LEFT;
break;
case WM_RBUTTONDOWN:
type = com_sun_glass_events_MouseEvent_DOWN;
button = com_sun_glass_events_MouseEvent_BUTTON_RIGHT;
break;
case WM_RBUTTONUP:
type = com_sun_glass_events_MouseEvent_UP;
button = com_sun_glass_events_MouseEvent_BUTTON_RIGHT;
break;
case WM_MBUTTONDOWN:
type = com_sun_glass_events_MouseEvent_DOWN;
button = com_sun_glass_events_MouseEvent_BUTTON_OTHER;
break;
case WM_MBUTTONUP:
type = com_sun_glass_events_MouseEvent_UP;
button = com_sun_glass_events_MouseEvent_BUTTON_OTHER;
break;
case WM_MOUSEWHEEL:
case WM_MOUSEHWHEEL:
{
// MS Windows always sends WHEEL events to the focused window.
// Redirect the message to a Glass window under the mouse
// cursor instead to match Mac behavior
HWND hwndUnderCursor = ::ChildWindowFromPointEx(
::GetDesktopWindow(), pt,
CWP_SKIPDISABLED | CWP_SKIPINVISIBLE);
if (hwndUnderCursor && hwndUnderCursor != hwnd)
{
DWORD hWndUnderCursorProcess;
::GetWindowThreadProcessId(hwndUnderCursor, &hWndUnderCursorProcess);
if (::GetCurrentProcessId() == hWndUnderCursorProcess) {
return (BOOL)::SendMessage(hwndUnderCursor, msg, wParam, lParam);
}
}
// if there's none, proceed as usual
type = com_sun_glass_events_MouseEvent_WHEEL;
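                // GET_WHEEL_DELTA_WPARAM() reports multiples of WHEEL_DELTA
                // (120); normalize so that one notch equals 1.0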
wheelRotation = (jdouble)GET_WHEEL_DELTA_WPARAM(wParam) / WHEEL_DELTA;
}
break;
}
}
switch (type) {
case 0:
// not handled
return FALSE;
case com_sun_glass_events_MouseEvent_DOWN:
m_mouseButtonDownCounter++;
if (::GetCapture() != hwnd) {
::SetCapture(hwnd);
}
break;
case com_sun_glass_events_MouseEvent_UP:
if (m_mouseButtonDownCounter) {
m_mouseButtonDownCounter--;
} //else { internal inconsistency; quite unimportant though }
if (::GetCapture() == hwnd && !m_mouseButtonDownCounter) {
::ReleaseCapture();
}
break;
}
// get screen coords
POINT ptAbs = pt;
if (type == com_sun_glass_events_MouseEvent_WHEEL) {
::ScreenToClient(hwnd, &pt);
} else {
::ClientToScreen(hwnd, &ptAbs);
}
// unmirror the x coordinate
LONG style = ::GetWindowLong(hwnd, GWL_EXSTYLE);
if (style & WS_EX_LAYOUTRTL) {
RECT rect = {0};
::GetClientRect(hwnd, &rect);
pt.x = max(0, rect.right - rect.left) - pt.x;
}
jint jModifiers = GetModifiers();
const jboolean isSynthesized = jboolean(IsTouchEvent());
JNIEnv *env = GetEnv();
if (!m_bTrackingMouse && type != com_sun_glass_events_MouseEvent_EXIT) {
TRACKMOUSEEVENT trackData;
trackData.cbSize = sizeof(trackData);
trackData.dwFlags = TME_LEAVE;
trackData.hwndTrack = hwnd;
trackData.dwHoverTime = HOVER_DEFAULT;
if (::TrackMouseEvent(&trackData)) {
// Mouse tracking will be canceled automatically upon receiving WM_MOUSELEAVE
m_bTrackingMouse = TRUE;
}
        // Note that (ViewContainer*)this != (BaseWnd*)this. We could use
        // dynamic_cast<>() instead, but it would fail later if 'this' is
        // already deleted. So we use FromHandle() which is safe.
const BaseWnd *origWnd = BaseWnd::FromHandle(hwnd);
env->CallVoidMethod(GetView(), javaIDs.View.notifyMouse,
com_sun_glass_events_MouseEvent_ENTER,
com_sun_glass_events_MouseEvent_BUTTON_NONE,
pt.x, pt.y, ptAbs.x, ptAbs.y,
jModifiers, JNI_FALSE, isSynthesized);
CheckAndClearException(env);
// At this point 'this' might have already been deleted if the app
// closed the window while processing the ENTER event. Hence the check:
if (!::IsWindow(hwnd) || BaseWnd::FromHandle(hwnd) != origWnd ||
!GetGlassView())
{
return TRUE;
}
}
switch (type) {
case com_sun_glass_events_MouseEvent_DOWN:
GlassDropSource::SetDragButton(button);
break;
case com_sun_glass_events_MouseEvent_UP:
GlassDropSource::SetDragButton(0);
break;
}
if (type == com_sun_glass_events_MouseEvent_WHEEL) {
jdouble dx, dy;
if (msg == WM_MOUSEHWHEEL) { // native horizontal scroll
// Negate the value to be more "natural"
dx = -wheelRotation;
dy = 0.0;
} else if (msg == WM_MOUSEWHEEL && LOWORD(wParam) & MK_SHIFT) {
// Do not negate the emulated horizontal scroll amount
dx = wheelRotation;
dy = 0.0;
} else { // vertical scroll
dx = 0.0;
dy = wheelRotation;
}
jint ls, cs;
UINT val = 0;
::SystemParametersInfo(SPI_GETWHEELSCROLLLINES, 0, &val, 0);
ls = (jint)val;
val = 0;
::SystemParametersInfo(SPI_GETWHEELSCROLLCHARS, 0, &val, 0);
cs = (jint)val;
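        // ls/cs hold the user's wheel settings (lines/chars per notch); the
        // literal 3, 3, 40.0, 40.0 below are presumably the default line/char
        // counts and x/y pixel multipliers that View.notifyScroll() expects.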
env->CallVoidMethod(GetView(), javaIDs.View.notifyScroll,
pt.x, pt.y, ptAbs.x, ptAbs.y,
dx, dy, jModifiers, ls, cs, 3, 3, (jdouble)40.0, (jdouble)40.0);
} else {
env->CallVoidMethod(GetView(), javaIDs.View.notifyMouse,
type, button, pt.x, pt.y, ptAbs.x, ptAbs.y,
jModifiers,
type == com_sun_glass_events_MouseEvent_UP && button == com_sun_glass_events_MouseEvent_BUTTON_RIGHT,
isSynthesized);
}
CheckAndClearException(env);
return TRUE;
}
void ViewContainer::NotifyCaptureChanged(HWND hwnd, HWND to)
{
m_mouseButtonDownCounter = 0;
}
void ViewContainer::ResetMouseTracking(HWND hwnd)
{
if (!m_bTrackingMouse) {
return;
}
// We don't expect WM_MOUSELEAVE anymore, so we cancel mouse tracking manually
TRACKMOUSEEVENT trackData;
trackData.cbSize = sizeof(trackData);
trackData.dwFlags = TME_LEAVE | TME_CANCEL;
trackData.hwndTrack = hwnd;
trackData.dwHoverTime = HOVER_DEFAULT;
::TrackMouseEvent(&trackData);
m_bTrackingMouse = FALSE;
if (!GetGlassView()) {
return;
}
POINT ptAbs;
::GetCursorPos(&ptAbs);
POINT pt = ptAbs;
::ScreenToClient(hwnd, &pt);
// unmirror the x coordinate
LONG style = ::GetWindowLong(hwnd, GWL_EXSTYLE);
if (style & WS_EX_LAYOUTRTL) {
RECT rect = {0};
::GetClientRect(hwnd, &rect);
pt.x = max(0, rect.right - rect.left) - pt.x;
}
JNIEnv *env = GetEnv();
env->CallVoidMethod(GetView(), javaIDs.View.notifyMouse,
com_sun_glass_events_MouseEvent_EXIT, 0, pt.x, pt.y, ptAbs.x, ptAbs.y,
GetModifiers(),
JNI_FALSE,
JNI_FALSE);
CheckAndClearException(env);
}
BOOL ViewContainer::HandleViewInputMethodEvent(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
GlassView* gv = GetGlassView();
if (!gv) {
return FALSE;
}
switch (msg) {
case WM_IME_ENDCOMPOSITION:
SendInputMethodEvent(NULL, 0, NULL, 0, NULL, NULL, 0, 0, 0);
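        // fall through: END, like START, reports whether IM events are enabled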
case WM_IME_STARTCOMPOSITION:
return gv->IsInputMethodEventEnabled();
case WM_IME_COMPOSITION:
if (gv->IsInputMethodEventEnabled()) {
WmImeComposition(hwnd, wParam, lParam);
return TRUE;
}
break;
case WM_IME_NOTIFY:
if (gv->IsInputMethodEventEnabled()) {
WmImeNotify(hwnd, wParam, lParam);
}
break;
default:
return FALSE;
}
return FALSE;
}
void ViewContainer::WmImeComposition(HWND hwnd, WPARAM wParam, LPARAM lParam)
{
BOOL ret = FALSE;
JNIEnv *env = GetEnv();
int* bndClauseW = NULL;
int* bndAttrW = NULL;
BYTE* valAttrW = NULL;
int cClauseW = 0;
GlassInputTextInfo textInfo = GlassInputTextInfo(this);
HIMC hIMC = ImmGetContext(hwnd);
ASSERT(hIMC!=0);
try {
textInfo.GetContextData(hIMC, lParam);
jstring jtextString = textInfo.GetText();
if ((lParam & GCS_RESULTSTR && jtextString != NULL) ||
(lParam & GCS_COMPSTR)) {
int cursorPosW = textInfo.GetCursorPosition();
int cAttrW = textInfo.GetAttributeInfo(bndAttrW, valAttrW);
cClauseW = textInfo.GetClauseInfo(bndClauseW);
SendInputMethodEvent(jtextString,
cClauseW, bndClauseW,
cAttrW, bndAttrW, valAttrW,
textInfo.GetCommittedTextLength(),
cursorPosW, cursorPosW);
}
ImmReleaseContext(hwnd, hIMC);
} catch (...) {
        // Since GetClauseInfo and GetAttributeInfo could throw an exception,
        // we have to free the buffers here before rethrowing.
delete [] bndClauseW;
delete [] bndAttrW;
delete [] valAttrW;
ImmReleaseContext(hwnd, hIMC);
throw;
}
    /* Free the storage allocated. Since jtextString won't be passed across
     * threads, we just use the local ref; it will be deleted within the
     * destructor of the GlassInputTextInfo object.
     */
delete [] bndClauseW;
delete [] bndAttrW;
delete [] valAttrW;
CheckAndClearException(env);
}
void ViewContainer::WmImeNotify(HWND hwnd, WPARAM wParam, LPARAM lParam)
{
if (wParam == IMN_OPENCANDIDATE || wParam == IMN_CHANGECANDIDATE) {
JNIEnv *env = GetEnv();
POINT curPos;
UINT bits = 1;
HIMC hIMC = ImmGetContext(hwnd);
CANDIDATEFORM cf;
GetCandidatePos(&curPos);
::ScreenToClient(hwnd, &curPos);
for (int iCandType=0; iCandType<32; iCandType++, bits<<=1) {
if (lParam & bits) {
cf.dwIndex = iCandType;
cf.dwStyle = CFS_CANDIDATEPOS;
                // The constant offset compensates for Windows repositioning the IME candidate window
cf.ptCurrentPos.x = curPos.x - 6;
cf.ptCurrentPos.y = curPos.y - 15;
::ImmSetCandidateWindow(hIMC, &cf);
}
}
ImmReleaseContext(hwnd, hIMC);
}
}
//
// generate and post InputMethodEvent
//
void ViewContainer::SendInputMethodEvent(jstring text,
int cClause, int* rgClauseBoundary,
int cAttrBlock, int* rgAttrBoundary, BYTE *rgAttrValue,
int commitedTextLength, int caretPos, int visiblePos)
{
JNIEnv *env = GetEnv();
// assumption for array type casting
ASSERT(sizeof(int)==sizeof(jint));
ASSERT(sizeof(BYTE)==sizeof(jbyte));
    // clause information
jintArray clauseBoundary = NULL;
if (cClause && rgClauseBoundary) {
// convert clause boundary offset array to java array
clauseBoundary = env->NewIntArray(cClause+1);
if (clauseBoundary) {
env->SetIntArrayRegion(clauseBoundary, 0, cClause+1, (jint *)rgClauseBoundary);
CheckAndClearException(env);
}
}
// attribute information
jintArray attrBoundary = NULL;
jbyteArray attrValue = NULL;
if (cAttrBlock && rgAttrBoundary && rgAttrValue) {
// convert attribute boundary offset array to java array
attrBoundary = env->NewIntArray(cAttrBlock+1);
if (attrBoundary) {
env->SetIntArrayRegion(attrBoundary, 0, cAttrBlock+1, (jint *)rgAttrBoundary);
CheckAndClearException(env);
}
// convert attribute value byte array to java array
attrValue = env->NewByteArray(cAttrBlock);
if (attrValue) {
env->SetByteArrayRegion(attrValue, 0, cAttrBlock, (jbyte *)rgAttrValue);
CheckAndClearException(env);
}
}
env->CallBooleanMethod(GetView(), javaIDs.View.notifyInputMethod,
text, clauseBoundary, attrBoundary,
attrValue, commitedTextLength, caretPos, visiblePos);
CheckAndClearException(env);
if (clauseBoundary) {
env->DeleteLocalRef(clauseBoundary);
}
if (attrBoundary) {
env->DeleteLocalRef(attrBoundary);
}
if (attrValue) {
env->DeleteLocalRef(attrValue);
}
}
// Gets the candidate position
void ViewContainer::GetCandidatePos(LPPOINT curPos)
{
JNIEnv *env = GetEnv();
double* nativePos;
jdoubleArray pos = (jdoubleArray)env->CallObjectMethod(GetView(),
javaIDs.View.notifyInputMethodCandidatePosRequest,
0);
CheckAndClearException(env);
if (pos == NULL) {
return;
}
nativePos = env->GetDoubleArrayElements(pos, NULL);
if (nativePos) {
curPos->x = (int)nativePos[0];
curPos->y = (int)nativePos[1];
env->ReleaseDoubleArrayElements(pos, nativePos, 0);
}
}
namespace {
class AutoTouchInputHandle {
HTOUCHINPUT m_h;
private:
AutoTouchInputHandle(const AutoTouchInputHandle&);
AutoTouchInputHandle& operator=(const AutoTouchInputHandle&);
public:
explicit AutoTouchInputHandle(LPARAM lParam): m_h((HTOUCHINPUT)lParam) {
}
~AutoTouchInputHandle() {
if (m_h) {
::CloseTouchInputHandle(m_h);
}
}
operator HTOUCHINPUT() const {
return m_h;
}
};
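// Illustrative usage sketch for the RAII wrapper above (not part of the
// original source; the real call site is in HandleViewTouchEvent further down).
// The touch input handle is closed automatically when the wrapper leaves scope:
//
//     {
//         AutoTouchInputHandle inputInfo(lParam);
//         ::GetTouchInputInfo(inputInfo, count, buf, sizeof(TOUCHINPUT));
//     } // ::CloseTouchInputHandle() runs here via the destructor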
static BOOL debugTouch = false;
static const char * touchEventName(unsigned int dwFlags) {
if (dwFlags & TOUCHEVENTF_MOVE) {
return "MOVE";
}
if (dwFlags & TOUCHEVENTF_DOWN) {
return "PRESS";
}
if (dwFlags & TOUCHEVENTF_UP) {
return "RELEASE";
}
return "UNKOWN";
}
void NotifyTouchInput(
HWND hWnd, jobject view, jclass gestureSupportCls,
const TOUCHINPUT* ti, unsigned count)
{
JNIEnv *env = GetEnv();
// TBD: set to 'true' if source device is a touch screen
// and to 'false' if source device is a touch pad.
// So far assume source device on Windows is always a touch screen.
const bool isDirect = true;
jint modifiers = GetModifiers();
env->CallStaticVoidMethod(gestureSupportCls,
javaIDs.Gestures.notifyBeginTouchEventMID,
view, modifiers, jboolean(isDirect),
jint(count));
CheckAndClearException(env);
for (; count; --count, ++ti) {
jlong touchID = jlong(ti->dwID);
jint eventID = 0;
if (ti->dwFlags & TOUCHEVENTF_MOVE) {
eventID = com_sun_glass_events_TouchEvent_TOUCH_MOVED;
}
if (ti->dwFlags & TOUCHEVENTF_DOWN) {
eventID = com_sun_glass_events_TouchEvent_TOUCH_PRESSED;
}
if (ti->dwFlags & TOUCHEVENTF_UP) {
eventID = com_sun_glass_events_TouchEvent_TOUCH_RELEASED;
}
POINT screen;
POINT client;
client.x = screen.x = LONG(ti->x / 100); // TOUCHINPUT coordinates are in hundredths of a pixel
client.y = screen.y = LONG(ti->y / 100);
ScreenToClient(hWnd, &client);
// unmirror the x coordinate
LONG style = ::GetWindowLong(hWnd, GWL_EXSTYLE);
if (style & WS_EX_LAYOUTRTL) {
RECT rect = {0};
::GetClientRect(hWnd, &rect);
client.x = max(0, rect.right - rect.left) - client.x;
}
env->CallStaticVoidMethod(gestureSupportCls,
javaIDs.Gestures.notifyNextTouchEventMID,
view, eventID, touchID,
jint(client.x), jint(client.y),
jint(screen.x), jint(screen.y));
CheckAndClearException(env);
}
env->CallStaticVoidMethod(
gestureSupportCls, javaIDs.Gestures.notifyEndTouchEventMID, view);
CheckAndClearException(env);
}
void NotifyManipulationProcessor(
IManipulationProcessor& manipProc,
const TOUCHINPUT* ti, unsigned count)
{
for (; count; --count, ++ti) {
if (ti->dwFlags & TOUCHEVENTF_DOWN) {
manipProc.ProcessDownWithTime(ti->dwID, FLOAT(ti->x), FLOAT(ti->y), ti->dwTime);
}
if (ti->dwFlags & TOUCHEVENTF_MOVE) {
manipProc.ProcessMoveWithTime(ti->dwID, FLOAT(ti->x), FLOAT(ti->y), ti->dwTime);
}
if (ti->dwFlags & TOUCHEVENTF_UP) {
manipProc.ProcessUpWithTime(ti->dwID, FLOAT(ti->x), FLOAT(ti->y), ti->dwTime);
}
}
}
} // namespace
unsigned int ViewContainer::HandleViewTouchEvent(
HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
const UINT newCount = static_cast<UINT>(LOWORD(wParam));
TOUCHINPUT * tempTouchInputBuf = NULL;
unsigned int bufsz = newCount > 10 ? newCount : 10;
if (m_thisTouchInputBuf.size() < bufsz) {
m_thisTouchInputBuf.resize(bufsz);
}
if (newCount > 0) {
tempTouchInputBuf = new TOUCHINPUT[newCount];
do {
AutoTouchInputHandle inputInfo(lParam);
if (!::GetTouchInputInfo(inputInfo, newCount,
tempTouchInputBuf, sizeof(TOUCHINPUT))) {
delete [] tempTouchInputBuf;
return 0;
}
} while(0); // scope for 'inputInfo'
}
// Fix up the touch point stream. Some drivers seem to lose touch events,
// dropping PRESS, MOVE, UP, so we need to add them back in.
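// Worked example (illustrative, not from the original source): if the previous
// batch held {id=1 MOVE, id=2 MOVE} and the new batch only holds {id=1 MOVE},
// then id=2 vanished without an UP, so a synthetic UP for id=2 is emitted by
// the loop below. The opposite repair, a MOVE for an id that was never seen
// before being rewritten into a DOWN, happens in the second loop further down.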
unsigned int activeCount = 0;
unsigned int pointsCount = 0;
// check first for any "lost" touches
// these need to get added to the send list of points
for (unsigned int i = 0 ; i < m_lastTouchInputCount; i++) {
if (!(m_lastTouchInputBuf[i].dwFlags & TOUCHEVENTF_UP)) {
// looking for a dwID that is
// not present in the new batch
// was not UP in the old batch
bool found = false;
for (unsigned int j = 0; j < newCount; j++) {
if (m_lastTouchInputBuf[i].dwID == tempTouchInputBuf[j].dwID) {
found = true;
//break;
}
}
if (!found) {
// We have a old event but not a new one, so release it
m_thisTouchInputBuf[pointsCount].dwFlags = TOUCHEVENTF_UP;
m_thisTouchInputBuf[pointsCount].dwID = m_lastTouchInputBuf[i].dwID;
m_thisTouchInputBuf[pointsCount].x = m_lastTouchInputBuf[i].x;
m_thisTouchInputBuf[pointsCount].y = m_lastTouchInputBuf[i].y;
if (newCount > 0) {
//use the time of the first new element for our inserted event
m_thisTouchInputBuf[pointsCount].dwTime = tempTouchInputBuf[0].dwTime;
} else {
m_thisTouchInputBuf[pointsCount].dwTime = m_lastTouchInputBuf[i].dwTime;
}
m_thisTouchInputBuf[pointsCount].dwMask = m_lastTouchInputBuf[i].dwMask;
if (debugTouch) {
printf("TOUCH FIX UP %d, %s\n", m_lastTouchInputBuf[i].dwID, touchEventName(m_lastTouchInputBuf[i].dwFlags));
}
pointsCount++;
}
}
}
if (pointsCount + newCount > m_thisTouchInputBuf.size()) {
bufsz = pointsCount + newCount;
m_thisTouchInputBuf.resize(bufsz);
}
// now fold in the current touch points
for (unsigned int i = 0 ; i < newCount; i++) {
bool found = false;
for (unsigned int j = 0 ; j < m_lastTouchInputCount; j++) {
if (m_lastTouchInputBuf[j].dwID == tempTouchInputBuf[i].dwID) {
found = true;
break;
}
}
m_thisTouchInputBuf[pointsCount].dwFlags = tempTouchInputBuf[i].dwFlags;
m_thisTouchInputBuf[pointsCount].dwID = tempTouchInputBuf[i].dwID;
m_thisTouchInputBuf[pointsCount].dwTime = tempTouchInputBuf[i].dwTime;
m_thisTouchInputBuf[pointsCount].dwMask = tempTouchInputBuf[i].dwMask;
m_thisTouchInputBuf[pointsCount].x = tempTouchInputBuf[i].x;
m_thisTouchInputBuf[pointsCount].y = tempTouchInputBuf[i].y;
if (m_thisTouchInputBuf[pointsCount].dwFlags & TOUCHEVENTF_DOWN) {
pointsCount++;
activeCount ++;
} else if (m_thisTouchInputBuf[pointsCount].dwFlags & TOUCHEVENTF_MOVE) {
if (!found) {
if (debugTouch) {
printf("TOUCH FIX MV->DOWN %d, %s\n", m_thisTouchInputBuf[pointsCount].dwID, touchEventName(m_thisTouchInputBuf[pointsCount].dwFlags));
}
m_thisTouchInputBuf[pointsCount].dwFlags = TOUCHEVENTF_DOWN;
}
pointsCount++;
activeCount ++;
} else if (m_thisTouchInputBuf[pointsCount].dwFlags & TOUCHEVENTF_UP) {
if (found) {
pointsCount++;
} else {
// UP without a previous DOWN, ignore it
}
}
}
if (debugTouch) {
printf("Touch Sequence %d/%d win=%d view=%d %d,%d,%d\n",pointsCount,activeCount,
hWnd, GetView(),
m_lastTouchInputCount, newCount, pointsCount);
for (unsigned int i = 0 ; i < m_lastTouchInputCount; i++) {
printf(" old %d, %s\n", m_lastTouchInputBuf[i].dwID, touchEventName(m_lastTouchInputBuf[i].dwFlags));
}
for (unsigned int i = 0 ; i < newCount; i++) {
printf(" in %d, %s\n", tempTouchInputBuf[i].dwID, touchEventName(tempTouchInputBuf[i].dwFlags));
}
for (unsigned int i = 0 ; i < pointsCount; i++) {
printf(" this %d, %d\n", m_thisTouchInputBuf[i].dwID, m_thisTouchInputBuf[i].dwFlags & 0x07);
}
printf(" ---\n");
fflush(stdout);
}
if (pointsCount > 0) {
NotifyTouchInput(hWnd, GetView(), m_gestureSupportCls, &m_thisTouchInputBuf[0], pointsCount);
if (m_manipProc) {
NotifyManipulationProcessor(*m_manipProc, &m_thisTouchInputBuf[0], pointsCount);
}
std::swap(m_lastTouchInputBuf, m_thisTouchInputBuf);
m_lastTouchInputCount = pointsCount;
}
if (newCount > 0) {
delete [] tempTouchInputBuf;
}
return activeCount;
}
void ViewContainer::HandleViewTimerEvent(HWND hwnd, UINT_PTR timerID)
{
if (IDT_GLASS_INERTIAPROCESSOR == timerID) {
BOOL completed = FALSE;
HRESULT hr = m_inertiaProc->Process(&completed);
if (SUCCEEDED(hr) && completed) {
StopTouchInputInertia(hwnd);
JNIEnv *env = GetEnv();
env->CallStaticVoidMethod(m_gestureSupportCls,
javaIDs.Gestures.inertiaGestureFinishedMID, GetView());
CheckAndClearException(env);
}
}
}
void ViewContainer::NotifyGesturePerformed(HWND hWnd,
bool isDirect, bool isInertia,
FLOAT x, FLOAT y, FLOAT deltaX, FLOAT deltaY,
FLOAT scaleDelta, FLOAT expansionDelta, FLOAT rotationDelta,
FLOAT cumulativeDeltaX, FLOAT cumulativeDeltaY,
FLOAT cumulativeScale, FLOAT cumulativeExpansion,
FLOAT cumulativeRotation)
{
JNIEnv *env = GetEnv();
POINT screen;
screen.x = LONG((x + 0.5) / 100);
screen.y = LONG((y + 0.5) / 100);
POINT client;
client.x = screen.x;
client.y = screen.y;
ScreenToClient(hWnd, &client);
// unmirror the x coordinate
LONG style = ::GetWindowLong(hWnd, GWL_EXSTYLE);
if (style & WS_EX_LAYOUTRTL) {
RECT rect = {0};
::GetClientRect(hWnd, &rect);
client.x = max(0, rect.right - rect.left) - client.x;
}
jint modifiers = GetModifiers();
env->CallStaticVoidMethod(m_gestureSupportCls,
javaIDs.Gestures.gesturePerformedMID,
GetView(), modifiers,
jboolean(isDirect), jboolean(isInertia),
jint(client.x), jint(client.y),
jint(screen.x), jint(screen.y),
deltaX / 100, deltaY / 100,
cumulativeDeltaX / 100, cumulativeDeltaY / 100,
cumulativeScale, cumulativeExpansion / 100,
cumulativeRotation);
CheckAndClearException(env);
}
void ViewContainer::StartTouchInputInertia(HWND hwnd)
{
// TBD: check errors
//
// Collect initial inertia data
//
FLOAT vX, vY;
m_manipProc->GetVelocityX(&vX);
m_manipProc->GetVelocityY(&vY);
const FLOAT VELOCITY_THRESHOLD = 10.0f;
if (fabs(vX) < VELOCITY_THRESHOLD && fabs(vY) < VELOCITY_THRESHOLD) {
return;
}
// TBD: check errors
POINT origin;
GetCursorPos(&origin);
//
// Setup inertia.
//
m_inertiaProc->Reset();
m_inertiaProc->put_DesiredDeceleration(0.23f);
// Set initial origins.
m_inertiaProc->put_InitialOriginX(origin.x * 100.0f); // convert pixels to the hundredths-of-a-pixel units used by the touch stream
m_inertiaProc->put_InitialOriginY(origin.y * 100.0f);
// Set initial velocities.
m_inertiaProc->put_InitialVelocityX(vX);
m_inertiaProc->put_InitialVelocityY(vY);
// TBD: check errors
::SetTimer(hwnd, IDT_GLASS_INERTIAPROCESSOR, 16, NULL); // ~60 Hz tick for inertia processing
}
void ViewContainer::StopTouchInputInertia(HWND hwnd)
{
// TBD: check errors
::KillTimer(hwnd, IDT_GLASS_INERTIAPROCESSOR);
}
extern "C" {
JNIEXPORT void JNICALL Java_com_sun_glass_ui_win_WinGestureSupport__1initIDs(
JNIEnv *env, jclass cls)
{
javaIDs.Gestures.gesturePerformedMID =
env->GetStaticMethodID(cls, "gesturePerformed",
"(Lcom/sun/glass/ui/View;IZZIIIIFFFFFFF)V");
CheckAndClearException(env);
javaIDs.Gestures.inertiaGestureFinishedMID =
env->GetStaticMethodID(cls, "inertiaGestureFinished",
"(Lcom/sun/glass/ui/View;)V");
CheckAndClearException(env);
javaIDs.Gestures.notifyBeginTouchEventMID =
env->GetStaticMethodID(cls, "notifyBeginTouchEvent",
"(Lcom/sun/glass/ui/View;IZI)V");
CheckAndClearException(env);
javaIDs.Gestures.notifyNextTouchEventMID =
env->GetStaticMethodID(cls, "notifyNextTouchEvent",
"(Lcom/sun/glass/ui/View;IJIIII)V");
CheckAndClearException(env);
javaIDs.Gestures.notifyEndTouchEventMID =
env->GetStaticMethodID(cls, "notifyEndTouchEvent",
"(Lcom/sun/glass/ui/View;)V");
CheckAndClearException(env);
}
} // extern "C"
| gpl-2.0 |
NicolasWinckler/CbmRoot | littrack/cbm/reco/CbmLitFitTracksParallel.h | 1654 | /**
* \file CbmLitFitTracksParallel.h
* \author Andrey Lebedev <[email protected]>
* \date 2013
* \brief Task to fit TRD and MUCH tracks using parallel track fit algorithms.
*/
#ifndef CBMLITFITTRACKSPARALLEL_H_
#define CBMLITFITTRACKSPARALLEL_H_
#include "FairTask.h"
#include "TStopwatch.h"
class CbmLitFitTracksParallel : public FairTask {
public:
/**
* \brief Constructor.
*/
CbmLitFitTracksParallel();
/**
* \brief Destructor.
*/
virtual ~CbmLitFitTracksParallel();
/**
* \brief Derived from FairTask.
*/
virtual InitStatus Init();
/**
* \brief Derived from FairTask.
*/
virtual void Exec(
Option_t* opt);
/**
* \brief Derived from FairTask.
*/
virtual void Finish();
private:
/**
* \brief Read data branches.
*/
void ReadDataBranches();
/**
* \brief Performs track fit.
*/
void DoFit();
/**
* \brief Print stopwatch statistics.
*/
void PrintStopwatchStatistics();
// Data branches
TClonesArray* fGlobalTracks;
TClonesArray* fStsTracks;
TClonesArray* fTrdTracks;
TClonesArray* fMuchTracks;
TClonesArray* fTrdHits;
TClonesArray* fMuchPixelHits;
TClonesArray* fMuchStrawHits;
// stopwatches
TStopwatch fFitWatch; // stopwatch for track fit without IO
TStopwatch fFitWithIOWatch; // stopwatch for track fit including IO
CbmLitFitTracksParallel(const CbmLitFitTracksParallel&);
CbmLitFitTracksParallel& operator=(const CbmLitFitTracksParallel&);
ClassDef(CbmLitFitTracksParallel, 1);
};
#endif /* CBMLITFITTRACKSPARALLEL_H_ */
| gpl-2.0 |
brandonshults01/belle-sip | src/transports/tls_channel_polarssl.c | 20494 | /*
belle-sip - SIP (RFC3261) library.
Copyright (C) 2013 Belledonne Communications SARL
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "belle_sip_internal.h"
#include "stream_channel.h"
#ifdef HAVE_POLARSSL
/* Uncomment to get very verbose polarssl logs*/
//#define ENABLE_POLARSSL_LOGS
#include <polarssl/ssl.h>
#include <polarssl/version.h>
#include <polarssl/error.h>
#if POLARSSL_VERSION_NUMBER >= 0x01030000
#include <polarssl/x509.h>
#endif
#endif
struct belle_sip_certificates_chain {
belle_sip_object_t objet;
#ifdef HAVE_POLARSSL
#if POLARSSL_VERSION_NUMBER < 0x01030000
x509_cert cert;
#else
x509_crt cert;
#endif
#endif
};
struct belle_sip_signing_key {
belle_sip_object_t objet;
#ifdef HAVE_POLARSSL
#if POLARSSL_VERSION_NUMBER < 0x01030000
rsa_context key;
#else
pk_context key;
#endif
#endif
};
#ifdef HAVE_POLARSSL
/*************tls********/
static int tls_process_data(belle_sip_channel_t *obj,unsigned int revents);
struct belle_sip_tls_channel{
belle_sip_stream_channel_t base;
ssl_context sslctx;
#if POLARSSL_VERSION_NUMBER < 0x01030000
x509_cert root_ca;
#else
x509_crt root_ca;
#endif
struct sockaddr_storage ss;
socklen_t socklen;
int socket_connected;
char *cur_debug_msg;
belle_sip_certificates_chain_t* client_cert_chain;
belle_sip_signing_key_t* client_cert_key;
belle_tls_verify_policy_t *verify_ctx;
};
static void tls_channel_close(belle_sip_tls_channel_t *obj){
belle_sip_socket_t sock = belle_sip_source_get_socket((belle_sip_source_t*)obj);
if (sock!=-1 && belle_sip_channel_get_state((belle_sip_channel_t*)obj)!=BELLE_SIP_CHANNEL_ERROR)
ssl_close_notify(&obj->sslctx);
stream_channel_close((belle_sip_stream_channel_t*)obj);
ssl_session_reset(&obj->sslctx);
obj->socket_connected=0;
}
static void tls_channel_uninit(belle_sip_tls_channel_t *obj){
belle_sip_socket_t sock = belle_sip_source_get_socket((belle_sip_source_t*)obj);
if (sock!=(belle_sip_socket_t)-1)
tls_channel_close(obj);
ssl_free(&obj->sslctx);
#if POLARSSL_VERSION_NUMBER < 0x01030000
x509_free(&obj->root_ca);
#else
x509_crt_free(&obj->root_ca);
#endif
if (obj->cur_debug_msg)
belle_sip_free(obj->cur_debug_msg);
belle_sip_object_unref(obj->verify_ctx);
if (obj->client_cert_chain) belle_sip_object_unref(obj->client_cert_chain);
if (obj->client_cert_key) belle_sip_object_unref(obj->client_cert_key);
}
static int tls_channel_send(belle_sip_channel_t *obj, const void *buf, size_t buflen){
belle_sip_tls_channel_t* channel = (belle_sip_tls_channel_t*)obj;
int err = ssl_write(&channel->sslctx,buf,buflen);
if (err<0){
char tmp[256]={0};
if (err==POLARSSL_ERR_NET_WANT_WRITE) return -BELLESIP_EWOULDBLOCK;
error_strerror(err,tmp,sizeof(tmp));
belle_sip_error("Channel [%p]: ssl_write() error [%i]: %s",obj,err,tmp);
}
return err;
}
static int tls_channel_recv(belle_sip_channel_t *obj, void *buf, size_t buflen){
belle_sip_tls_channel_t* channel = (belle_sip_tls_channel_t*)obj;
int err = ssl_read(&channel->sslctx,buf,buflen);
if (err==POLARSSL_ERR_SSL_PEER_CLOSE_NOTIFY) return 0;
if (err<0){
char tmp[256]={0};
if (err==POLARSSL_ERR_NET_WANT_READ) return -BELLESIP_EWOULDBLOCK;
error_strerror(err,tmp,sizeof(tmp));
belle_sip_error("Channel [%p]: ssl_read() error [%i]: %s",obj, err, tmp);
}
return err;
}
static int tls_channel_connect(belle_sip_channel_t *obj, const struct addrinfo *ai){
int err= stream_channel_connect((belle_sip_stream_channel_t*)obj,ai);
if (err==0){
belle_sip_source_set_notify((belle_sip_source_t *)obj, (belle_sip_source_func_t)tls_process_data);
return 0;
}
return -1;
}
BELLE_SIP_DECLARE_CUSTOM_VPTR_BEGIN(belle_sip_tls_channel_t,belle_sip_stream_channel_t)
BELLE_SIP_DECLARE_CUSTOM_VPTR_END
BELLE_SIP_DECLARE_NO_IMPLEMENTED_INTERFACES(belle_sip_tls_channel_t);
BELLE_SIP_INSTANCIATE_CUSTOM_VPTR_BEGIN(belle_sip_tls_channel_t)
{
{
{
BELLE_SIP_VPTR_INIT(belle_sip_tls_channel_t,belle_sip_stream_channel_t,FALSE),
(belle_sip_object_destroy_t)tls_channel_uninit,
NULL,
NULL
},
"TLS",
1, /*is_reliable*/
tls_channel_connect,
tls_channel_send,
tls_channel_recv,
(void (*)(belle_sip_channel_t*))tls_channel_close
}
}
BELLE_SIP_INSTANCIATE_CUSTOM_VPTR_END
static int tls_channel_handshake(belle_sip_tls_channel_t *channel) {
int ret;
while( channel->sslctx.state != SSL_HANDSHAKE_OVER ) {
if ((ret = ssl_handshake_step( &channel->sslctx ))) {
break;
}
if (channel->sslctx.state == SSL_CLIENT_CERTIFICATE && channel->sslctx.client_auth >0) {
BELLE_SIP_INVOKE_LISTENERS_ARG1_ARG2( channel->base.base.listeners
,belle_sip_channel_listener_t
,on_auth_requested
,&channel->base.base
,NULL/*not set yet*/);
if (channel->client_cert_chain && channel->client_cert_key) {
#if POLARSSL_VERSION_NUMBER >= 0x01030000
int err;
#endif
char tmp[512]={0};
#if POLARSSL_VERSION_NUMBER < 0x01030000
x509parse_cert_info(tmp,sizeof(tmp)-1,"",&channel->client_cert_chain->cert);
#else
x509_crt_info(tmp,sizeof(tmp)-1,"",&channel->client_cert_chain->cert);
#endif
belle_sip_message("Channel [%p] found client certificate:\n%s",channel,tmp);
#if POLARSSL_VERSION_NUMBER < 0x01030000
ssl_set_own_cert(&channel->sslctx,&channel->client_cert_chain->cert,&channel->client_cert_key->key);
#else
/* allows key types other than RSA */
if ((err=ssl_set_own_cert(&channel->sslctx,&channel->client_cert_chain->cert,&channel->client_cert_key->key))) {
error_strerror(err,tmp,sizeof(tmp)-1);
belle_sip_error("Channel [%p] cannot ssl_set_own_cert [%s]",channel,tmp);
}
/* update own cert, see ssl_handshake() from polarssl */
channel->sslctx.handshake->key_cert = channel->sslctx.key_cert;
#endif
}
}
}
return ret;
}
static int tls_process_handshake(belle_sip_channel_t *obj){
belle_sip_tls_channel_t* channel=(belle_sip_tls_channel_t*)obj;
int err=tls_channel_handshake(channel);
if (err==0){
belle_sip_message("Channel [%p]: SSL handshake finished.",obj);
belle_sip_source_set_timeout((belle_sip_source_t*)obj,-1);
belle_sip_channel_set_ready(obj,(struct sockaddr*)&channel->ss,channel->socklen);
}else if (err==POLARSSL_ERR_NET_WANT_READ || err==POLARSSL_ERR_NET_WANT_WRITE){
belle_sip_message("Channel [%p]: SSL handshake in progress...",obj);
}else{
char tmp[128];
error_strerror(err,tmp,sizeof(tmp));
belle_sip_error("Channel [%p]: SSL handshake failed : %s",obj,tmp);
return -1;
}
return 0;
}
static int tls_process_data(belle_sip_channel_t *obj,unsigned int revents){
belle_sip_tls_channel_t* channel=(belle_sip_tls_channel_t*)obj;
if (obj->state == BELLE_SIP_CHANNEL_CONNECTING ) {
if (!channel->socket_connected) {
channel->socklen=sizeof(channel->ss);
if (finalize_stream_connection((belle_sip_stream_channel_t*)obj,revents,(struct sockaddr*)&channel->ss,&channel->socklen)) {
goto process_error;
}
belle_sip_message("Channel [%p]: Connected at TCP level, now doing TLS handshake",obj);
channel->socket_connected=1;
belle_sip_source_set_events((belle_sip_source_t*)channel,BELLE_SIP_EVENT_READ|BELLE_SIP_EVENT_ERROR);
belle_sip_source_set_timeout((belle_sip_source_t*)obj,belle_sip_stack_get_transport_timeout(obj->stack));
if (tls_process_handshake(obj)==-1) goto process_error;
}else{
if (revents & BELLE_SIP_EVENT_READ){
if (tls_process_handshake(obj)==-1) goto process_error;
}else if (revents==BELLE_SIP_EVENT_TIMEOUT){
belle_sip_error("channel [%p]: SSL handshake took too much time.",obj);
goto process_error;
}else{
belle_sip_warning("channeEHHCXCCCl [%p]: unexpected event [%i] during TLS handshake.",obj,revents);
}
}
} else if ( obj->state == BELLE_SIP_CHANNEL_READY) {
return belle_sip_channel_process_data(obj,revents);
} else {
belle_sip_warning("Unexpected event [%i], for channel [%p]",revents,channel);
return BELLE_SIP_STOP;
}
return BELLE_SIP_CONTINUE;
process_error:
belle_sip_error("Cannot connect to [%s://%s:%i]",belle_sip_channel_get_transport_name(obj),obj->peer_name,obj->peer_port);
channel_set_state(obj,BELLE_SIP_CHANNEL_ERROR);
return BELLE_SIP_STOP;
}
static int polarssl_read(void * ctx, unsigned char *buf, size_t len ){
belle_sip_stream_channel_t *super=(belle_sip_stream_channel_t *)ctx;
int ret = stream_channel_recv(super,buf,len);
if (ret<0){
ret=-ret;
if (ret==BELLESIP_EWOULDBLOCK || ret==BELLESIP_EINPROGRESS || ret == EINTR )
return POLARSSL_ERR_NET_WANT_READ;
return POLARSSL_ERR_NET_CONN_RESET;
}
return ret;
}
static int polarssl_write(void * ctx, const unsigned char *buf, size_t len ){
belle_sip_stream_channel_t *super=(belle_sip_stream_channel_t *)ctx;
int ret = stream_channel_send(super, buf, len);
if (ret<0){
ret=-ret;
if (ret==BELLESIP_EWOULDBLOCK || ret==BELLESIP_EINPROGRESS || ret == EINTR )
return POLARSSL_ERR_NET_WANT_WRITE;
return POLARSSL_ERR_NET_CONN_RESET;
}
return ret;
}
static int random_generator(void *ctx, unsigned char *ptr, size_t size){
belle_sip_random_bytes(ptr, size);
return 0;
}
static const char *polarssl_certflags_to_string(char *buf, size_t size, int flags){
int i=0;
memset(buf,0,size);
size--;
if (i<size && (flags & BADCERT_EXPIRED))
i+=snprintf(buf+i,size-i,"expired ");
if (i<size && (flags & BADCERT_REVOKED))
i+=snprintf(buf+i,size-i,"revoked ");
if (i<size && (flags & BADCERT_CN_MISMATCH))
i+=snprintf(buf+i,size-i,"CN-mismatch ");
if (i<size && (flags & BADCERT_NOT_TRUSTED))
i+=snprintf(buf+i,size-i,"not-trusted ");
if (i<size && (flags & BADCERT_MISSING))
i+=snprintf(buf+i,size-i,"missing ");
if (i<size && (flags & BADCRL_NOT_TRUSTED))
i+=snprintf(buf+i,size-i,"crl-not-trusted ");
if (i<size && (flags & BADCRL_EXPIRED))
i+=snprintf(buf+i,size-i,"crl-not-expired ");
return buf;
}
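/* Example (derived directly from the code above): for
 * flags == (BADCERT_EXPIRED | BADCERT_CN_MISMATCH) the buffer ends up
 * holding "expired CN-mismatch ". */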
#if POLARSSL_VERSION_NUMBER < 0x01030000
static int belle_sip_ssl_verify(void *data , x509_cert *cert , int depth, int *flags){
#else
static int belle_sip_ssl_verify(void *data , x509_crt *cert , int depth, int *flags){
#endif
belle_tls_verify_policy_t *verify_ctx=(belle_tls_verify_policy_t*)data;
char tmp[512];
char flags_str[128];
#if POLARSSL_VERSION_NUMBER < 0x01030000
x509parse_cert_info(tmp,sizeof(tmp),"",cert);
#else
x509_crt_info(tmp,sizeof(tmp),"",cert);
#endif
belle_sip_message("Found certificate depth=[%i], flags=[%s]:\n%s",
depth,polarssl_certflags_to_string(flags_str,sizeof(flags_str),*flags),tmp);
if (verify_ctx->exception_flags==BELLE_TLS_VERIFY_ANY_REASON){
*flags=0;
}else if (verify_ctx->exception_flags & BELLE_TLS_VERIFY_CN_MISMATCH){
*flags&=~BADCERT_CN_MISMATCH;
}
return 0;
}
static int belle_sip_tls_channel_load_root_ca(belle_sip_tls_channel_t *obj, const char *path){
struct stat statbuf;
if (stat(path,&statbuf)==0){
if (statbuf.st_mode & S_IFDIR){
#if POLARSSL_VERSION_NUMBER < 0x01030000
if (x509parse_crtpath(&obj->root_ca,path)<0){
#else
if (x509_crt_parse_path(&obj->root_ca,path)<0){
#endif
belle_sip_error("Failed to load root ca from directory %s",path);
return -1;
}
}else{
#if POLARSSL_VERSION_NUMBER < 0x01030000
if (x509parse_crtfile(&obj->root_ca,path)<0){
#else
if (x509_crt_parse_file(&obj->root_ca,path)<0){
#endif
belle_sip_error("Failed to load root ca from file %s",path);
return -1;
}
}
return 0;
}
belle_sip_error("Could not load root ca from %s: %s",path,strerror(errno));
return -1;
}
#ifdef ENABLE_POLARSSL_LOGS
/*
* polarssl emits many log messages, some with a trailing newline, some without.
* We need to concatenate the fragments without a newline until a newline is found.
*/
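/* Example (illustrative): if polarssl emits "ssl state: " and then "2\n" as two
 * separate calls, the first fragment is buffered in cur_debug_msg and the second
 * flushes a single "ssl: ssl state: 2" line through belle_sip_message(). */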
static void ssl_debug_to_belle_sip(void *context, int level, const char *str){
belle_sip_tls_channel_t *chan=(belle_sip_tls_channel_t*)context;
int len=strlen(str);
if (len>0 && (str[len-1]=='\n' || str[len-1]=='\r')){
/*eliminate the newline*/
char *tmp=belle_sip_strdup(str);
tmp[len-1]=0;
if (chan->cur_debug_msg){
belle_sip_message("ssl: %s%s",chan->cur_debug_msg,tmp);
belle_sip_free(chan->cur_debug_msg);
chan->cur_debug_msg=NULL;
}else belle_sip_message("ssl: %s",tmp);
belle_sip_free(tmp);
}else{
if (chan->cur_debug_msg){
char *tmp=belle_sip_strdup_printf("%s%s",chan->cur_debug_msg,str);
belle_sip_free(chan->cur_debug_msg);
chan->cur_debug_msg=tmp;
}else chan->cur_debug_msg=belle_sip_strdup(str);
}
}
#endif
belle_sip_channel_t * belle_sip_channel_new_tls(belle_sip_stack_t *stack, belle_tls_verify_policy_t *verify_ctx,const char *bindip, int localport, const char *peer_cname, const char *dest, int port){
belle_sip_tls_channel_t *obj=belle_sip_object_new(belle_sip_tls_channel_t);
belle_sip_stream_channel_t* super=(belle_sip_stream_channel_t*)obj;
belle_sip_stream_channel_init_client(super
,stack
,bindip,localport,peer_cname,dest,port);
ssl_init(&obj->sslctx);
#ifdef ENABLE_POLARSSL_LOGS
ssl_set_dbg(&obj->sslctx,ssl_debug_to_belle_sip,obj);
#endif
ssl_set_endpoint(&obj->sslctx,SSL_IS_CLIENT);
ssl_set_authmode(&obj->sslctx,SSL_VERIFY_REQUIRED);
ssl_set_bio(&obj->sslctx,polarssl_read,obj,polarssl_write,obj);
if (verify_ctx->root_ca && belle_sip_tls_channel_load_root_ca(obj,verify_ctx->root_ca)==0){
ssl_set_ca_chain(&obj->sslctx,&obj->root_ca,NULL,super->base.peer_cname ? super->base.peer_cname : super->base.peer_name );
}
ssl_set_rng(&obj->sslctx,random_generator,NULL);
ssl_set_verify(&obj->sslctx,belle_sip_ssl_verify,verify_ctx);
obj->verify_ctx=(belle_tls_verify_policy_t*)belle_sip_object_ref(verify_ctx);
return (belle_sip_channel_t*)obj;
}
void belle_sip_tls_channel_set_client_certificates_chain(belle_sip_tls_channel_t *channel, belle_sip_certificates_chain_t* cert_chain) {
SET_OBJECT_PROPERTY(channel,client_cert_chain,cert_chain);
}
void belle_sip_tls_channel_set_client_certificate_key(belle_sip_tls_channel_t *channel, belle_sip_signing_key_t* key){
SET_OBJECT_PROPERTY(channel,client_cert_key,key);
}
#else /*HAVE_POLARSSL*/
void belle_sip_tls_channel_set_client_certificates_chain(belle_sip_tls_channel_t *obj, belle_sip_certificates_chain_t* cert_chain) {
belle_sip_error("belle_sip_channel_set_client_certificate_chain requires TLS");
}
void belle_sip_tls_channel_set_client_certificate_key(belle_sip_tls_channel_t *obj, belle_sip_signing_key_t* key) {
belle_sip_error("belle_sip_channel_set_client_certificate_key requires TLS");
}
#endif
/**************************** belle_sip_certificates_chain_t **/
static int belle_sip_certificate_fill(belle_sip_certificates_chain_t* certificate,const char* buff, size_t size,belle_sip_certificate_raw_format_t format) {
#ifdef HAVE_POLARSSL
int err;
#if POLARSSL_VERSION_NUMBER < 0x01030000
if ((err=x509parse_crt(&certificate->cert,(const unsigned char *)buff,size)) <0) {
#else
if ((err=x509_crt_parse(&certificate->cert,(const unsigned char *)buff,size)) <0) {
#endif
char tmp[128];
error_strerror(err,tmp,sizeof(tmp));
belle_sip_error("cannot parse x509 cert because [%s]",tmp);
return -1;
}
return 0;
#else /*HAVE_POLARSSL*/
return -1;
#endif
}
static int belle_sip_certificate_fill_from_file(belle_sip_certificates_chain_t* certificate,const char* path,belle_sip_certificate_raw_format_t format) {
#ifdef HAVE_POLARSSL
int err;
#if POLARSSL_VERSION_NUMBER < 0x01030000
if ((err=x509parse_crtfile(&certificate->cert, path)) <0) {
#else
if ((err=x509_crt_parse_file(&certificate->cert, path)) <0) {
#endif
char tmp[128];
error_strerror(err,tmp,sizeof(tmp));
belle_sip_error("cannot parse x509 cert because [%s]",tmp);
return -1;
}
return 0;
#else /*HAVE_POLARSSL*/
return -1;
#endif
}
/*belle_sip_certificate */
belle_sip_certificates_chain_t* belle_sip_certificates_chain_parse(const char* buff, size_t size,belle_sip_certificate_raw_format_t format) {
belle_sip_certificates_chain_t* certificate = belle_sip_object_new(belle_sip_certificates_chain_t);
if (belle_sip_certificate_fill(certificate,buff, size,format)) {
belle_sip_object_unref(certificate);
certificate=NULL;
}
return certificate;
}
belle_sip_certificates_chain_t* belle_sip_certificates_chain_parse_file(const char* path, belle_sip_certificate_raw_format_t format) {
belle_sip_certificates_chain_t* certificate = belle_sip_object_new(belle_sip_certificates_chain_t);
if (belle_sip_certificate_fill_from_file(certificate, path, format)) {
belle_sip_object_unref(certificate);
certificate=NULL;
}
return certificate;
}
static void belle_sip_certificates_chain_destroy(belle_sip_certificates_chain_t *certificate){
#ifdef HAVE_POLARSSL
#if POLARSSL_VERSION_NUMBER < 0x01030000
x509_free(&certificate->cert);
#else
x509_crt_free(&certificate->cert);
#endif
#endif
}
static void belle_sip_certificates_chain_clone(belle_sip_certificates_chain_t *certificate, const belle_sip_certificates_chain_t *orig){
belle_sip_error("belle_sip_certificate_clone not supported");
}
BELLE_SIP_DECLARE_NO_IMPLEMENTED_INTERFACES(belle_sip_certificates_chain_t);
BELLE_SIP_INSTANCIATE_VPTR(belle_sip_certificates_chain_t,belle_sip_object_t,belle_sip_certificates_chain_destroy,belle_sip_certificates_chain_clone,NULL,TRUE);
belle_sip_signing_key_t* belle_sip_signing_key_parse(const char* buff, size_t size,const char* passwd) {
#ifdef HAVE_POLARSSL
belle_sip_signing_key_t* signing_key = belle_sip_object_new(belle_sip_signing_key_t);
int err;
#if POLARSSL_VERSION_NUMBER < 0x01030000
if ((err=x509parse_key(&signing_key->key,(const unsigned char *)buff,size,(const unsigned char*)passwd,passwd?strlen(passwd):0)) <0) {
#else
pk_init(&signing_key->key);
/* for API v1.3 or greater, also parses key types other than RSA */
err=pk_parse_key(&signing_key->key,(const unsigned char *)buff,size,(const unsigned char*)passwd,passwd?strlen(passwd):0);
/* make sure the key type is RSA to be consistent with API v1.2 */
if(err==0 && !pk_can_do(&signing_key->key,POLARSSL_PK_RSA))
err=POLARSSL_ERR_PK_TYPE_MISMATCH;
if (err<0) {
#endif
char tmp[128];
error_strerror(err,tmp,sizeof(tmp));
belle_sip_error("cannot parse public key because [%s]",tmp);
#if POLARSSL_VERSION_NUMBER >= 0x01030000
pk_free(&signing_key->key);
#endif
belle_sip_object_unref(signing_key);
return NULL;
}
return signing_key;
#else /*HAVE_POLARSSL*/
return NULL;
#endif
}
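/* Hypothetical usage sketch (buffer and variable names are illustrative, not
 * from this file):
 *
 *   belle_sip_signing_key_t *key =
 *       belle_sip_signing_key_parse(pem_buf, pem_len, passwd);
 *   if (key != NULL) {
 *       belle_sip_tls_channel_set_client_certificate_key(tls_channel, key);
 *   }
 */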
belle_sip_signing_key_t* belle_sip_signing_key_parse_file(const char* path,const char* passwd) {
#ifdef HAVE_POLARSSL
belle_sip_signing_key_t* signing_key = belle_sip_object_new(belle_sip_signing_key_t);
int err;
#if POLARSSL_VERSION_NUMBER < 0x01030000
if ((err=x509parse_keyfile(&signing_key->key,path, passwd)) <0) {
#else
pk_init(&signing_key->key);
/* for API v1.3 or greater, also parses key types other than RSA */
err=pk_parse_keyfile(&signing_key->key,path, passwd);
/* make sure the key type is RSA to be consistent with API v1.2 */
if(err==0 && !pk_can_do(&signing_key->key,POLARSSL_PK_RSA))
err=POLARSSL_ERR_PK_TYPE_MISMATCH;
if (err<0) {
#endif
char tmp[128];
error_strerror(err,tmp,sizeof(tmp));
belle_sip_error("cannot parse public key because [%s]",tmp);
#if POLARSSL_VERSION_NUMBER >= 0x01030000
pk_free(&signing_key->key);
#endif
belle_sip_object_unref(signing_key);
return NULL;
}
return signing_key;
#else /*HAVE_POLARSSL*/
return NULL;
#endif
}
static void belle_sip_signing_key_destroy(belle_sip_signing_key_t *signing_key){
#ifdef HAVE_POLARSSL
#if POLARSSL_VERSION_NUMBER < 0x01030000
rsa_free(&signing_key->key);
#else
pk_free(&signing_key->key);
#endif
#endif
}
static void belle_sip_signing_key_clone(belle_sip_signing_key_t *signing_key, const belle_sip_signing_key_t *orig){
belle_sip_error("belle_sip_signing_key_clone not supported");
}
BELLE_SIP_DECLARE_NO_IMPLEMENTED_INTERFACES(belle_sip_signing_key_t);
BELLE_SIP_INSTANCIATE_VPTR(belle_sip_signing_key_t,belle_sip_object_t,belle_sip_signing_key_destroy,belle_sip_signing_key_clone,NULL,TRUE);
| gpl-2.0 |
renebentes/joomla-3.x | administrator/components/com_youtubegallery/sql/updates/mysql/3.6.0.sql | 70 | ALTER TABLE #__youtubegallery_videos ADD COLUMN rawdata text NOT NULL; | gpl-2.0 |
cangsalak/Yii2-CMS | frontend/config/main.php | 1425 | <?php
$params = array_merge(
require(__DIR__ . '/../../common/config/params.php'),
require(__DIR__ . '/../../common/config/params-local.php'),
require(__DIR__ . '/params.php'),
require(__DIR__ . '/params-local.php')
);
return [
'id' => 'app-frontend',
'basePath' => dirname(__DIR__),
'bootstrap' => ['log'],
'controllerNamespace' => 'frontend\controllers',
'components' => [
'user' => [
'identityClass' => 'common\models\User',
'enableAutoLogin' => true,
],
'log' => [
'traceLevel' => YII_DEBUG ? 3 : 0,
'targets' => [
[
'class' => 'yii\log\FileTarget',
'levels' => ['error', 'warning'],
],
],
],
'errorHandler' => [
'errorAction' => 'site/error',
],
'i18n' => [
'translations' => [
'app*' => [
'class' => 'yii\i18n\PhpMessageSource',
'basePath' => '@frontend/messages',
'sourceLanguage' => 'en',
'fileMap' => [
'app' => 'app.php',
'app/tag' => 'tag.php',
'app/poll' => 'poll.php',
'app/user' => 'user.php',
'app/form' => 'form.php',
'app/content' => 'content.php',
],
],
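// Illustrative note (not part of the original config): with the fileMap above,
// Yii::t('app/tag', 'Tag') is resolved from @frontend/messages/<language>/tag.php.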
],
],
],
'params' => $params,
];
| gpl-2.0 |
Augustus061193/OpenCollegeGraph | d2rq/doc/javadoc/allclasses-noframe.html | 25391 | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_33) on Fri Jun 22 11:01:57 IST 2012 -->
<TITLE>
All Classes (D2RQ)
</TITLE>
<META NAME="date" CONTENT="2012-06-22">
<LINK REL ="stylesheet" TYPE="text/css" HREF="stylesheet.css" TITLE="Style">
</HEAD>
<BODY BGCOLOR="white">
<FONT size="+1" CLASS="FrameHeadingFont">
<B>All Classes</B></FONT>
<BR>
<TABLE BORDER="0" WIDTH="100%" SUMMARY="">
<TR>
<TD NOWRAP><FONT CLASS="FrameItemFont"><A HREF="de/fuberlin/wiwiss/d2rq/expr/Add.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Add</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/AliasMap.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">AliasMap</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/AliasMap.Alias.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">AliasMap.Alias</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/Attribute.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">Attribute</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/AttributeExpr.html" title="class in de.fuberlin.wiwiss.d2rq.expr">AttributeExpr</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/optimizer/expr/AttributeExprEx.html" title="class in de.fuberlin.wiwiss.d2rq.optimizer.expr">AttributeExprEx</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/AutoReloadableDataset.html" title="class in de.fuberlin.wiwiss.d2rq.server">AutoReloadableDataset</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/BeanCounter.html" title="class in de.fuberlin.wiwiss.d2rq.sql">BeanCounter</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/BinaryOperator.html" title="class in de.fuberlin.wiwiss.d2rq.expr">BinaryOperator</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/BindingMaker.html" title="class in de.fuberlin.wiwiss.d2rq.engine">BindingMaker</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/values/BlankNodeID.html" title="class in de.fuberlin.wiwiss.d2rq.values">BlankNodeID</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/BooleanToIntegerCaseExpression.html" title="class in de.fuberlin.wiwiss.d2rq.expr">BooleanToIntegerCaseExpression</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/jena/CachingGraphD2RQ.html" title="class in de.fuberlin.wiwiss.d2rq.jena">CachingGraphD2RQ</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/ClassMap.html" title="class in de.fuberlin.wiwiss.d2rq.map">ClassMap</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/ClassMapLister.html" title="class in de.fuberlin.wiwiss.d2rq">ClassMapLister</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/ClassMapServlet.html" title="class in de.fuberlin.wiwiss.d2rq.server">ClassMapServlet</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/values/Column.html" title="class in de.fuberlin.wiwiss.d2rq.values">Column</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/ColumnRenamer.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">ColumnRenamer</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/ColumnRenamerMap.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">ColumnRenamerMap</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/CommandLineTool.html" title="class in de.fuberlin.wiwiss.d2rq">CommandLineTool</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/CompatibleRelationGroup.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">CompatibleRelationGroup</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/Concatenation.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Concatenation</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/ConfigLoader.html" title="class in de.fuberlin.wiwiss.d2rq.server">ConfigLoader</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/Configuration.html" title="class in de.fuberlin.wiwiss.d2rq.map">Configuration</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/Conjunction.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Conjunction</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/ConnectedDB.html" title="class in de.fuberlin.wiwiss.d2rq.sql">ConnectedDB</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/Constant.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Constant</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/optimizer/expr/ConstantEx.html" title="class in de.fuberlin.wiwiss.d2rq.optimizer.expr">ConstantEx</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/values/ConstantValueMaker.html" title="class in de.fuberlin.wiwiss.d2rq.values">ConstantValueMaker</A>
<BR>
<A HREF="de/fuberlin/wiwiss/pubby/negotiation/ContentTypeNegotiator.html" title="class in de.fuberlin.wiwiss.pubby.negotiation">ContentTypeNegotiator</A>
<BR>
<A HREF="d2rq/d2r_query.html" title="class in d2rq">d2r_query</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/vocab/D2RConfig.html" title="class in de.fuberlin.wiwiss.d2rq.vocab">D2RConfig</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/vocab/D2RQ.html" title="class in de.fuberlin.wiwiss.d2rq.vocab">D2RQ</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/assembler/D2RQAssembler.html" title="class in de.fuberlin.wiwiss.d2rq.assembler">D2RQAssembler</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/D2RQDatasetDesc.html" title="class in de.fuberlin.wiwiss.d2rq.server">D2RQDatasetDesc</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/D2RQException.html" title="class in de.fuberlin.wiwiss.d2rq">D2RQException</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/jena/D2RQQueryHandler.html" title="class in de.fuberlin.wiwiss.d2rq.jena">D2RQQueryHandler</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/D2RServer.html" title="class in de.fuberlin.wiwiss.d2rq.server">D2RServer</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/Database.html" title="class in de.fuberlin.wiwiss.d2rq.map">Database</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/dbschema/DatabaseSchemaInspector.html" title="class in de.fuberlin.wiwiss.d2rq.dbschema">DatabaseSchemaInspector</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/DatasetDescriptionServlet.html" title="class in de.fuberlin.wiwiss.d2rq.server">DatasetDescriptionServlet</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/DataType.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">DataType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/DataType.GenericType.html" title="enum in de.fuberlin.wiwiss.d2rq.sql.types">DataType.GenericType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/nodes/DetermineNodeType.html" title="class in de.fuberlin.wiwiss.d2rq.nodes">DetermineNodeType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/DirectoryServlet.html" title="class in de.fuberlin.wiwiss.d2rq.server">DirectoryServlet</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/Disjunction.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Disjunction</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/Divide.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Divide</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/download/DownloadContentQuery.html" title="class in de.fuberlin.wiwiss.d2rq.download">DownloadContentQuery</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/DownloadMap.html" title="class in de.fuberlin.wiwiss.d2rq.map">DownloadMap</A>
<BR>
<A HREF="d2rq/dump_rdf.html" title="class in d2rq">dump_rdf</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/Equality.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Equality</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/Expression.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Expression</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/ExpressionProjectionSpec.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">ExpressionProjectionSpec</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/Filter.html" title="class in de.fuberlin.wiwiss.d2rq.mapgen">Filter</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/Filter.IdentifierMatcher.html" title="interface in de.fuberlin.wiwiss.d2rq.mapgen"><I>Filter.IdentifierMatcher</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/FilterIncludeExclude.html" title="class in de.fuberlin.wiwiss.d2rq.mapgen">FilterIncludeExclude</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/FilterMatchAny.html" title="class in de.fuberlin.wiwiss.d2rq.mapgen">FilterMatchAny</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/FilterMatchColumn.html" title="class in de.fuberlin.wiwiss.d2rq.mapgen">FilterMatchColumn</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/FilterMatchSchema.html" title="class in de.fuberlin.wiwiss.d2rq.mapgen">FilterMatchSchema</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/FilterMatchTable.html" title="class in de.fuberlin.wiwiss.d2rq.mapgen">FilterMatchTable</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/FilterParser.html" title="class in de.fuberlin.wiwiss.d2rq.mapgen">FilterParser</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/find/FindQuery.html" title="class in de.fuberlin.wiwiss.d2rq.find">FindQuery</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/nodes/FixedNodeMaker.html" title="class in de.fuberlin.wiwiss.d2rq.nodes">FixedNodeMaker</A>
<BR>
<A HREF="d2rq/generate_mapping.html" title="class in d2rq">generate_mapping</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/jena/GraphD2RQ.html" title="class in de.fuberlin.wiwiss.d2rq.jena">GraphD2RQ</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/GraphPatternTranslator.html" title="class in de.fuberlin.wiwiss.d2rq.engine">GraphPatternTranslator</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/GreaterThan.html" title="class in de.fuberlin.wiwiss.d2rq.expr">GreaterThan</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/GreaterThanOrEqual.html" title="class in de.fuberlin.wiwiss.d2rq.expr">GreaterThanOrEqual</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/HSQLDB.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">HSQLDB</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/HSQLDB.HSQLDBCompatibilityDoubleDataType.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">HSQLDB.HSQLDBCompatibilityDoubleDataType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/IRIEncoder.html" title="class in de.fuberlin.wiwiss.d2rq.mapgen">IRIEncoder</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/vocab/ISWC.html" title="class in de.fuberlin.wiwiss.d2rq.vocab">ISWC</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/vocab/JDBC.html" title="class in de.fuberlin.wiwiss.d2rq.vocab">JDBC</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/JettyLauncher.html" title="class in de.fuberlin.wiwiss.d2rq.server">JettyLauncher</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/Join.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">Join</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/JoinOptimizer.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">JoinOptimizer</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/LessThan.html" title="class in de.fuberlin.wiwiss.d2rq.expr">LessThan</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/LessThanOrEqual.html" title="class in de.fuberlin.wiwiss.d2rq.expr">LessThanOrEqual</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/Log4jHelper.html" title="class in de.fuberlin.wiwiss.d2rq">Log4jHelper</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/MapObject.html" title="class in de.fuberlin.wiwiss.d2rq.map">MapObject</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/parser/MapParser.html" title="class in de.fuberlin.wiwiss.d2rq.parser">MapParser</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/Mapping.html" title="class in de.fuberlin.wiwiss.d2rq.map">Mapping</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/MappingGenerator.html" title="class in de.fuberlin.wiwiss.d2rq.mapgen">MappingGenerator</A>
<BR>
<A HREF="de/fuberlin/wiwiss/pubby/negotiation/MediaRangeSpec.html" title="class in de.fuberlin.wiwiss.pubby.negotiation">MediaRangeSpec</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/vocab/META.html" title="class in de.fuberlin.wiwiss.d2rq.vocab">META</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/MetadataCreator.html" title="class in de.fuberlin.wiwiss.d2rq.server">MetadataCreator</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/jena/ModelD2RQ.html" title="class in de.fuberlin.wiwiss.d2rq.jena">ModelD2RQ</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/ModelResponse.html" title="class in de.fuberlin.wiwiss.d2rq.server">ModelResponse</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/Multiply.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Multiply</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/MutableRelation.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">MutableRelation</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/MySQL.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">MySQL</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/MySQL.MySQLCompatibilityBitDataType.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">MySQL.MySQLCompatibilityBitDataType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/MySQL.MySQLCompatibilityDateDataType.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">MySQL.MySQLCompatibilityDateDataType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/MySQL.MySQLCompatibilityTimeDataType.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">MySQL.MySQLCompatibilityTimeDataType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/MySQL.MySQLCompatibilityTimestampDataType.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">MySQL.MySQLCompatibilityTimestampDataType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/NamespaceServlet.html" title="class in de.fuberlin.wiwiss.d2rq.server">NamespaceServlet</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/Negation.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Negation</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/nodes/NodeMaker.html" title="interface in de.fuberlin.wiwiss.d2rq.nodes"><I>NodeMaker</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/NodeRelation.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">NodeRelation</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/nodes/NodeSetConstraintBuilder.html" title="class in de.fuberlin.wiwiss.d2rq.nodes">NodeSetConstraintBuilder</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/nodes/NodeSetFilter.html" title="interface in de.fuberlin.wiwiss.d2rq.nodes"><I>NodeSetFilter</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/NotNull.html" title="class in de.fuberlin.wiwiss.d2rq.expr">NotNull</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/OpTableSQL.html" title="class in de.fuberlin.wiwiss.d2rq.engine">OpTableSQL</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/OpUnionTableSQL.html" title="class in de.fuberlin.wiwiss.d2rq.engine">OpUnionTableSQL</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/Oracle.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">Oracle</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/Oracle.OracleCompatibilityTimeZoneDataType.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">Oracle.OracleCompatibilityTimeZoneDataType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/Oracle.OracleCompatibilityTimeZoneLocalDataType.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">Oracle.OracleCompatibilityTimeZoneLocalDataType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/OrderSpec.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">OrderSpec</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/PageServlet.html" title="class in de.fuberlin.wiwiss.d2rq.server">PageServlet</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/values/Pattern.html" title="class in de.fuberlin.wiwiss.d2rq.values">Pattern</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/values/Pattern.EncodeFunction.html" title="class in de.fuberlin.wiwiss.d2rq.values">Pattern.EncodeFunction</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/PostgreSQL.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">PostgreSQL</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/pp/PrettyPrinter.html" title="class in de.fuberlin.wiwiss.d2rq.pp">PrettyPrinter</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/ProjectionSpec.html" title="interface in de.fuberlin.wiwiss.d2rq.algebra"><I>ProjectionSpec</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/PropertyBridge.html" title="class in de.fuberlin.wiwiss.d2rq.map">PropertyBridge</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/PropertyMap.html" title="class in de.fuberlin.wiwiss.d2rq.map">PropertyMap</A>
<BR>
<A HREF="de/fuberlin/wiwiss/pubby/negotiation/PubbyNegotiator.html" title="class in de.fuberlin.wiwiss.pubby.negotiation">PubbyNegotiator</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/PushDownOpFilterVisitor.html" title="class in de.fuberlin.wiwiss.d2rq.engine">PushDownOpFilterVisitor</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/QueryEngineD2RQ.html" title="class in de.fuberlin.wiwiss.d2rq.engine">QueryEngineD2RQ</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/QueryIterTableSQL.html" title="class in de.fuberlin.wiwiss.d2rq.engine">QueryIterTableSQL</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/Quoter.html" title="interface in de.fuberlin.wiwiss.d2rq.sql"><I>Quoter</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/Quoter.PatternDoublingQuoter.html" title="class in de.fuberlin.wiwiss.d2rq.sql">Quoter.PatternDoublingQuoter</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/Relation.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">Relation</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/RelationalOperators.html" title="interface in de.fuberlin.wiwiss.d2rq.algebra"><I>RelationalOperators</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/parser/RelationBuilder.html" title="class in de.fuberlin.wiwiss.d2rq.parser">RelationBuilder</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/RelationImpl.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">RelationImpl</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/RelationName.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">RelationName</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/RequestParamHandler.html" title="class in de.fuberlin.wiwiss.d2rq.server">RequestParamHandler</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/ResourceDescriber.html" title="class in de.fuberlin.wiwiss.d2rq">ResourceDescriber</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/ResourceDescriptionServlet.html" title="class in de.fuberlin.wiwiss.d2rq.server">ResourceDescriptionServlet</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/ResourceMap.html" title="class in de.fuberlin.wiwiss.d2rq.map">ResourceMap</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/ResourceServlet.html" title="class in de.fuberlin.wiwiss.d2rq.server">ResourceServlet</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/ResultRow.html" title="interface in de.fuberlin.wiwiss.d2rq.sql"><I>ResultRow</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/ResultRowMap.html" title="class in de.fuberlin.wiwiss.d2rq.sql">ResultRowMap</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/RootServlet.html" title="class in de.fuberlin.wiwiss.d2rq.server">RootServlet</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/vocab/SD.html" title="class in de.fuberlin.wiwiss.d2rq.vocab">SD</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/SelectStatementBuilder.html" title="class in de.fuberlin.wiwiss.d2rq.sql">SelectStatementBuilder</A>
<BR>
<A HREF="d2rq/server.html" title="class in d2rq">server</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/vocab/SKOS.html" title="class in de.fuberlin.wiwiss.d2rq.vocab">SKOS</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/SQL.html" title="class in de.fuberlin.wiwiss.d2rq.sql">SQL</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/SQL92.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">SQL92</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/SQLApproximateNumeric.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">SQLApproximateNumeric</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/SQLBinary.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">SQLBinary</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/SQLBit.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">SQLBit</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/SQLBoolean.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">SQLBoolean</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/SQLCharacterString.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">SQLCharacterString</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/SQLDate.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">SQLDate</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/SQLExactNumeric.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">SQLExactNumeric</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/SQLExpression.html" title="class in de.fuberlin.wiwiss.d2rq.expr">SQLExpression</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/values/SQLExpressionValueMaker.html" title="class in de.fuberlin.wiwiss.d2rq.values">SQLExpressionValueMaker</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/SQLInterval.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">SQLInterval</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/SQLIterator.html" title="class in de.fuberlin.wiwiss.d2rq.sql">SQLIterator</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/SQLScriptLoader.html" title="class in de.fuberlin.wiwiss.d2rq.sql">SQLScriptLoader</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/SQLServer.html" title="class in de.fuberlin.wiwiss.d2rq.sql.vendor">SQLServer</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/SQLTime.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">SQLTime</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/SQLTimestamp.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">SQLTimestamp</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/Subtract.html" title="class in de.fuberlin.wiwiss.d2rq.expr">Subtract</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/SystemLoader.html" title="class in de.fuberlin.wiwiss.d2rq">SystemLoader</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/optimizer/expr/TransformExprToSQLApplyer.html" title="class in de.fuberlin.wiwiss.d2rq.optimizer.expr">TransformExprToSQLApplyer</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/TransformFilterCNF.html" title="class in de.fuberlin.wiwiss.d2rq.engine">TransformFilterCNF</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/TransformFilterCNF.DeMorganLawApplyer.html" title="class in de.fuberlin.wiwiss.d2rq.engine">TransformFilterCNF.DeMorganLawApplyer</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/TransformFilterCNF.DistributiveLawApplyer.html" title="class in de.fuberlin.wiwiss.d2rq.engine">TransformFilterCNF.DistributiveLawApplyer</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/TransformOpBGP.html" title="class in de.fuberlin.wiwiss.d2rq.engine">TransformOpBGP</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/TranslationTable.html" title="class in de.fuberlin.wiwiss.d2rq.map">TranslationTable</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/map/TranslationTable.Translation.html" title="class in de.fuberlin.wiwiss.d2rq.map">TranslationTable.Translation</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/csv/TranslationTableParser.html" title="class in de.fuberlin.wiwiss.d2rq.csv">TranslationTableParser</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/values/Translator.html" title="interface in de.fuberlin.wiwiss.d2rq.values"><I>Translator</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/find/TripleQueryIter.html" title="class in de.fuberlin.wiwiss.d2rq.find">TripleQueryIter</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/TripleRelation.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">TripleRelation</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/nodes/TypedNodeMaker.html" title="class in de.fuberlin.wiwiss.d2rq.nodes">TypedNodeMaker</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/nodes/TypedNodeMaker.NodeType.html" title="interface in de.fuberlin.wiwiss.d2rq.nodes"><I>TypedNodeMaker.NodeType</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/expr/UnaryMinus.html" title="class in de.fuberlin.wiwiss.d2rq.expr">UnaryMinus</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/types/UnsupportedDataType.html" title="class in de.fuberlin.wiwiss.d2rq.sql.types">UnsupportedDataType</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/find/URIMakerRule.html" title="class in de.fuberlin.wiwiss.d2rq.find">URIMakerRule</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/values/ValueDecorator.html" title="class in de.fuberlin.wiwiss.d2rq.values">ValueDecorator</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/values/ValueDecorator.ValueConstraint.html" title="interface in de.fuberlin.wiwiss.d2rq.values"><I>ValueDecorator.ValueConstraint</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/values/ValueMaker.html" title="interface in de.fuberlin.wiwiss.d2rq.values"><I>ValueMaker</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/engine/VarCollector.html" title="class in de.fuberlin.wiwiss.d2rq.engine">VarCollector</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/algebra/VariableConstraints.html" title="class in de.fuberlin.wiwiss.d2rq.algebra">VariableConstraints</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/VelocityWrapper.html" title="class in de.fuberlin.wiwiss.d2rq.server">VelocityWrapper</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/sql/vendor/Vendor.html" title="interface in de.fuberlin.wiwiss.d2rq.sql.vendor"><I>Vendor</I></A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/vocab/VocabularySummarizer.html" title="class in de.fuberlin.wiwiss.d2rq.vocab">VocabularySummarizer</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/vocab/VoID.html" title="class in de.fuberlin.wiwiss.d2rq.vocab">VoID</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/mapgen/W3CMappingGenerator.html" title="class in de.fuberlin.wiwiss.d2rq.mapgen">W3CMappingGenerator</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/server/WebappInitListener.html" title="class in de.fuberlin.wiwiss.d2rq.server">WebappInitListener</A>
<BR>
<A HREF="de/fuberlin/wiwiss/d2rq/optimizer/expr/XSD.html" title="class in de.fuberlin.wiwiss.d2rq.optimizer.expr">XSD</A>
<BR>
</FONT></TD>
</TR>
</TABLE>
</BODY>
</HTML>
| gpl-2.0 |
Debian/openjfx | modules/web/src/main/native/Source/WebCore/page/PageOverlay.h | 5603 | /*
* Copyright (C) 2010 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include "Color.h"
#include "FloatPoint.h"
#include "IntRect.h"
#include "Timer.h"
#include <wtf/RefCounted.h>
#include <wtf/text/WTFString.h>
namespace WebCore {
class Frame;
class GraphicsContext;
class GraphicsLayer;
class Page;
class PageOverlayController;
class PlatformMouseEvent;
class PageOverlay final : public RefCounted<PageOverlay> {
WTF_MAKE_NONCOPYABLE(PageOverlay);
WTF_MAKE_FAST_ALLOCATED;
public:
class Client {
protected:
virtual ~Client() { }
public:
virtual void willMoveToPage(PageOverlay&, Page*) = 0;
virtual void didMoveToPage(PageOverlay&, Page*) = 0;
virtual void drawRect(PageOverlay&, GraphicsContext&, const IntRect& dirtyRect) = 0;
virtual bool mouseEvent(PageOverlay&, const PlatformMouseEvent&) = 0;
virtual void didScrollFrame(PageOverlay&, Frame&) { }
virtual bool copyAccessibilityAttributeStringValueForPoint(PageOverlay&, String /* attribute */, FloatPoint, String&) { return false; }
virtual bool copyAccessibilityAttributeBoolValueForPoint(PageOverlay&, String /* attribute */, FloatPoint, bool&) { return false; }
virtual Vector<String> copyAccessibilityAttributeNames(PageOverlay&, bool /* parameterizedNames */) { return { }; }
};
enum class OverlayType {
View, // Fixed to the view size; does not scale or scroll with the document, repaints on scroll.
Document, // Scales and scrolls with the document.
};
WEBCORE_EXPORT static Ref<PageOverlay> create(Client&, OverlayType = OverlayType::View);
WEBCORE_EXPORT virtual ~PageOverlay();
WEBCORE_EXPORT PageOverlayController* controller() const;
typedef uint64_t PageOverlayID;
virtual PageOverlayID pageOverlayID() const { return m_pageOverlayID; }
void setPage(Page*);
Page* page() const { return m_page; }
WEBCORE_EXPORT void setNeedsDisplay(const IntRect& dirtyRect);
WEBCORE_EXPORT void setNeedsDisplay();
void drawRect(GraphicsContext&, const IntRect& dirtyRect);
bool mouseEvent(const PlatformMouseEvent&);
void didScrollFrame(Frame&);
bool copyAccessibilityAttributeStringValueForPoint(String attribute, FloatPoint parameter, String& value);
bool copyAccessibilityAttributeBoolValueForPoint(String attribute, FloatPoint parameter, bool& value);
Vector<String> copyAccessibilityAttributeNames(bool parameterizedNames);
void startFadeInAnimation();
void startFadeOutAnimation();
WEBCORE_EXPORT void stopFadeOutAnimation();
WEBCORE_EXPORT void clear();
Client& client() const { return m_client; }
enum class FadeMode { DoNotFade, Fade };
OverlayType overlayType() { return m_overlayType; }
WEBCORE_EXPORT IntRect bounds() const;
WEBCORE_EXPORT IntRect frame() const;
WEBCORE_EXPORT void setFrame(IntRect);
WEBCORE_EXPORT IntSize viewToOverlayOffset() const;
const Color& backgroundColor() const { return m_backgroundColor; }
void setBackgroundColor(const Color&);
void setShouldIgnoreMouseEventsOutsideBounds(bool flag) { m_shouldIgnoreMouseEventsOutsideBounds = flag; }
// FIXME: PageOverlay should own its layer, instead of PageOverlayController.
WEBCORE_EXPORT GraphicsLayer& layer();
bool needsSynchronousScrolling() const { return m_needsSynchronousScrolling; }
void setNeedsSynchronousScrolling(bool needsSynchronousScrolling) { m_needsSynchronousScrolling = needsSynchronousScrolling; }
private:
explicit PageOverlay(Client&, OverlayType);
void startFadeAnimation();
void fadeAnimationTimerFired();
Client& m_client;
Page* m_page { nullptr };
Timer m_fadeAnimationTimer;
double m_fadeAnimationStartTime { 0 };
double m_fadeAnimationDuration;
enum FadeAnimationType {
NoAnimation,
FadeInAnimation,
FadeOutAnimation,
};
FadeAnimationType m_fadeAnimationType { NoAnimation };
float m_fractionFadedIn { 1 };
bool m_needsSynchronousScrolling;
OverlayType m_overlayType;
IntRect m_overrideFrame;
Color m_backgroundColor { Color::transparent };
PageOverlayID m_pageOverlayID;
bool m_shouldIgnoreMouseEventsOutsideBounds { true };
};
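/* A minimal Client sketch (illustrative only; actually installing the overlay
* on a Page goes through PageOverlayController, which is not declared in this
* header, and the client must outlive the overlay). Class name and color are
* hypothetical:
*
*     class HighlightOverlayClient final : public PageOverlay::Client {
*     public:
*         void willMoveToPage(PageOverlay&, Page*) override { }
*         void didMoveToPage(PageOverlay&, Page*) override { }
*         void drawRect(PageOverlay&, GraphicsContext& context, const IntRect& dirtyRect) override
*         {
*             // Repaint only the invalidated area; a Document overlay scrolls with the page.
*             context.fillRect(dirtyRect, Color(255, 255, 0, 64));
*         }
*         bool mouseEvent(PageOverlay&, const PlatformMouseEvent&) override { return false; }
*     };
*
*     HighlightOverlayClient client;
*     auto overlay = PageOverlay::create(client, PageOverlay::OverlayType::Document);
*     overlay->setNeedsDisplay(); // schedule a full repaint
*/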
} // namespace WebCore
| gpl-2.0 |
Keruspe/systemd | src/shared/install.c | 112759 | /* SPDX-License-Identifier: LGPL-2.1+ */
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fnmatch.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include "alloc-util.h"
#include "conf-files.h"
#include "conf-parser.h"
#include "dirent-util.h"
#include "extract-word.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "hashmap.h"
#include "install-printf.h"
#include "install.h"
#include "locale-util.h"
#include "log.h"
#include "macro.h"
#include "mkdir.h"
#include "path-lookup.h"
#include "path-util.h"
#include "rm-rf.h"
#include "set.h"
#include "special.h"
#include "stat-util.h"
#include "string-table.h"
#include "string-util.h"
#include "strv.h"
#include "unit-name.h"
#define UNIT_FILE_FOLLOW_SYMLINK_MAX 64
typedef enum SearchFlags {
SEARCH_LOAD = 1 << 0,
SEARCH_FOLLOW_CONFIG_SYMLINKS = 1 << 1,
SEARCH_DROPIN = 1 << 2,
} SearchFlags;
typedef struct {
OrderedHashmap *will_process;
OrderedHashmap *have_processed;
} InstallContext;
typedef enum {
PRESET_UNKNOWN,
PRESET_ENABLE,
PRESET_DISABLE,
} PresetAction;
typedef struct {
char *pattern;
PresetAction action;
char **instances;
} PresetRule;
typedef struct {
PresetRule *rules;
size_t n_rules;
} Presets;
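/* These structures are the in-memory form of systemd.preset(5) files. A preset
* file with the following (illustrative) content would parse into three
* PresetRule entries, the first with .action = PRESET_ENABLE and
* .instances = { "tty1", "tty2" }:
*
*     enable getty@.service tty1 tty2
*     enable sshd.service
*     disable *
*/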
static bool unit_file_install_info_has_rules(const UnitFileInstallInfo *i) {
assert(i);
return !strv_isempty(i->aliases) ||
!strv_isempty(i->wanted_by) ||
!strv_isempty(i->required_by);
}
static bool unit_file_install_info_has_also(const UnitFileInstallInfo *i) {
assert(i);
return !strv_isempty(i->also);
}
static void presets_freep(Presets *p) {
size_t i;
if (!p)
return;
for (i = 0; i < p->n_rules; i++) {
free(p->rules[i].pattern);
strv_free(p->rules[i].instances);
}
free(p->rules);
p->n_rules = 0;
}
bool unit_type_may_alias(UnitType type) {
return IN_SET(type,
UNIT_SERVICE,
UNIT_SOCKET,
UNIT_TARGET,
UNIT_DEVICE,
UNIT_TIMER,
UNIT_PATH);
}
bool unit_type_may_template(UnitType type) {
return IN_SET(type,
UNIT_SERVICE,
UNIT_SOCKET,
UNIT_TARGET,
UNIT_TIMER,
UNIT_PATH);
}
static const char *unit_file_type_table[_UNIT_FILE_TYPE_MAX] = {
[UNIT_FILE_TYPE_REGULAR] = "regular",
[UNIT_FILE_TYPE_SYMLINK] = "symlink",
[UNIT_FILE_TYPE_MASKED] = "masked",
};
DEFINE_PRIVATE_STRING_TABLE_LOOKUP_TO_STRING(unit_file_type, UnitFileType);
static int in_search_path(const LookupPaths *p, const char *path) {
_cleanup_free_ char *parent = NULL;
char **i;
assert(path);
parent = dirname_malloc(path);
if (!parent)
return -ENOMEM;
STRV_FOREACH(i, p->search_path)
if (path_equal(parent, *i))
return true;
return false;
}
static const char* skip_root(const LookupPaths *p, const char *path) {
char *e;
assert(p);
assert(path);
if (!p->root_dir)
return path;
e = path_startswith(path, p->root_dir);
if (!e)
return NULL;
/* Make sure the returned path starts with a slash */
if (e[0] != '/') {
if (e == path || e[-1] != '/')
return NULL;
e--;
}
return e;
}
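/* Worked example (hypothetical paths), given p->root_dir == "/chroot":
*
*     skip_root(p, "/chroot/etc/systemd/system/foo.service")
*             → "/etc/systemd/system/foo.service"
*     skip_root(p, "/chroot") → NULL (nothing below the root)
*     skip_root(p, "/elsewhere/etc") → NULL (outside the root)
*
* The e-- adjustment above ensures the result keeps its leading slash even
* when the match consumed it, e.g. for a root_dir given with a trailing
* slash. */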
static int path_is_generator(const LookupPaths *p, const char *path) {
_cleanup_free_ char *parent = NULL;
assert(p);
assert(path);
parent = dirname_malloc(path);
if (!parent)
return -ENOMEM;
return path_equal_ptr(parent, p->generator) ||
path_equal_ptr(parent, p->generator_early) ||
path_equal_ptr(parent, p->generator_late);
}
static int path_is_transient(const LookupPaths *p, const char *path) {
_cleanup_free_ char *parent = NULL;
assert(p);
assert(path);
parent = dirname_malloc(path);
if (!parent)
return -ENOMEM;
return path_equal_ptr(parent, p->transient);
}
static int path_is_control(const LookupPaths *p, const char *path) {
_cleanup_free_ char *parent = NULL;
assert(p);
assert(path);
parent = dirname_malloc(path);
if (!parent)
return -ENOMEM;
return path_equal_ptr(parent, p->persistent_control) ||
path_equal_ptr(parent, p->runtime_control);
}
static int path_is_config(const LookupPaths *p, const char *path, bool check_parent) {
_cleanup_free_ char *parent = NULL;
assert(p);
assert(path);
/* Note that we do *not* have generic checks for /etc or /run in place, since with
* them we couldn't discern configuration from transient or generated units */
if (check_parent) {
parent = dirname_malloc(path);
if (!parent)
return -ENOMEM;
path = parent;
}
return path_equal_ptr(path, p->persistent_config) ||
path_equal_ptr(path, p->runtime_config);
}
static int path_is_runtime(const LookupPaths *p, const char *path, bool check_parent) {
_cleanup_free_ char *parent = NULL;
const char *rpath;
assert(p);
assert(path);
/* Everything in /run is considered runtime. On top of that we also add
* explicit checks for the various runtime directories, as a safety net. */
rpath = skip_root(p, path);
if (rpath && path_startswith(rpath, "/run"))
return true;
if (check_parent) {
parent = dirname_malloc(path);
if (!parent)
return -ENOMEM;
path = parent;
}
return path_equal_ptr(path, p->runtime_config) ||
path_equal_ptr(path, p->generator) ||
path_equal_ptr(path, p->generator_early) ||
path_equal_ptr(path, p->generator_late) ||
path_equal_ptr(path, p->transient) ||
path_equal_ptr(path, p->runtime_control);
}
static int path_is_vendor(const LookupPaths *p, const char *path) {
const char *rpath;
assert(p);
assert(path);
rpath = skip_root(p, path);
if (!rpath)
return 0;
if (path_startswith(rpath, "/usr"))
return true;
#if HAVE_SPLIT_USR
if (path_startswith(rpath, "/lib"))
return true;
#endif
return path_equal(rpath, SYSTEM_DATA_UNIT_PATH);
}
int unit_file_changes_add(
UnitFileChange **changes,
size_t *n_changes,
UnitFileChangeType type,
const char *path,
const char *source) {
_cleanup_free_ char *p = NULL, *s = NULL;
UnitFileChange *c;
assert(path);
assert(!changes == !n_changes);
if (!changes)
return 0;
c = reallocarray(*changes, *n_changes + 1, sizeof(UnitFileChange));
if (!c)
return -ENOMEM;
*changes = c;
p = strdup(path);
if (source)
s = strdup(source);
if (!p || (source && !s))
return -ENOMEM;
path_simplify(p, false);
if (s)
path_simplify(s, false);
c[*n_changes] = (UnitFileChange) { type, p, s };
p = s = NULL;
(*n_changes) ++;
return 0;
}
void unit_file_changes_free(UnitFileChange *changes, size_t n_changes) {
size_t i;
assert(changes || n_changes == 0);
for (i = 0; i < n_changes; i++) {
free(changes[i].path);
free(changes[i].source);
}
free(changes);
}
void unit_file_dump_changes(int r, const char *verb, const UnitFileChange *changes, size_t n_changes, bool quiet) {
size_t i;
bool logged = false;
assert(changes || n_changes == 0);
/* If verb is not specified, errors are not allowed! */
assert(verb || r >= 0);
for (i = 0; i < n_changes; i++) {
assert(verb || changes[i].type >= 0);
switch(changes[i].type) {
case UNIT_FILE_SYMLINK:
if (!quiet)
log_info("Created symlink %s %s %s.",
changes[i].path,
special_glyph(SPECIAL_GLYPH_ARROW),
changes[i].source);
break;
case UNIT_FILE_UNLINK:
if (!quiet)
log_info("Removed %s.", changes[i].path);
break;
case UNIT_FILE_IS_MASKED:
if (!quiet)
log_info("Unit %s is masked, ignoring.", changes[i].path);
break;
case UNIT_FILE_IS_DANGLING:
if (!quiet)
log_info("Unit %s is an alias to a unit that is not present, ignoring.",
changes[i].path);
break;
case -EEXIST:
if (changes[i].source)
log_error_errno(changes[i].type,
"Failed to %s unit, file %s already exists and is a symlink to %s.",
verb, changes[i].path, changes[i].source);
else
log_error_errno(changes[i].type,
"Failed to %s unit, file %s already exists.",
verb, changes[i].path);
logged = true;
break;
case -ERFKILL:
log_error_errno(changes[i].type, "Failed to %s unit, unit %s is masked.",
verb, changes[i].path);
logged = true;
break;
case -EADDRNOTAVAIL:
log_error_errno(changes[i].type, "Failed to %s unit, unit %s is transient or generated.",
verb, changes[i].path);
logged = true;
break;
case -ELOOP:
log_error_errno(changes[i].type, "Failed to %s unit, refusing to operate on linked unit file %s",
verb, changes[i].path);
logged = true;
break;
case -ENOENT:
log_error_errno(changes[i].type, "Failed to %s unit, unit %s does not exist.", verb, changes[i].path);
logged = true;
break;
default:
assert(changes[i].type < 0);
log_error_errno(changes[i].type, "Failed to %s unit, file %s: %m.",
verb, changes[i].path);
logged = true;
}
}
if (r < 0 && !logged)
log_error_errno(r, "Failed to %s: %m.", verb);
}
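/* Typical caller pattern for the changes array (illustrative sketch, using
* the public entry points declared in install.h; "foo.service" is a
* placeholder):
*
*     UnitFileChange *changes = NULL;
*     size_t n_changes = 0;
*     int r;
*
*     r = unit_file_enable(UNIT_FILE_SYSTEM, 0, NULL,
*                          STRV_MAKE("foo.service"), &changes, &n_changes);
*     unit_file_dump_changes(r, "enable", changes, n_changes, false);
*     unit_file_changes_free(changes, n_changes);
*
* unit_file_dump_changes() prints both successes (symlink/unlink messages)
* and failures (per-change errors), keyed off the sign of the change type. */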
/**
* Checks if two paths or symlinks from wd are the same, when root is the root of the filesystem.
* wd should be the full path in the host file system.
*/
static bool chroot_symlinks_same(const char *root, const char *wd, const char *a, const char *b) {
assert(path_is_absolute(wd));
/* This will give incorrect results if the paths are relative and go outside
* of the chroot. False negatives are possible. */
if (!root)
root = "/";
a = strjoina(path_is_absolute(a) ? root : wd, "/", a);
b = strjoina(path_is_absolute(b) ? root : wd, "/", b);
return path_equal_or_files_same(a, b, 0);
}
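/* Worked example (hypothetical paths): with root == "/chroot" and
* wd == "/chroot/etc/systemd/system", the symlink targets
*
*     a = "../../../usr/lib/systemd/system/foo.service"   (relative)
*     b = "/usr/lib/systemd/system/foo.service"           (absolute)
*
* become "/chroot/etc/systemd/system/../../../usr/lib/systemd/system/foo.service"
* and "/chroot/usr/lib/systemd/system/foo.service" respectively; if those
* exist, path_equal_or_files_same() resolves them to the same file, so the
* two targets are treated as equivalent inside the chroot. */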
static int create_symlink(
const LookupPaths *paths,
const char *old_path,
const char *new_path,
bool force,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_free_ char *dest = NULL, *dirname = NULL;
const char *rp;
int r;
assert(old_path);
assert(new_path);
rp = skip_root(paths, old_path);
if (rp)
old_path = rp;
/* Actually create a symlink, and remember that we did. It is
* smart enough to check whether there's already a valid symlink in
* place.
*
* Returns 1 if a symlink was created or already exists and points to
* the right place, or negative on error.
*/
mkdir_parents_label(new_path, 0755);
if (symlink(old_path, new_path) >= 0) {
unit_file_changes_add(changes, n_changes, UNIT_FILE_SYMLINK, new_path, old_path);
return 1;
}
if (errno != EEXIST) {
unit_file_changes_add(changes, n_changes, -errno, new_path, NULL);
return -errno;
}
r = readlink_malloc(new_path, &dest);
if (r < 0) {
/* translate EINVAL (non-symlink exists) to EEXIST */
if (r == -EINVAL)
r = -EEXIST;
unit_file_changes_add(changes, n_changes, r, new_path, NULL);
return r;
}
dirname = dirname_malloc(new_path);
if (!dirname)
return -ENOMEM;
if (chroot_symlinks_same(paths->root_dir, dirname, dest, old_path)) {
log_debug("Symlink %s → %s already exists", new_path, dest);
return 1;
}
if (!force) {
unit_file_changes_add(changes, n_changes, -EEXIST, new_path, dest);
return -EEXIST;
}
r = symlink_atomic(old_path, new_path);
if (r < 0) {
unit_file_changes_add(changes, n_changes, r, new_path, NULL);
return r;
}
unit_file_changes_add(changes, n_changes, UNIT_FILE_UNLINK, new_path, NULL);
unit_file_changes_add(changes, n_changes, UNIT_FILE_SYMLINK, new_path, old_path);
return 1;
}
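/* Illustrative call (hypothetical paths): enabling a unit into the persistent
* configuration boils down to something like
*
*     r = create_symlink(paths,
*                        "/usr/lib/systemd/system/foo.service",
*                        "/etc/systemd/system/multi-user.target.wants/foo.service",
*                        false, &changes, &n_changes);
*
* returning 1 with a UNIT_FILE_SYMLINK change on success, 1 without a change
* if an equivalent symlink already exists, and -EEXIST (recorded in the
* changes array) if something else is in the way and force is false. */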
static int mark_symlink_for_removal(
Set **remove_symlinks_to,
const char *p) {
char *n;
int r;
assert(p);
r = set_ensure_allocated(remove_symlinks_to, &path_hash_ops);
if (r < 0)
return r;
n = strdup(p);
if (!n)
return -ENOMEM;
path_simplify(n, false);
r = set_consume(*remove_symlinks_to, n);
if (r == -EEXIST)
return 0;
if (r < 0)
return r;
return 1;
}
static int remove_marked_symlinks_fd(
Set *remove_symlinks_to,
int fd,
const char *path,
const char *config_path,
const LookupPaths *lp,
bool dry_run,
bool *restart,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_closedir_ DIR *d = NULL;
struct dirent *de;
int r = 0;
assert(remove_symlinks_to);
assert(fd >= 0);
assert(path);
assert(config_path);
assert(lp);
assert(restart);
d = fdopendir(fd);
if (!d) {
safe_close(fd);
return -errno;
}
rewinddir(d);
FOREACH_DIRENT(de, d, return -errno) {
dirent_ensure_type(d, de);
if (de->d_type == DT_DIR) {
_cleanup_free_ char *p = NULL;
int nfd, q;
nfd = openat(fd, de->d_name, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW);
if (nfd < 0) {
if (errno == ENOENT)
continue;
if (r == 0)
r = -errno;
continue;
}
p = path_make_absolute(de->d_name, path);
if (!p) {
safe_close(nfd);
return -ENOMEM;
}
/* This will close nfd, regardless of whether it succeeds or not */
q = remove_marked_symlinks_fd(remove_symlinks_to, nfd, p, config_path, lp, dry_run, restart, changes, n_changes);
if (q < 0 && r == 0)
r = q;
} else if (de->d_type == DT_LNK) {
_cleanup_free_ char *p = NULL, *dest = NULL;
const char *rp;
bool found;
int q;
if (!unit_name_is_valid(de->d_name, UNIT_NAME_ANY))
continue;
p = path_make_absolute(de->d_name, path);
if (!p)
return -ENOMEM;
path_simplify(p, false);
q = readlink_malloc(p, &dest);
if (q == -ENOENT)
continue;
if (q < 0) {
if (r == 0)
r = q;
continue;
}
/* We remove all links pointing to a file or path that is marked, as well as all symlinks sharing
* the same name as a file that is marked. */
found = set_contains(remove_symlinks_to, dest) ||
set_contains(remove_symlinks_to, basename(dest)) ||
set_contains(remove_symlinks_to, de->d_name);
if (!found)
continue;
if (!dry_run) {
if (unlinkat(fd, de->d_name, 0) < 0 && errno != ENOENT) {
if (r == 0)
r = -errno;
unit_file_changes_add(changes, n_changes, -errno, p, NULL);
continue;
}
(void) rmdir_parents(p, config_path);
}
unit_file_changes_add(changes, n_changes, UNIT_FILE_UNLINK, p, NULL);
/* Now, remember the full path (but with the root prefix removed) of
* the symlink we just removed, and remove any symlinks to it, too. */
rp = skip_root(lp, p);
q = mark_symlink_for_removal(&remove_symlinks_to, rp ?: p);
if (q < 0)
return q;
if (q > 0 && !dry_run)
*restart = true;
}
}
return r;
}
static int remove_marked_symlinks(
Set *remove_symlinks_to,
const char *config_path,
const LookupPaths *lp,
bool dry_run,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_close_ int fd = -1;
bool restart;
int r = 0;
assert(config_path);
assert(lp);
if (set_size(remove_symlinks_to) <= 0)
return 0;
fd = open(config_path, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC);
if (fd < 0)
return errno == ENOENT ? 0 : -errno;
do {
int q, cfd;
restart = false;
cfd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
if (cfd < 0)
return -errno;
/* This takes possession of cfd and closes it */
q = remove_marked_symlinks_fd(remove_symlinks_to, cfd, config_path, config_path, lp, dry_run, &restart, changes, n_changes);
if (r == 0)
r = q;
} while (restart);
return r;
}
static int is_symlink_with_known_name(const UnitFileInstallInfo *i, const char *name) {
int r;
if (streq(name, i->name))
return true;
if (strv_contains(i->aliases, name))
return true;
/* Look for template symlink matching DefaultInstance */
if (i->default_instance && unit_name_is_valid(i->name, UNIT_NAME_TEMPLATE)) {
_cleanup_free_ char *s = NULL;
r = unit_name_replace_instance(i->name, i->default_instance, &s);
if (r < 0) {
if (r != -EINVAL)
return r;
} else if (streq(name, s))
return true;
}
return false;
}
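/* Example of the DefaultInstance match above: for a template with
* i->name == "getty@.service" and i->default_instance == "tty1", a symlink
* named "getty@tty1.service" counts as a known name, because
* unit_name_replace_instance() yields exactly that string. */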
static int find_symlinks_fd(
const char *root_dir,
const UnitFileInstallInfo *i,
bool match_aliases,
bool ignore_same_name,
int fd,
const char *path,
const char *config_path,
bool *same_name_link) {
_cleanup_closedir_ DIR *d = NULL;
struct dirent *de;
int r = 0;
assert(i);
assert(fd >= 0);
assert(path);
assert(config_path);
assert(same_name_link);
d = fdopendir(fd);
if (!d) {
safe_close(fd);
return -errno;
}
FOREACH_DIRENT(de, d, return -errno) {
dirent_ensure_type(d, de);
if (de->d_type == DT_DIR) {
_cleanup_free_ char *p = NULL;
int nfd, q;
nfd = openat(fd, de->d_name, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC|O_NOFOLLOW);
if (nfd < 0) {
if (errno == ENOENT)
continue;
if (r == 0)
r = -errno;
continue;
}
p = path_make_absolute(de->d_name, path);
if (!p) {
safe_close(nfd);
return -ENOMEM;
}
/* This will close nfd, regardless of whether it succeeds or not */
q = find_symlinks_fd(root_dir, i, match_aliases, ignore_same_name, nfd,
p, config_path, same_name_link);
if (q > 0)
return 1;
if (r == 0)
r = q;
} else if (de->d_type == DT_LNK) {
_cleanup_free_ char *p = NULL, *dest = NULL;
bool found_path = false, found_dest, b = false;
int q;
/* Acquire symlink name */
p = path_make_absolute(de->d_name, path);
if (!p)
return -ENOMEM;
/* Acquire symlink destination */
q = readlink_malloc(p, &dest);
if (q == -ENOENT)
continue;
if (q < 0) {
if (r == 0)
r = q;
continue;
}
/* Make absolute */
if (!path_is_absolute(dest)) {
char *x;
x = prefix_root(root_dir, dest);
if (!x)
return -ENOMEM;
free_and_replace(dest, x);
}
assert(unit_name_is_valid(i->name, UNIT_NAME_ANY));
if (!ignore_same_name)
/* Check if the symlink itself matches what we are looking for.
*
* If ignore_same_name is specified, we are in one of the directories which
* have lower priority than the unit file, and even if a file or symlink with
* this name was found, we should ignore it. */
found_path = streq(de->d_name, i->name);
/* Check if what the symlink points to matches what we are looking for */
found_dest = streq(basename(dest), i->name);
if (found_path && found_dest) {
_cleanup_free_ char *t = NULL;
/* Filter out same name links in the main
* config path */
t = path_make_absolute(i->name, config_path);
if (!t)
return -ENOMEM;
b = path_equal(t, p);
}
if (b)
*same_name_link = true;
else if (found_path || found_dest) {
if (!match_aliases)
return 1;
/* Check if symlink name is in the set of names used by [Install] */
q = is_symlink_with_known_name(i, de->d_name);
if (q < 0)
return q;
if (q > 0)
return 1;
}
}
}
return r;
}
static int find_symlinks(
const char *root_dir,
const UnitFileInstallInfo *i,
bool match_name,
bool ignore_same_name,
const char *config_path,
bool *same_name_link) {
int fd;
assert(i);
assert(config_path);
assert(same_name_link);
fd = open(config_path, O_RDONLY|O_NONBLOCK|O_DIRECTORY|O_CLOEXEC);
if (fd < 0) {
if (IN_SET(errno, ENOENT, ENOTDIR, EACCES))
return 0;
return -errno;
}
/* This takes possession of fd and closes it */
return find_symlinks_fd(root_dir, i, match_name, ignore_same_name, fd,
config_path, config_path, same_name_link);
}
static int find_symlinks_in_scope(
UnitFileScope scope,
const LookupPaths *paths,
const UnitFileInstallInfo *i,
bool match_name,
UnitFileState *state) {
bool same_name_link_runtime = false, same_name_link_config = false;
bool enabled_in_runtime = false, enabled_at_all = false;
bool ignore_same_name = false;
char **p;
int r;
assert(paths);
assert(i);
/* As we iterate over the list of search paths in paths->search_path, we may encounter "same name"
* symlinks. The ones which are "below" (i.e. have lower priority) than the unit file itself are
* effectively masked, so we should ignore them. */
STRV_FOREACH(p, paths->search_path) {
bool same_name_link = false;
r = find_symlinks(paths->root_dir, i, match_name, ignore_same_name, *p, &same_name_link);
if (r < 0)
return r;
if (r > 0) {
/* We found symlinks in this dir? Yay! Let's see where precisely it is enabled. */
if (path_equal_ptr(*p, paths->persistent_config)) {
/* This is the best outcome, let's return it immediately. */
*state = UNIT_FILE_ENABLED;
return 1;
}
/* look for global enablement of user units */
if (scope == UNIT_FILE_USER && path_is_user_config_dir(*p)) {
*state = UNIT_FILE_ENABLED;
return 1;
}
r = path_is_runtime(paths, *p, false);
if (r < 0)
return r;
if (r > 0)
enabled_in_runtime = true;
else
enabled_at_all = true;
} else if (same_name_link) {
if (path_equal_ptr(*p, paths->persistent_config))
same_name_link_config = true;
else {
r = path_is_runtime(paths, *p, false);
if (r < 0)
return r;
if (r > 0)
same_name_link_runtime = true;
}
}
/* Check if next iteration will be "below" the unit file (either a regular file
* or a symlink), and hence should be ignored */
if (!ignore_same_name && path_startswith(i->path, *p))
ignore_same_name = true;
}
if (enabled_in_runtime) {
*state = UNIT_FILE_ENABLED_RUNTIME;
return 1;
}
/* Here's a special rule: if the unit we are looking for is an instance, and it is symlinked in the search path
* outside of the runtime and configuration directories, then we consider it statically enabled. Note that we do
* this only for instances, not for regular names, as those are merely aliases, while instances explicitly
* instantiate something, and hence are a much stronger concept. */
if (enabled_at_all && unit_name_is_valid(i->name, UNIT_NAME_INSTANCE)) {
*state = UNIT_FILE_STATIC;
return 1;
}
/* Hmm, we didn't find it, but maybe we found the same name
* link? */
if (same_name_link_config) {
*state = UNIT_FILE_LINKED;
return 1;
}
if (same_name_link_runtime) {
*state = UNIT_FILE_LINKED_RUNTIME;
return 1;
}
return 0;
}
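/* Example of the priority handling (hypothetical layout): if foo.service
* itself lives in /run/systemd/system and a stale symlink
* /usr/lib/systemd/system/foo.service also exists, the iteration visits /run
* first, flips ignore_same_name once it has passed the directory containing
* i->path, and therefore does not misreport the lower-priority /usr/lib
* symlink as an enablement. */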
static void install_info_free(UnitFileInstallInfo *i) {
if (!i)
return;
free(i->name);
free(i->path);
strv_free(i->aliases);
strv_free(i->wanted_by);
strv_free(i->required_by);
strv_free(i->also);
free(i->default_instance);
free(i->symlink_target);
free(i);
}
static void install_context_done(InstallContext *c) {
assert(c);
c->will_process = ordered_hashmap_free_with_destructor(c->will_process, install_info_free);
c->have_processed = ordered_hashmap_free_with_destructor(c->have_processed, install_info_free);
}
static UnitFileInstallInfo *install_info_find(InstallContext *c, const char *name) {
UnitFileInstallInfo *i;
i = ordered_hashmap_get(c->have_processed, name);
if (i)
return i;
return ordered_hashmap_get(c->will_process, name);
}
static int install_info_may_process(
const UnitFileInstallInfo *i,
const LookupPaths *paths,
UnitFileChange **changes,
size_t *n_changes) {
assert(i);
assert(paths);
/* Checks whether the loaded unit file is one we should process, or is masked,
* transient or generated and thus not subject to enable/disable operations. */
if (i->type == UNIT_FILE_TYPE_MASKED) {
unit_file_changes_add(changes, n_changes, -ERFKILL, i->path, NULL);
return -ERFKILL;
}
if (path_is_generator(paths, i->path) ||
path_is_transient(paths, i->path)) {
unit_file_changes_add(changes, n_changes, -EADDRNOTAVAIL, i->path, NULL);
return -EADDRNOTAVAIL;
}
return 0;
}
/**
* Adds a new UnitFileInstallInfo entry under name in the InstallContext.will_process
* hashmap, or retrieves the existing one if already present.
*
* Returns negative on error, 0 if the unit was already known, 1 otherwise.
*/
static int install_info_add(
InstallContext *c,
const char *name,
const char *path,
bool auxiliary,
UnitFileInstallInfo **ret) {
UnitFileInstallInfo *i = NULL;
int r;
assert(c);
assert(name || path);
if (!name)
name = basename(path);
if (!unit_name_is_valid(name, UNIT_NAME_ANY))
return -EINVAL;
i = install_info_find(c, name);
if (i) {
i->auxiliary = i->auxiliary && auxiliary;
if (ret)
*ret = i;
return 0;
}
r = ordered_hashmap_ensure_allocated(&c->will_process, &string_hash_ops);
if (r < 0)
return r;
i = new(UnitFileInstallInfo, 1);
if (!i)
return -ENOMEM;
*i = (UnitFileInstallInfo) {
.type = _UNIT_FILE_TYPE_INVALID,
.auxiliary = auxiliary,
};
i->name = strdup(name);
if (!i->name) {
r = -ENOMEM;
goto fail;
}
if (path) {
i->path = strdup(path);
if (!i->path) {
r = -ENOMEM;
goto fail;
}
}
r = ordered_hashmap_put(c->will_process, i->name, i);
if (r < 0)
goto fail;
if (ret)
*ret = i;
return 1;
fail:
install_info_free(i);
return r;
}
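/* Illustrative behaviour (unit name hypothetical): the context deduplicates
* by unit name, so
*
*     install_info_add(c, "foo.service", NULL, false, &i);   → returns 1
*     install_info_add(c, "foo.service", NULL, true, &i);    → returns 0
*
* leaves a single entry in c->will_process with i->auxiliary == false, since
* an entry stays non-auxiliary once any caller added it non-auxiliarily. */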
static int config_parse_alias(
const char *unit,
const char *filename,
unsigned line,
const char *section,
unsigned section_line,
const char *lvalue,
int ltype,
const char *rvalue,
void *data,
void *userdata) {
UnitType type;
assert(unit);
assert(filename);
assert(lvalue);
assert(rvalue);
type = unit_name_to_type(unit);
if (!unit_type_may_alias(type))
return log_syntax(unit, LOG_WARNING, filename, line, 0,
"Alias= is not allowed for %s units, ignoring.",
unit_type_to_string(type));
return config_parse_strv(unit, filename, line, section, section_line,
lvalue, ltype, rvalue, data, userdata);
}
static int config_parse_also(
const char *unit,
const char *filename,
unsigned line,
const char *section,
unsigned section_line,
const char *lvalue,
int ltype,
const char *rvalue,
void *data,
void *userdata) {
UnitFileInstallInfo *info = userdata, *alsoinfo = NULL;
InstallContext *c = data;
int r;
assert(unit);
assert(filename);
assert(lvalue);
assert(rvalue);
for (;;) {
_cleanup_free_ char *word = NULL, *printed = NULL;
r = extract_first_word(&rvalue, &word, NULL, 0);
if (r < 0)
return r;
if (r == 0)
break;
r = install_full_printf(info, word, &printed);
if (r < 0)
return r;
if (!unit_name_is_valid(printed, UNIT_NAME_ANY))
return -EINVAL;
r = install_info_add(c, printed, NULL, true, &alsoinfo);
if (r < 0)
return r;
r = strv_push(&info->also, printed);
if (r < 0)
return r;
printed = NULL;
}
return 0;
}
static int config_parse_default_instance(
const char *unit,
const char *filename,
unsigned line,
const char *section,
unsigned section_line,
const char *lvalue,
int ltype,
const char *rvalue,
void *data,
void *userdata) {
UnitFileInstallInfo *i = data;
_cleanup_free_ char *printed = NULL;
int r;
assert(unit);
assert(filename);
assert(lvalue);
assert(rvalue);
if (unit_name_is_valid(unit, UNIT_NAME_INSTANCE))
/* When enabling an instance, we might be using a template unit file,
* but we should ignore DefaultInstance silently. */
return 0;
if (!unit_name_is_valid(unit, UNIT_NAME_TEMPLATE))
return log_syntax(unit, LOG_WARNING, filename, line, 0,
"DefaultInstance= only makes sense for template units, ignoring.");
r = install_full_printf(i, rvalue, &printed);
if (r < 0)
return r;
if (!unit_instance_is_valid(printed))
return -EINVAL;
return free_and_replace(i->default_instance, printed);
}
static int unit_file_load(
InstallContext *c,
UnitFileInstallInfo *info,
const char *path,
const char *root_dir,
SearchFlags flags) {
const ConfigTableItem items[] = {
{ "Install", "Alias", config_parse_alias, 0, &info->aliases },
{ "Install", "WantedBy", config_parse_strv, 0, &info->wanted_by },
{ "Install", "RequiredBy", config_parse_strv, 0, &info->required_by },
{ "Install", "DefaultInstance", config_parse_default_instance, 0, info },
{ "Install", "Also", config_parse_also, 0, c },
{}
};
UnitType type;
_cleanup_fclose_ FILE *f = NULL;
_cleanup_close_ int fd = -1;
struct stat st;
int r;
assert(info);
assert(path);
if (!(flags & SEARCH_DROPIN)) {
/* Loading or checking for the main unit file… */
type = unit_name_to_type(info->name);
if (type < 0)
return -EINVAL;
if (unit_name_is_valid(info->name, UNIT_NAME_TEMPLATE|UNIT_NAME_INSTANCE) && !unit_type_may_template(type))
return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
"Unit type %s cannot be templated.", unit_type_to_string(type));
if (!(flags & SEARCH_LOAD)) {
r = lstat(path, &st);
if (r < 0)
return -errno;
if (null_or_empty(&st))
info->type = UNIT_FILE_TYPE_MASKED;
else if (S_ISREG(st.st_mode))
info->type = UNIT_FILE_TYPE_REGULAR;
else if (S_ISLNK(st.st_mode))
return -ELOOP;
else if (S_ISDIR(st.st_mode))
return -EISDIR;
else
return -ENOTTY;
return 0;
}
fd = open(path, O_RDONLY|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW);
if (fd < 0)
return -errno;
} else {
/* Operating on a drop-in file. If we aren't supposed to load the unit file, drop-ins don't matter, so let's shortcut this. */
if (!(flags & SEARCH_LOAD))
return 0;
fd = chase_symlinks_and_open(path, root_dir, 0, O_RDONLY|O_CLOEXEC|O_NOCTTY, NULL);
if (fd < 0)
return fd;
}
if (fstat(fd, &st) < 0)
return -errno;
if (null_or_empty(&st)) {
if ((flags & SEARCH_DROPIN) == 0)
info->type = UNIT_FILE_TYPE_MASKED;
return 0;
}
r = stat_verify_regular(&st);
if (r < 0)
return r;
f = fdopen(fd, "r");
if (!f)
return -errno;
fd = -1;
/* c is only needed if we actually load the file (it's referenced from items[] btw, in case you wonder.) */
assert(c);
r = config_parse(info->name, path, f,
NULL,
config_item_table_lookup, items,
CONFIG_PARSE_RELAXED|CONFIG_PARSE_ALLOW_INCLUDE, info);
if (r < 0)
return log_debug_errno(r, "Failed to parse %s: %m", info->name);
if ((flags & SEARCH_DROPIN) == 0)
info->type = UNIT_FILE_TYPE_REGULAR;
return
(int) strv_length(info->aliases) +
(int) strv_length(info->wanted_by) +
(int) strv_length(info->required_by);
}
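/* Example of the return value (hypothetical unit file): an [Install] section
* reading
*
*     [Install]
*     WantedBy=multi-user.target
*     Alias=bar.service
*
* makes this return 2 (one alias plus one WantedBy entry), letting callers
* distinguish "has installation data" (> 0) from an empty [Install]
* section (0). */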
static int unit_file_load_or_readlink(
InstallContext *c,
UnitFileInstallInfo *info,
const char *path,
const char *root_dir,
SearchFlags flags) {
_cleanup_free_ char *target = NULL;
int r;
r = unit_file_load(c, info, path, root_dir, flags);
if (r != -ELOOP || (flags & SEARCH_DROPIN))
return r;
/* This is a symlink, let's read it. */
r = readlink_malloc(path, &target);
if (r < 0)
return r;
if (path_equal(target, "/dev/null"))
info->type = UNIT_FILE_TYPE_MASKED;
else {
const char *bn;
UnitType a, b;
bn = basename(target);
if (unit_name_is_valid(info->name, UNIT_NAME_PLAIN)) {
if (!unit_name_is_valid(bn, UNIT_NAME_PLAIN))
return -EINVAL;
} else if (unit_name_is_valid(info->name, UNIT_NAME_INSTANCE)) {
if (!unit_name_is_valid(bn, UNIT_NAME_INSTANCE|UNIT_NAME_TEMPLATE))
return -EINVAL;
} else if (unit_name_is_valid(info->name, UNIT_NAME_TEMPLATE)) {
if (!unit_name_is_valid(bn, UNIT_NAME_TEMPLATE))
return -EINVAL;
} else
return -EINVAL;
/* Enforce that the symlink destination does not
* change the unit file type. */
a = unit_name_to_type(info->name);
b = unit_name_to_type(bn);
if (a < 0 || b < 0 || a != b)
return -EINVAL;
if (path_is_absolute(target))
/* This is an absolute path, prefix the root so that we always deal with fully qualified paths */
info->symlink_target = prefix_root(root_dir, target);
else
/* This is a relative path, take it relative to the dir the symlink is located in. */
info->symlink_target = file_in_same_dir(path, target);
if (!info->symlink_target)
return -ENOMEM;
info->type = UNIT_FILE_TYPE_SYMLINK;
}
return 0;
}
static int unit_file_search(
InstallContext *c,
UnitFileInstallInfo *info,
const LookupPaths *paths,
SearchFlags flags) {
const char *dropin_dir_name = NULL, *dropin_template_dir_name = NULL;
_cleanup_strv_free_ char **dirs = NULL, **files = NULL;
_cleanup_free_ char *template = NULL;
bool found_unit = false;
int r, result;
char **p;
assert(info);
assert(paths);
/* Was this unit already loaded? */
if (info->type != _UNIT_FILE_TYPE_INVALID)
return 0;
if (info->path)
return unit_file_load_or_readlink(c, info, info->path, paths->root_dir, flags);
assert(info->name);
if (unit_name_is_valid(info->name, UNIT_NAME_INSTANCE)) {
r = unit_name_template(info->name, &template);
if (r < 0)
return r;
}
STRV_FOREACH(p, paths->search_path) {
_cleanup_free_ char *path = NULL;
path = strjoin(*p, "/", info->name);
if (!path)
return -ENOMEM;
r = unit_file_load_or_readlink(c, info, path, paths->root_dir, flags);
if (r >= 0) {
info->path = TAKE_PTR(path);
result = r;
found_unit = true;
break;
} else if (!IN_SET(r, -ENOENT, -ENOTDIR, -EACCES))
return r;
}
if (!found_unit && template) {
/* The unit file doesn't exist, but instance
* enablement was requested. We will check whether it is
* possible to load the template unit file. */
STRV_FOREACH(p, paths->search_path) {
_cleanup_free_ char *path = NULL;
path = strjoin(*p, "/", template);
if (!path)
return -ENOMEM;
r = unit_file_load_or_readlink(c, info, path, paths->root_dir, flags);
if (r >= 0) {
info->path = TAKE_PTR(path);
result = r;
found_unit = true;
break;
} else if (!IN_SET(r, -ENOENT, -ENOTDIR, -EACCES))
return r;
}
}
if (!found_unit)
return log_debug_errno(SYNTHETIC_ERRNO(ENOENT),
"Cannot find unit %s%s%s.",
info->name, template ? " or " : "", strempty(template));
if (info->type == UNIT_FILE_TYPE_MASKED)
return result;
/* Search for drop-in directories */
dropin_dir_name = strjoina(info->name, ".d");
STRV_FOREACH(p, paths->search_path) {
char *path;
path = path_join(*p, dropin_dir_name);
if (!path)
return -ENOMEM;
r = strv_consume(&dirs, path);
if (r < 0)
return r;
}
if (template) {
dropin_template_dir_name = strjoina(template, ".d");
STRV_FOREACH(p, paths->search_path) {
char *path;
path = path_join(*p, dropin_template_dir_name);
if (!path)
return -ENOMEM;
r = strv_consume(&dirs, path);
if (r < 0)
return r;
}
}
/* Load drop-in conf files */
r = conf_files_list_strv(&files, ".conf", NULL, 0, (const char**) dirs);
if (r < 0)
return log_debug_errno(r, "Failed to get list of conf files: %m");
STRV_FOREACH(p, files) {
r = unit_file_load_or_readlink(c, info, *p, paths->root_dir, flags | SEARCH_DROPIN);
if (r < 0)
return log_debug_errno(r, "Failed to load conf file %s: %m", *p);
}
return result;
}
static int install_info_follow(
InstallContext *c,
UnitFileInstallInfo *i,
const char *root_dir,
SearchFlags flags,
bool ignore_different_name) {
assert(c);
assert(i);
if (i->type != UNIT_FILE_TYPE_SYMLINK)
return -EINVAL;
if (!i->symlink_target)
return -EINVAL;
/* If the basename doesn't match, the caller should add a
* complete new entry for this. */
if (!ignore_different_name && !streq(basename(i->symlink_target), i->name))
return -EXDEV;
free_and_replace(i->path, i->symlink_target);
i->type = _UNIT_FILE_TYPE_INVALID;
return unit_file_load_or_readlink(c, i, i->path, root_dir, flags);
}
/**
* Search for the unit file. If the unit name is a symlink, follow the symlink to the
* target, maybe more than once. Propagate the instance name if present.
*/
static int install_info_traverse(
UnitFileScope scope,
InstallContext *c,
const LookupPaths *paths,
UnitFileInstallInfo *start,
SearchFlags flags,
UnitFileInstallInfo **ret) {
UnitFileInstallInfo *i;
unsigned k = 0;
int r;
assert(paths);
assert(start);
assert(c);
r = unit_file_search(c, start, paths, flags);
if (r < 0)
return r;
i = start;
while (i->type == UNIT_FILE_TYPE_SYMLINK) {
/* Follow the symlink */
if (++k > UNIT_FILE_FOLLOW_SYMLINK_MAX)
return -ELOOP;
if (!(flags & SEARCH_FOLLOW_CONFIG_SYMLINKS)) {
r = path_is_config(paths, i->path, true);
if (r < 0)
return r;
if (r > 0)
return -ELOOP;
}
r = install_info_follow(c, i, paths->root_dir, flags, false);
if (r == -EXDEV) {
_cleanup_free_ char *buffer = NULL;
const char *bn;
/* Target has a different name, create a new
* install info object for that, and continue
* with that. */
bn = basename(i->symlink_target);
if (unit_name_is_valid(i->name, UNIT_NAME_INSTANCE) &&
unit_name_is_valid(bn, UNIT_NAME_TEMPLATE)) {
_cleanup_free_ char *instance = NULL;
r = unit_name_to_instance(i->name, &instance);
if (r < 0)
return r;
r = unit_name_replace_instance(bn, instance, &buffer);
if (r < 0)
return r;
if (streq(buffer, i->name)) {
/* We filled in the instance, and the target stayed the same? If so, then let's
* honour the link as it is. */
r = install_info_follow(c, i, paths->root_dir, flags, true);
if (r < 0)
return r;
continue;
}
bn = buffer;
}
r = install_info_add(c, bn, NULL, false, &i);
if (r < 0)
return r;
/* Try again, with the new target we found. */
r = unit_file_search(c, i, paths, flags);
if (r == -ENOENT)
/* Translate error code to highlight this specific case */
return -ENOLINK;
}
if (r < 0)
return r;
}
if (ret)
*ret = i;
return 0;
}
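/* Example of the instance propagation above (using systemd's conventional
* autovt@ → getty@ aliasing): starting from "autovt@tty1.service", the search
* finds the template symlink autovt@.service pointing at getty@.service,
* install_info_follow() returns -EXDEV, and the -EXDEV branch computes
* unit_name_replace_instance("getty@.service", "tty1", ...) ==
* "getty@tty1.service" and restarts the search with that name, so the
* instance survives the alias hop. */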
/**
* Call install_info_add() with name_or_path as the path (if name_or_path starts with "/")
* or the name (otherwise). root_dir is prepended to the path.
*/
static int install_info_add_auto(
InstallContext *c,
const LookupPaths *paths,
const char *name_or_path,
UnitFileInstallInfo **ret) {
assert(c);
assert(name_or_path);
if (path_is_absolute(name_or_path)) {
const char *pp;
pp = prefix_roota(paths->root_dir, name_or_path);
return install_info_add(c, NULL, pp, false, ret);
} else
return install_info_add(c, name_or_path, NULL, false, ret);
}
static int install_info_discover(
UnitFileScope scope,
InstallContext *c,
const LookupPaths *paths,
const char *name,
SearchFlags flags,
UnitFileInstallInfo **ret,
UnitFileChange **changes,
size_t *n_changes) {
UnitFileInstallInfo *i;
int r;
assert(c);
assert(paths);
assert(name);
r = install_info_add_auto(c, paths, name, &i);
if (r >= 0)
r = install_info_traverse(scope, c, paths, i, flags, ret);
if (r < 0)
unit_file_changes_add(changes, n_changes, r, name, NULL);
return r;
}
static int install_info_discover_and_check(
UnitFileScope scope,
InstallContext *c,
const LookupPaths *paths,
const char *name,
SearchFlags flags,
UnitFileInstallInfo **ret,
UnitFileChange **changes,
size_t *n_changes) {
int r;
r = install_info_discover(scope, c, paths, name, flags, ret, changes, n_changes);
if (r < 0)
return r;
return install_info_may_process(ret ? *ret : NULL, paths, changes, n_changes);
}
static int install_info_symlink_alias(
UnitFileInstallInfo *i,
const LookupPaths *paths,
const char *config_path,
bool force,
UnitFileChange **changes,
size_t *n_changes) {
char **s;
int r = 0, q;
assert(i);
assert(paths);
assert(config_path);
STRV_FOREACH(s, i->aliases) {
_cleanup_free_ char *alias_path = NULL, *dst = NULL;
q = install_full_printf(i, *s, &dst);
if (q < 0)
return q;
alias_path = path_make_absolute(dst, config_path);
if (!alias_path)
return -ENOMEM;
q = create_symlink(paths, i->path, alias_path, force, changes, n_changes);
if (r == 0)
r = q;
}
return r;
}
static int install_info_symlink_wants(
UnitFileInstallInfo *i,
const LookupPaths *paths,
const char *config_path,
char **list,
const char *suffix,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_free_ char *buf = NULL;
const char *n;
char **s;
int r = 0, q;
assert(i);
assert(paths);
assert(config_path);
if (strv_isempty(list))
return 0;
if (unit_name_is_valid(i->name, UNIT_NAME_TEMPLATE)) {
UnitFileInstallInfo instance = {
.type = _UNIT_FILE_TYPE_INVALID,
};
_cleanup_free_ char *path = NULL;
/* If this is a template, and we have no instance, don't do anything */
if (!i->default_instance)
return 1;
r = unit_name_replace_instance(i->name, i->default_instance, &buf);
if (r < 0)
return r;
instance.name = buf;
r = unit_file_search(NULL, &instance, paths, SEARCH_FOLLOW_CONFIG_SYMLINKS);
if (r < 0)
return r;
path = TAKE_PTR(instance.path);
if (instance.type == UNIT_FILE_TYPE_MASKED) {
unit_file_changes_add(changes, n_changes, -ERFKILL, path, NULL);
return -ERFKILL;
}
n = buf;
} else
n = i->name;
STRV_FOREACH(s, list) {
_cleanup_free_ char *path = NULL, *dst = NULL;
q = install_full_printf(i, *s, &dst);
if (q < 0)
return q;
if (!unit_name_is_valid(dst, UNIT_NAME_ANY)) {
r = -EINVAL;
continue;
}
path = strjoin(config_path, "/", dst, suffix, n);
if (!path)
return -ENOMEM;
q = create_symlink(paths, i->path, path, true, changes, n_changes);
if (r == 0)
r = q;
}
return r;
}
static int install_info_symlink_link(
UnitFileInstallInfo *i,
const LookupPaths *paths,
const char *config_path,
bool force,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_free_ char *path = NULL;
int r;
assert(i);
assert(paths);
assert(config_path);
assert(i->path);
r = in_search_path(paths, i->path);
if (r < 0)
return r;
if (r > 0)
return 0;
path = strjoin(config_path, "/", i->name);
if (!path)
return -ENOMEM;
return create_symlink(paths, i->path, path, force, changes, n_changes);
}
static int install_info_apply(
UnitFileInstallInfo *i,
const LookupPaths *paths,
const char *config_path,
bool force,
UnitFileChange **changes,
size_t *n_changes) {
int r, q;
assert(i);
assert(paths);
assert(config_path);
if (i->type != UNIT_FILE_TYPE_REGULAR)
return 0;
r = install_info_symlink_alias(i, paths, config_path, force, changes, n_changes);
q = install_info_symlink_wants(i, paths, config_path, i->wanted_by, ".wants/", changes, n_changes);
if (r == 0)
r = q;
q = install_info_symlink_wants(i, paths, config_path, i->required_by, ".requires/", changes, n_changes);
if (r == 0)
r = q;
q = install_info_symlink_link(i, paths, config_path, force, changes, n_changes);
/* Do not count links to the unit file towards the "carries_install_info" count */
if (r == 0 && q < 0)
r = q;
return r;
}
static int install_context_apply(
UnitFileScope scope,
InstallContext *c,
const LookupPaths *paths,
const char *config_path,
bool force,
SearchFlags flags,
UnitFileChange **changes,
size_t *n_changes) {
UnitFileInstallInfo *i;
int r;
assert(c);
assert(paths);
assert(config_path);
if (ordered_hashmap_isempty(c->will_process))
return 0;
r = ordered_hashmap_ensure_allocated(&c->have_processed, &string_hash_ops);
if (r < 0)
return r;
r = 0;
while ((i = ordered_hashmap_first(c->will_process))) {
int q;
q = ordered_hashmap_move_one(c->have_processed, c->will_process, i->name);
if (q < 0)
return q;
q = install_info_traverse(scope, c, paths, i, flags, NULL);
if (q < 0) {
unit_file_changes_add(changes, n_changes, q, i->name, NULL);
return q;
}
/* We can attempt to process a masked unit when a different unit
* that we were processing specifies it in Also=. */
if (i->type == UNIT_FILE_TYPE_MASKED) {
unit_file_changes_add(changes, n_changes, UNIT_FILE_IS_MASKED, i->path, NULL);
if (r >= 0)
/* Assume that something *could* have been enabled here,
* avoid "empty [Install] section" warning. */
r += 1;
continue;
}
if (i->type != UNIT_FILE_TYPE_REGULAR)
continue;
q = install_info_apply(i, paths, config_path, force, changes, n_changes);
if (r >= 0) {
if (q < 0)
r = q;
else
r += q;
}
}
return r;
}
static int install_context_mark_for_removal(
UnitFileScope scope,
InstallContext *c,
const LookupPaths *paths,
Set **remove_symlinks_to,
const char *config_path,
UnitFileChange **changes,
size_t *n_changes) {
UnitFileInstallInfo *i;
int r;
assert(c);
assert(paths);
assert(config_path);
/* Marks all items for removal */
if (ordered_hashmap_isempty(c->will_process))
return 0;
r = ordered_hashmap_ensure_allocated(&c->have_processed, &string_hash_ops);
if (r < 0)
return r;
while ((i = ordered_hashmap_first(c->will_process))) {
r = ordered_hashmap_move_one(c->have_processed, c->will_process, i->name);
if (r < 0)
return r;
r = install_info_traverse(scope, c, paths, i, SEARCH_LOAD|SEARCH_FOLLOW_CONFIG_SYMLINKS, NULL);
if (r == -ENOLINK) {
log_debug_errno(r, "Name %s leads to a dangling symlink, removing name.", i->name);
unit_file_changes_add(changes, n_changes, UNIT_FILE_IS_DANGLING, i->path ?: i->name, NULL);
} else if (r == -ENOENT) {
if (i->auxiliary) /* some unit specified in Also= or similar is missing */
log_debug_errno(r, "Auxiliary unit of %s not found, removing name.", i->name);
else {
log_debug_errno(r, "Unit %s not found, removing name.", i->name);
unit_file_changes_add(changes, n_changes, r, i->path ?: i->name, NULL);
}
} else if (r < 0) {
log_debug_errno(r, "Failed to find unit %s, removing name: %m", i->name);
unit_file_changes_add(changes, n_changes, r, i->path ?: i->name, NULL);
} else if (i->type == UNIT_FILE_TYPE_MASKED) {
log_debug("Unit file %s is masked, ignoring.", i->name);
unit_file_changes_add(changes, n_changes, UNIT_FILE_IS_MASKED, i->path ?: i->name, NULL);
continue;
} else if (i->type != UNIT_FILE_TYPE_REGULAR) {
log_debug("Unit %s has type %s, ignoring.", i->name, unit_file_type_to_string(i->type) ?: "invalid");
continue;
}
r = mark_symlink_for_removal(remove_symlinks_to, i->name);
if (r < 0)
return r;
}
return 0;
}
int unit_file_mask(
UnitFileScope scope,
UnitFileFlags flags,
const char *root_dir,
char **files,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_(lookup_paths_free) LookupPaths paths = {};
const char *config_path;
char **i;
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
config_path = (flags & UNIT_FILE_RUNTIME) ? paths.runtime_config : paths.persistent_config;
if (!config_path)
return -ENXIO;
STRV_FOREACH(i, files) {
_cleanup_free_ char *path = NULL;
int q;
if (!unit_name_is_valid(*i, UNIT_NAME_ANY)) {
if (r == 0)
r = -EINVAL;
continue;
}
path = path_make_absolute(*i, config_path);
if (!path)
return -ENOMEM;
q = create_symlink(&paths, "/dev/null", path, !!(flags & UNIT_FILE_FORCE), changes, n_changes);
if (q < 0 && r >= 0)
r = q;
}
return r;
}
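/* Usage sketch (unit name hypothetical): masking is just symlinking the name
* to /dev/null in the configuration directory, so
*
*     r = unit_file_mask(UNIT_FILE_SYSTEM, 0, NULL,
*                        STRV_MAKE("foo.service"), &changes, &n_changes);
*
* ends up creating /etc/systemd/system/foo.service → /dev/null (or the /run
* equivalent when UNIT_FILE_RUNTIME is set in flags). */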
int unit_file_unmask(
UnitFileScope scope,
UnitFileFlags flags,
const char *root_dir,
char **files,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_(lookup_paths_free) LookupPaths paths = {};
_cleanup_set_free_free_ Set *remove_symlinks_to = NULL;
_cleanup_strv_free_ char **todo = NULL;
size_t n_todo = 0, n_allocated = 0;
const char *config_path;
char **i;
bool dry_run;
int r, q;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
config_path = (flags & UNIT_FILE_RUNTIME) ? paths.runtime_config : paths.persistent_config;
if (!config_path)
return -ENXIO;
dry_run = !!(flags & UNIT_FILE_DRY_RUN);
STRV_FOREACH(i, files) {
_cleanup_free_ char *path = NULL;
if (!unit_name_is_valid(*i, UNIT_NAME_ANY))
return -EINVAL;
path = path_make_absolute(*i, config_path);
if (!path)
return -ENOMEM;
r = null_or_empty_path(path);
if (r == -ENOENT)
continue;
if (r < 0)
return r;
if (r == 0)
continue;
if (!GREEDY_REALLOC0(todo, n_allocated, n_todo + 2))
return -ENOMEM;
todo[n_todo] = strdup(*i);
if (!todo[n_todo])
return -ENOMEM;
n_todo++;
}
strv_uniq(todo);
r = 0;
STRV_FOREACH(i, todo) {
_cleanup_free_ char *path = NULL;
const char *rp;
path = path_make_absolute(*i, config_path);
if (!path)
return -ENOMEM;
if (!dry_run && unlink(path) < 0) {
if (errno != ENOENT) {
if (r >= 0)
r = -errno;
unit_file_changes_add(changes, n_changes, -errno, path, NULL);
}
continue;
}
unit_file_changes_add(changes, n_changes, UNIT_FILE_UNLINK, path, NULL);
rp = skip_root(&paths, path);
q = mark_symlink_for_removal(&remove_symlinks_to, rp ?: path);
if (q < 0)
return q;
}
q = remove_marked_symlinks(remove_symlinks_to, config_path, &paths, dry_run, changes, n_changes);
if (r >= 0)
r = q;
return r;
}
int unit_file_link(
UnitFileScope scope,
UnitFileFlags flags,
const char *root_dir,
char **files,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_(lookup_paths_free) LookupPaths paths = {};
_cleanup_strv_free_ char **todo = NULL;
size_t n_todo = 0, n_allocated = 0;
const char *config_path;
char **i;
int r, q;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
config_path = (flags & UNIT_FILE_RUNTIME) ? paths.runtime_config : paths.persistent_config;
if (!config_path)
return -ENXIO;
STRV_FOREACH(i, files) {
_cleanup_free_ char *full = NULL;
struct stat st;
char *fn;
if (!path_is_absolute(*i))
return -EINVAL;
fn = basename(*i);
if (!unit_name_is_valid(fn, UNIT_NAME_ANY))
return -EINVAL;
full = prefix_root(paths.root_dir, *i);
if (!full)
return -ENOMEM;
if (lstat(full, &st) < 0)
return -errno;
r = stat_verify_regular(&st);
if (r < 0)
return r;
q = in_search_path(&paths, *i);
if (q < 0)
return q;
if (q > 0)
continue;
if (!GREEDY_REALLOC0(todo, n_allocated, n_todo + 2))
return -ENOMEM;
todo[n_todo] = strdup(*i);
if (!todo[n_todo])
return -ENOMEM;
n_todo++;
}
strv_uniq(todo);
r = 0;
STRV_FOREACH(i, todo) {
_cleanup_free_ char *new_path = NULL;
new_path = path_make_absolute(basename(*i), config_path);
if (!new_path)
return -ENOMEM;
q = create_symlink(&paths, *i, new_path, !!(flags & UNIT_FILE_FORCE), changes, n_changes);
if (q < 0 && r >= 0)
r = q;
}
return r;
}
static int path_shall_revert(const LookupPaths *paths, const char *path) {
int r;
assert(paths);
assert(path);
/* Checks whether the path is one where the drop-in directories shall be removed. */
r = path_is_config(paths, path, true);
if (r != 0)
return r;
r = path_is_control(paths, path);
if (r != 0)
return r;
return path_is_transient(paths, path);
}
int unit_file_revert(
UnitFileScope scope,
const char *root_dir,
char **files,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_set_free_free_ Set *remove_symlinks_to = NULL;
_cleanup_(lookup_paths_free) LookupPaths paths = {};
_cleanup_strv_free_ char **todo = NULL;
size_t n_todo = 0, n_allocated = 0;
char **i;
int r, q;
/* Puts a unit file back into vendor state. This means:
*
* a) we remove all drop-in snippets added by the user ("config"), added to transient units ("transient"), and
* added via "systemctl set-property" ("control"), but not if the drop-in is generated ("generated").
*
* b) if there's a vendor unit file (i.e. one in /usr) we remove any configured overriding unit files (i.e. in
* "config", but not in "transient" or "control" or even "generated").
*
* We remove all that in both the runtime and the persistent directories, if that applies.
*/
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
STRV_FOREACH(i, files) {
bool has_vendor = false;
char **p;
if (!unit_name_is_valid(*i, UNIT_NAME_ANY))
return -EINVAL;
STRV_FOREACH(p, paths.search_path) {
_cleanup_free_ char *path = NULL, *dropin = NULL;
struct stat st;
path = path_make_absolute(*i, *p);
if (!path)
return -ENOMEM;
r = lstat(path, &st);
if (r < 0) {
if (errno != ENOENT)
return -errno;
} else if (S_ISREG(st.st_mode)) {
/* Check if there's a vendor version */
r = path_is_vendor(&paths, path);
if (r < 0)
return r;
if (r > 0)
has_vendor = true;
}
dropin = strappend(path, ".d");
if (!dropin)
return -ENOMEM;
r = lstat(dropin, &st);
if (r < 0) {
if (errno != ENOENT)
return -errno;
} else if (S_ISDIR(st.st_mode)) {
/* Remove the drop-ins */
r = path_shall_revert(&paths, dropin);
if (r < 0)
return r;
if (r > 0) {
if (!GREEDY_REALLOC0(todo, n_allocated, n_todo + 2))
return -ENOMEM;
todo[n_todo++] = TAKE_PTR(dropin);
}
}
}
if (!has_vendor)
continue;
/* OK, there's a vendor version, hence drop all configuration versions */
STRV_FOREACH(p, paths.search_path) {
_cleanup_free_ char *path = NULL;
struct stat st;
path = path_make_absolute(*i, *p);
if (!path)
return -ENOMEM;
r = lstat(path, &st);
if (r < 0) {
if (errno != ENOENT)
return -errno;
} else if (S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)) {
r = path_is_config(&paths, path, true);
if (r < 0)
return r;
if (r > 0) {
if (!GREEDY_REALLOC0(todo, n_allocated, n_todo + 2))
return -ENOMEM;
todo[n_todo++] = TAKE_PTR(path);
}
}
}
}
strv_uniq(todo);
r = 0;
STRV_FOREACH(i, todo) {
_cleanup_strv_free_ char **fs = NULL;
const char *rp;
char **j;
(void) get_files_in_directory(*i, &fs);
q = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL);
if (q < 0 && q != -ENOENT && r >= 0) {
r = q;
continue;
}
STRV_FOREACH(j, fs) {
_cleanup_free_ char *t = NULL;
t = strjoin(*i, "/", *j);
if (!t)
return -ENOMEM;
unit_file_changes_add(changes, n_changes, UNIT_FILE_UNLINK, t, NULL);
}
unit_file_changes_add(changes, n_changes, UNIT_FILE_UNLINK, *i, NULL);
rp = skip_root(&paths, *i);
q = mark_symlink_for_removal(&remove_symlinks_to, rp ?: *i);
if (q < 0)
return q;
}
q = remove_marked_symlinks(remove_symlinks_to, paths.runtime_config, &paths, false, changes, n_changes);
if (r >= 0)
r = q;
q = remove_marked_symlinks(remove_symlinks_to, paths.persistent_config, &paths, false, changes, n_changes);
if (r >= 0)
r = q;
return r;
}
int unit_file_add_dependency(
UnitFileScope scope,
UnitFileFlags flags,
const char *root_dir,
char **files,
const char *target,
UnitDependency dep,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_(lookup_paths_free) LookupPaths paths = {};
_cleanup_(install_context_done) InstallContext c = {};
UnitFileInstallInfo *i, *target_info;
const char *config_path;
char **f;
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
assert(target);
if (!IN_SET(dep, UNIT_WANTS, UNIT_REQUIRES))
return -EINVAL;
if (!unit_name_is_valid(target, UNIT_NAME_ANY))
return -EINVAL;
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
config_path = (flags & UNIT_FILE_RUNTIME) ? paths.runtime_config : paths.persistent_config;
if (!config_path)
return -ENXIO;
r = install_info_discover_and_check(scope, &c, &paths, target, SEARCH_FOLLOW_CONFIG_SYMLINKS,
&target_info, changes, n_changes);
if (r < 0)
return r;
assert(target_info->type == UNIT_FILE_TYPE_REGULAR);
STRV_FOREACH(f, files) {
char ***l;
r = install_info_discover_and_check(scope, &c, &paths, *f, SEARCH_FOLLOW_CONFIG_SYMLINKS,
&i, changes, n_changes);
if (r < 0)
return r;
assert(i->type == UNIT_FILE_TYPE_REGULAR);
/* We didn't actually load anything from the unit
* file, but instead just recorded the new symlink to
* create. */
if (dep == UNIT_WANTS)
l = &i->wanted_by;
else
l = &i->required_by;
strv_free(*l);
*l = strv_new(target_info->name);
if (!*l)
return -ENOMEM;
}
return install_context_apply(scope, &c, &paths, config_path, !!(flags & UNIT_FILE_FORCE), SEARCH_FOLLOW_CONFIG_SYMLINKS, changes, n_changes);
}
int unit_file_enable(
UnitFileScope scope,
UnitFileFlags flags,
const char *root_dir,
char **files,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_(lookup_paths_free) LookupPaths paths = {};
_cleanup_(install_context_done) InstallContext c = {};
const char *config_path;
UnitFileInstallInfo *i;
char **f;
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
config_path = (flags & UNIT_FILE_RUNTIME) ? paths.runtime_config : paths.persistent_config;
if (!config_path)
return -ENXIO;
STRV_FOREACH(f, files) {
r = install_info_discover_and_check(scope, &c, &paths, *f, SEARCH_LOAD|SEARCH_FOLLOW_CONFIG_SYMLINKS,
&i, changes, n_changes);
if (r < 0)
return r;
assert(i->type == UNIT_FILE_TYPE_REGULAR);
}
/* This will return the number of symlink rules that were
supposed to be created, not the ones actually created. This
is useful to determine whether the passed files had any
installation data at all. */
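/* Illustrative note (based on the comment above, not on code in this file): a caller such
as "systemctl enable" can treat a 0 return as "the unit has no [Install] section", i.e.
enabling it is a no-op. */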
return install_context_apply(scope, &c, &paths, config_path, !!(flags & UNIT_FILE_FORCE), SEARCH_LOAD, changes, n_changes);
}
int unit_file_disable(
UnitFileScope scope,
UnitFileFlags flags,
const char *root_dir,
char **files,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_(lookup_paths_free) LookupPaths paths = {};
_cleanup_(install_context_done) InstallContext c = {};
_cleanup_set_free_free_ Set *remove_symlinks_to = NULL;
const char *config_path;
char **i;
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
config_path = (flags & UNIT_FILE_RUNTIME) ? paths.runtime_config : paths.persistent_config;
if (!config_path)
return -ENXIO;
STRV_FOREACH(i, files) {
if (!unit_name_is_valid(*i, UNIT_NAME_ANY))
return -EINVAL;
r = install_info_add(&c, *i, NULL, false, NULL);
if (r < 0)
return r;
}
r = install_context_mark_for_removal(scope, &c, &paths, &remove_symlinks_to, config_path, changes, n_changes);
if (r < 0)
return r;
return remove_marked_symlinks(remove_symlinks_to, config_path, &paths, !!(flags & UNIT_FILE_DRY_RUN), changes, n_changes);
}
int unit_file_reenable(
UnitFileScope scope,
UnitFileFlags flags,
const char *root_dir,
char **files,
UnitFileChange **changes,
size_t *n_changes) {
char **n;
int r;
size_t l, i;
/* First, we invoke the disable command with only the basename... */
l = strv_length(files);
n = newa(char*, l+1);
for (i = 0; i < l; i++)
n[i] = basename(files[i]);
n[i] = NULL;
r = unit_file_disable(scope, flags, root_dir, n, changes, n_changes);
if (r < 0)
return r;
/* But the enable command with the full name */
return unit_file_enable(scope, flags, root_dir, files, changes, n_changes);
}
int unit_file_set_default(
UnitFileScope scope,
UnitFileFlags flags,
const char *root_dir,
const char *name,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_(lookup_paths_free) LookupPaths paths = {};
_cleanup_(install_context_done) InstallContext c = {};
UnitFileInstallInfo *i;
const char *new_path;
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
assert(name);
if (unit_name_to_type(name) != UNIT_TARGET) /* this also validates the name */
return -EINVAL;
if (streq(name, SPECIAL_DEFAULT_TARGET))
return -EINVAL;
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
r = install_info_discover_and_check(scope, &c, &paths, name, 0, &i, changes, n_changes);
if (r < 0)
return r;
new_path = strjoina(paths.persistent_config, "/" SPECIAL_DEFAULT_TARGET);
return create_symlink(&paths, i->path, new_path, !!(flags & UNIT_FILE_FORCE), changes, n_changes);
}
int unit_file_get_default(
UnitFileScope scope,
const char *root_dir,
char **name) {
_cleanup_(lookup_paths_free) LookupPaths paths = {};
_cleanup_(install_context_done) InstallContext c = {};
UnitFileInstallInfo *i;
char *n;
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
assert(name);
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
r = install_info_discover(scope, &c, &paths, SPECIAL_DEFAULT_TARGET, SEARCH_FOLLOW_CONFIG_SYMLINKS,
&i, NULL, NULL);
if (r < 0)
return r;
r = install_info_may_process(i, &paths, NULL, 0);
if (r < 0)
return r;
n = strdup(i->name);
if (!n)
return -ENOMEM;
*name = n;
return 0;
}
int unit_file_lookup_state(
UnitFileScope scope,
const LookupPaths *paths,
const char *name,
UnitFileState *ret) {
_cleanup_(install_context_done) InstallContext c = {};
UnitFileInstallInfo *i;
UnitFileState state;
int r;
assert(paths);
assert(name);
if (!unit_name_is_valid(name, UNIT_NAME_ANY))
return -EINVAL;
r = install_info_discover(scope, &c, paths, name, SEARCH_LOAD|SEARCH_FOLLOW_CONFIG_SYMLINKS,
&i, NULL, NULL);
if (r < 0)
return log_debug_errno(r, "Failed to discover unit %s: %m", name);
assert(IN_SET(i->type, UNIT_FILE_TYPE_REGULAR, UNIT_FILE_TYPE_MASKED));
log_debug("Found unit %s at %s (%s)", name, strna(i->path),
i->type == UNIT_FILE_TYPE_REGULAR ? "regular file" : "mask");
/* Shortcut things, if the caller just wants to know if this unit exists. */
if (!ret)
return 0;
switch (i->type) {
case UNIT_FILE_TYPE_MASKED:
r = path_is_runtime(paths, i->path, true);
if (r < 0)
return r;
state = r > 0 ? UNIT_FILE_MASKED_RUNTIME : UNIT_FILE_MASKED;
break;
case UNIT_FILE_TYPE_REGULAR:
r = path_is_generator(paths, i->path);
if (r < 0)
return r;
if (r > 0) {
state = UNIT_FILE_GENERATED;
break;
}
r = path_is_transient(paths, i->path);
if (r < 0)
return r;
if (r > 0) {
state = UNIT_FILE_TRANSIENT;
break;
}
/* Check if any of the Alias= symlinks have been created.
* We ignore other aliases, and only check those that would
* be created by systemctl enable for this unit. */
r = find_symlinks_in_scope(scope, paths, i, true, &state);
if (r < 0)
return r;
if (r > 0)
break;
/* Check if the file is known under other names. If it is,
* it might be in use. Report that as UNIT_FILE_INDIRECT. */
r = find_symlinks_in_scope(scope, paths, i, false, &state);
if (r < 0)
return r;
if (r > 0)
state = UNIT_FILE_INDIRECT;
else {
if (unit_file_install_info_has_rules(i))
state = UNIT_FILE_DISABLED;
else if (unit_file_install_info_has_also(i))
state = UNIT_FILE_INDIRECT;
else
state = UNIT_FILE_STATIC;
}
break;
default:
assert_not_reached("Unexpect unit file type.");
}
*ret = state;
return 0;
}
int unit_file_get_state(
UnitFileScope scope,
const char *root_dir,
const char *name,
UnitFileState *ret) {
_cleanup_(lookup_paths_free) LookupPaths paths = {};
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
assert(name);
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
return unit_file_lookup_state(scope, &paths, name, ret);
}
int unit_file_exists(UnitFileScope scope, const LookupPaths *paths, const char *name) {
_cleanup_(install_context_done) InstallContext c = {};
int r;
assert(paths);
assert(name);
if (!unit_name_is_valid(name, UNIT_NAME_ANY))
return -EINVAL;
r = install_info_discover(scope, &c, paths, name, 0, NULL, NULL, NULL);
if (r == -ENOENT)
return 0;
if (r < 0)
return r;
return 1;
}
static int split_pattern_into_name_and_instances(const char *pattern, char **out_unit_name, char ***out_instances) {
_cleanup_strv_free_ char **instances = NULL;
_cleanup_free_ char *unit_name = NULL;
int r;
assert(pattern);
assert(out_instances);
assert(out_unit_name);
r = extract_first_word(&pattern, &unit_name, NULL, 0);
if (r < 0)
return r;
/* We handle the instances logic once the unit name has been extracted */
if (pattern) {
/* We only create instances when a rule for a templated unit
* is seen. A rule like "enable [email protected] a b c" will
* result in an array of (a, b, c) as instance names */
if (!unit_name_is_valid(unit_name, UNIT_NAME_TEMPLATE))
return -EINVAL;
instances = strv_split(pattern, WHITESPACE);
if (!instances)
return -ENOMEM;
*out_instances = TAKE_PTR(instances);
}
*out_unit_name = TAKE_PTR(unit_name);
return 0;
}
static int read_presets(UnitFileScope scope, const char *root_dir, Presets *presets) {
_cleanup_(presets_freep) Presets ps = {};
size_t n_allocated = 0;
_cleanup_strv_free_ char **files = NULL;
char **p;
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
assert(presets);
switch (scope) {
case UNIT_FILE_SYSTEM:
r = conf_files_list(&files, ".preset", root_dir, 0,
"/etc/systemd/system-preset",
"/run/systemd/system-preset",
"/usr/local/lib/systemd/system-preset",
"/usr/lib/systemd/system-preset",
#if HAVE_SPLIT_USR
"/lib/systemd/system-preset",
#endif
NULL);
break;
case UNIT_FILE_GLOBAL:
case UNIT_FILE_USER:
r = conf_files_list(&files, ".preset", root_dir, 0,
"/etc/systemd/user-preset",
"/run/systemd/user-preset",
"/usr/local/lib/systemd/user-preset",
"/usr/lib/systemd/user-preset",
NULL);
break;
default:
assert_not_reached("Invalid unit file scope");
}
if (r < 0)
return r;
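/* A minimal sketch of the .preset syntax the loop below parses (hypothetical file
contents, not shipped with this code):

enable sshd.service
enable getty@.service tty1 tty2
disable *

"enable" may be followed by a template unit plus instance names; "disable" takes an
fnmatch()-style pattern. */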
STRV_FOREACH(p, files) {
_cleanup_fclose_ FILE *f;
int n = 0;
f = fopen(*p, "re");
if (!f) {
if (errno == ENOENT)
continue;
return -errno;
}
for (;;) {
_cleanup_free_ char *line = NULL;
PresetRule rule = {};
const char *parameter;
char *l;
r = read_line(f, LONG_LINE_MAX, &line);
if (r < 0)
return r;
if (r == 0)
break;
l = strstrip(line);
n++;
if (isempty(l))
continue;
if (strchr(COMMENTS, *l))
continue;
parameter = first_word(l, "enable");
if (parameter) {
char *unit_name;
char **instances = NULL;
/* unit_name will remain the same as the parameter when no instances are specified */
r = split_pattern_into_name_and_instances(parameter, &unit_name, &instances);
if (r < 0) {
log_syntax(NULL, LOG_WARNING, *p, n, r, "Couldn't parse line '%s'. Ignoring.", line);
continue;
}
rule = (PresetRule) {
.pattern = unit_name,
.action = PRESET_ENABLE,
.instances = instances,
};
}
parameter = first_word(l, "disable");
if (parameter) {
char *pattern;
pattern = strdup(parameter);
if (!pattern)
return -ENOMEM;
rule = (PresetRule) {
.pattern = pattern,
.action = PRESET_DISABLE,
};
}
if (rule.action) {
if (!GREEDY_REALLOC(ps.rules, n_allocated, ps.n_rules + 1))
return -ENOMEM;
ps.rules[ps.n_rules++] = rule;
continue;
}
log_syntax(NULL, LOG_WARNING, *p, n, 0, "Couldn't parse line '%s'. Ignoring.", line);
}
}
*presets = ps;
ps = (Presets){};
return 0;
}
static int pattern_match_multiple_instances(
const PresetRule rule,
const char *unit_name,
char ***ret) {
_cleanup_free_ char *templated_name = NULL;
int r;
/* If no ret is needed or the rule itself does not have instances
* initialized, we return no match */
if (!ret || !rule.instances)
return 0;
r = unit_name_template(unit_name, &templated_name);
if (r < 0)
return r;
if (!streq(rule.pattern, templated_name))
return 0;
/* Compose a list of specified instances when unit name is a template */
if (unit_name_is_valid(unit_name, UNIT_NAME_TEMPLATE)) {
_cleanup_free_ char *prefix = NULL;
_cleanup_strv_free_ char **out_strv = NULL;
char **iter;
r = unit_name_to_prefix(unit_name, &prefix);
if (r < 0)
return r;
STRV_FOREACH(iter, rule.instances) {
_cleanup_free_ char *name = NULL;
r = unit_name_build(prefix, *iter, ".service", &name);
if (r < 0)
return r;
r = strv_extend(&out_strv, name);
if (r < 0)
return r;
}
*ret = TAKE_PTR(out_strv);
return 1;
} else {
/* We now know the input unit name is an instance name */
_cleanup_free_ char *instance_name = NULL;
r = unit_name_to_instance(unit_name, &instance_name);
if (r < 0)
return r;
if (strv_find(rule.instances, instance_name))
return 1;
}
return 0;
}
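/* Illustrative example (not from this file): given the rule "enable getty@.service tty1 tty2",
calling this with unit_name "getty@.service" returns 1 and fills *ret with
("[email protected]", "[email protected]"); calling it with "[email protected]" returns 1 without
composing a list, since the instance "tty1" appears in the rule. */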
static int query_presets(const char *name, const Presets presets, char ***instance_name_list) {
PresetAction action = PRESET_UNKNOWN;
size_t i;
char **s;
if (!unit_name_is_valid(name, UNIT_NAME_ANY))
return -EINVAL;
for (i = 0; i < presets.n_rules; i++)
if (pattern_match_multiple_instances(presets.rules[i], name, instance_name_list) > 0 ||
fnmatch(presets.rules[i].pattern, name, FNM_NOESCAPE) == 0) {
action = presets.rules[i].action;
break;
}
switch (action) {
case PRESET_UNKNOWN:
log_debug("Preset files don't specify rule for %s. Enabling.", name);
return 1;
case PRESET_ENABLE:
if (instance_name_list && *instance_name_list)
STRV_FOREACH(s, *instance_name_list)
log_debug("Preset files say enable %s.", *s);
else
log_debug("Preset files say enable %s.", name);
return 1;
case PRESET_DISABLE:
log_debug("Preset files say disable %s.", name);
return 0;
default:
assert_not_reached("invalid preset action");
}
}
int unit_file_query_preset(UnitFileScope scope, const char *root_dir, const char *name) {
_cleanup_(presets_freep) Presets presets = {};
int r;
r = read_presets(scope, root_dir, &presets);
if (r < 0)
return r;
return query_presets(name, presets, NULL);
}
static int execute_preset(
UnitFileScope scope,
InstallContext *plus,
InstallContext *minus,
const LookupPaths *paths,
const char *config_path,
char **files,
UnitFilePresetMode mode,
bool force,
UnitFileChange **changes,
size_t *n_changes) {
int r;
assert(plus);
assert(minus);
assert(paths);
assert(config_path);
if (mode != UNIT_FILE_PRESET_ENABLE_ONLY) {
_cleanup_set_free_free_ Set *remove_symlinks_to = NULL;
r = install_context_mark_for_removal(scope, minus, paths, &remove_symlinks_to, config_path, changes, n_changes);
if (r < 0)
return r;
r = remove_marked_symlinks(remove_symlinks_to, config_path, paths, false, changes, n_changes);
} else
r = 0;
if (mode != UNIT_FILE_PRESET_DISABLE_ONLY) {
int q;
/* Returns the number of symlinks that were supposed to be installed. */
q = install_context_apply(scope, plus, paths, config_path, force, SEARCH_LOAD, changes, n_changes);
if (r >= 0) {
if (q < 0)
r = q;
else
r += q;
}
}
return r;
}
static int preset_prepare_one(
UnitFileScope scope,
InstallContext *plus,
InstallContext *minus,
LookupPaths *paths,
const char *name,
Presets presets,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_(install_context_done) InstallContext tmp = {};
_cleanup_strv_free_ char **instance_name_list = NULL;
UnitFileInstallInfo *i;
int r;
if (install_info_find(plus, name) || install_info_find(minus, name))
return 0;
r = install_info_discover(scope, &tmp, paths, name, SEARCH_FOLLOW_CONFIG_SYMLINKS,
&i, changes, n_changes);
if (r < 0)
return r;
if (!streq(name, i->name)) {
log_debug("Skipping %s because it is an alias for %s.", name, i->name);
return 0;
}
r = query_presets(name, presets, &instance_name_list);
if (r < 0)
return r;
if (r > 0) {
if (instance_name_list) {
char **s;
STRV_FOREACH(s, instance_name_list) {
r = install_info_discover_and_check(scope, plus, paths, *s, SEARCH_LOAD|SEARCH_FOLLOW_CONFIG_SYMLINKS,
&i, changes, n_changes);
if (r < 0)
return r;
}
} else {
r = install_info_discover_and_check(scope, plus, paths, name, SEARCH_LOAD|SEARCH_FOLLOW_CONFIG_SYMLINKS,
&i, changes, n_changes);
if (r < 0)
return r;
}
} else
r = install_info_discover(scope, minus, paths, name, SEARCH_FOLLOW_CONFIG_SYMLINKS,
&i, changes, n_changes);
return r;
}
int unit_file_preset(
UnitFileScope scope,
UnitFileFlags flags,
const char *root_dir,
char **files,
UnitFilePresetMode mode,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_(install_context_done) InstallContext plus = {}, minus = {};
_cleanup_(lookup_paths_free) LookupPaths paths = {};
_cleanup_(presets_freep) Presets presets = {};
const char *config_path;
char **i;
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
assert(mode < _UNIT_FILE_PRESET_MAX);
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
config_path = (flags & UNIT_FILE_RUNTIME) ? paths.runtime_config : paths.persistent_config;
if (!config_path)
return -ENXIO;
r = read_presets(scope, root_dir, &presets);
if (r < 0)
return r;
STRV_FOREACH(i, files) {
r = preset_prepare_one(scope, &plus, &minus, &paths, *i, presets, changes, n_changes);
if (r < 0)
return r;
}
return execute_preset(scope, &plus, &minus, &paths, config_path, files, mode, !!(flags & UNIT_FILE_FORCE), changes, n_changes);
}
int unit_file_preset_all(
UnitFileScope scope,
UnitFileFlags flags,
const char *root_dir,
UnitFilePresetMode mode,
UnitFileChange **changes,
size_t *n_changes) {
_cleanup_(install_context_done) InstallContext plus = {}, minus = {};
_cleanup_(lookup_paths_free) LookupPaths paths = {};
_cleanup_(presets_freep) Presets presets = {};
const char *config_path = NULL;
char **i;
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
assert(mode < _UNIT_FILE_PRESET_MAX);
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
config_path = (flags & UNIT_FILE_RUNTIME) ? paths.runtime_config : paths.persistent_config;
if (!config_path)
return -ENXIO;
r = read_presets(scope, root_dir, &presets);
if (r < 0)
return r;
STRV_FOREACH(i, paths.search_path) {
_cleanup_closedir_ DIR *d = NULL;
struct dirent *de;
d = opendir(*i);
if (!d) {
if (errno == ENOENT)
continue;
return -errno;
}
FOREACH_DIRENT(de, d, return -errno) {
if (!unit_name_is_valid(de->d_name, UNIT_NAME_ANY))
continue;
dirent_ensure_type(d, de);
if (!IN_SET(de->d_type, DT_LNK, DT_REG))
continue;
/* we don't pass changes[] in, because we want to handle errors on our own */
r = preset_prepare_one(scope, &plus, &minus, &paths, de->d_name, presets, NULL, NULL);
if (r == -ERFKILL)
r = unit_file_changes_add(changes, n_changes,
UNIT_FILE_IS_MASKED, de->d_name, NULL);
else if (r == -ENOLINK)
r = unit_file_changes_add(changes, n_changes,
UNIT_FILE_IS_DANGLING, de->d_name, NULL);
else if (r == -EADDRNOTAVAIL) /* Ignore generated/transient units when applying preset */
continue;
if (r < 0)
return r;
}
}
return execute_preset(scope, &plus, &minus, &paths, config_path, NULL, mode, !!(flags & UNIT_FILE_FORCE), changes, n_changes);
}
static void unit_file_list_free_one(UnitFileList *f) {
if (!f)
return;
free(f->path);
free(f);
}
Hashmap* unit_file_list_free(Hashmap *h) {
return hashmap_free_with_destructor(h, unit_file_list_free_one);
}
DEFINE_TRIVIAL_CLEANUP_FUNC(UnitFileList*, unit_file_list_free_one);
int unit_file_get_list(
UnitFileScope scope,
const char *root_dir,
Hashmap *h,
char **states,
char **patterns) {
_cleanup_(lookup_paths_free) LookupPaths paths = {};
char **i;
int r;
assert(scope >= 0);
assert(scope < _UNIT_FILE_SCOPE_MAX);
assert(h);
r = lookup_paths_init(&paths, scope, 0, root_dir);
if (r < 0)
return r;
STRV_FOREACH(i, paths.search_path) {
_cleanup_closedir_ DIR *d = NULL;
struct dirent *de;
d = opendir(*i);
if (!d) {
if (errno == ENOENT)
continue;
if (IN_SET(errno, ENOTDIR, EACCES)) {
log_debug_errno(errno, "Failed to open \"%s\": %m", *i);
continue;
}
return -errno;
}
FOREACH_DIRENT(de, d, return -errno) {
_cleanup_(unit_file_list_free_onep) UnitFileList *f = NULL;
if (!unit_name_is_valid(de->d_name, UNIT_NAME_ANY))
continue;
if (!strv_fnmatch_or_empty(patterns, de->d_name, FNM_NOESCAPE))
continue;
if (hashmap_get(h, de->d_name))
continue;
dirent_ensure_type(d, de);
if (!IN_SET(de->d_type, DT_LNK, DT_REG))
continue;
f = new0(UnitFileList, 1);
if (!f)
return -ENOMEM;
f->path = path_make_absolute(de->d_name, *i);
if (!f->path)
return -ENOMEM;
r = unit_file_lookup_state(scope, &paths, de->d_name, &f->state);
if (r < 0)
f->state = UNIT_FILE_BAD;
if (!strv_isempty(states) &&
!strv_contains(states, unit_file_state_to_string(f->state)))
continue;
r = hashmap_put(h, basename(f->path), f);
if (r < 0)
return r;
f = NULL; /* prevent cleanup */
}
}
return 0;
}
static const char* const unit_file_state_table[_UNIT_FILE_STATE_MAX] = {
[UNIT_FILE_ENABLED] = "enabled",
[UNIT_FILE_ENABLED_RUNTIME] = "enabled-runtime",
[UNIT_FILE_LINKED] = "linked",
[UNIT_FILE_LINKED_RUNTIME] = "linked-runtime",
[UNIT_FILE_MASKED] = "masked",
[UNIT_FILE_MASKED_RUNTIME] = "masked-runtime",
[UNIT_FILE_STATIC] = "static",
[UNIT_FILE_DISABLED] = "disabled",
[UNIT_FILE_INDIRECT] = "indirect",
[UNIT_FILE_GENERATED] = "generated",
[UNIT_FILE_TRANSIENT] = "transient",
[UNIT_FILE_BAD] = "bad",
};
DEFINE_STRING_TABLE_LOOKUP(unit_file_state, UnitFileState);
static const char* const unit_file_change_type_table[_UNIT_FILE_CHANGE_TYPE_MAX] = {
[UNIT_FILE_SYMLINK] = "symlink",
[UNIT_FILE_UNLINK] = "unlink",
[UNIT_FILE_IS_MASKED] = "masked",
[UNIT_FILE_IS_DANGLING] = "dangling",
};
DEFINE_STRING_TABLE_LOOKUP(unit_file_change_type, UnitFileChangeType);
static const char* const unit_file_preset_mode_table[_UNIT_FILE_PRESET_MAX] = {
[UNIT_FILE_PRESET_FULL] = "full",
[UNIT_FILE_PRESET_ENABLE_ONLY] = "enable-only",
[UNIT_FILE_PRESET_DISABLE_ONLY] = "disable-only",
};
DEFINE_STRING_TABLE_LOOKUP(unit_file_preset_mode, UnitFilePresetMode);
| gpl-2.0 |
srwiser/ssctube | wp-content/plugins/indeed-social-media_v5.1/admin/tabs/isb.php | 1740 | <div class="metabox-holder indeed">
<div class="stuffbox">
<div class="ism-top-message"><b>"Share Bar"</b> - is a special Social Icons Display on the top of the page when the visitor scroll down.</div>
</div>
<div class="stuffbox">
<h3>
<label style="font-size:16px;">
AddOn Status
</label>
</h3>
<div class="inside">
<div class="submit" style="float:left; width:80%;">
This AddOn is not installed on your system. To use this section you need to install and activate the "Social Share Bar Display" AddOn.
</div>
<div class="submit" style="float:left; width:20%; text-align:center;">
</div>
<div class="clear"></div>
</div>
</div>
<div class="stuffbox">
<h3>
<label style="font-size:16px;">
AddOn Details
</label>
</h3>
<div class="inside">
<div class="ism_not_item">
<?php
if($_GET['tab'] == 'isb'){
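/* Editorial note (assumption, not in the original): this block screen-scrapes the CodeCanyon
item page with file_get_contents() and regexes; it will break silently if the remote
markup changes or if allow_url_fopen is disabled. */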
$url = 'http://codecanyon.net/item/social-share-on-images-addon-wordpress/9719076';
$html = file_get_contents($url);
$get1 = explode( '<div class="item-preview">' , $html );
$get2 = explode( '</div>' , $get1[1] );
preg_match_all('/<img.*?>/', $get2[0], $out);
if(isset($out) && count($out) > 0){
foreach($out as $value){
echo '<div class="top-preview">'.$value[0].'</div>';
}
}
$get3 = explode( '<div class="user-html">' , $html );
$get4 = explode( '</div>' , $get3[1] );
preg_match_all('/<img.*?>/', $get4[0], $images);
if(isset($images) && count($images) > 0){
foreach($images as $img){
foreach($img as $value){
if (strpos($value,'preview') === false && strpos($value,'button') === false)
echo $value;
}
}
}
}
?>
</div>
<div class="clear"></div>
</div>
</div>
</div>
</div> | gpl-2.0 |
marco-c/wp-web-push | tests/test-utils.php | 112 | <?php
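// Test helper: asks the mock push server (assumed to listen on localhost:55555 and to
// return a plain integer body) how many notifications have been sent so far.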
function getSentNotificationNum() {
return intval(file_get_contents('http://localhost:55555/'));
}
?>
| gpl-2.0 |
zaljayo/TestProject | lib_editor/js/trex/tool/tabletemplate.js | 3877 | /**
* @fileoverview
* Source for the '서식' (table format/template) tool.
*
*/
TrexConfig.addTool(
"tabletemplate",
{
sync: _FALSE,
status: _TRUE,
rows: 5,
cols: 9,
options: [
{ label: 'image', data: 1 , klass: 'tx-tabletemplate-1' },
{ label: 'image', data: 2 , klass: 'tx-tabletemplate-2' },
{ label: 'image', data: 3 , klass: 'tx-tabletemplate-3' },
{ label: 'image', data: 4 , klass: 'tx-tabletemplate-4' },
{ label: 'image', data: 5 , klass: 'tx-tabletemplate-5' },
{ label: 'image', data: 6 , klass: 'tx-tabletemplate-6' },
{ label: 'image', data: 7 , klass: 'tx-tabletemplate-7' },
{ label: 'image', data: 8 , klass: 'tx-tabletemplate-8' },
{ label: 'image', data: 9 , klass: 'tx-tabletemplate-9' },
{ label: 'image', data: 10 , klass: 'tx-tabletemplate-10' },
{ label: 'image', data: 11 , klass: 'tx-tabletemplate-11' },
{ label: 'image', data: 12 , klass: 'tx-tabletemplate-12' },
{ label: 'image', data: 13 , klass: 'tx-tabletemplate-13' },
{ label: 'image', data: 14 , klass: 'tx-tabletemplate-14' },
{ label: 'image', data: 15 , klass: 'tx-tabletemplate-15' },
{ label: 'image', data: 16 , klass: 'tx-tabletemplate-16' },
{ label: 'image', data: 17 , klass: 'tx-tabletemplate-17' },
{ label: 'image', data: 18 , klass: 'tx-tabletemplate-18' },
{ label: 'image', data: 19 , klass: 'tx-tabletemplate-19' },
{ label: 'image', data: 20 , klass: 'tx-tabletemplate-20' },
{ label: 'image', data: 21 , klass: 'tx-tabletemplate-21' },
{ label: 'image', data: 22 , klass: 'tx-tabletemplate-22' },
{ label: 'image', data: 23 , klass: 'tx-tabletemplate-23' },
{ label: 'image', data: 24 , klass: 'tx-tabletemplate-24' },
{ label: 'image', data: 25 , klass: 'tx-tabletemplate-25' },
{ label: 'image', data: 26 , klass: 'tx-tabletemplate-26' },
{ label: 'image', data: 27 , klass: 'tx-tabletemplate-27' },
{ label: 'image', data: 28 , klass: 'tx-tabletemplate-28' },
{ label: 'image', data: 29 , klass: 'tx-tabletemplate-29' },
{ label: 'image', data: 30 , klass: 'tx-tabletemplate-30' },
{ label: 'image', data: 31 , klass: 'tx-tabletemplate-31' },
{ label: 'image', data: 32 , klass: 'tx-tabletemplate-32' },
{ label: 'image', data: 33 , klass: 'tx-tabletemplate-33' },
{ label: 'image', data: 34 , klass: 'tx-tabletemplate-34' },
{ label: 'image', data: 35 , klass: 'tx-tabletemplate-35' },
{ label: 'image', data: 36 , klass: 'tx-tabletemplate-36' },
{ label: 'image', data: 37 , klass: 'tx-tabletemplate-37' },
{ label: 'image', data: 38 , klass: 'tx-tabletemplate-38' },
{ label: 'image', data: 39 , klass: 'tx-tabletemplate-39' },
{ label: 'image', data: 40 , klass: 'tx-tabletemplate-40' },
{ label: 'image', data: 41 , klass: 'tx-tabletemplate-41' },
{ label: 'image', data: 42 , klass: 'tx-tabletemplate-42' },
{ label: 'image', data: 43 , klass: 'tx-tabletemplate-43' },
{ label: 'image', data: 44 , klass: 'tx-tabletemplate-44' },
{ label: 'image', data: 45 , klass: 'tx-tabletemplate-45' }
]
}
);
Trex.Tool.Tabletemplate = Trex.Class.create({
$const: {
__Identity: 'tabletemplate'
},
$extend: Trex.Tool,
oninitialized: function(config) {
var _tool = this;
var _canvas = this.canvas;
var _map = {};
config.options.each(function(option) {
_map[option.data] = {
type: option.type
};
});
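/* Note (editorial): the options above define no "type" field, so the _map entries hold
undefined "type" values; the handler below only uses _map as an existence check for
valid template ids. */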
var _toolHandler = function(data) {
if(!_map[data]) {
return;
}
var _table = _NULL;
_canvas.execute(function(processor) {
if (processor.table) {
_table = processor.findNode('table');
processor.table.setTemplateStyle(_table, data);
}
});
};
/* button & menu weave */
this.weave.bind(this)(
/* button */
new Trex.Button(this.buttonCfg),
/* menu */
new Trex.Menu.List(this.menuCfg),
/* handler */
_toolHandler
);
}
});
| gpl-2.0 |
andreapoggi/Mir | src/Mir/lib/Mir/R/Doc/Web.pm | 1254 | package Mir::R::Doc::Web;
#============================================================= -*-perl-*-
=head1 NAME
Mir::R::Doc::Web - a role for every document on the web
=head1 VERSION
0.01
=cut
our $VERSION='0.01';
=head1 SYNOPSIS
use Something;
with 'Mir::R::Doc::Web';
my $o = Something->new();
$o->url( '...' );
$o->format( 'Html' ); # the format (possibly) identifies a sub-role...
=head1 DESCRIPTION
This role adds the properties and characteristics
typical of an object on the web.
=head1 AUTHOR
Marco Masetti (marco.masetti @ softeco.it )
=head1 LICENCE AND COPYRIGHT
Copyright (c) 2015 Marco Masetti (marco.masetti at softeco.it). All rights reserved.
This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See perldoc perlartistic.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
=head1 SUBROUTINES/METHODS
=cut
#========================================================================
use Moose::Role;
with 'Mir::R::Doc';
has 'url' => ( is => 'rw', isa => 'Str' );
has 'format' => ( is => 'rw', isa => 'Str' );
1;
| gpl-2.0 |
empeg/empeg-hijack | fs/reiserfs/utils/mkreiserfs/mkreiserfs.c | 17438 | /*
* Copyright 1996, 1997, 1998, 1999 Hans Reiser
*/
/* mkreiserfs is very simple. It supports only 4 and 8K blocks. It skips
the first REISERFS_DISK_OFFSET_IN_BYTES of the device, and then writes the super
block, the needed amount of bitmap blocks (this amount is calculated
based on the file system size), and the root block. The bitmap policy is
primitive: it assumes that the device has no unreadable blocks, and it
occupies the first blocks for the super, bitmap and root blocks. Bitmap
blocks are interleaved across the disk, mainly to make resizing faster. */
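/* Typical invocations (illustrative device name; they match the usage string defined below):

mkreiserfs /dev/hda2 # size the filesystem from the device
mkreiserfs /dev/hda2 400000 # or give an explicit block count
*/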
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <asm/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/vfs.h>
#include <time.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include "inode.h"
#include "io.h"
#include "sb.h"
#include "misc.h"
#include "reiserfs_fs.h"
#define print_usage_and_exit() die ("Usage: %s [ -f ] device [block-count]\n\n", argv[0])
#define DEFAULT_BLOCKSIZE 4096
#define MIN_BLOCK_AMOUNT (100+JOURNAL_BLOCK_COUNT+RESERVED_FOR_PRESERVE_LIST)
struct buffer_head * g_sb_bh;
struct buffer_head * g_bitmap_bh;
struct buffer_head * g_rb_bh;
struct buffer_head * g_journal_bh ;
int g_block_size = DEFAULT_BLOCKSIZE;
unsigned long int g_block_number;
/* Given a file descriptor and an offset, check whether the offset is
a valid offset for the file - return 0 if it isn't valid or 1 if it
is */
int valid_offset( int fd, loff_t offset )
{
char ch;
if (reiserfs_llseek (fd, offset, 0) < 0)
return 0;
if (read (fd, &ch, 1) < 1)
return 0;
return 1;
}
/* calculates number of blocks on device
*/
unsigned long count_blocks (char * filename, int blocksize)
{
loff_t high, low;
int fd;
fd = open (filename, O_RDONLY);
if (fd < 0)
die ("count_blocks: open failed (%s)", strerror (errno));
#ifdef BLKGETSIZE
{
long size;
if (ioctl (fd, BLKGETSIZE, &size) >= 0) {
close (fd);
return size / (blocksize / 512);
}
}
#endif
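/* Fallback when BLKGETSIZE is unavailable: grow "high" exponentially until the offset
becomes unreadable, then binary-search for the last readable offset. */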
low = 0;
for( high = 1; valid_offset (fd, high); high *= 2 )
low = high;
while (low < high - 1) {
const loff_t mid = ( low + high ) / 2;
if (valid_offset (fd, mid))
low = mid;
else
high = mid;
}
valid_offset (fd, 0);
close (fd);
return (low + 1) / (blocksize);
}
/* form super block */
void make_super_block (int dev)
{
struct reiserfs_super_block * sb;
unsigned long * oids;
if (SB_SIZE > g_block_size)
die ("mkreiserfs: blocksize (%d) too small", g_block_size);
/* get buffer for super block */
g_sb_bh = getblk (dev, REISERFS_DISK_OFFSET_IN_BYTES / g_block_size, g_block_size);
/* sb = (struct reiserfs_super_block *)g_cp_super_block;*/
sb = (struct reiserfs_super_block *)g_sb_bh->b_data;
sb->s_blocksize = g_block_size; /* block size in bytes */
sb->s_block_count = g_block_number; /* how many block reiserfs must occupy */
sb->s_state = REISERFS_VALID_FS;
sb->s_tree_height = 2;
sb->s_journal_dev = 0 ;
sb->s_orig_journal_size = JOURNAL_BLOCK_COUNT ;
sb->s_journal_trans_max = 0 ;
sb->s_journal_block_count = 0 ;
sb->s_journal_max_batch = 0 ;
sb->s_journal_max_commit_age = 0 ;
sb->s_journal_max_trans_age = 0 ;
sb->s_bmap_nr = g_block_number / (g_block_size * 8) + ((g_block_number % (g_block_size * 8)) ? 1 : 0);
memcpy (sb->s_magic, REISERFS_SUPER_MAGIC_STRING, sizeof (REISERFS_SUPER_MAGIC_STRING));
/* initialize object map */
oids = (unsigned long *)(sb + 1);
oids[0] = 1;
oids[1] = REISERFS_ROOT_OBJECTID + 1; /* objectids > REISERFS_ROOT_OBJECTID are free */
sb->s_oid_cursize = 2;
/* max size must be even */
sb->s_oid_maxsize = (g_block_size - SB_SIZE) / sizeof(unsigned long) / 2 * 2;
mark_buffer_dirty (g_sb_bh, 0);
mark_buffer_uptodate (g_sb_bh, 0);
return;
}
void zero_journal_blocks(int dev, int start, int len) {
int i ;
struct buffer_head *bh ;
int done = 0;
printf ("Initializing journal - "); fflush (stdout);
for (i = 0 ; i < len ; i++) {
print_how_far (&done, len);
bh = getblk (dev, start + i, g_block_size) ;
memset(bh->b_data, 0, g_block_size) ;
mark_buffer_dirty(bh,0) ;
mark_buffer_uptodate(bh,0) ;
bwrite (bh);
brelse(bh) ;
}
printf ("\n"); fflush (stdout);
}
/* this only sets the first few bits in the bitmap block and fills in the
not-yet-initialized fields of the super block (root block and bitmap block numbers)
*/
void make_bitmap ()
{
struct reiserfs_super_block * sb = (struct reiserfs_super_block *)g_sb_bh->b_data;
int i, j;
/* get buffer for bitmap block */
g_bitmap_bh = getblk (g_sb_bh->b_dev, g_sb_bh->b_blocknr + 1, g_sb_bh->b_size);
/* mark, that first 8K of device is busy */
for (i = 0; i < REISERFS_DISK_OFFSET_IN_BYTES / g_block_size; i ++)
set_bit (i, g_bitmap_bh->b_data);
/* mark that super block is busy */
set_bit (i++, g_bitmap_bh->b_data);
/* mark first bitmap block as busy */
set_bit (i ++, g_bitmap_bh->b_data);
/* sb->s_journal_block = g_block_number - JOURNAL_BLOCK_COUNT ; */ /* journal goes at end of disk */
sb->s_journal_block = i;
/* mark journal blocks as busy BUG! we need to check to make sure journal will fit in the first bitmap block */
for (j = 0 ; j < (JOURNAL_BLOCK_COUNT + 1); j++) /* the descriptor block goes after the journal */
set_bit (i ++, g_bitmap_bh->b_data);
/* and tree root is busy */
set_bit (i, g_bitmap_bh->b_data);
sb->s_root_block = i;
sb->s_free_blocks = sb->s_block_count - i - 1 ;
/* count bitmap blocks not resides in first s_blocksize blocks */
sb->s_free_blocks -= sb->s_bmap_nr - 1;
mark_buffer_dirty (g_bitmap_bh, 0);
mark_buffer_uptodate (g_bitmap_bh, 0);
mark_buffer_dirty (g_sb_bh, 0);
return;
}
/* form the root block of the tree (the block head, the item head, the
root directory) */
void make_root_block ()
{
struct reiserfs_super_block * sb = (struct reiserfs_super_block *)g_sb_bh->b_data;
char * rb;
struct block_head * blkh;
struct item_head * ih;
struct stat_data * sd;
struct reiserfs_de_head * deh;
struct key maxkey = {0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff};
/* get memory for root block */
/* g_cp_root_block = getmem (g_block_size);*/
/* no more cautious bitmap, kill the *2 */
/* g_rb_bh = getblk (g_sb_bh->b_dev, g_sb_bh->b_blocknr + sb->s_bmap_nr * 2 + 1, g_sb_bh->b_size); */
g_rb_bh = getblk (g_sb_bh->b_dev, sb->s_root_block, sb->s_blocksize);
rb = g_rb_bh->b_data;
/* block head */
blkh = (struct block_head *)rb;
blkh->blk_level = DISK_LEAF_NODE_LEVEL;
blkh->blk_nr_item = 0;
blkh->blk_free_space = sb->s_blocksize - BLKH_SIZE;
memcpy (&blkh->blk_right_delim_key, &maxkey, KEY_SIZE);
/* first item is stat data item of root directory */
ih = (struct item_head *)(blkh + 1);
ih->ih_key.k_dir_id = REISERFS_ROOT_PARENT_OBJECTID;
ih->ih_key.k_objectid = REISERFS_ROOT_OBJECTID;
ih->ih_key.k_offset = SD_OFFSET;
ih->ih_key.k_uniqueness = TYPE_STAT_DATA;
ih->ih_item_len = SD_SIZE;
ih->ih_item_location = sb->s_blocksize - ih->ih_item_len;
ih->u.ih_free_space = MAX_US_INT;
ih->ih_reserved = 0;
/* fill stat data */
sd = (struct stat_data *)(rb + ih->ih_item_location);
sd->sd_mode = S_IFDIR + 0755;
sd->sd_nlink = 3;
sd->sd_uid = 0; /*??*/
sd->sd_gid = 0; /*??*/
sd->sd_size = EMPTY_DIR_SIZE;
sd->sd_atime = sd->sd_ctime = sd->sd_mtime = time (NULL);
sd->u.sd_blocks = 0; /*??*/
sd->sd_first_direct_byte = MAX_UL_INT; /*??*/
blkh->blk_nr_item ++;
blkh->blk_free_space -= (IH_SIZE + ih->ih_item_len);
/* second item is root directory item, containing "." and ".." */
ih ++;
ih->ih_key.k_dir_id = REISERFS_ROOT_PARENT_OBJECTID;
ih->ih_key.k_objectid = REISERFS_ROOT_OBJECTID;
ih->ih_key.k_offset = DOT_OFFSET;
ih->ih_key.k_uniqueness = DIRENTRY_UNIQUENESS/*DOT_UNIQUENESS*/;
#ifdef REISERFS_ALIGNED
ih->ih_item_len = DEH_SIZE * 2 + 4 + 4;
#else
ih->ih_item_len = DEH_SIZE * 2 + strlen (".") + strlen ("..")/* + sizeof (unsigned long)*/;
#endif
ih->ih_item_location = (ih-1)->ih_item_location - ih->ih_item_len;
ih->u.ih_entry_count = 2;
ih->ih_reserved = 0;
deh = (struct reiserfs_de_head *)(rb + ih->ih_item_location);
/* "." */
deh[0].deh_offset = DOT_OFFSET;
/* deh[0].deh_uniqueness = DOT_UNIQUENESS;*/
deh[0].deh_dir_id = ih->ih_key.k_dir_id;
deh[0].deh_objectid = ih->ih_key.k_objectid;
#ifdef REISERFS_ALIGNED
deh[0].deh_location = ih->ih_item_len - 4;
#else
deh[0].deh_location = ih->ih_item_len - strlen (".");
#endif
/*mark_de_without_sd (&(deh[0]));*/
clear_bit (DEH_Statdata, &(deh[0].deh_state));
/*mark_de_with_directory_id (&(deh[0]));*/
/* clear_bit (DEH_AdditionalKeyComponent, &(deh[0].deh_state));*/
/*mark_de_visible (&(deh[0]));*/
set_bit (DEH_Visible, &(deh[0].deh_state));
/* ".." */
deh[1].deh_offset = DOT_DOT_OFFSET;
/* deh[1].deh_uniqueness = DOT_DOT_UNIQUENESS;*/
deh[1].deh_dir_id = 0;
deh[1].deh_objectid = REISERFS_ROOT_PARENT_OBJECTID; /* as key of root directory is [REISERFS_ROOT_PARENT_OBJECTID,
REISERFS_ROOT_OBJECTID],
so objectid of root directory
parent direcotry is REISERFS_ROOT_PARENT_OBJECTID */
#ifdef REISERFS_ALIGNED
deh[1].deh_location = deh[0].deh_location - 4;
#else
deh[1].deh_location = deh[0].deh_location - strlen ("..");
#endif
/*mark_de_without_sd (&(deh[1]));*/
clear_bit (DEH_Statdata, &(deh[1].deh_state));
/*mark_de_with_directory_id (&(deh[1]));*/
/*set_bit (DEH_AdditionalKeyComponent, &(deh[1].deh_state));*/
/*mark_de_visible (&(deh[1]));*/
set_bit (DEH_Visible, &(deh[1].deh_state));
#ifdef REISERFS_ALIGNED
strncpy(rb + ih->ih_item_location + deh[0].deh_location, ".", 4);
strncpy(rb + ih->ih_item_location + deh[1].deh_location, "..", 4);
#else
memcpy (rb + ih->ih_item_location + deh[0].deh_location, ".", strlen ("."));
memcpy (rb + ih->ih_item_location + deh[1].deh_location, "..", strlen (".."));
#endif
/* objectid of parent directory of object pointed by ".." */
/**(unsigned long *)(rb + ih->ih_item_location + deh[1].deh_location + strlen ("..")) = 0;*/
blkh->blk_nr_item ++;
blkh->blk_free_space -= (IH_SIZE + ih->ih_item_len);
mark_buffer_dirty (g_rb_bh, 0);
mark_buffer_uptodate (g_rb_bh, 0);
return;
}
/*
* write the super block, the bitmap blocks and the root of the tree
*/
void write_super_and_root_blocks ()
{
struct reiserfs_super_block * sb = (struct reiserfs_super_block *)g_sb_bh->b_data;
int i;
zero_journal_blocks(g_sb_bh->b_dev, sb->s_journal_block, JOURNAL_BLOCK_COUNT + 1) ;
/* super block */
bwrite (g_sb_bh);
/* bitmap blocks */
for (i = 0; i < sb->s_bmap_nr; i ++) {
if (i != 0) {
g_bitmap_bh->b_blocknr = i * sb->s_blocksize * 8;
memset (g_bitmap_bh->b_data, 0, g_bitmap_bh->b_size);
set_bit (0,g_bitmap_bh->b_data);
}
if (i == sb->s_bmap_nr - 1) {
int j;
/* fill unused part of last bitmap block with 1s */
if (sb->s_block_count % (sb->s_blocksize * 8))
for (j = sb->s_block_count % (sb->s_blocksize * 8); j < sb->s_blocksize * 8; j ++) {
set_bit (j, g_bitmap_bh->b_data);
}
}
/* write true bitmap */
mark_buffer_dirty (g_bitmap_bh, 0);
bwrite (g_bitmap_bh);
#if 0
/* write cautious bitmap */
g_bitmap_bh->b_blocknr += sb->s_bmap_nr;
mark_buffer_dirty (g_bitmap_bh, 0);
bwrite (g_bitmap_bh);
g_bitmap_bh->b_blocknr -= sb->s_bmap_nr;
#endif
}
/* root block */
bwrite (g_rb_bh);
brelse (g_rb_bh);
brelse (g_bitmap_bh);
brelse (g_sb_bh);
}
char buf[20];
#include <linux/kdev_t.h>
char * devname (int dev)
{
struct stat st;
if (fstat (dev, &st) != 0)
die ("stat failed");
sprintf (buf, "0x%x:0x%x", MAJOR((int)st.st_rdev), MINOR((int)st.st_rdev));
return buf;
}
void report (void)
{
struct reiserfs_super_block * sb = (struct reiserfs_super_block *)g_sb_bh->b_data;
unsigned int i;
printf ("Block size %d bytes\n", sb->s_blocksize);
printf ("Block count %d\n", sb->s_block_count);
printf ("First %ld blocks skipped\n", g_sb_bh->b_blocknr);
printf ("Super block is in %ld\n", g_sb_bh->b_blocknr);
printf ("Bitmap blocks are : \n\t%ld", g_bitmap_bh->b_blocknr);
for (i = 1; i < sb->s_bmap_nr; i ++) {
printf (", %d", i * sb->s_blocksize * 8);
}
printf ("\nJournal size %d (blocks %d-%d of device %s)\n",
JOURNAL_BLOCK_COUNT, sb->s_journal_block,
sb->s_journal_block + JOURNAL_BLOCK_COUNT, devname (g_sb_bh->b_dev));
printf ("Root block %u\n", sb->s_root_block);
printf ("Used %d blocks\n", sb->s_block_count - sb->s_free_blocks);
fflush (stdout);
}
/* discard the first 2k block of the partition. This should be enough to keep
mount from seeing ext2 (and others) on a mkreiserfs'd partition;
NOW it clears the first 2k block to avoid a wrong vfat mount
(vfat searches for its "super block" in the first 512 bytes).
We also clear the original old journaled superblock (8k offset).
*/
void invalidate_other_formats (int dev)
{
struct buffer_head * bh;
bh = getblk (dev, 0, 2048);
mark_buffer_uptodate (bh, 1);
mark_buffer_dirty (bh, 1);
bwrite (bh);
brelse (bh);
bh = getblk(dev, REISERFS_OLD_DISK_OFFSET_IN_BYTES / 1024, 1024) ;
if (!bh) {
printf("Unable to get block to clear the old reiserfs superblock\n") ;
return ;
}
mark_buffer_uptodate (bh, 1);
mark_buffer_dirty (bh, 1);
bwrite (bh);
brelse (bh);
}
int main (int argc, char **argv)
{
char *tmp;
int dev;
int force = 0;
struct stat statbuf;
char * device_name;
char c;
printf ("\n\n<-----------MKREISERFS, 1999----------->\n%s\n",
reiserfs_get_version_string());
#if 1
if (0) {
/* ???? */
getblk (0,0,0);
iput (0);
}
#endif
if (argc < 2)
print_usage_and_exit ();
/* init_buffer_mem ();*/
while ( ( c = getopt( argc, argv, "f" ) ) != EOF )
switch( c )
{
case 'f' : /* force if file is not a block device */
force = 1;
break;
#if 0 /* -b is not supported with the journal code */
case 'b' : /* -k n - where n is 1,2 or 4 */
g_block_size = (int) strtol (optarg, &tmp, 0);
if ( *tmp || ( g_block_size != 1 && g_block_size != 2 && g_block_size != 4 ))
die ("mkreiserfs: bad block size : %s\n", optarg);
g_block_size *= 1024;
break;
#endif /* -b */
default :
print_usage_and_exit ();
}
device_name = argv [optind];
/* get block number for file system */
if (optind == argc - 2) {
g_block_number = strtol (argv[optind + 1], &tmp, 0);
if (*tmp == 0) { /* The string is integer */
if (g_block_number > count_blocks (device_name, g_block_size))
die ("mkreiserfs: specified block number (%d) is too high", g_block_number);
/* else if (g_block_number < MIN_BLOCK_AMOUNT)
die ("mkreiserfs: specified block number (%d) is too low", g_block_number);
*/
} else {
die ("mkreiserfs: bad block count : %s\n", argv[optind + 1]);
}
} else
if (optind == argc - 1) {
/* number of blocks is not specified */
g_block_number = count_blocks (device_name, g_block_size);
tmp = "";
} else
print_usage_and_exit ();
g_block_number = g_block_number / 8 * 8;
/* if (*tmp || g_block_number % 8 || (g_block_number == 0))
/ * block amount specified is not a valid integer * /
die ("mkreiserfs: bad block count : %s\n", argv[optind + 1]);
*/
if (g_block_number < MIN_BLOCK_AMOUNT)
die ("mkreiserfs: block number %d (truncated to n*8) is too low",
g_block_number);
if (is_mounted (device_name))
die ("mkreiserfs: '%s' contains a mounted file system\n", device_name);
/* open_device will die if it could not open device */
dev = open (device_name, O_RDWR);
if (dev == -1)
die ("mkreiserfs: can not open '%s': %s", device_name, strerror (errno));
if (fstat (dev, &statbuf) < 0)
die ("mkreiserfs: unable to stat %s", device_name);
if (!S_ISBLK (statbuf.st_mode) && ( force == 1 ))
die ("mkreiserfs: '%s (%o)' is not a block device", device_name, statbuf.st_mode);
else /* Ignore any 'full' fixed disk devices */
if ( statbuf.st_rdev == 0x0300 || statbuf.st_rdev == 0x0340
|| statbuf.st_rdev == 0x0400 || statbuf.st_rdev == 0x0410
|| statbuf.st_rdev == 0x0420 || statbuf.st_rdev == 0x0430
|| statbuf.st_rdev == 0x0d00 || statbuf.st_rdev == 0x0d40 )
/* ???? */
die ("mkreiserfs: will not try to make filesystem on '%s'", device_name);
/* these fill buffers (super block, first bitmap, root block) with
reiserfs structures */
make_super_block (dev);
make_bitmap ();
make_root_block ();
report ();
printf ("ATTENTION: ALL DATA WILL BE LOST ON '%s'! (y/n)", device_name);
c = getchar ();
if (c != 'y' && c != 'Y')
die ("mkreiserfs: Disk was not formatted");
invalidate_other_formats (dev);
write_super_and_root_blocks ();
check_and_free_mem ();
printf ("Syncing.."); fflush (stdout);
close(dev) ;
sync ();
printf ("\n\nReiserFS core development sponsored by SuSE Labs (suse.com)\n\nJournaling sponsored by MP3.com.\n\nItem handlers sponsored by Ecila.com\n\nTo learn about the programmers and ReiserFS, please go to\nhttp://www.devlinux.com/namesys\n\nHave fun.\n\n"); fflush (stdout);
return 0;
}
| gpl-2.0 |